grsecurity-3.1-3.19.5-201504270827.patch
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 9de9813..1462492 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -3,9 +3,11 @@
6 *.bc
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -15,6 +17,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -51,14 +54,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -72,9 +78,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -83,6 +91,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -95,32 +104,40 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89 crc32table.h*
90 cscope.*
91 defkeymap.c
92+devicetable-offsets.h
93 devlist.h*
94 dnotify_test
95 docproc
96 dslm
97+dtc-lexer.lex.c
98 elf2ecoff
99 elfconfig.h*
100 evergreen_reg_safe.h
101+exception_policy.conf
102 fixdep
103 flask.h
104 fore200e_mkfirm
105@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
106 gconf
107 gconf.glade.h
108 gen-devlist
109+gen-kdb_cmds.c
110 gen_crc32table
111 gen_init_cpio
112 generated
113 genheaders
114 genksyms
115 *_gray256.c
116+hash
117+hid-example
118 hpet_example
119 hugepage-mmap
120 hugepage-shm
121@@ -148,14 +168,14 @@ int32.c
122 int4.c
123 int8.c
124 kallsyms
125-kconfig
126+kern_constants.h
127 keywords.c
128 ksym.c*
129 ksym.h*
130 kxgettext
131 lex.c
132 lex.*.c
133-linux
134+lib1funcs.S
135 logo_*.c
136 logo_*_clut224.c
137 logo_*_mono.c
138@@ -165,14 +185,15 @@ mach-types.h
139 machtypes.h
140 map
141 map_hugetlb
142-media
143 mconf
144+mdp
145 miboot*
146 mk_elfconfig
147 mkboot
148 mkbugboot
149 mkcpustr
150 mkdep
151+mkpiggy
152 mkprep
153 mkregtable
154 mktables
155@@ -188,6 +209,8 @@ oui.c*
156 page-types
157 parse.c
158 parse.h
159+parse-events*
160+pasyms.h
161 patches*
162 pca200e.bin
163 pca200e_ecd.bin2
164@@ -197,6 +220,7 @@ perf-archive
165 piggyback
166 piggy.gzip
167 piggy.S
168+pmu-*
169 pnmtologo
170 ppc_defs.h*
171 pss_boot.h
172@@ -206,7 +230,12 @@ r200_reg_safe.h
173 r300_reg_safe.h
174 r420_reg_safe.h
175 r600_reg_safe.h
176+randomize_layout_hash.h
177+randomize_layout_seed.h
178+realmode.lds
179+realmode.relocs
180 recordmcount
181+regdb.c
182 relocs
183 rlim_names.h
184 rn50_reg_safe.h
185@@ -216,8 +245,12 @@ series
186 setup
187 setup.bin
188 setup.elf
189+signing_key*
190+size_overflow_hash.h
191 sImage
192+slabinfo
193 sm_tbl*
194+sortextable
195 split-include
196 syscalltab.h
197 tables.c
198@@ -227,6 +260,7 @@ tftpboot.img
199 timeconst.h
200 times.h*
201 trix_boot.h
202+user_constants.h
203 utsrelease.h*
204 vdso-syms.lds
205 vdso.lds
206@@ -238,13 +272,17 @@ vdso32.lds
207 vdso32.so.dbg
208 vdso64.lds
209 vdso64.so.dbg
210+vdsox32.lds
211+vdsox32-syms.lds
212 version.h*
213 vmImage
214 vmlinux
215 vmlinux-*
216 vmlinux.aout
217 vmlinux.bin.all
218+vmlinux.bin.bz2
219 vmlinux.lds
220+vmlinux.relocs
221 vmlinuz
222 voffset.h
223 vsyscall.lds
224@@ -252,9 +290,12 @@ vsyscall_32.lds
225 wanxlfw.inc
226 uImage
227 unifdef
228+utsrelease.h
229 wakeup.bin
230 wakeup.elf
231 wakeup.lds
232+x509*
233 zImage*
234 zconf.hash.c
235+zconf.lex.c
236 zoffset.h
237diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
238index a311db8..415b28c 100644
239--- a/Documentation/kbuild/makefiles.txt
240+++ b/Documentation/kbuild/makefiles.txt
241@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
242 === 4 Host Program support
243 --- 4.1 Simple Host Program
244 --- 4.2 Composite Host Programs
245- --- 4.3 Using C++ for host programs
246- --- 4.4 Controlling compiler options for host programs
247- --- 4.5 When host programs are actually built
248- --- 4.6 Using hostprogs-$(CONFIG_FOO)
249+ --- 4.3 Defining shared libraries
250+ --- 4.4 Using C++ for host programs
251+ --- 4.5 Controlling compiler options for host programs
252+ --- 4.6 When host programs are actually built
253+ --- 4.7 Using hostprogs-$(CONFIG_FOO)
254
255 === 5 Kbuild clean infrastructure
256
257@@ -642,7 +643,29 @@ Both possibilities are described in the following.
258 Finally, the two .o files are linked to the executable, lxdialog.
259 Note: The syntax <executable>-y is not permitted for host-programs.
260
261---- 4.3 Using C++ for host programs
262+--- 4.3 Defining shared libraries
263+
264+ Objects with extension .so are considered shared libraries, and
265+ will be compiled as position independent objects.
266+ Kbuild provides support for shared libraries, but the usage
267+ shall be restricted.
268+ In the following example the libkconfig.so shared library is used
269+ to link the executable conf.
270+
271+ Example:
272+ #scripts/kconfig/Makefile
273+ hostprogs-y := conf
274+ conf-objs := conf.o libkconfig.so
275+ libkconfig-objs := expr.o type.o
276+
277+ Shared libraries always require a corresponding -objs line, and
278+ in the example above the shared library libkconfig is composed by
279+ the two objects expr.o and type.o.
280+ expr.o and type.o will be built as position independent code and
281+ linked as a shared library libkconfig.so. C++ is not supported for
282+ shared libraries.
283+
284+--- 4.4 Using C++ for host programs
285
286 kbuild offers support for host programs written in C++. This was
287 introduced solely to support kconfig, and is not recommended
288@@ -665,7 +688,7 @@ Both possibilities are described in the following.
289 qconf-cxxobjs := qconf.o
290 qconf-objs := check.o
291
292---- 4.4 Controlling compiler options for host programs
293+--- 4.5 Controlling compiler options for host programs
294
295 When compiling host programs, it is possible to set specific flags.
296 The programs will always be compiled utilising $(HOSTCC) passed
297@@ -693,7 +716,7 @@ Both possibilities are described in the following.
298 When linking qconf, it will be passed the extra option
299 "-L$(QTDIR)/lib".
300
301---- 4.5 When host programs are actually built
302+--- 4.6 When host programs are actually built
303
304 Kbuild will only build host-programs when they are referenced
305 as a prerequisite.
306@@ -724,7 +747,7 @@ Both possibilities are described in the following.
307 This will tell kbuild to build lxdialog even if not referenced in
308 any rule.
309
310---- 4.6 Using hostprogs-$(CONFIG_FOO)
311+--- 4.7 Using hostprogs-$(CONFIG_FOO)
312
313 A typical pattern in a Kbuild file looks like this:
314
315diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
316index 176d4fe..6eabd3c 100644
317--- a/Documentation/kernel-parameters.txt
318+++ b/Documentation/kernel-parameters.txt
319@@ -1191,6 +1191,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
320 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
321 Default: 1024
322
323+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
324+ ignore grsecurity's /proc restrictions
325+
326+ grsec_sysfs_restrict= Format: 0 | 1
327+ Default: 1
328+ Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
329+
330 hashdist= [KNL,NUMA] Large hashes allocated during boot
331 are distributed across NUMA nodes. Defaults on
332 for 64-bit NUMA, off otherwise.
333@@ -2283,6 +2290,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
334 noexec=on: enable non-executable mappings (default)
335 noexec=off: disable non-executable mappings
336
337+ nopcid [X86-64]
338+ Disable PCID (Process-Context IDentifier) even if it
339+ is supported by the processor.
340+
341 nosmap [X86]
342 Disable SMAP (Supervisor Mode Access Prevention)
343 even if it is supported by processor.
344@@ -2584,6 +2595,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
345 the specified number of seconds. This is to be used if
346 your oopses keep scrolling off the screen.
347
348+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
349+ virtualization environments that don't cope well with the
350+ expand down segment used by UDEREF on X86-32 or the frequent
351+ page table updates on X86-64.
352+
353+ pax_sanitize_slab=
354+ Format: { 0 | 1 | off | fast | full }
355+ Options '0' and '1' are only provided for backward
356+ compatibility, 'off' or 'fast' should be used instead.
357+ 0|off : disable slab object sanitization
358+ 1|fast: enable slab object sanitization excluding
359+ whitelisted slabs (default)
360+ full : sanitize all slabs, even the whitelisted ones
361+
362+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
363+
364+ pax_extra_latent_entropy
365+ Enable a very simple form of latent entropy extraction
366+ from the first 4GB of memory as the bootmem allocator
367+ passes the memory pages to the buddy allocator.
368+
369+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
370+ when the processor supports PCID.
371+
372 pcbit= [HW,ISDN]
373
374 pcd. [PARIDE]
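Editor's note on the pax_sanitize_slab= entry above: it is a tri-state switch with two legacy spellings ('0'/'1') kept for compatibility. As a rough illustration of the parsing it implies, here is a self-contained userspace model; the enum and helper names are invented for this sketch and are not the patch's actual symbols.

/*
 * Sketch only: models the documented 0|off / 1|fast / full semantics.
 * parse_sanitize_slab() and enum sanitize_mode are hypothetical names.
 */
#include <stdio.h>
#include <string.h>

enum sanitize_mode { SANITIZE_OFF, SANITIZE_FAST, SANITIZE_FULL };

static enum sanitize_mode parse_sanitize_slab(const char *arg)
{
	/* '0'/'1' are accepted for backward compatibility only */
	if (!strcmp(arg, "0") || !strcmp(arg, "off"))
		return SANITIZE_OFF;
	if (!strcmp(arg, "1") || !strcmp(arg, "fast"))
		return SANITIZE_FAST;
	if (!strcmp(arg, "full"))
		return SANITIZE_FULL;
	return SANITIZE_FAST;	/* documented default */
}

int main(void)
{
	printf("%d\n", parse_sanitize_slab("full"));	/* prints 2 */
	return 0;
}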
375diff --git a/Makefile b/Makefile
376index 633b5f0..10aa54f 100644
377--- a/Makefile
378+++ b/Makefile
379@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
380 HOSTCC = gcc
381 HOSTCXX = g++
382 HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
383-HOSTCXXFLAGS = -O2
384+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -std=gnu89 -fno-delete-null-pointer-checks
385+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
386+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
387
388 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
389 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
390@@ -446,8 +448,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
391 # Rules shared between *config targets and build targets
392
393 # Basic helpers built in scripts/
394-PHONY += scripts_basic
395-scripts_basic:
396+PHONY += scripts_basic gcc-plugins
397+scripts_basic: gcc-plugins
398 $(Q)$(MAKE) $(build)=scripts/basic
399 $(Q)rm -f .tmp_quiet_recordmcount
400
401@@ -622,6 +624,72 @@ endif
402 # Tell gcc to never replace conditional load with a non-conditional one
403 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
404
405+ifndef DISABLE_PAX_PLUGINS
406+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
407+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
408+else
409+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
410+endif
411+ifneq ($(PLUGINCC),)
412+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
413+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
414+endif
415+ifdef CONFIG_PAX_MEMORY_STACKLEAK
416+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
417+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
418+endif
419+ifdef CONFIG_KALLOCSTAT_PLUGIN
420+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
421+endif
422+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
423+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
424+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
425+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
426+endif
427+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
428+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
429+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
430+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
431+endif
432+endif
433+ifdef CONFIG_CHECKER_PLUGIN
434+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
435+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
436+endif
437+endif
438+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
439+ifdef CONFIG_PAX_SIZE_OVERFLOW
440+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
441+endif
442+ifdef CONFIG_PAX_LATENT_ENTROPY
443+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
444+endif
445+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
446+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
447+endif
448+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
449+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
450+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
451+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
452+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
453+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
454+ifeq ($(KBUILD_EXTMOD),)
455+gcc-plugins:
456+ $(Q)$(MAKE) $(build)=tools/gcc
457+else
458+gcc-plugins: ;
459+endif
460+else
461+gcc-plugins:
462+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
463+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
464+else
465+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
466+endif
467+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
468+endif
469+endif
470+
471 ifdef CONFIG_READABLE_ASM
472 # Disable optimizations that make assembler listings hard to read.
473 # reorder blocks reorders the control in the function
474@@ -714,7 +782,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
475 else
476 KBUILD_CFLAGS += -g
477 endif
478-KBUILD_AFLAGS += -Wa,-gdwarf-2
479+KBUILD_AFLAGS += -Wa,--gdwarf-2
480 endif
481 ifdef CONFIG_DEBUG_INFO_DWARF4
482 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
483@@ -879,7 +947,7 @@ export mod_sign_cmd
484
485
486 ifeq ($(KBUILD_EXTMOD),)
487-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
488+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
489
490 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
491 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
492@@ -926,6 +994,8 @@ endif
493
494 # The actual objects are generated when descending,
495 # make sure no implicit rule kicks in
496+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
497+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
498 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
499
500 # Handle descending into subdirectories listed in $(vmlinux-dirs)
501@@ -935,7 +1005,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
502 # Error messages still appears in the original language
503
504 PHONY += $(vmlinux-dirs)
505-$(vmlinux-dirs): prepare scripts
506+$(vmlinux-dirs): gcc-plugins prepare scripts
507 $(Q)$(MAKE) $(build)=$@
508
509 define filechk_kernel.release
510@@ -978,10 +1048,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
511
512 archprepare: archheaders archscripts prepare1 scripts_basic
513
514+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
515+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
516 prepare0: archprepare FORCE
517 $(Q)$(MAKE) $(build)=.
518
519 # All the preparing..
520+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
521 prepare: prepare0
522
523 # Generate some files
524@@ -1095,6 +1168,8 @@ all: modules
525 # using awk while concatenating to the final file.
526
527 PHONY += modules
528+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
529+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
530 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
531 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
532 @$(kecho) ' Building modules, stage 2.';
533@@ -1110,7 +1185,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
534
535 # Target to prepare building external modules
536 PHONY += modules_prepare
537-modules_prepare: prepare scripts
538+modules_prepare: gcc-plugins prepare scripts
539
540 # Target to install modules
541 PHONY += modules_install
542@@ -1176,7 +1251,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
543 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
544 signing_key.priv signing_key.x509 x509.genkey \
545 extra_certificates signing_key.x509.keyid \
546- signing_key.x509.signer
547+ signing_key.x509.signer \
548+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
549+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
550+ tools/gcc/randomize_layout_seed.h
551
552 # clean - Delete most, but leave enough to build external modules
553 #
554@@ -1215,7 +1293,7 @@ distclean: mrproper
555 @find $(srctree) $(RCS_FIND_IGNORE) \
556 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
557 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
558- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
559+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
560 -type f -print | xargs rm -f
561
562
563@@ -1381,6 +1459,8 @@ PHONY += $(module-dirs) modules
564 $(module-dirs): crmodverdir $(objtree)/Module.symvers
565 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
566
567+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
568+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
569 modules: $(module-dirs)
570 @$(kecho) ' Building modules, stage 2.';
571 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
572@@ -1521,17 +1601,21 @@ else
573 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
574 endif
575
576-%.s: %.c prepare scripts FORCE
577+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
578+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
579+%.s: %.c gcc-plugins prepare scripts FORCE
580 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
581 %.i: %.c prepare scripts FORCE
582 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
583-%.o: %.c prepare scripts FORCE
584+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
585+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
586+%.o: %.c gcc-plugins prepare scripts FORCE
587 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
588 %.lst: %.c prepare scripts FORCE
589 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
590-%.s: %.S prepare scripts FORCE
591+%.s: %.S gcc-plugins prepare scripts FORCE
592 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
593-%.o: %.S prepare scripts FORCE
594+%.o: %.S gcc-plugins prepare scripts FORCE
595 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
596 %.symtypes: %.c prepare scripts FORCE
597 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
598@@ -1543,11 +1627,15 @@ endif
599 $(build)=$(build-dir)
600 # Make sure the latest headers are built for Documentation
601 Documentation/: headers_install
602-%/: prepare scripts FORCE
603+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
604+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
605+%/: gcc-plugins prepare scripts FORCE
606 $(cmd_crmodverdir)
607 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
608 $(build)=$(build-dir)
609-%.ko: prepare scripts FORCE
610+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
611+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
612+%.ko: gcc-plugins prepare scripts FORCE
613 $(cmd_crmodverdir)
614 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
615 $(build)=$(build-dir) $(@:.ko=.o)
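Editor's note on the Makefile hunks above: they wire a set of gcc plugins into every kernel compile via -fplugin= and per-feature -fplugin-arg-* options, with PLUGINCC chosen by scripts/gcc-plugin.sh (the C++ host compiler for gcc >= 4.8, since gcc itself became a C++ program). For orientation, a minimal gcc plugin looks roughly like the skeleton below; it only announces itself and is not one of the PaX plugins.

/* minimal_plugin.c - bare gcc plugin skeleton (sketch) */
#include <stdio.h>
#include "gcc-plugin.h"
#include "plugin-version.h"

int plugin_is_GPL_compatible;	/* gcc refuses to load the plugin without this */

int plugin_init(struct plugin_name_args *plugin_info,
		struct plugin_gcc_version *version)
{
	/* reject a plugin built against a different gcc, as the real plugins do */
	if (!plugin_default_version_check(version, &gcc_version))
		return 1;
	fprintf(stderr, "loaded plugin: %s\n", plugin_info->base_name);
	return 0;
}

Built with something like: gcc -I$(gcc -print-file-name=plugin)/include -fPIC -shared minimal_plugin.c -o minimal_plugin.so, then loaded with -fplugin=./minimal_plugin.so, which is exactly the mechanism the *_PLUGIN_CFLAGS variables above rely on.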
616diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
617index 8f8eafb..3405f46 100644
618--- a/arch/alpha/include/asm/atomic.h
619+++ b/arch/alpha/include/asm/atomic.h
620@@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
621 #define atomic_dec(v) atomic_sub(1,(v))
622 #define atomic64_dec(v) atomic64_sub(1,(v))
623
624+#define atomic64_read_unchecked(v) atomic64_read(v)
625+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
626+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
627+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
628+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
629+#define atomic64_inc_unchecked(v) atomic64_inc(v)
630+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
631+#define atomic64_dec_unchecked(v) atomic64_dec(v)
632+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
633+
634 #endif /* _ALPHA_ATOMIC_H */
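Editor's note: these one-line aliases exist because alpha carries no PAX_REFCOUNT instrumentation, so the _unchecked API -- used elsewhere in the patch for counters that may legitimately wrap, such as statistics -- simply collapses onto the plain atomics. A compilable toy model of the convention; tx_bytes and the simplified macros are invented for the sketch.

#include <stdio.h>

typedef struct { long long counter; } atomic64_t;
typedef atomic64_t atomic64_unchecked_t;	/* alpha-style alias */

#define atomic64_add(i, v)           ((v)->counter += (i))
#define atomic64_add_unchecked(i, v) atomic64_add((i), (v))	/* 1:1 mapping */

int main(void)
{
	atomic64_unchecked_t tx_bytes = { 0 };
	atomic64_add_unchecked(1500, &tx_bytes);	/* wraparound is harmless here */
	printf("%lld\n", tx_bytes.counter);
	return 0;
}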
635diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
636index ad368a9..fbe0f25 100644
637--- a/arch/alpha/include/asm/cache.h
638+++ b/arch/alpha/include/asm/cache.h
639@@ -4,19 +4,19 @@
640 #ifndef __ARCH_ALPHA_CACHE_H
641 #define __ARCH_ALPHA_CACHE_H
642
643+#include <linux/const.h>
644
645 /* Bytes per L1 (data) cache line. */
646 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
647-# define L1_CACHE_BYTES 64
648 # define L1_CACHE_SHIFT 6
649 #else
650 /* Both EV4 and EV5 are write-through, read-allocate,
651 direct-mapped, physical.
652 */
653-# define L1_CACHE_BYTES 32
654 # define L1_CACHE_SHIFT 5
655 #endif
656
657+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
658 #define SMP_CACHE_BYTES L1_CACHE_BYTES
659
660 #endif
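Editor's note: replacing the two hard-coded L1_CACHE_BYTES values with a single shift keeps the byte count and the shift in lockstep, and _AC() from <linux/const.h> makes the expression usable from both C and assembly. The demo below condenses the real definitions from include/uapi/linux/const.h.

/* _AC() appends a type suffix in C but drops it for assembly */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	6
#define L1_CACHE_BYTES	(_AC(1, UL) << L1_CACHE_SHIFT)	/* 64UL in C, 64 in a .S file */

#include <stdio.h>
int main(void) { printf("%lu\n", L1_CACHE_BYTES); return 0; }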
661diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
662index 968d999..d36b2df 100644
663--- a/arch/alpha/include/asm/elf.h
664+++ b/arch/alpha/include/asm/elf.h
665@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
666
667 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
668
669+#ifdef CONFIG_PAX_ASLR
670+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
671+
672+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
673+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
674+#endif
675+
676 /* $0 is set by ld.so to a pointer to a function which might be
677 registered using atexit. This provides a mean for the dynamic
678 linker to call DT_FINI functions for shared libraries that have
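Editor's note: PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN name bit counts, not byte offsets. A sketch of how PaX typically turns such a count into a page-aligned randomization delta; the consuming code lives in the fs/binfmt_elf.c part of the patch, outside this excerpt, and get_random_bits() stands in for the kernel's entropy source.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT		13	/* 8 KB pages on alpha */
#define PAX_DELTA_MMAP_LEN	28	/* 64-bit personality, per the hunk above */

static unsigned long get_random_bits(void) { return (unsigned long)random(); }

int main(void)
{
	unsigned long delta_mmap =
		(get_random_bits() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
	printf("mmap base shifted by up to 2^%d pages: %#lx\n",
	       PAX_DELTA_MMAP_LEN, delta_mmap);
	return 0;
}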
679diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
680index aab14a0..b4fa3e7 100644
681--- a/arch/alpha/include/asm/pgalloc.h
682+++ b/arch/alpha/include/asm/pgalloc.h
683@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
684 pgd_set(pgd, pmd);
685 }
686
687+static inline void
688+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
689+{
690+ pgd_populate(mm, pgd, pmd);
691+}
692+
693 extern pgd_t *pgd_alloc(struct mm_struct *mm);
694
695 static inline void
696diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
697index d8f9b7e..f6222fa 100644
698--- a/arch/alpha/include/asm/pgtable.h
699+++ b/arch/alpha/include/asm/pgtable.h
700@@ -102,6 +102,17 @@ struct vm_area_struct;
701 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
702 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
703 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
704+
705+#ifdef CONFIG_PAX_PAGEEXEC
706+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
707+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
708+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
709+#else
710+# define PAGE_SHARED_NOEXEC PAGE_SHARED
711+# define PAGE_COPY_NOEXEC PAGE_COPY
712+# define PAGE_READONLY_NOEXEC PAGE_READONLY
713+#endif
714+
715 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
716
717 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
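Editor's note: on alpha, executability is governed by _PAGE_FOE (fault on execute), so the new *_NOEXEC protections simply add that bit to the shared/copy/read-only variants. A toy model of the selection PAGEEXEC enables for non-PROT_EXEC requests; select_pgprot() is illustrative, the real mechanism being the protection_map table updated elsewhere in the patch.

#include <stdio.h>

#define PROT_READ  0x1
#define PROT_WRITE 0x2
#define PROT_EXEC  0x4

static const char *select_pgprot(int prot, int pageexec)
{
	if (prot & PROT_EXEC)
		return "PAGE_SHARED";		/* executable mapping */
	return pageexec ? "PAGE_SHARED_NOEXEC"	/* _PAGE_FOE set */
			: "PAGE_SHARED";	/* legacy: executable anyway */
}

int main(void)
{
	printf("%s\n", select_pgprot(PROT_READ | PROT_WRITE, 1));
	return 0;
}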
718diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
719index 2fd00b7..cfd5069 100644
720--- a/arch/alpha/kernel/module.c
721+++ b/arch/alpha/kernel/module.c
722@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
723
724 /* The small sections were sorted to the end of the segment.
725 The following should definitely cover them. */
726- gp = (u64)me->module_core + me->core_size - 0x8000;
727+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
728 got = sechdrs[me->arch.gotsecindex].sh_addr;
729
730 for (i = 0; i < n; i++) {
731diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
732index e51f578..16c64a3 100644
733--- a/arch/alpha/kernel/osf_sys.c
734+++ b/arch/alpha/kernel/osf_sys.c
735@@ -1296,10 +1296,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
736 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
737
738 static unsigned long
739-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
740- unsigned long limit)
741+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
742+ unsigned long limit, unsigned long flags)
743 {
744 struct vm_unmapped_area_info info;
745+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
746
747 info.flags = 0;
748 info.length = len;
749@@ -1307,6 +1308,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
750 info.high_limit = limit;
751 info.align_mask = 0;
752 info.align_offset = 0;
753+ info.threadstack_offset = offset;
754 return vm_unmapped_area(&info);
755 }
756
757@@ -1339,20 +1341,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
758 merely specific addresses, but regions of memory -- perhaps
759 this feature should be incorporated into all ports? */
760
761+#ifdef CONFIG_PAX_RANDMMAP
762+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
763+#endif
764+
765 if (addr) {
766- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
767+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
768 if (addr != (unsigned long) -ENOMEM)
769 return addr;
770 }
771
772 /* Next, try allocating at TASK_UNMAPPED_BASE. */
773- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
774- len, limit);
775+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
776+
777 if (addr != (unsigned long) -ENOMEM)
778 return addr;
779
780 /* Finally, try allocating in low memory. */
781- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
782+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
783
784 return addr;
785 }
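Editor's note: the reworked arch_get_unmapped_area_1() threads the file/flags pair through so grsecurity can add a randomized slack below thread stacks (info.threadstack_offset), and the fallback now honours the RANDMMAP-shifted mm->mmap_base instead of the fixed TASK_UNMAPPED_BASE. A loose model of the offset helper; the bit width is an assumption for the sketch, not the patch's value.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT		13	/* 8 KB pages on alpha */
#define THREADSTACK_RAND_BITS	8	/* assumed width for this sketch */

/* stand-in for gr_rand_threadstack_offset(): nonzero only for thread stacks */
static unsigned long rand_threadstack_offset(int is_thread_stack)
{
	if (!is_thread_stack)
		return 0;
	return ((unsigned long)random() & ((1UL << THREADSTACK_RAND_BITS) - 1))
		<< PAGE_SHIFT;
}

int main(void)
{
	printf("gap below thread stack: %#lx\n", rand_threadstack_offset(1));
	return 0;
}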
786diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
787index 9d0ac09..479a962 100644
788--- a/arch/alpha/mm/fault.c
789+++ b/arch/alpha/mm/fault.c
790@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
791 __reload_thread(pcb);
792 }
793
794+#ifdef CONFIG_PAX_PAGEEXEC
795+/*
796+ * PaX: decide what to do with offenders (regs->pc = fault address)
797+ *
798+ * returns 1 when task should be killed
799+ * 2 when patched PLT trampoline was detected
800+ * 3 when unpatched PLT trampoline was detected
801+ */
802+static int pax_handle_fetch_fault(struct pt_regs *regs)
803+{
804+
805+#ifdef CONFIG_PAX_EMUPLT
806+ int err;
807+
808+ do { /* PaX: patched PLT emulation #1 */
809+ unsigned int ldah, ldq, jmp;
810+
811+ err = get_user(ldah, (unsigned int *)regs->pc);
812+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
813+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
814+
815+ if (err)
816+ break;
817+
818+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
819+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
820+ jmp == 0x6BFB0000U)
821+ {
822+ unsigned long r27, addr;
823+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
824+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
825+
826+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
827+ err = get_user(r27, (unsigned long *)addr);
828+ if (err)
829+ break;
830+
831+ regs->r27 = r27;
832+ regs->pc = r27;
833+ return 2;
834+ }
835+ } while (0);
836+
837+ do { /* PaX: patched PLT emulation #2 */
838+ unsigned int ldah, lda, br;
839+
840+ err = get_user(ldah, (unsigned int *)regs->pc);
841+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
842+ err |= get_user(br, (unsigned int *)(regs->pc+8));
843+
844+ if (err)
845+ break;
846+
847+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
848+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
849+ (br & 0xFFE00000U) == 0xC3E00000U)
850+ {
851+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
852+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
853+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
854+
855+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
856+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
857+ return 2;
858+ }
859+ } while (0);
860+
861+ do { /* PaX: unpatched PLT emulation */
862+ unsigned int br;
863+
864+ err = get_user(br, (unsigned int *)regs->pc);
865+
866+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
867+ unsigned int br2, ldq, nop, jmp;
868+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
869+
870+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
871+ err = get_user(br2, (unsigned int *)addr);
872+ err |= get_user(ldq, (unsigned int *)(addr+4));
873+ err |= get_user(nop, (unsigned int *)(addr+8));
874+ err |= get_user(jmp, (unsigned int *)(addr+12));
875+ err |= get_user(resolver, (unsigned long *)(addr+16));
876+
877+ if (err)
878+ break;
879+
880+ if (br2 == 0xC3600000U &&
881+ ldq == 0xA77B000CU &&
882+ nop == 0x47FF041FU &&
883+ jmp == 0x6B7B0000U)
884+ {
885+ regs->r28 = regs->pc+4;
886+ regs->r27 = addr+16;
887+ regs->pc = resolver;
888+ return 3;
889+ }
890+ }
891+ } while (0);
892+#endif
893+
894+ return 1;
895+}
896+
897+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
898+{
899+ unsigned long i;
900+
901+ printk(KERN_ERR "PAX: bytes at PC: ");
902+ for (i = 0; i < 5; i++) {
903+ unsigned int c;
904+ if (get_user(c, (unsigned int *)pc+i))
905+ printk(KERN_CONT "???????? ");
906+ else
907+ printk(KERN_CONT "%08x ", c);
908+ }
909+ printk("\n");
910+}
911+#endif
912
913 /*
914 * This routine handles page faults. It determines the address,
915@@ -133,8 +251,29 @@ retry:
916 good_area:
917 si_code = SEGV_ACCERR;
918 if (cause < 0) {
919- if (!(vma->vm_flags & VM_EXEC))
920+ if (!(vma->vm_flags & VM_EXEC)) {
921+
922+#ifdef CONFIG_PAX_PAGEEXEC
923+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
924+ goto bad_area;
925+
926+ up_read(&mm->mmap_sem);
927+ switch (pax_handle_fetch_fault(regs)) {
928+
929+#ifdef CONFIG_PAX_EMUPLT
930+ case 2:
931+ case 3:
932+ return;
933+#endif
934+
935+ }
936+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
937+ do_group_exit(SIGKILL);
938+#else
939 goto bad_area;
940+#endif
941+
942+ }
943 } else if (!cause) {
944 /* Allow reads even for write-only mappings */
945 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
946diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
947index 97d07ed..2931f2b 100644
948--- a/arch/arm/Kconfig
949+++ b/arch/arm/Kconfig
950@@ -1727,7 +1727,7 @@ config ALIGNMENT_TRAP
951
952 config UACCESS_WITH_MEMCPY
953 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
954- depends on MMU
955+ depends on MMU && !PAX_MEMORY_UDEREF
956 default y if CPU_FEROCEON
957 help
958 Implement faster copy_to_user and clear_user methods for CPU
959@@ -1991,6 +1991,7 @@ config XIP_PHYS_ADDR
960 config KEXEC
961 bool "Kexec system call (EXPERIMENTAL)"
962 depends on (!SMP || PM_SLEEP_SMP)
963+ depends on !GRKERNSEC_KMEM
964 help
965 kexec is a system call that implements the ability to shutdown your
966 current kernel, and to start another kernel. It is like a reboot
967diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
968index e22c119..abe7041 100644
969--- a/arch/arm/include/asm/atomic.h
970+++ b/arch/arm/include/asm/atomic.h
971@@ -18,17 +18,41 @@
972 #include <asm/barrier.h>
973 #include <asm/cmpxchg.h>
974
975+#ifdef CONFIG_GENERIC_ATOMIC64
976+#include <asm-generic/atomic64.h>
977+#endif
978+
979 #define ATOMIC_INIT(i) { (i) }
980
981 #ifdef __KERNEL__
982
983+#ifdef CONFIG_THUMB2_KERNEL
984+#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
985+#else
986+#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
987+#endif
988+
989+#define _ASM_EXTABLE(from, to) \
990+" .pushsection __ex_table,\"a\"\n"\
991+" .align 3\n" \
992+" .long " #from ", " #to"\n" \
993+" .popsection"
994+
995 /*
996 * On ARM, ordinary assignment (str instruction) doesn't clear the local
997 * strex/ldrex monitor on some implementations. The reason we can use it for
998 * atomic_set() is the clrex or dummy strex done on every exception return.
999 */
1000 #define atomic_read(v) ACCESS_ONCE((v)->counter)
1001+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
1002+{
1003+ return ACCESS_ONCE(v->counter);
1004+}
1005 #define atomic_set(v,i) (((v)->counter) = (i))
1006+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
1007+{
1008+ v->counter = i;
1009+}
1010
1011 #if __LINUX_ARM_ARCH__ >= 6
1012
1013@@ -38,26 +62,50 @@
1014 * to ensure that the update happens.
1015 */
1016
1017-#define ATOMIC_OP(op, c_op, asm_op) \
1018-static inline void atomic_##op(int i, atomic_t *v) \
1019+#ifdef CONFIG_PAX_REFCOUNT
1020+#define __OVERFLOW_POST \
1021+ " bvc 3f\n" \
1022+ "2: " REFCOUNT_TRAP_INSN "\n"\
1023+ "3:\n"
1024+#define __OVERFLOW_POST_RETURN \
1025+ " bvc 3f\n" \
1026+" mov %0, %1\n" \
1027+ "2: " REFCOUNT_TRAP_INSN "\n"\
1028+ "3:\n"
1029+#define __OVERFLOW_EXTABLE \
1030+ "4:\n" \
1031+ _ASM_EXTABLE(2b, 4b)
1032+#else
1033+#define __OVERFLOW_POST
1034+#define __OVERFLOW_POST_RETURN
1035+#define __OVERFLOW_EXTABLE
1036+#endif
1037+
1038+#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable) \
1039+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1040 { \
1041 unsigned long tmp; \
1042 int result; \
1043 \
1044 prefetchw(&v->counter); \
1045- __asm__ __volatile__("@ atomic_" #op "\n" \
1046+ __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
1047 "1: ldrex %0, [%3]\n" \
1048 " " #asm_op " %0, %0, %4\n" \
1049+ post_op \
1050 " strex %1, %0, [%3]\n" \
1051 " teq %1, #0\n" \
1052-" bne 1b" \
1053+" bne 1b\n" \
1054+ extable \
1055 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1056 : "r" (&v->counter), "Ir" (i) \
1057 : "cc"); \
1058 } \
1059
1060-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1061-static inline int atomic_##op##_return(int i, atomic_t *v) \
1062+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, _unchecked, c_op, asm_op, , )\
1063+ __ATOMIC_OP(op, , c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1064+
1065+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
1066+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1067 { \
1068 unsigned long tmp; \
1069 int result; \
1070@@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1071 smp_mb(); \
1072 prefetchw(&v->counter); \
1073 \
1074- __asm__ __volatile__("@ atomic_" #op "_return\n" \
1075+ __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
1076 "1: ldrex %0, [%3]\n" \
1077 " " #asm_op " %0, %0, %4\n" \
1078+ post_op \
1079 " strex %1, %0, [%3]\n" \
1080 " teq %1, #0\n" \
1081-" bne 1b" \
1082+" bne 1b\n" \
1083+ extable \
1084 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1085 : "r" (&v->counter), "Ir" (i) \
1086 : "cc"); \
1087@@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1088 return result; \
1089 }
1090
1091+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op, , )\
1092+ __ATOMIC_OP_RETURN(op, , c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1093+
1094 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1095 {
1096 int oldval;
1097@@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1098 __asm__ __volatile__ ("@ atomic_add_unless\n"
1099 "1: ldrex %0, [%4]\n"
1100 " teq %0, %5\n"
1101-" beq 2f\n"
1102-" add %1, %0, %6\n"
1103+" beq 4f\n"
1104+" adds %1, %0, %6\n"
1105+
1106+#ifdef CONFIG_PAX_REFCOUNT
1107+" bvc 3f\n"
1108+"2: " REFCOUNT_TRAP_INSN "\n"
1109+"3:\n"
1110+#endif
1111+
1112 " strex %2, %1, [%4]\n"
1113 " teq %2, #0\n"
1114 " bne 1b\n"
1115-"2:"
1116+"4:"
1117+
1118+#ifdef CONFIG_PAX_REFCOUNT
1119+ _ASM_EXTABLE(2b, 4b)
1120+#endif
1121+
1122 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1123 : "r" (&v->counter), "r" (u), "r" (a)
1124 : "cc");
1125@@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1126 return oldval;
1127 }
1128
1129+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1130+{
1131+ unsigned long oldval, res;
1132+
1133+ smp_mb();
1134+
1135+ do {
1136+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1137+ "ldrex %1, [%3]\n"
1138+ "mov %0, #0\n"
1139+ "teq %1, %4\n"
1140+ "strexeq %0, %5, [%3]\n"
1141+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1142+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1143+ : "cc");
1144+ } while (res);
1145+
1146+ smp_mb();
1147+
1148+ return oldval;
1149+}
1150+
1151 #else /* ARM_ARCH_6 */
1152
1153 #ifdef CONFIG_SMP
1154 #error SMP not supported on pre-ARMv6 CPUs
1155 #endif
1156
1157-#define ATOMIC_OP(op, c_op, asm_op) \
1158-static inline void atomic_##op(int i, atomic_t *v) \
1159+#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
1160+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1161 { \
1162 unsigned long flags; \
1163 \
1164@@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
1165 raw_local_irq_restore(flags); \
1166 } \
1167
1168-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1169-static inline int atomic_##op##_return(int i, atomic_t *v) \
1170+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
1171+ __ATOMIC_OP(op, _unchecked, c_op, asm_op)
1172+
1173+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
1174+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1175 { \
1176 unsigned long flags; \
1177 int val; \
1178@@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1179 return val; \
1180 }
1181
1182+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
1183+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
1184+
1185 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1186 {
1187 int ret;
1188@@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1189 return ret;
1190 }
1191
1192+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1193+{
1194+ return atomic_cmpxchg((atomic_t *)v, old, new);
1195+}
1196+
1197 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1198 {
1199 int c, old;
1200@@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub)
1201
1202 #undef ATOMIC_OPS
1203 #undef ATOMIC_OP_RETURN
1204+#undef __ATOMIC_OP_RETURN
1205 #undef ATOMIC_OP
1206+#undef __ATOMIC_OP
1207
1208 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1209+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1210+{
1211+ return xchg(&v->counter, new);
1212+}
1213
1214 #define atomic_inc(v) atomic_add(1, v)
1215+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1216+{
1217+ atomic_add_unchecked(1, v);
1218+}
1219 #define atomic_dec(v) atomic_sub(1, v)
1220+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1221+{
1222+ atomic_sub_unchecked(1, v);
1223+}
1224
1225 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1226+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1227+{
1228+ return atomic_add_return_unchecked(1, v) == 0;
1229+}
1230 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1231 #define atomic_inc_return(v) (atomic_add_return(1, v))
1232+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1233+{
1234+ return atomic_add_return_unchecked(1, v);
1235+}
1236 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1237 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1238
1239@@ -216,6 +336,14 @@ typedef struct {
1240 long long counter;
1241 } atomic64_t;
1242
1243+#ifdef CONFIG_PAX_REFCOUNT
1244+typedef struct {
1245+ long long counter;
1246+} atomic64_unchecked_t;
1247+#else
1248+typedef atomic64_t atomic64_unchecked_t;
1249+#endif
1250+
1251 #define ATOMIC64_INIT(i) { (i) }
1252
1253 #ifdef CONFIG_ARM_LPAE
1254@@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1255 return result;
1256 }
1257
1258+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1259+{
1260+ long long result;
1261+
1262+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1263+" ldrd %0, %H0, [%1]"
1264+ : "=&r" (result)
1265+ : "r" (&v->counter), "Qo" (v->counter)
1266+ );
1267+
1268+ return result;
1269+}
1270+
1271 static inline void atomic64_set(atomic64_t *v, long long i)
1272 {
1273 __asm__ __volatile__("@ atomic64_set\n"
1274@@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1275 : "r" (&v->counter), "r" (i)
1276 );
1277 }
1278+
1279+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1280+{
1281+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1282+" strd %2, %H2, [%1]"
1283+ : "=Qo" (v->counter)
1284+ : "r" (&v->counter), "r" (i)
1285+ );
1286+}
1287 #else
1288 static inline long long atomic64_read(const atomic64_t *v)
1289 {
1290@@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1291 return result;
1292 }
1293
1294+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1295+{
1296+ long long result;
1297+
1298+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1299+" ldrexd %0, %H0, [%1]"
1300+ : "=&r" (result)
1301+ : "r" (&v->counter), "Qo" (v->counter)
1302+ );
1303+
1304+ return result;
1305+}
1306+
1307 static inline void atomic64_set(atomic64_t *v, long long i)
1308 {
1309 long long tmp;
1310@@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1311 : "r" (&v->counter), "r" (i)
1312 : "cc");
1313 }
1314+
1315+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1316+{
1317+ long long tmp;
1318+
1319+ prefetchw(&v->counter);
1320+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1321+"1: ldrexd %0, %H0, [%2]\n"
1322+" strexd %0, %3, %H3, [%2]\n"
1323+" teq %0, #0\n"
1324+" bne 1b"
1325+ : "=&r" (tmp), "=Qo" (v->counter)
1326+ : "r" (&v->counter), "r" (i)
1327+ : "cc");
1328+}
1329 #endif
1330
1331-#define ATOMIC64_OP(op, op1, op2) \
1332-static inline void atomic64_##op(long long i, atomic64_t *v) \
1333+#undef __OVERFLOW_POST_RETURN
1334+#define __OVERFLOW_POST_RETURN \
1335+ " bvc 3f\n" \
1336+" mov %0, %1\n" \
1337+" mov %H0, %H1\n" \
1338+ "2: " REFCOUNT_TRAP_INSN "\n"\
1339+ "3:\n"
1340+
1341+#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable) \
1342+static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
1343 { \
1344 long long result; \
1345 unsigned long tmp; \
1346 \
1347 prefetchw(&v->counter); \
1348- __asm__ __volatile__("@ atomic64_" #op "\n" \
1349+ __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
1350 "1: ldrexd %0, %H0, [%3]\n" \
1351 " " #op1 " %Q0, %Q0, %Q4\n" \
1352 " " #op2 " %R0, %R0, %R4\n" \
1353+ post_op \
1354 " strexd %1, %0, %H0, [%3]\n" \
1355 " teq %1, #0\n" \
1356-" bne 1b" \
1357+" bne 1b\n" \
1358+ extable \
1359 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1360 : "r" (&v->counter), "r" (i) \
1361 : "cc"); \
1362 } \
1363
1364-#define ATOMIC64_OP_RETURN(op, op1, op2) \
1365-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1366+#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, _unchecked, op1, op2, , ) \
1367+ __ATOMIC64_OP(op, , op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1368+
1369+#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \
1370+static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \
1371 { \
1372 long long result; \
1373 unsigned long tmp; \
1374@@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1375 smp_mb(); \
1376 prefetchw(&v->counter); \
1377 \
1378- __asm__ __volatile__("@ atomic64_" #op "_return\n" \
1379+ __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
1380 "1: ldrexd %0, %H0, [%3]\n" \
1381 " " #op1 " %Q0, %Q0, %Q4\n" \
1382 " " #op2 " %R0, %R0, %R4\n" \
1383+ post_op \
1384 " strexd %1, %0, %H0, [%3]\n" \
1385 " teq %1, #0\n" \
1386-" bne 1b" \
1387+" bne 1b\n" \
1388+ extable \
1389 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1390 : "r" (&v->counter), "r" (i) \
1391 : "cc"); \
1392@@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1393 return result; \
1394 }
1395
1396+#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2, , ) \
1397+ __ATOMIC64_OP_RETURN(op, , op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1398+
1399 #define ATOMIC64_OPS(op, op1, op2) \
1400 ATOMIC64_OP(op, op1, op2) \
1401 ATOMIC64_OP_RETURN(op, op1, op2)
1402@@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc)
1403
1404 #undef ATOMIC64_OPS
1405 #undef ATOMIC64_OP_RETURN
1406+#undef __ATOMIC64_OP_RETURN
1407 #undef ATOMIC64_OP
1408+#undef __ATOMIC64_OP
1409+#undef __OVERFLOW_EXTABLE
1410+#undef __OVERFLOW_POST_RETURN
1411+#undef __OVERFLOW_POST
1412
1413 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1414 long long new)
1415@@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1416 return oldval;
1417 }
1418
1419+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1420+ long long new)
1421+{
1422+ long long oldval;
1423+ unsigned long res;
1424+
1425+ smp_mb();
1426+
1427+ do {
1428+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1429+ "ldrexd %1, %H1, [%3]\n"
1430+ "mov %0, #0\n"
1431+ "teq %1, %4\n"
1432+ "teqeq %H1, %H4\n"
1433+ "strexdeq %0, %5, %H5, [%3]"
1434+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1435+ : "r" (&ptr->counter), "r" (old), "r" (new)
1436+ : "cc");
1437+ } while (res);
1438+
1439+ smp_mb();
1440+
1441+ return oldval;
1442+}
1443+
1444 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1445 {
1446 long long result;
1447@@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1448 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1449 {
1450 long long result;
1451- unsigned long tmp;
1452+ u64 tmp;
1453
1454 smp_mb();
1455 prefetchw(&v->counter);
1456
1457 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1458-"1: ldrexd %0, %H0, [%3]\n"
1459-" subs %Q0, %Q0, #1\n"
1460-" sbc %R0, %R0, #0\n"
1461+"1: ldrexd %1, %H1, [%3]\n"
1462+" subs %Q0, %Q1, #1\n"
1463+" sbcs %R0, %R1, #0\n"
1464+
1465+#ifdef CONFIG_PAX_REFCOUNT
1466+" bvc 3f\n"
1467+" mov %Q0, %Q1\n"
1468+" mov %R0, %R1\n"
1469+"2: " REFCOUNT_TRAP_INSN "\n"
1470+"3:\n"
1471+#endif
1472+
1473 " teq %R0, #0\n"
1474-" bmi 2f\n"
1475+" bmi 4f\n"
1476 " strexd %1, %0, %H0, [%3]\n"
1477 " teq %1, #0\n"
1478 " bne 1b\n"
1479-"2:"
1480+"4:\n"
1481+
1482+#ifdef CONFIG_PAX_REFCOUNT
1483+ _ASM_EXTABLE(2b, 4b)
1484+#endif
1485+
1486 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1487 : "r" (&v->counter)
1488 : "cc");
1489@@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1490 " teq %0, %5\n"
1491 " teqeq %H0, %H5\n"
1492 " moveq %1, #0\n"
1493-" beq 2f\n"
1494+" beq 4f\n"
1495 " adds %Q0, %Q0, %Q6\n"
1496-" adc %R0, %R0, %R6\n"
1497+" adcs %R0, %R0, %R6\n"
1498+
1499+#ifdef CONFIG_PAX_REFCOUNT
1500+" bvc 3f\n"
1501+"2: " REFCOUNT_TRAP_INSN "\n"
1502+"3:\n"
1503+#endif
1504+
1505 " strexd %2, %0, %H0, [%4]\n"
1506 " teq %2, #0\n"
1507 " bne 1b\n"
1508-"2:"
1509+"4:\n"
1510+
1511+#ifdef CONFIG_PAX_REFCOUNT
1512+ _ASM_EXTABLE(2b, 4b)
1513+#endif
1514+
1515 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1516 : "r" (&v->counter), "r" (u), "r" (a)
1517 : "cc");
1518@@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1519
1520 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1521 #define atomic64_inc(v) atomic64_add(1LL, (v))
1522+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1523 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1524+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1525 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1526 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1527 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1528+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1529 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1530 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1531 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
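Editor's note: the instrumentation above is PAX_REFCOUNT's core trick on ARM -- the add is performed with the flag-setting forms (adds/adcs), bvc skips the trap when no signed overflow occurred, and the bkpt plus __ex_table entry turn an overflow into a handled fault instead of a wrapped reference count. A portable userspace model of the same policy, substituting a compiler builtin for adds/bvc/bkpt:

#include <stdio.h>

typedef struct { int counter; } atomic_t;

static void atomic_add(int i, atomic_t *v)
{
	int result;

	if (__builtin_add_overflow(v->counter, i, &result))
		__builtin_trap();	/* kernel: REFCOUNT_TRAP_INSN + extable fixup */
	v->counter = result;
}

int main(void)
{
	atomic_t refs = { 0x7fffffff };
	atomic_add(1, &refs);		/* traps here: the refcount would overflow */
	printf("unreachable\n");
	return 0;
}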
1532diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
1533index d2f81e6..3c4dba5 100644
1534--- a/arch/arm/include/asm/barrier.h
1535+++ b/arch/arm/include/asm/barrier.h
1536@@ -67,7 +67,7 @@
1537 do { \
1538 compiletime_assert_atomic_type(*p); \
1539 smp_mb(); \
1540- ACCESS_ONCE(*p) = (v); \
1541+ ACCESS_ONCE_RW(*p) = (v); \
1542 } while (0)
1543
1544 #define smp_load_acquire(p) \
1545diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1546index 75fe66b..ba3dee4 100644
1547--- a/arch/arm/include/asm/cache.h
1548+++ b/arch/arm/include/asm/cache.h
1549@@ -4,8 +4,10 @@
1550 #ifndef __ASMARM_CACHE_H
1551 #define __ASMARM_CACHE_H
1552
1553+#include <linux/const.h>
1554+
1555 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1556-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1557+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1558
1559 /*
1560 * Memory returned by kmalloc() may be used for DMA, so we must make
1561@@ -24,5 +26,6 @@
1562 #endif
1563
1564 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1565+#define __read_only __attribute__ ((__section__(".data..read_only")))
1566
1567 #endif
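Editor's note: __read_only places objects in .data..read_only, a section the KERNEXEC side of the patch write-protects after init. Minimal usage sketch; the variable name is invented, and only the kernel actually seals the section.

#define __read_only __attribute__((__section__(".data..read_only")))

static int sysctl_enabled __read_only = 1;	/* written only before the section is sealed */

int main(void)
{
	return sysctl_enabled;	/* reads stay cheap; in-kernel writes after init would fault */
}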
1568diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1569index 2d46862..a35415b 100644
1570--- a/arch/arm/include/asm/cacheflush.h
1571+++ b/arch/arm/include/asm/cacheflush.h
1572@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1573 void (*dma_unmap_area)(const void *, size_t, int);
1574
1575 void (*dma_flush_range)(const void *, const void *);
1576-};
1577+} __no_const;
1578
1579 /*
1580 * Select the calling method
1581diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1582index 5233151..87a71fa 100644
1583--- a/arch/arm/include/asm/checksum.h
1584+++ b/arch/arm/include/asm/checksum.h
1585@@ -37,7 +37,19 @@ __wsum
1586 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1587
1588 __wsum
1589-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1590+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1591+
1592+static inline __wsum
1593+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1594+{
1595+ __wsum ret;
1596+ pax_open_userland();
1597+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1598+ pax_close_userland();
1599+ return ret;
1600+}
1601+
1602+
1603
1604 /*
1605 * Fold a partial checksum without adding pseudo headers
1606diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1607index abb2c37..96db950 100644
1608--- a/arch/arm/include/asm/cmpxchg.h
1609+++ b/arch/arm/include/asm/cmpxchg.h
1610@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1611
1612 #define xchg(ptr,x) \
1613 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1614+#define xchg_unchecked(ptr,x) \
1615+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1616
1617 #include <asm-generic/cmpxchg-local.h>
1618
1619diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1620index 6ddbe44..b5e38b1a 100644
1621--- a/arch/arm/include/asm/domain.h
1622+++ b/arch/arm/include/asm/domain.h
1623@@ -48,18 +48,37 @@
1624 * Domain types
1625 */
1626 #define DOMAIN_NOACCESS 0
1627-#define DOMAIN_CLIENT 1
1628 #ifdef CONFIG_CPU_USE_DOMAINS
1629+#define DOMAIN_USERCLIENT 1
1630+#define DOMAIN_KERNELCLIENT 1
1631 #define DOMAIN_MANAGER 3
1632+#define DOMAIN_VECTORS DOMAIN_USER
1633 #else
1634+
1635+#ifdef CONFIG_PAX_KERNEXEC
1636 #define DOMAIN_MANAGER 1
1637+#define DOMAIN_KERNEXEC 3
1638+#else
1639+#define DOMAIN_MANAGER 1
1640+#endif
1641+
1642+#ifdef CONFIG_PAX_MEMORY_UDEREF
1643+#define DOMAIN_USERCLIENT 0
1644+#define DOMAIN_UDEREF 1
1645+#define DOMAIN_VECTORS DOMAIN_KERNEL
1646+#else
1647+#define DOMAIN_USERCLIENT 1
1648+#define DOMAIN_VECTORS DOMAIN_USER
1649+#endif
1650+#define DOMAIN_KERNELCLIENT 1
1651+
1652 #endif
1653
1654 #define domain_val(dom,type) ((type) << (2*(dom)))
1655
1656 #ifndef __ASSEMBLY__
1657
1658-#ifdef CONFIG_CPU_USE_DOMAINS
1659+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1660 static inline void set_domain(unsigned val)
1661 {
1662 asm volatile(
1663@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1664 isb();
1665 }
1666
1667-#define modify_domain(dom,type) \
1668- do { \
1669- struct thread_info *thread = current_thread_info(); \
1670- unsigned int domain = thread->cpu_domain; \
1671- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1672- thread->cpu_domain = domain | domain_val(dom, type); \
1673- set_domain(thread->cpu_domain); \
1674- } while (0)
1675-
1676+extern void modify_domain(unsigned int dom, unsigned int type);
1677 #else
1678 static inline void set_domain(unsigned val) { }
1679 static inline void modify_domain(unsigned dom, unsigned type) { }
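Editor's note: domain_val() packs one 2-bit access field per ARM protection domain into the DACR image, which is how UDEREF can park the userland domain at NOACCESS while the kernel runs and briefly reopen it around explicit user accesses. A compilable illustration using the kernel's usual domain numbers:

#include <stdio.h>

#define DOMAIN_KERNEL	0
#define DOMAIN_USER	1
#define DOMAIN_IO	2

#define DOMAIN_NOACCESS	0
#define DOMAIN_CLIENT	1
#define DOMAIN_MANAGER	3

#define domain_val(dom, type)	((type) << (2*(dom)))

int main(void)
{
	unsigned dacr = domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) |
			domain_val(DOMAIN_USER, DOMAIN_NOACCESS) |	/* the UDEREF idea */
			domain_val(DOMAIN_IO, DOMAIN_CLIENT);
	printf("DACR image: %#x\n", dacr);
	return 0;
}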
1680diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1681index afb9caf..9a0bac0 100644
1682--- a/arch/arm/include/asm/elf.h
1683+++ b/arch/arm/include/asm/elf.h
1684@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1685 the loader. We need to make sure that it is out of the way of the program
1686 that it will "exec", and that there is sufficient room for the brk. */
1687
1688-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1689+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1690+
1691+#ifdef CONFIG_PAX_ASLR
1692+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1693+
1694+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1695+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1696+#endif
1697
1698 /* When the program starts, a1 contains a pointer to a function to be
1699 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1700@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1701 extern void elf_set_personality(const struct elf32_hdr *);
1702 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1703
1704-struct mm_struct;
1705-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1706-#define arch_randomize_brk arch_randomize_brk
1707-
1708 #ifdef CONFIG_MMU
1709 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1710 struct linux_binprm;
1711diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1712index de53547..52b9a28 100644
1713--- a/arch/arm/include/asm/fncpy.h
1714+++ b/arch/arm/include/asm/fncpy.h
1715@@ -81,7 +81,9 @@
1716 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1717 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1718 \
1719+ pax_open_kernel(); \
1720 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1721+ pax_close_kernel(); \
1722 flush_icache_range((unsigned long)(dest_buf), \
1723 (unsigned long)(dest_buf) + (size)); \
1724 \
1725diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1726index 53e69da..3fdc896 100644
1727--- a/arch/arm/include/asm/futex.h
1728+++ b/arch/arm/include/asm/futex.h
1729@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1730 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1731 return -EFAULT;
1732
1733+ pax_open_userland();
1734+
1735 smp_mb();
1736 /* Prefetching cannot fault */
1737 prefetchw(uaddr);
1738@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1739 : "cc", "memory");
1740 smp_mb();
1741
1742+ pax_close_userland();
1743+
1744 *uval = val;
1745 return ret;
1746 }
1747@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1748 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1749 return -EFAULT;
1750
1751+ pax_open_userland();
1752+
1753 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1754 "1: " TUSER(ldr) " %1, [%4]\n"
1755 " teq %1, %2\n"
1756@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1757 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1758 : "cc", "memory");
1759
1760+ pax_close_userland();
1761+
1762 *uval = val;
1763 return ret;
1764 }
1765@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1766 return -EFAULT;
1767
1768 pagefault_disable(); /* implies preempt_disable() */
1769+ pax_open_userland();
1770
1771 switch (op) {
1772 case FUTEX_OP_SET:
1773@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1774 ret = -ENOSYS;
1775 }
1776
1777+ pax_close_userland();
1778 pagefault_enable(); /* subsumes preempt_enable() */
1779
1780 if (!ret) {
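
This futex hunk shows the recurring UDEREF pattern used throughout the patch: every direct userland dereference is bracketed so that DOMAIN_USER is accessible only for the access itself. A stubbed, compilable sketch of that shape — the empty stub bodies stand in for the real domain switches, and user_word for a genuine userland address:

static void pax_open_userland(void)  { /* DOMAIN_USER -> DOMAIN_UDEREF */ }
static void pax_close_userland(void) { /* DOMAIN_USER -> DOMAIN_NOACCESS */ }

static int user_word;   /* stand-in for a userland address */

static int futex_read(const int *uaddr, int *val)
{
    pax_open_userland();
    *val = *uaddr;       /* the ldrt/strt window in the real code */
    pax_close_userland();
    return 0;
}

int main(void)
{
    int v;
    return futex_read(&user_word, &v);
}
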
1781diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1782index 83eb2f7..ed77159 100644
1783--- a/arch/arm/include/asm/kmap_types.h
1784+++ b/arch/arm/include/asm/kmap_types.h
1785@@ -4,6 +4,6 @@
1786 /*
1787 * This is the "bare minimum". AIO seems to require this.
1788 */
1789-#define KM_TYPE_NR 16
1790+#define KM_TYPE_NR 17
1791
1792 #endif
1793diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1794index 9e614a1..3302cca 100644
1795--- a/arch/arm/include/asm/mach/dma.h
1796+++ b/arch/arm/include/asm/mach/dma.h
1797@@ -22,7 +22,7 @@ struct dma_ops {
1798 int (*residue)(unsigned int, dma_t *); /* optional */
1799 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1800 const char *type;
1801-};
1802+} __do_const;
1803
1804 struct dma_struct {
1805 void *addr; /* single DMA address */
1806diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1807index f98c7f3..e5c626d 100644
1808--- a/arch/arm/include/asm/mach/map.h
1809+++ b/arch/arm/include/asm/mach/map.h
1810@@ -23,17 +23,19 @@ struct map_desc {
1811
1812 /* types 0-3 are defined in asm/io.h */
1813 enum {
1814- MT_UNCACHED = 4,
1815- MT_CACHECLEAN,
1816- MT_MINICLEAN,
1817+ MT_UNCACHED_RW = 4,
1818+ MT_CACHECLEAN_RO,
1819+ MT_MINICLEAN_RO,
1820 MT_LOW_VECTORS,
1821 MT_HIGH_VECTORS,
1822- MT_MEMORY_RWX,
1823+ __MT_MEMORY_RWX,
1824 MT_MEMORY_RW,
1825- MT_ROM,
1826- MT_MEMORY_RWX_NONCACHED,
1827+ MT_MEMORY_RX,
1828+ MT_ROM_RX,
1829+ MT_MEMORY_RW_NONCACHED,
1830+ MT_MEMORY_RX_NONCACHED,
1831 MT_MEMORY_RW_DTCM,
1832- MT_MEMORY_RWX_ITCM,
1833+ MT_MEMORY_RX_ITCM,
1834 MT_MEMORY_RW_SO,
1835 MT_MEMORY_DMA_READY,
1836 };
1837diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1838index 891a56b..48f337e 100644
1839--- a/arch/arm/include/asm/outercache.h
1840+++ b/arch/arm/include/asm/outercache.h
1841@@ -36,7 +36,7 @@ struct outer_cache_fns {
1842
1843 /* This is an ARM L2C thing */
1844 void (*write_sec)(unsigned long, unsigned);
1845-};
1846+} __no_const;
1847
1848 extern struct outer_cache_fns outer_cache;
1849
1850diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1851index 4355f0e..cd9168e 100644
1852--- a/arch/arm/include/asm/page.h
1853+++ b/arch/arm/include/asm/page.h
1854@@ -23,6 +23,7 @@
1855
1856 #else
1857
1858+#include <linux/compiler.h>
1859 #include <asm/glue.h>
1860
1861 /*
1862@@ -114,7 +115,7 @@ struct cpu_user_fns {
1863 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1864 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1865 unsigned long vaddr, struct vm_area_struct *vma);
1866-};
1867+} __no_const;
1868
1869 #ifdef MULTI_USER
1870 extern struct cpu_user_fns cpu_user;
1871diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1872index 19cfab5..3f5c7e9 100644
1873--- a/arch/arm/include/asm/pgalloc.h
1874+++ b/arch/arm/include/asm/pgalloc.h
1875@@ -17,6 +17,7 @@
1876 #include <asm/processor.h>
1877 #include <asm/cacheflush.h>
1878 #include <asm/tlbflush.h>
1879+#include <asm/system_info.h>
1880
1881 #define check_pgt_cache() do { } while (0)
1882
1883@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1884 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1885 }
1886
1887+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1888+{
1889+ pud_populate(mm, pud, pmd);
1890+}
1891+
1892 #else /* !CONFIG_ARM_LPAE */
1893
1894 /*
1895@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1896 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1897 #define pmd_free(mm, pmd) do { } while (0)
1898 #define pud_populate(mm,pmd,pte) BUG()
1899+#define pud_populate_kernel(mm,pmd,pte) BUG()
1900
1901 #endif /* CONFIG_ARM_LPAE */
1902
1903@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1904 __free_page(pte);
1905 }
1906
1907+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1908+{
1909+#ifdef CONFIG_ARM_LPAE
1910+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1911+#else
1912+ if (addr & SECTION_SIZE)
1913+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1914+ else
1915+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1916+#endif
1917+ flush_pmd_entry(pmdp);
1918+}
1919+
1920 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1921 pmdval_t prot)
1922 {
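
__section_update() above handles a non-LPAE quirk: Linux manages the hardware first-level entries in pairs, so one 2MB "pmd" holds two 1MB section descriptors, and bit 20 of the address (SECTION_SIZE, assumed to be 1MB as on classic ARM) selects which half to rewrite. A tiny sketch of the index choice:

#include <stdio.h>

#define SECTION_SIZE (1UL << 20)   /* 1MB sections assumed */

static int section_index(unsigned long addr)
{
    return (addr & SECTION_SIZE) ? 1 : 0;
}

int main(void)
{
    printf("0xc0000000 -> pmdp[%d]\n", section_index(0xc0000000UL));
    printf("0xc0100000 -> pmdp[%d]\n", section_index(0xc0100000UL));
    return 0;
}
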
1923diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1924index 5e68278..1869bae 100644
1925--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1926+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1927@@ -27,7 +27,7 @@
1928 /*
1929 * - section
1930 */
1931-#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1932+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1933 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1934 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1935 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1936@@ -39,6 +39,7 @@
1937 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1938 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1939 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1940+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1941
1942 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1943 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1944@@ -68,6 +69,7 @@
1945 * - extended small page/tiny page
1946 */
1947 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1948+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1949 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1950 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1951 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1952diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1953index f027941..f36ce30 100644
1954--- a/arch/arm/include/asm/pgtable-2level.h
1955+++ b/arch/arm/include/asm/pgtable-2level.h
1956@@ -126,6 +126,9 @@
1957 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1958 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1959
1960+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1961+#define L_PTE_PXN (_AT(pteval_t, 0))
1962+
1963 /*
1964 * These are the memory types, defined to be compatible with
1965 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1966diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1967index a31ecdad..95e98d4 100644
1968--- a/arch/arm/include/asm/pgtable-3level.h
1969+++ b/arch/arm/include/asm/pgtable-3level.h
1970@@ -81,6 +81,7 @@
1971 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
1972 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1973 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1974+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1975 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1976 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
1977 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
1978@@ -92,10 +93,12 @@
1979 #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
1980 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
1981 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
1982+#define PMD_SECT_RDONLY PMD_SECT_AP2
1983
1984 /*
1985 * To be used in assembly code with the upper page attributes.
1986 */
1987+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1988 #define L_PTE_XN_HIGH (1 << (54 - 32))
1989 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1990
1991diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1992index d5cac54..906ea3e 100644
1993--- a/arch/arm/include/asm/pgtable.h
1994+++ b/arch/arm/include/asm/pgtable.h
1995@@ -33,6 +33,9 @@
1996 #include <asm/pgtable-2level.h>
1997 #endif
1998
1999+#define ktla_ktva(addr) (addr)
2000+#define ktva_ktla(addr) (addr)
2001+
2002 /*
2003 * Just any arbitrary offset to the start of the vmalloc VM area: the
2004 * current 8MB value just means that there will be a 8MB "hole" after the
2005@@ -48,6 +51,9 @@
2006 #define LIBRARY_TEXT_START 0x0c000000
2007
2008 #ifndef __ASSEMBLY__
2009+extern pteval_t __supported_pte_mask;
2010+extern pmdval_t __supported_pmd_mask;
2011+
2012 extern void __pte_error(const char *file, int line, pte_t);
2013 extern void __pmd_error(const char *file, int line, pmd_t);
2014 extern void __pgd_error(const char *file, int line, pgd_t);
2015@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2016 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2017 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2018
2019+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2020+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2021+
2022+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2023+#include <asm/domain.h>
2024+#include <linux/thread_info.h>
2025+#include <linux/preempt.h>
2026+
2027+static inline int test_domain(int domain, int domaintype)
2028+{
2029+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2030+}
2031+#endif
2032+
2033+#ifdef CONFIG_PAX_KERNEXEC
2034+static inline unsigned long pax_open_kernel(void) {
2035+#ifdef CONFIG_ARM_LPAE
2036+ /* TODO */
2037+#else
2038+ preempt_disable();
2039+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2040+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2041+#endif
2042+ return 0;
2043+}
2044+
2045+static inline unsigned long pax_close_kernel(void) {
2046+#ifdef CONFIG_ARM_LPAE
2047+ /* TODO */
2048+#else
2049+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2050+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2051+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2052+ preempt_enable_no_resched();
2053+#endif
2054+ return 0;
2055+}
2056+#else
2057+static inline unsigned long pax_open_kernel(void) { return 0; }
2058+static inline unsigned long pax_close_kernel(void) { return 0; }
2059+#endif
2060+
2061 /*
2062 * This is the lowest virtual address we can permit any user space
2063 * mapping to be mapped at. This is particularly important for
2064@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2065 /*
2066 * The pgprot_* and protection_map entries will be fixed up in runtime
2067 * to include the cachable and bufferable bits based on memory policy,
2068- * as well as any architecture dependent bits like global/ASID and SMP
2069- * shared mapping bits.
2070+ * as well as any architecture dependent bits like global/ASID, PXN,
2071+ * and SMP shared mapping bits.
2072 */
2073 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2074
2075@@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
2076 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2077 {
2078 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2079- L_PTE_NONE | L_PTE_VALID;
2080+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2081 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2082 return pte;
2083 }
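
pax_open_kernel()/pax_close_kernel() above establish the convention the rest of the patch leans on: any write to memory that KERNEXEC keeps read-only is bracketed by a temporary switch of DOMAIN_KERNEL into manager mode (which ignores page-table write protection), with preemption disabled for the duration. A stubbed sketch of a caller, so the shape compiles standalone; vectors_copy is a made-up stand-in for a write-protected target:

#include <string.h>

static unsigned long pax_open_kernel(void)  { /* modify_domain(...) */ return 0; }
static unsigned long pax_close_kernel(void) { /* modify_domain(...) */ return 0; }

static char vectors_copy[64];   /* stand-in for write-protected memory */

static void patch_vectors(const void *src, size_t len)
{
    pax_open_kernel();
    memcpy(vectors_copy, src, len);   /* would fault without the open */
    pax_close_kernel();
}

int main(void)
{
    patch_vectors("\x00\x00\xa0\xe1", 4);   /* an ARM NOP, for flavour */
    return 0;
}
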
2084diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2085index c25ef3e..735f14b 100644
2086--- a/arch/arm/include/asm/psci.h
2087+++ b/arch/arm/include/asm/psci.h
2088@@ -32,7 +32,7 @@ struct psci_operations {
2089 int (*affinity_info)(unsigned long target_affinity,
2090 unsigned long lowest_affinity_level);
2091 int (*migrate_info_type)(void);
2092-};
2093+} __no_const;
2094
2095 extern struct psci_operations psci_ops;
2096 extern struct smp_operations psci_smp_ops;
2097diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2098index 18f5a55..5072a40 100644
2099--- a/arch/arm/include/asm/smp.h
2100+++ b/arch/arm/include/asm/smp.h
2101@@ -107,7 +107,7 @@ struct smp_operations {
2102 int (*cpu_disable)(unsigned int cpu);
2103 #endif
2104 #endif
2105-};
2106+} __no_const;
2107
2108 struct of_cpu_method {
2109 const char *method;
2110diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2111index d890e41..3921292 100644
2112--- a/arch/arm/include/asm/thread_info.h
2113+++ b/arch/arm/include/asm/thread_info.h
2114@@ -78,9 +78,9 @@ struct thread_info {
2115 .flags = 0, \
2116 .preempt_count = INIT_PREEMPT_COUNT, \
2117 .addr_limit = KERNEL_DS, \
2118- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2119- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2120- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2121+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2122+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2123+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2124 .restart_block = { \
2125 .fn = do_no_restart_syscall, \
2126 }, \
2127@@ -159,7 +159,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2128 #define TIF_SYSCALL_AUDIT 9
2129 #define TIF_SYSCALL_TRACEPOINT 10
2130 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2131-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2132+/* within 8 bits of TIF_SYSCALL_TRACE
2133+ * to meet flexible second operand requirements
2134+ */
2135+#define TIF_GRSEC_SETXID 12
2136+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2137 #define TIF_USING_IWMMXT 17
2138 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2139 #define TIF_RESTORE_SIGMASK 20
2140@@ -173,10 +177,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2141 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2142 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2143 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2144+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2145
2146 /* Checks for any syscall work in entry-common.S */
2147 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2148- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2149+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2150
2151 /*
2152 * Change these and you break ASM code in entry-common.S
2153diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2154index 5f833f7..76e6644 100644
2155--- a/arch/arm/include/asm/tls.h
2156+++ b/arch/arm/include/asm/tls.h
2157@@ -3,6 +3,7 @@
2158
2159 #include <linux/compiler.h>
2160 #include <asm/thread_info.h>
2161+#include <asm/pgtable.h>
2162
2163 #ifdef __ASSEMBLY__
2164 #include <asm/asm-offsets.h>
2165@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2166 * at 0xffff0fe0 must be used instead. (see
2167 * entry-armv.S for details)
2168 */
2169+ pax_open_kernel();
2170 *((unsigned int *)0xffff0ff0) = val;
2171+ pax_close_kernel();
2172 #endif
2173 }
2174
2175diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2176index 4767eb9..bf00668 100644
2177--- a/arch/arm/include/asm/uaccess.h
2178+++ b/arch/arm/include/asm/uaccess.h
2179@@ -18,6 +18,7 @@
2180 #include <asm/domain.h>
2181 #include <asm/unified.h>
2182 #include <asm/compiler.h>
2183+#include <asm/pgtable.h>
2184
2185 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2186 #include <asm-generic/uaccess-unaligned.h>
2187@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2188 static inline void set_fs(mm_segment_t fs)
2189 {
2190 current_thread_info()->addr_limit = fs;
2191- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2192+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2193 }
2194
2195 #define segment_eq(a,b) ((a) == (b))
2196
2197+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2198+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2199+
2200+static inline void pax_open_userland(void)
2201+{
2202+
2203+#ifdef CONFIG_PAX_MEMORY_UDEREF
2204+ if (segment_eq(get_fs(), USER_DS)) {
2205+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2206+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2207+ }
2208+#endif
2209+
2210+}
2211+
2212+static inline void pax_close_userland(void)
2213+{
2214+
2215+#ifdef CONFIG_PAX_MEMORY_UDEREF
2216+ if (segment_eq(get_fs(), USER_DS)) {
2217+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2218+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2219+ }
2220+#endif
2221+
2222+}
2223+
2224 #define __addr_ok(addr) ({ \
2225 unsigned long flag; \
2226 __asm__("cmp %2, %0; movlo %0, #0" \
2227@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
2228
2229 #define get_user(x,p) \
2230 ({ \
2231+ int __e; \
2232 might_fault(); \
2233- __get_user_check(x,p); \
2234+ pax_open_userland(); \
2235+ __e = __get_user_check(x,p); \
2236+ pax_close_userland(); \
2237+ __e; \
2238 })
2239
2240 extern int __put_user_1(void *, unsigned int);
2241@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
2242
2243 #define put_user(x,p) \
2244 ({ \
2245+ int __e; \
2246 might_fault(); \
2247- __put_user_check(x,p); \
2248+ pax_open_userland(); \
2249+ __e = __put_user_check(x,p); \
2250+ pax_close_userland(); \
2251+ __e; \
2252 })
2253
2254 #else /* CONFIG_MMU */
2255@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
2256
2257 #endif /* CONFIG_MMU */
2258
2259+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
2260 #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
2261
2262 #define user_addr_max() \
2263@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
2264 #define __get_user(x,ptr) \
2265 ({ \
2266 long __gu_err = 0; \
2267+ pax_open_userland(); \
2268 __get_user_err((x),(ptr),__gu_err); \
2269+ pax_close_userland(); \
2270 __gu_err; \
2271 })
2272
2273 #define __get_user_error(x,ptr,err) \
2274 ({ \
2275+ pax_open_userland(); \
2276 __get_user_err((x),(ptr),err); \
2277+ pax_close_userland(); \
2278 (void) 0; \
2279 })
2280
2281@@ -368,13 +409,17 @@ do { \
2282 #define __put_user(x,ptr) \
2283 ({ \
2284 long __pu_err = 0; \
2285+ pax_open_userland(); \
2286 __put_user_err((x),(ptr),__pu_err); \
2287+ pax_close_userland(); \
2288 __pu_err; \
2289 })
2290
2291 #define __put_user_error(x,ptr,err) \
2292 ({ \
2293+ pax_open_userland(); \
2294 __put_user_err((x),(ptr),err); \
2295+ pax_close_userland(); \
2296 (void) 0; \
2297 })
2298
2299@@ -474,11 +519,44 @@ do { \
2300
2301
2302 #ifdef CONFIG_MMU
2303-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2304-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2305+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2306+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2307+
2308+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2309+{
2310+ unsigned long ret;
2311+
2312+ check_object_size(to, n, false);
2313+ pax_open_userland();
2314+ ret = ___copy_from_user(to, from, n);
2315+ pax_close_userland();
2316+ return ret;
2317+}
2318+
2319+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2320+{
2321+ unsigned long ret;
2322+
2323+ check_object_size(from, n, true);
2324+ pax_open_userland();
2325+ ret = ___copy_to_user(to, from, n);
2326+ pax_close_userland();
2327+ return ret;
2328+}
2329+
2330 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2331-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2332+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2333 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2334+
2335+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2336+{
2337+ unsigned long ret;
2338+ pax_open_userland();
2339+ ret = ___clear_user(addr, n);
2340+ pax_close_userland();
2341+ return ret;
2342+}
2343+
2344 #else
2345 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2346 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2347@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2348
2349 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2350 {
2351+ if ((long)n < 0)
2352+ return n;
2353+
2354 if (access_ok(VERIFY_READ, from, n))
2355 n = __copy_from_user(to, from, n);
2356 else /* security hole - plug it */
2357@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2358
2359 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2360 {
2361+ if ((long)n < 0)
2362+ return n;
2363+
2364 if (access_ok(VERIFY_WRITE, to, n))
2365 n = __copy_to_user(to, from, n);
2366 return n;
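
Besides the userland bracketing, copy_from_user()/copy_to_user() above gain a "(long)n < 0" guard: a size_t that underflowed (say, len - sizeof(hdr) with len too small) has its top bit set, and rejecting it before access_ok() turns a potential huge overcopy into a harmless error return. Sketch of the check in isolation:

#include <stdio.h>

static unsigned long checked_copy(unsigned long n)
{
    if ((long)n < 0)
        return n;   /* refuse: the caller's length underflowed */
    /* ... access_ok() + ___copy_from_user() would run here ... */
    return 0;
}

int main(void)
{
    unsigned long len = 4;
    unsigned long n = len - 16;   /* unsigned underflow: huge value */
    printf("n = %#lx, refused = %d\n", n, checked_copy(n) != 0);
    return 0;
}
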
2367diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2368index 5af0ed1..cea83883 100644
2369--- a/arch/arm/include/uapi/asm/ptrace.h
2370+++ b/arch/arm/include/uapi/asm/ptrace.h
2371@@ -92,7 +92,7 @@
2372 * ARMv7 groups of PSR bits
2373 */
2374 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2375-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2376+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2377 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2378 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2379
2380diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2381index a88671c..1cc895e 100644
2382--- a/arch/arm/kernel/armksyms.c
2383+++ b/arch/arm/kernel/armksyms.c
2384@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2385
2386 /* networking */
2387 EXPORT_SYMBOL(csum_partial);
2388-EXPORT_SYMBOL(csum_partial_copy_from_user);
2389+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2390 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2391 EXPORT_SYMBOL(__csum_ipv6_magic);
2392
2393@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2394 #ifdef CONFIG_MMU
2395 EXPORT_SYMBOL(copy_page);
2396
2397-EXPORT_SYMBOL(__copy_from_user);
2398-EXPORT_SYMBOL(__copy_to_user);
2399-EXPORT_SYMBOL(__clear_user);
2400+EXPORT_SYMBOL(___copy_from_user);
2401+EXPORT_SYMBOL(___copy_to_user);
2402+EXPORT_SYMBOL(___clear_user);
2403
2404 EXPORT_SYMBOL(__get_user_1);
2405 EXPORT_SYMBOL(__get_user_2);
2406diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2407index 2f5555d..d493c91 100644
2408--- a/arch/arm/kernel/entry-armv.S
2409+++ b/arch/arm/kernel/entry-armv.S
2410@@ -47,6 +47,87 @@
2411 9997:
2412 .endm
2413
2414+ .macro pax_enter_kernel
2415+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2416+ @ make aligned space for saved DACR
2417+ sub sp, sp, #8
2418+ @ save regs
2419+ stmdb sp!, {r1, r2}
2420+ @ read DACR from cpu_domain into r1
2421+ mov r2, sp
2422+ @ assume 8K pages, since we have to split the immediate in two
2423+ bic r2, r2, #(0x1fc0)
2424+ bic r2, r2, #(0x3f)
2425+ ldr r1, [r2, #TI_CPU_DOMAIN]
2426+ @ store old DACR on stack
2427+ str r1, [sp, #8]
2428+#ifdef CONFIG_PAX_KERNEXEC
2429+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2430+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2431+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2432+#endif
2433+#ifdef CONFIG_PAX_MEMORY_UDEREF
2434+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2435+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2436+#endif
2437+ @ write r1 to current_thread_info()->cpu_domain
2438+ str r1, [r2, #TI_CPU_DOMAIN]
2439+ @ write r1 to DACR
2440+ mcr p15, 0, r1, c3, c0, 0
2441+ @ instruction sync
2442+ instr_sync
2443+ @ restore regs
2444+ ldmia sp!, {r1, r2}
2445+#endif
2446+ .endm
2447+
2448+ .macro pax_open_userland
2449+#ifdef CONFIG_PAX_MEMORY_UDEREF
2450+ @ save regs
2451+ stmdb sp!, {r0, r1}
2452+ @ read DACR from cpu_domain into r1
2453+ mov r0, sp
2454+ @ assume 8K pages, since we have to split the immediate in two
2455+ bic r0, r0, #(0x1fc0)
2456+ bic r0, r0, #(0x3f)
2457+ ldr r1, [r0, #TI_CPU_DOMAIN]
2458+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2459+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2460+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2461+ @ write r1 to current_thread_info()->cpu_domain
2462+ str r1, [r0, #TI_CPU_DOMAIN]
2463+ @ write r1 to DACR
2464+ mcr p15, 0, r1, c3, c0, 0
2465+ @ instruction sync
2466+ instr_sync
2467+ @ restore regs
2468+ ldmia sp!, {r0, r1}
2469+#endif
2470+ .endm
2471+
2472+ .macro pax_close_userland
2473+#ifdef CONFIG_PAX_MEMORY_UDEREF
2474+ @ save regs
2475+ stmdb sp!, {r0, r1}
2476+ @ read DACR from cpu_domain into r1
2477+ mov r0, sp
2478+ @ assume 8K pages, since we have to split the immediate in two
2479+ bic r0, r0, #(0x1fc0)
2480+ bic r0, r0, #(0x3f)
2481+ ldr r1, [r0, #TI_CPU_DOMAIN]
2482+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2483+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2484+ @ write r1 to current_thread_info()->cpu_domain
2485+ str r1, [r0, #TI_CPU_DOMAIN]
2486+ @ write r1 to DACR
2487+ mcr p15, 0, r1, c3, c0, 0
2488+ @ instruction sync
2489+ instr_sync
2490+ @ restore regs
2491+ ldmia sp!, {r0, r1}
2492+#endif
2493+ .endm
2494+
2495 .macro pabt_helper
2496 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2497 #ifdef MULTI_PABORT
2498@@ -89,11 +170,15 @@
2499 * Invalid mode handlers
2500 */
2501 .macro inv_entry, reason
2502+
2503+ pax_enter_kernel
2504+
2505 sub sp, sp, #S_FRAME_SIZE
2506 ARM( stmib sp, {r1 - lr} )
2507 THUMB( stmia sp, {r0 - r12} )
2508 THUMB( str sp, [sp, #S_SP] )
2509 THUMB( str lr, [sp, #S_LR] )
2510+
2511 mov r1, #\reason
2512 .endm
2513
2514@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2515 .macro svc_entry, stack_hole=0, trace=1
2516 UNWIND(.fnstart )
2517 UNWIND(.save {r0 - pc} )
2518+
2519+ pax_enter_kernel
2520+
2521 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2522+
2523 #ifdef CONFIG_THUMB2_KERNEL
2524 SPFIX( str r0, [sp] ) @ temporarily saved
2525 SPFIX( mov r0, sp )
2526@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2527 ldmia r0, {r3 - r5}
2528 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2529 mov r6, #-1 @ "" "" "" ""
2530+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2531+ @ offset sp by 8 as done in pax_enter_kernel
2532+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2533+#else
2534 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2535+#endif
2536 SPFIX( addeq r2, r2, #4 )
2537 str r3, [sp, #-4]! @ save the "real" r0 copied
2538 @ from the exception stack
2539@@ -368,6 +462,9 @@ ENDPROC(__fiq_abt)
2540 .macro usr_entry, trace=1
2541 UNWIND(.fnstart )
2542 UNWIND(.cantunwind ) @ don't unwind the user space
2543+
2544+ pax_enter_kernel_user
2545+
2546 sub sp, sp, #S_FRAME_SIZE
2547 ARM( stmib sp, {r1 - r12} )
2548 THUMB( stmia sp, {r0 - r12} )
2549@@ -478,7 +575,9 @@ __und_usr:
2550 tst r3, #PSR_T_BIT @ Thumb mode?
2551 bne __und_usr_thumb
2552 sub r4, r2, #4 @ ARM instr at LR - 4
2553+ pax_open_userland
2554 1: ldrt r0, [r4]
2555+ pax_close_userland
2556 ARM_BE8(rev r0, r0) @ little endian instruction
2557
2558 @ r0 = 32-bit ARM instruction which caused the exception
2559@@ -512,11 +611,15 @@ __und_usr_thumb:
2560 */
2561 .arch armv6t2
2562 #endif
2563+ pax_open_userland
2564 2: ldrht r5, [r4]
2565+ pax_close_userland
2566 ARM_BE8(rev16 r5, r5) @ little endian instruction
2567 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2568 blo __und_usr_fault_16 @ 16bit undefined instruction
2569+ pax_open_userland
2570 3: ldrht r0, [r2]
2571+ pax_close_userland
2572 ARM_BE8(rev16 r0, r0) @ little endian instruction
2573 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2574 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2575@@ -546,7 +649,8 @@ ENDPROC(__und_usr)
2576 */
2577 .pushsection .fixup, "ax"
2578 .align 2
2579-4: str r4, [sp, #S_PC] @ retry current instruction
2580+4: pax_close_userland
2581+ str r4, [sp, #S_PC] @ retry current instruction
2582 ret r9
2583 .popsection
2584 .pushsection __ex_table,"a"
2585@@ -766,7 +870,7 @@ ENTRY(__switch_to)
2586 THUMB( str lr, [ip], #4 )
2587 ldr r4, [r2, #TI_TP_VALUE]
2588 ldr r5, [r2, #TI_TP_VALUE + 4]
2589-#ifdef CONFIG_CPU_USE_DOMAINS
2590+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2591 ldr r6, [r2, #TI_CPU_DOMAIN]
2592 #endif
2593 switch_tls r1, r4, r5, r3, r7
2594@@ -775,7 +879,7 @@ ENTRY(__switch_to)
2595 ldr r8, =__stack_chk_guard
2596 ldr r7, [r7, #TSK_STACK_CANARY]
2597 #endif
2598-#ifdef CONFIG_CPU_USE_DOMAINS
2599+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2600 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2601 #endif
2602 mov r5, r0
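
The paired "bic" instructions in the macros above compute current_thread_info() by masking SP down to the base of the 8K, 8K-aligned kernel stack, where thread_info lives. An ARM immediate is an 8-bit value rotated by an even amount, so ~0x1fff cannot be cleared in a single instruction; the mask is therefore split into 0x1fc0 and 0x3f, which is what the "split the immediate in two" comments refer to. Equivalent arithmetic:

#include <assert.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL   /* 8K stacks assumed, per the comments */

int main(void)
{
    unsigned long sp  = 0xc1234567UL;                 /* pretend SP */
    unsigned long ti  = sp & ~(THREAD_SIZE - 1);      /* one mask... */
    unsigned long ti2 = (sp & ~0x1fc0UL) & ~0x3fUL;   /* ...or two BICs */
    assert(ti == ti2);
    printf("thread_info at %#lx\n", ti);
    return 0;
}
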
2603diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2604index f8ccc21..83d192f 100644
2605--- a/arch/arm/kernel/entry-common.S
2606+++ b/arch/arm/kernel/entry-common.S
2607@@ -11,18 +11,46 @@
2608 #include <asm/assembler.h>
2609 #include <asm/unistd.h>
2610 #include <asm/ftrace.h>
2611+#include <asm/domain.h>
2612 #include <asm/unwind.h>
2613
2614+#include "entry-header.S"
2615+
2616 #ifdef CONFIG_NEED_RET_TO_USER
2617 #include <mach/entry-macro.S>
2618 #else
2619 .macro arch_ret_to_user, tmp1, tmp2
2620+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2621+ @ save regs
2622+ stmdb sp!, {r1, r2}
2623+ @ read DACR from cpu_domain into r1
2624+ mov r2, sp
2625+ @ assume 8K pages, since we have to split the immediate in two
2626+ bic r2, r2, #(0x1fc0)
2627+ bic r2, r2, #(0x3f)
2628+ ldr r1, [r2, #TI_CPU_DOMAIN]
2629+#ifdef CONFIG_PAX_KERNEXEC
2630+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2631+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2632+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2633+#endif
2634+#ifdef CONFIG_PAX_MEMORY_UDEREF
2635+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2636+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2637+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2638+#endif
2639+ @ write r1 to current_thread_info()->cpu_domain
2640+ str r1, [r2, #TI_CPU_DOMAIN]
2641+ @ write r1 to DACR
2642+ mcr p15, 0, r1, c3, c0, 0
2643+ @ instruction sync
2644+ instr_sync
2645+ @ restore regs
2646+ ldmia sp!, {r1, r2}
2647+#endif
2648 .endm
2649 #endif
2650
2651-#include "entry-header.S"
2652-
2653-
2654 .align 5
2655 /*
2656 * This is the fast syscall return path. We do as little as
2657@@ -171,6 +199,12 @@ ENTRY(vector_swi)
2658 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2659 #endif
2660
2661+ /*
2662+ * do this here to avoid a performance hit of wrapping the code above
2663+ * that directly dereferences userland to parse the SWI instruction
2664+ */
2665+ pax_enter_kernel_user
2666+
2667 adr tbl, sys_call_table @ load syscall table pointer
2668
2669 #if defined(CONFIG_OABI_COMPAT)
2670diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2671index 1a0045a..9b4f34d 100644
2672--- a/arch/arm/kernel/entry-header.S
2673+++ b/arch/arm/kernel/entry-header.S
2674@@ -196,6 +196,60 @@
2675 msr cpsr_c, \rtemp @ switch back to the SVC mode
2676 .endm
2677
2678+ .macro pax_enter_kernel_user
2679+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2680+ @ save regs
2681+ stmdb sp!, {r0, r1}
2682+ @ read DACR from cpu_domain into r1
2683+ mov r0, sp
2684+ @ assume 8K pages, since we have to split the immediate in two
2685+ bic r0, r0, #(0x1fc0)
2686+ bic r0, r0, #(0x3f)
2687+ ldr r1, [r0, #TI_CPU_DOMAIN]
2688+#ifdef CONFIG_PAX_MEMORY_UDEREF
2689+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2690+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2691+#endif
2692+#ifdef CONFIG_PAX_KERNEXEC
2693+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2694+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2695+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2696+#endif
2697+ @ write r1 to current_thread_info()->cpu_domain
2698+ str r1, [r0, #TI_CPU_DOMAIN]
2699+ @ write r1 to DACR
2700+ mcr p15, 0, r1, c3, c0, 0
2701+ @ instruction sync
2702+ instr_sync
2703+ @ restore regs
2704+ ldmia sp!, {r0, r1}
2705+#endif
2706+ .endm
2707+
2708+ .macro pax_exit_kernel
2709+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2710+ @ save regs
2711+ stmdb sp!, {r0, r1}
2712+ @ read old DACR from stack into r1
2713+ ldr r1, [sp, #(8 + S_SP)]
2714+ sub r1, r1, #8
2715+ ldr r1, [r1]
2716+
2717+ @ write r1 to current_thread_info()->cpu_domain
2718+ mov r0, sp
2719+ @ assume 8K pages, since we have to split the immediate in two
2720+ bic r0, r0, #(0x1fc0)
2721+ bic r0, r0, #(0x3f)
2722+ str r1, [r0, #TI_CPU_DOMAIN]
2723+ @ write r1 to DACR
2724+ mcr p15, 0, r1, c3, c0, 0
2725+ @ instruction sync
2726+ instr_sync
2727+ @ restore regs
2728+ ldmia sp!, {r0, r1}
2729+#endif
2730+ .endm
2731+
2732 #ifndef CONFIG_THUMB2_KERNEL
2733 .macro svc_exit, rpsr, irq = 0
2734 .if \irq != 0
2735@@ -215,6 +269,9 @@
2736 blne trace_hardirqs_off
2737 #endif
2738 .endif
2739+
2740+ pax_exit_kernel
2741+
2742 msr spsr_cxsf, \rpsr
2743 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
2744 @ We must avoid clrex due to Cortex-A15 erratum #830321
2745@@ -291,6 +348,9 @@
2746 blne trace_hardirqs_off
2747 #endif
2748 .endif
2749+
2750+ pax_exit_kernel
2751+
2752 ldr lr, [sp, #S_SP] @ top of the stack
2753 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2754
2755diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2756index 059c3da..8e45cfc 100644
2757--- a/arch/arm/kernel/fiq.c
2758+++ b/arch/arm/kernel/fiq.c
2759@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
2760 void *base = vectors_page;
2761 unsigned offset = FIQ_OFFSET;
2762
2763+ pax_open_kernel();
2764 memcpy(base + offset, start, length);
2765+ pax_close_kernel();
2766+
2767 if (!cache_is_vipt_nonaliasing())
2768 flush_icache_range((unsigned long)base + offset, offset +
2769 length);
2770diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2771index 664eee8..f470938 100644
2772--- a/arch/arm/kernel/head.S
2773+++ b/arch/arm/kernel/head.S
2774@@ -437,7 +437,7 @@ __enable_mmu:
2775 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2776 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2777 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2778- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2779+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2780 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2781 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2782 #endif
2783diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2784index bea7db9..a210d10 100644
2785--- a/arch/arm/kernel/module.c
2786+++ b/arch/arm/kernel/module.c
2787@@ -38,12 +38,39 @@
2788 #endif
2789
2790 #ifdef CONFIG_MMU
2791-void *module_alloc(unsigned long size)
2792+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2793 {
2794+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2795+ return NULL;
2796 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2797- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
2798+ GFP_KERNEL, prot, NUMA_NO_NODE,
2799 __builtin_return_address(0));
2800 }
2801+
2802+void *module_alloc(unsigned long size)
2803+{
2804+
2805+#ifdef CONFIG_PAX_KERNEXEC
2806+ return __module_alloc(size, PAGE_KERNEL);
2807+#else
2808+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2809+#endif
2810+
2811+}
2812+
2813+#ifdef CONFIG_PAX_KERNEXEC
2814+void module_memfree_exec(void *module_region)
2815+{
2816+ module_memfree(module_region);
2817+}
2818+EXPORT_SYMBOL(module_memfree_exec);
2819+
2820+void *module_alloc_exec(unsigned long size)
2821+{
2822+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2823+}
2824+EXPORT_SYMBOL(module_alloc_exec);
2825+#endif
2826 #endif
2827
2828 int
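
module_alloc() above stops handing out writable+executable memory under KERNEXEC: module data comes from a writable, non-executable pool, and module code from a separate executable one via module_alloc_exec(), so no module mapping is ever W+X. A loose userspace analogue of the split using POSIX mmap, purely illustrative:

#include <stdio.h>
#include <stddef.h>
#include <sys/mman.h>

static void *alloc_data(size_t size)   /* ~ module_alloc() under KERNEXEC */
{
    return mmap(NULL, size, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

static void *alloc_code(size_t size)   /* ~ module_alloc_exec() */
{
    return mmap(NULL, size, PROT_READ | PROT_EXEC,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

int main(void)
{
    void *d = alloc_data(4096), *c = alloc_code(4096);
    printf("data %p (rw-), code %p (r-x)\n", d, c);
    return 0;
}
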
2829diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2830index 5038960..4aa71d8 100644
2831--- a/arch/arm/kernel/patch.c
2832+++ b/arch/arm/kernel/patch.c
2833@@ -67,6 +67,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2834 else
2835 __acquire(&patch_lock);
2836
2837+ pax_open_kernel();
2838 if (thumb2 && __opcode_is_thumb16(insn)) {
2839 *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
2840 size = sizeof(u16);
2841@@ -98,6 +99,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2842 *(u32 *)waddr = insn;
2843 size = sizeof(u32);
2844 }
2845+ pax_close_kernel();
2846
2847 if (waddr != addr) {
2848 flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
2849diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2850index fdfa3a7..5d208b8 100644
2851--- a/arch/arm/kernel/process.c
2852+++ b/arch/arm/kernel/process.c
2853@@ -207,6 +207,7 @@ void machine_power_off(void)
2854
2855 if (pm_power_off)
2856 pm_power_off();
2857+ BUG();
2858 }
2859
2860 /*
2861@@ -220,7 +221,7 @@ void machine_power_off(void)
2862 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2863 * to use. Implementing such co-ordination would be essentially impossible.
2864 */
2865-void machine_restart(char *cmd)
2866+__noreturn void machine_restart(char *cmd)
2867 {
2868 local_irq_disable();
2869 smp_send_stop();
2870@@ -246,8 +247,8 @@ void __show_regs(struct pt_regs *regs)
2871
2872 show_regs_print_info(KERN_DEFAULT);
2873
2874- print_symbol("PC is at %s\n", instruction_pointer(regs));
2875- print_symbol("LR is at %s\n", regs->ARM_lr);
2876+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2877+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2878 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2879 "sp : %08lx ip : %08lx fp : %08lx\n",
2880 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2881@@ -424,12 +425,6 @@ unsigned long get_wchan(struct task_struct *p)
2882 return 0;
2883 }
2884
2885-unsigned long arch_randomize_brk(struct mm_struct *mm)
2886-{
2887- unsigned long range_end = mm->brk + 0x02000000;
2888- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2889-}
2890-
2891 #ifdef CONFIG_MMU
2892 #ifdef CONFIG_KUSER_HELPERS
2893 /*
2894@@ -445,7 +440,7 @@ static struct vm_area_struct gate_vma = {
2895
2896 static int __init gate_vma_init(void)
2897 {
2898- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2899+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2900 return 0;
2901 }
2902 arch_initcall(gate_vma_init);
2903@@ -474,81 +469,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
2904 return is_gate_vma(vma) ? "[vectors]" : NULL;
2905 }
2906
2907-/* If possible, provide a placement hint at a random offset from the
2908- * stack for the signal page.
2909- */
2910-static unsigned long sigpage_addr(const struct mm_struct *mm,
2911- unsigned int npages)
2912-{
2913- unsigned long offset;
2914- unsigned long first;
2915- unsigned long last;
2916- unsigned long addr;
2917- unsigned int slots;
2918-
2919- first = PAGE_ALIGN(mm->start_stack);
2920-
2921- last = TASK_SIZE - (npages << PAGE_SHIFT);
2922-
2923- /* No room after stack? */
2924- if (first > last)
2925- return 0;
2926-
2927- /* Just enough room? */
2928- if (first == last)
2929- return first;
2930-
2931- slots = ((last - first) >> PAGE_SHIFT) + 1;
2932-
2933- offset = get_random_int() % slots;
2934-
2935- addr = first + (offset << PAGE_SHIFT);
2936-
2937- return addr;
2938-}
2939-
2940-static struct page *signal_page;
2941-extern struct page *get_signal_page(void);
2942-
2943-static const struct vm_special_mapping sigpage_mapping = {
2944- .name = "[sigpage]",
2945- .pages = &signal_page,
2946-};
2947-
2948 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2949 {
2950 struct mm_struct *mm = current->mm;
2951- struct vm_area_struct *vma;
2952- unsigned long addr;
2953- unsigned long hint;
2954- int ret = 0;
2955-
2956- if (!signal_page)
2957- signal_page = get_signal_page();
2958- if (!signal_page)
2959- return -ENOMEM;
2960
2961 down_write(&mm->mmap_sem);
2962- hint = sigpage_addr(mm, 1);
2963- addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0);
2964- if (IS_ERR_VALUE(addr)) {
2965- ret = addr;
2966- goto up_fail;
2967- }
2968-
2969- vma = _install_special_mapping(mm, addr, PAGE_SIZE,
2970- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2971- &sigpage_mapping);
2972-
2973- if (IS_ERR(vma)) {
2974- ret = PTR_ERR(vma);
2975- goto up_fail;
2976- }
2977-
2978- mm->context.sigpage = addr;
2979-
2980- up_fail:
2981+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2982 up_write(&mm->mmap_sem);
2983- return ret;
2984+ return 0;
2985 }
2986 #endif
2987diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2988index f73891b..cf3004e 100644
2989--- a/arch/arm/kernel/psci.c
2990+++ b/arch/arm/kernel/psci.c
2991@@ -28,7 +28,7 @@
2992 #include <asm/psci.h>
2993 #include <asm/system_misc.h>
2994
2995-struct psci_operations psci_ops;
2996+struct psci_operations psci_ops __read_only;
2997
2998 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2999 typedef int (*psci_initcall_t)(const struct device_node *);
3000diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3001index ef9119f..31995a3 100644
3002--- a/arch/arm/kernel/ptrace.c
3003+++ b/arch/arm/kernel/ptrace.c
3004@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3005 regs->ARM_ip = ip;
3006 }
3007
3008+#ifdef CONFIG_GRKERNSEC_SETXID
3009+extern void gr_delayed_cred_worker(void);
3010+#endif
3011+
3012 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3013 {
3014 current_thread_info()->syscall = scno;
3015
3016+#ifdef CONFIG_GRKERNSEC_SETXID
3017+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3018+ gr_delayed_cred_worker();
3019+#endif
3020+
3021 /* Do the secure computing check first; failures should be fast. */
3022 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
3023 if (secure_computing() == -1)
3024diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3025index e55408e..14d9998 100644
3026--- a/arch/arm/kernel/setup.c
3027+++ b/arch/arm/kernel/setup.c
3028@@ -105,21 +105,23 @@ EXPORT_SYMBOL(elf_hwcap);
3029 unsigned int elf_hwcap2 __read_mostly;
3030 EXPORT_SYMBOL(elf_hwcap2);
3031
3032+pteval_t __supported_pte_mask __read_only;
3033+pmdval_t __supported_pmd_mask __read_only;
3034
3035 #ifdef MULTI_CPU
3036-struct processor processor __read_mostly;
3037+struct processor processor __read_only;
3038 #endif
3039 #ifdef MULTI_TLB
3040-struct cpu_tlb_fns cpu_tlb __read_mostly;
3041+struct cpu_tlb_fns cpu_tlb __read_only;
3042 #endif
3043 #ifdef MULTI_USER
3044-struct cpu_user_fns cpu_user __read_mostly;
3045+struct cpu_user_fns cpu_user __read_only;
3046 #endif
3047 #ifdef MULTI_CACHE
3048-struct cpu_cache_fns cpu_cache __read_mostly;
3049+struct cpu_cache_fns cpu_cache __read_only;
3050 #endif
3051 #ifdef CONFIG_OUTER_CACHE
3052-struct outer_cache_fns outer_cache __read_mostly;
3053+struct outer_cache_fns outer_cache __read_only;
3054 EXPORT_SYMBOL(outer_cache);
3055 #endif
3056
3057@@ -253,9 +255,13 @@ static int __get_cpu_architecture(void)
3058 asm("mrc p15, 0, %0, c0, c1, 4"
3059 : "=r" (mmfr0));
3060 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3061- (mmfr0 & 0x000000f0) >= 0x00000030)
3062+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3063 cpu_arch = CPU_ARCH_ARMv7;
3064- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3065+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3066+ __supported_pte_mask |= L_PTE_PXN;
3067+ __supported_pmd_mask |= PMD_PXNTABLE;
3068+ }
3069+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3070 (mmfr0 & 0x000000f0) == 0x00000020)
3071 cpu_arch = CPU_ARCH_ARMv6;
3072 else
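
The setup.c hunk probes ID_MMFR0 for PXN support before enabling the new bits: bits [3:0] encode the VMSA version, and (from memory of the ARMv7 ARM, so worth verifying against the manual) values 4 and 5 denote VMSAv7 with the PXN extension, which is when __supported_pte_mask/__supported_pmd_mask grow the PXN bits. The decode in isolation:

#include <stdio.h>

static int vmsa_has_pxn(unsigned int mmfr0)
{
    unsigned int vmsa = mmfr0 & 0xf;   /* ID_MMFR0[3:0]: VMSA support */
    return vmsa == 4 || vmsa == 5;
}

int main(void)
{
    printf("VMSAv7 plain  -> pxn=%d\n", vmsa_has_pxn(0x00100103));
    printf("VMSAv7 + PXN  -> pxn=%d\n", vmsa_has_pxn(0x10101105));
    return 0;
}
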
3073diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3074index 8aa6f1b..0899e08 100644
3075--- a/arch/arm/kernel/signal.c
3076+++ b/arch/arm/kernel/signal.c
3077@@ -24,8 +24,6 @@
3078
3079 extern const unsigned long sigreturn_codes[7];
3080
3081-static unsigned long signal_return_offset;
3082-
3083 #ifdef CONFIG_CRUNCH
3084 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3085 {
3086@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3087 * except when the MPU has protected the vectors
3088 * page from PL0
3089 */
3090- retcode = mm->context.sigpage + signal_return_offset +
3091- (idx << 2) + thumb;
3092+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3093 } else
3094 #endif
3095 {
3096@@ -603,33 +600,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3097 } while (thread_flags & _TIF_WORK_MASK);
3098 return 0;
3099 }
3100-
3101-struct page *get_signal_page(void)
3102-{
3103- unsigned long ptr;
3104- unsigned offset;
3105- struct page *page;
3106- void *addr;
3107-
3108- page = alloc_pages(GFP_KERNEL, 0);
3109-
3110- if (!page)
3111- return NULL;
3112-
3113- addr = page_address(page);
3114-
3115- /* Give the signal return code some randomness */
3116- offset = 0x200 + (get_random_int() & 0x7fc);
3117- signal_return_offset = offset;
3118-
3119- /*
3120- * Copy signal return handlers into the vector page, and
3121- * set sigreturn to be a pointer to these.
3122- */
3123- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3124-
3125- ptr = (unsigned long)addr + offset;
3126- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3127-
3128- return page;
3129-}
3130diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3131index 86ef244..c518451 100644
3132--- a/arch/arm/kernel/smp.c
3133+++ b/arch/arm/kernel/smp.c
3134@@ -76,7 +76,7 @@ enum ipi_msg_type {
3135
3136 static DECLARE_COMPLETION(cpu_running);
3137
3138-static struct smp_operations smp_ops;
3139+static struct smp_operations smp_ops __read_only;
3140
3141 void __init smp_set_ops(struct smp_operations *ops)
3142 {
3143diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3144index 7a3be1d..b00c7de 100644
3145--- a/arch/arm/kernel/tcm.c
3146+++ b/arch/arm/kernel/tcm.c
3147@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3148 .virtual = ITCM_OFFSET,
3149 .pfn = __phys_to_pfn(ITCM_OFFSET),
3150 .length = 0,
3151- .type = MT_MEMORY_RWX_ITCM,
3152+ .type = MT_MEMORY_RX_ITCM,
3153 }
3154 };
3155
3156@@ -267,7 +267,9 @@ no_dtcm:
3157 start = &__sitcm_text;
3158 end = &__eitcm_text;
3159 ram = &__itcm_start;
3160+ pax_open_kernel();
3161 memcpy(start, ram, itcm_code_sz);
3162+ pax_close_kernel();
3163 pr_debug("CPU ITCM: copied code from %p - %p\n",
3164 start, end);
3165 itcm_present = true;
3166diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3167index 788e23f..6fa06a1 100644
3168--- a/arch/arm/kernel/traps.c
3169+++ b/arch/arm/kernel/traps.c
3170@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3171 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3172 {
3173 #ifdef CONFIG_KALLSYMS
3174- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3175+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3176 #else
3177 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3178 #endif
3179@@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3180 static int die_owner = -1;
3181 static unsigned int die_nest_count;
3182
3183+extern void gr_handle_kernel_exploit(void);
3184+
3185 static unsigned long oops_begin(void)
3186 {
3187 int cpu;
3188@@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3189 panic("Fatal exception in interrupt");
3190 if (panic_on_oops)
3191 panic("Fatal exception");
3192+
3193+ gr_handle_kernel_exploit();
3194+
3195 if (signr)
3196 do_exit(signr);
3197 }
3198@@ -880,7 +885,11 @@ void __init early_trap_init(void *vectors_base)
3199 kuser_init(vectors_base);
3200
3201 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3202- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3203+
3204+#ifndef CONFIG_PAX_MEMORY_UDEREF
3205+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3206+#endif
3207+
3208 #else /* ifndef CONFIG_CPU_V7M */
3209 /*
3210 * on V7-M there is no need to copy the vector table to a dedicated
3211diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3212index b31aa73..cc4b7a1 100644
3213--- a/arch/arm/kernel/vmlinux.lds.S
3214+++ b/arch/arm/kernel/vmlinux.lds.S
3215@@ -37,7 +37,7 @@
3216 #endif
3217
3218 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3219- defined(CONFIG_GENERIC_BUG)
3220+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3221 #define ARM_EXIT_KEEP(x) x
3222 #define ARM_EXIT_DISCARD(x)
3223 #else
3224@@ -123,6 +123,8 @@ SECTIONS
3225 #ifdef CONFIG_DEBUG_RODATA
3226 . = ALIGN(1<<SECTION_SHIFT);
3227 #endif
3228+ _etext = .; /* End of text section */
3229+
3230 RO_DATA(PAGE_SIZE)
3231
3232 . = ALIGN(4);
3233@@ -153,8 +155,6 @@ SECTIONS
3234
3235 NOTES
3236
3237- _etext = .; /* End of text and rodata section */
3238-
3239 #ifndef CONFIG_XIP_KERNEL
3240 # ifdef CONFIG_ARM_KERNMEM_PERMS
3241 . = ALIGN(1<<SECTION_SHIFT);
3242diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3243index 0b0d58a..988cb45 100644
3244--- a/arch/arm/kvm/arm.c
3245+++ b/arch/arm/kvm/arm.c
3246@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3247 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3248
3249 /* The VMID used in the VTTBR */
3250-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3251+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3252 static u8 kvm_next_vmid;
3253 static DEFINE_SPINLOCK(kvm_vmid_lock);
3254
3255@@ -351,7 +351,7 @@ void force_vm_exit(const cpumask_t *mask)
3256 */
3257 static bool need_new_vmid_gen(struct kvm *kvm)
3258 {
3259- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3260+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3261 }
3262
3263 /**
3264@@ -384,7 +384,7 @@ static void update_vttbr(struct kvm *kvm)
3265
3266 /* First user of a new VMID generation? */
3267 if (unlikely(kvm_next_vmid == 0)) {
3268- atomic64_inc(&kvm_vmid_gen);
3269+ atomic64_inc_unchecked(&kvm_vmid_gen);
3270 kvm_next_vmid = 1;
3271
3272 /*
3273@@ -401,7 +401,7 @@ static void update_vttbr(struct kvm *kvm)
3274 kvm_call_hyp(__kvm_flush_vm_context);
3275 }
3276
3277- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3278+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3279 kvm->arch.vmid = kvm_next_vmid;
3280 kvm_next_vmid++;
3281
3282@@ -1038,7 +1038,7 @@ static void check_kvm_target_cpu(void *ret)
3283 /**
3284 * Initialize Hyp-mode and memory mappings on all CPUs.
3285 */
3286-int kvm_arch_init(void *opaque)
3287+int kvm_arch_init(const void *opaque)
3288 {
3289 int err;
3290 int ret, cpu;
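
kvm_vmid_gen becoming atomic64_unchecked_t above is the PAX_REFCOUNT opt-out: under that feature, plain atomic increments trap on overflow to catch reference-count bugs, while counters that are meant to wrap — a VMID generation number — use the _unchecked variants. A userspace sketch of the distinction, with __builtin_add_overflow (GCC/Clang) standing in for the real inline-asm overflow trap:

#include <stdio.h>
#include <stdlib.h>

static long refcount_inc_checked(long v)
{
    long r;
    if (__builtin_add_overflow(v, 1, &r)) {   /* the REFCOUNT trap */
        fprintf(stderr, "refcount overflow!\n");
        abort();
    }
    return r;
}

static unsigned long gen_inc_unchecked(unsigned long v)
{
    return v + 1;   /* wrapping is fine for a generation counter */
}

int main(void)
{
    printf("%ld\n", refcount_inc_checked(41));
    printf("%lu\n", gen_inc_unchecked(~0UL));   /* wraps to 0 */
    return 0;
}
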
3291diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3292index 14a0d98..7771a7d 100644
3293--- a/arch/arm/lib/clear_user.S
3294+++ b/arch/arm/lib/clear_user.S
3295@@ -12,14 +12,14 @@
3296
3297 .text
3298
3299-/* Prototype: int __clear_user(void *addr, size_t sz)
3300+/* Prototype: int ___clear_user(void *addr, size_t sz)
3301 * Purpose : clear some user memory
3302 * Params : addr - user memory address to clear
3303 * : sz - number of bytes to clear
3304 * Returns : number of bytes NOT cleared
3305 */
3306 ENTRY(__clear_user_std)
3307-WEAK(__clear_user)
3308+WEAK(___clear_user)
3309 stmfd sp!, {r1, lr}
3310 mov r2, #0
3311 cmp r1, #4
3312@@ -44,7 +44,7 @@ WEAK(__clear_user)
3313 USER( strnebt r2, [r0])
3314 mov r0, #0
3315 ldmfd sp!, {r1, pc}
3316-ENDPROC(__clear_user)
3317+ENDPROC(___clear_user)
3318 ENDPROC(__clear_user_std)
3319
3320 .pushsection .fixup,"ax"
3321diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3322index 7a235b9..73a0556 100644
3323--- a/arch/arm/lib/copy_from_user.S
3324+++ b/arch/arm/lib/copy_from_user.S
3325@@ -17,7 +17,7 @@
3326 /*
3327 * Prototype:
3328 *
3329- * size_t __copy_from_user(void *to, const void *from, size_t n)
3330+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3331 *
3332 * Purpose:
3333 *
3334@@ -89,11 +89,11 @@
3335
3336 .text
3337
3338-ENTRY(__copy_from_user)
3339+ENTRY(___copy_from_user)
3340
3341 #include "copy_template.S"
3342
3343-ENDPROC(__copy_from_user)
3344+ENDPROC(___copy_from_user)
3345
3346 .pushsection .fixup,"ax"
3347 .align 0
3348diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3349index 6ee2f67..d1cce76 100644
3350--- a/arch/arm/lib/copy_page.S
3351+++ b/arch/arm/lib/copy_page.S
3352@@ -10,6 +10,7 @@
3353 * ASM optimised string functions
3354 */
3355 #include <linux/linkage.h>
3356+#include <linux/const.h>
3357 #include <asm/assembler.h>
3358 #include <asm/asm-offsets.h>
3359 #include <asm/cache.h>
3360diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3361index a9d3db1..164b089 100644
3362--- a/arch/arm/lib/copy_to_user.S
3363+++ b/arch/arm/lib/copy_to_user.S
3364@@ -17,7 +17,7 @@
3365 /*
3366 * Prototype:
3367 *
3368- * size_t __copy_to_user(void *to, const void *from, size_t n)
3369+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3370 *
3371 * Purpose:
3372 *
3373@@ -93,11 +93,11 @@
3374 .text
3375
3376 ENTRY(__copy_to_user_std)
3377-WEAK(__copy_to_user)
3378+WEAK(___copy_to_user)
3379
3380 #include "copy_template.S"
3381
3382-ENDPROC(__copy_to_user)
3383+ENDPROC(___copy_to_user)
3384 ENDPROC(__copy_to_user_std)
3385
3386 .pushsection .fixup,"ax"
3387diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3388index 7d08b43..f7ca7ea 100644
3389--- a/arch/arm/lib/csumpartialcopyuser.S
3390+++ b/arch/arm/lib/csumpartialcopyuser.S
3391@@ -57,8 +57,8 @@
3392 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3393 */
3394
3395-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3396-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3397+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3398+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3399
3400 #include "csumpartialcopygeneric.S"
3401
3402diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3403index 312d43e..21d2322 100644
3404--- a/arch/arm/lib/delay.c
3405+++ b/arch/arm/lib/delay.c
3406@@ -29,7 +29,7 @@
3407 /*
3408 * Default to the loop-based delay implementation.
3409 */
3410-struct arm_delay_ops arm_delay_ops = {
3411+struct arm_delay_ops arm_delay_ops __read_only = {
3412 .delay = __loop_delay,
3413 .const_udelay = __loop_const_udelay,
3414 .udelay = __loop_udelay,
3415diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3416index 3e58d71..029817c 100644
3417--- a/arch/arm/lib/uaccess_with_memcpy.c
3418+++ b/arch/arm/lib/uaccess_with_memcpy.c
3419@@ -136,7 +136,7 @@ out:
3420 }
3421
3422 unsigned long
3423-__copy_to_user(void __user *to, const void *from, unsigned long n)
3424+___copy_to_user(void __user *to, const void *from, unsigned long n)
3425 {
3426 /*
3427 * This test is stubbed out of the main function above to keep
3428@@ -190,7 +190,7 @@ out:
3429 return n;
3430 }
3431
3432-unsigned long __clear_user(void __user *addr, unsigned long n)
3433+unsigned long ___clear_user(void __user *addr, unsigned long n)
3434 {
3435 /* See rationale for this in __copy_to_user() above. */
3436 if (n < 64)
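
The triple-underscore renames in this and the preceding assembler hunks are the UDEREF plumbing: the raw copy/clear primitives keep their bodies under the new ___-prefixed names, and the old __-prefixed entry points become wrappers that open a userland access window around the call. A minimal userspace model of that wrapper pattern follows; pax_open_userland()/pax_close_userland() semantics are assumed from elsewhere in the patch, and every body here is a stand-in, not the kernel implementation.

#include <stddef.h>
#include <string.h>

static void pax_open_userland(void)  { /* e.g. grant the userland domain */ }
static void pax_close_userland(void) { /* revoke it again */ }

/* raw primitive, as renamed by this patch */
static size_t ___copy_to_user(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);	/* stands in for the assembler copy loop */
	return 0;		/* convention: number of bytes NOT copied */
}

/* the old entry point survives as a bracketing wrapper */
static size_t __copy_to_user(void *to, const void *from, size_t n)
{
	size_t ret;

	pax_open_userland();
	ret = ___copy_to_user(to, from, n);
	pax_close_userland();
	return ret;
}

int main(void)
{
	char dst[4];
	return (int)__copy_to_user(dst, "ok", 3);
}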
3437diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
3438index ce25e85..3dd7850 100644
3439--- a/arch/arm/mach-at91/setup.c
3440+++ b/arch/arm/mach-at91/setup.c
3441@@ -57,7 +57,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length)
3442
3443 desc->pfn = __phys_to_pfn(base);
3444 desc->length = length;
3445- desc->type = MT_MEMORY_RWX_NONCACHED;
3446+ desc->type = MT_MEMORY_RW_NONCACHED;
3447
3448 pr_info("sram at 0x%lx of 0x%x mapped at 0x%lx\n",
3449 base, length, desc->virtual);
3450diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
3451index f8e7dcd..17ee921 100644
3452--- a/arch/arm/mach-exynos/suspend.c
3453+++ b/arch/arm/mach-exynos/suspend.c
3454@@ -18,6 +18,7 @@
3455 #include <linux/syscore_ops.h>
3456 #include <linux/cpu_pm.h>
3457 #include <linux/io.h>
3458+#include <linux/irq.h>
3459 #include <linux/irqchip/arm-gic.h>
3460 #include <linux/err.h>
3461 #include <linux/regulator/machine.h>
3462@@ -558,8 +559,10 @@ void __init exynos_pm_init(void)
3463 tmp |= pm_data->wake_disable_mask;
3464 pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
3465
3466- exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3467- exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3468+ pax_open_kernel();
3469+ *(void **)&exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3470+ *(void **)&exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3471+ pax_close_kernel();
3472
3473 register_syscore_ops(&exynos_pm_syscore_ops);
3474 suspend_set_ops(&exynos_suspend_ops);
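
This hunk shows the patch's recurring constification idiom: exynos_pm_syscore_ops has been moved into read-only memory, so its one-time setup becomes writes through a *(void **)& cast inside a pax_open_kernel()/pax_close_kernel() window. Below is a userspace model of the idea, with mprotect() standing in for the kernel's temporary page-permission change; the names and mechanism are illustrative assumptions, not the PaX implementation.

#include <stdio.h>
#include <sys/mman.h>

struct syscore_ops { int (*suspend)(void); void (*resume)(void); };

/* models a structure placed in a read-only section by __read_only */
static struct syscore_ops ops __attribute__((aligned(4096)));

static int my_suspend(void) { return 0; }

static void pax_open_kernel(void)  { mprotect(&ops, sizeof(ops), PROT_READ | PROT_WRITE); }
static void pax_close_kernel(void) { mprotect(&ops, sizeof(ops), PROT_READ); }

int main(void)
{
	mprotect(&ops, sizeof(ops), PROT_READ);		/* simulate the read-only placement */

	pax_open_kernel();
	*(void **)&ops.suspend = (void *)my_suspend;	/* the cast sidesteps the const view */
	pax_close_kernel();

	printf("suspend handler installed at %p\n", (void *)ops.suspend);
	return 0;
}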
3475diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
3476index 7f352de..6dc0929 100644
3477--- a/arch/arm/mach-keystone/keystone.c
3478+++ b/arch/arm/mach-keystone/keystone.c
3479@@ -27,7 +27,7 @@
3480
3481 #include "keystone.h"
3482
3483-static struct notifier_block platform_nb;
3484+static notifier_block_no_const platform_nb;
3485 static unsigned long keystone_dma_pfn_offset __read_mostly;
3486
3487 static int keystone_platform_notifier(struct notifier_block *nb,
3488diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
3489index ccef880..5dfad80 100644
3490--- a/arch/arm/mach-mvebu/coherency.c
3491+++ b/arch/arm/mach-mvebu/coherency.c
3492@@ -164,7 +164,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
3493
3494 /*
3495 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
3496- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
3497+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
3498 * is needed as a workaround for a deadlock issue between the PCIe
3499 * interface and the cache controller.
3500 */
3501@@ -177,7 +177,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
3502 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
3503
3504 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
3505- mtype = MT_UNCACHED;
3506+ mtype = MT_UNCACHED_RW;
3507
3508 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
3509 }
3510diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3511index b6443a4..20a0b74 100644
3512--- a/arch/arm/mach-omap2/board-n8x0.c
3513+++ b/arch/arm/mach-omap2/board-n8x0.c
3514@@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3515 }
3516 #endif
3517
3518-struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3519+struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3520 .late_init = n8x0_menelaus_late_init,
3521 };
3522
3523diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3524index 79f49d9..70bf184 100644
3525--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3526+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3527@@ -86,7 +86,7 @@ struct cpu_pm_ops {
3528 void (*resume)(void);
3529 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3530 void (*hotplug_restart)(void);
3531-};
3532+} __no_const;
3533
3534 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3535 static struct powerdomain *mpuss_pd;
3536@@ -105,7 +105,7 @@ static void dummy_cpu_resume(void)
3537 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3538 {}
3539
3540-struct cpu_pm_ops omap_pm_ops = {
3541+static struct cpu_pm_ops omap_pm_ops __read_only = {
3542 .finish_suspend = default_finish_suspend,
3543 .resume = dummy_cpu_resume,
3544 .scu_prepare = dummy_scu_prepare,
3545diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
3546index 5305ec7..6d74045 100644
3547--- a/arch/arm/mach-omap2/omap-smp.c
3548+++ b/arch/arm/mach-omap2/omap-smp.c
3549@@ -19,6 +19,7 @@
3550 #include <linux/device.h>
3551 #include <linux/smp.h>
3552 #include <linux/io.h>
3553+#include <linux/irq.h>
3554 #include <linux/irqchip/arm-gic.h>
3555
3556 #include <asm/smp_scu.h>
3557diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3558index f961c46..4a453dc 100644
3559--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3560+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3561@@ -344,7 +344,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3562 return NOTIFY_OK;
3563 }
3564
3565-static struct notifier_block __refdata irq_hotplug_notifier = {
3566+static struct notifier_block irq_hotplug_notifier = {
3567 .notifier_call = irq_cpu_hotplug_notify,
3568 };
3569
3570diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3571index be9541e..821805f 100644
3572--- a/arch/arm/mach-omap2/omap_device.c
3573+++ b/arch/arm/mach-omap2/omap_device.c
3574@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
3575 struct platform_device __init *omap_device_build(const char *pdev_name,
3576 int pdev_id,
3577 struct omap_hwmod *oh,
3578- void *pdata, int pdata_len)
3579+ const void *pdata, int pdata_len)
3580 {
3581 struct omap_hwmod *ohs[] = { oh };
3582
3583@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3584 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3585 int pdev_id,
3586 struct omap_hwmod **ohs,
3587- int oh_cnt, void *pdata,
3588+ int oh_cnt, const void *pdata,
3589 int pdata_len)
3590 {
3591 int ret = -ENOMEM;
3592diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3593index 78c02b3..c94109a 100644
3594--- a/arch/arm/mach-omap2/omap_device.h
3595+++ b/arch/arm/mach-omap2/omap_device.h
3596@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3597 /* Core code interface */
3598
3599 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3600- struct omap_hwmod *oh, void *pdata,
3601+ struct omap_hwmod *oh, const void *pdata,
3602 int pdata_len);
3603
3604 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3605 struct omap_hwmod **oh, int oh_cnt,
3606- void *pdata, int pdata_len);
3607+ const void *pdata, int pdata_len);
3608
3609 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3610 struct omap_hwmod **ohs, int oh_cnt);
3611diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3612index 9025fff..3555702 100644
3613--- a/arch/arm/mach-omap2/omap_hwmod.c
3614+++ b/arch/arm/mach-omap2/omap_hwmod.c
3615@@ -193,10 +193,10 @@ struct omap_hwmod_soc_ops {
3616 int (*init_clkdm)(struct omap_hwmod *oh);
3617 void (*update_context_lost)(struct omap_hwmod *oh);
3618 int (*get_context_lost)(struct omap_hwmod *oh);
3619-};
3620+} __no_const;
3621
3622 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3623-static struct omap_hwmod_soc_ops soc_ops;
3624+static struct omap_hwmod_soc_ops soc_ops __read_only;
3625
3626 /* omap_hwmod_list contains all registered struct omap_hwmods */
3627 static LIST_HEAD(omap_hwmod_list);
3628diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3629index 95fee54..cfa9cf1 100644
3630--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3631+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3632@@ -10,6 +10,7 @@
3633
3634 #include <linux/kernel.h>
3635 #include <linux/init.h>
3636+#include <asm/pgtable.h>
3637
3638 #include "powerdomain.h"
3639
3640@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3641
3642 void __init am43xx_powerdomains_init(void)
3643 {
3644- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3645+ pax_open_kernel();
3646+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3647+ pax_close_kernel();
3648 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3649 pwrdm_register_pwrdms(powerdomains_am43xx);
3650 pwrdm_complete_init();
3651diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3652index ff0a68c..b312aa0 100644
3653--- a/arch/arm/mach-omap2/wd_timer.c
3654+++ b/arch/arm/mach-omap2/wd_timer.c
3655@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3656 struct omap_hwmod *oh;
3657 char *oh_name = "wd_timer2";
3658 char *dev_name = "omap_wdt";
3659- struct omap_wd_timer_platform_data pdata;
3660+ static struct omap_wd_timer_platform_data pdata = {
3661+ .read_reset_sources = prm_read_reset_sources
3662+ };
3663
3664 if (!cpu_class_is_omap2() || of_have_populated_dt())
3665 return 0;
3666@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3667 return -EINVAL;
3668 }
3669
3670- pdata.read_reset_sources = prm_read_reset_sources;
3671-
3672 pdev = omap_device_build(dev_name, id, oh, &pdata,
3673 sizeof(struct omap_wd_timer_platform_data));
3674 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
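
The pdata conversion pairs with the omap_device.c hunk above that changed omap_device_build() to take const void *pdata: as a function-local static with a designated initializer, the structure is complete at compile time and nothing ever writes to it at run time. The shape in isolation, with hypothetical stand-in types rather than the OMAP API:

struct wd_pdata { unsigned int (*read_reset_sources)(void); };

static unsigned int read_reset_sources_stub(void) { return 0; }

/* models omap_device_build(), which only reads (and copies) pdata */
static int build_device(const void *pdata, unsigned long len)
{
	(void)pdata; (void)len;
	return 0;
}

static int init_wdt_model(void)
{
	static struct wd_pdata pdata = {
		.read_reset_sources = read_reset_sources_stub,
	};
	return build_device(&pdata, sizeof(pdata));
}

int main(void) { return init_wdt_model(); }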
3675diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3676index 4f25a7c..a81be85 100644
3677--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3678+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3679@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3680 bool entered_lp2 = false;
3681
3682 if (tegra_pending_sgi())
3683- ACCESS_ONCE(abort_flag) = true;
3684+ ACCESS_ONCE_RW(abort_flag) = true;
3685
3686 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3687
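
ACCESS_ONCE_RW is the write-side counterpart the patch introduces once plain ACCESS_ONCE is given a const-qualified view. The definitions live in the compiler.h part of the patch, not in this hunk, so the sketch below is an assumption about their shape: writes through ACCESS_ONCE stop compiling on constified data, and the remaining legitimate writers are annotated explicitly.

/* assumed shape of the split, mirroring PaX's compiler.h change */
#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

static _Bool abort_flag;

void signal_abort(void)
{
	ACCESS_ONCE_RW(abort_flag) = 1;	/* writes must use the RW spelling */
}

_Bool check_abort(void)
{
	return ACCESS_ONCE(abort_flag);	/* reads keep the plain one */
}

int main(void)
{
	signal_abort();
	return check_abort() ? 0 : 1;
}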
3688diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
3689index ab95f53..4b977a7 100644
3690--- a/arch/arm/mach-tegra/irq.c
3691+++ b/arch/arm/mach-tegra/irq.c
3692@@ -20,6 +20,7 @@
3693 #include <linux/cpu_pm.h>
3694 #include <linux/interrupt.h>
3695 #include <linux/io.h>
3696+#include <linux/irq.h>
3697 #include <linux/irqchip/arm-gic.h>
3698 #include <linux/irq.h>
3699 #include <linux/kernel.h>
3700diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
3701index 2cb587b..6ddfebf 100644
3702--- a/arch/arm/mach-ux500/pm.c
3703+++ b/arch/arm/mach-ux500/pm.c
3704@@ -10,6 +10,7 @@
3705 */
3706
3707 #include <linux/kernel.h>
3708+#include <linux/irq.h>
3709 #include <linux/irqchip/arm-gic.h>
3710 #include <linux/delay.h>
3711 #include <linux/io.h>
3712diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3713index 2dea8b5..6499da2 100644
3714--- a/arch/arm/mach-ux500/setup.h
3715+++ b/arch/arm/mach-ux500/setup.h
3716@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
3717 .type = MT_DEVICE, \
3718 }
3719
3720-#define __MEM_DEV_DESC(x, sz) { \
3721- .virtual = IO_ADDRESS(x), \
3722- .pfn = __phys_to_pfn(x), \
3723- .length = sz, \
3724- .type = MT_MEMORY_RWX, \
3725-}
3726-
3727 extern struct smp_operations ux500_smp_ops;
3728 extern void ux500_cpu_die(unsigned int cpu);
3729
3730diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
3731index 52d768f..5f93180 100644
3732--- a/arch/arm/mach-zynq/platsmp.c
3733+++ b/arch/arm/mach-zynq/platsmp.c
3734@@ -24,6 +24,7 @@
3735 #include <linux/io.h>
3736 #include <asm/cacheflush.h>
3737 #include <asm/smp_scu.h>
3738+#include <linux/irq.h>
3739 #include <linux/irqchip/arm-gic.h>
3740 #include "common.h"
3741
3742diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3743index c43c714..4f8f7b9 100644
3744--- a/arch/arm/mm/Kconfig
3745+++ b/arch/arm/mm/Kconfig
3746@@ -446,6 +446,7 @@ config CPU_32v5
3747
3748 config CPU_32v6
3749 bool
3750+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3751 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3752
3753 config CPU_32v6K
3754@@ -600,6 +601,7 @@ config CPU_CP15_MPU
3755
3756 config CPU_USE_DOMAINS
3757 bool
3758+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3759 help
3760 This option enables or disables the use of domain switching
3761 via the set_fs() function.
3762@@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS
3763
3764 config KUSER_HELPERS
3765 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3766- depends on MMU
3767+ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
3768 default y
3769 help
3770 Warning: disabling this option may break user programs.
3771@@ -812,7 +814,7 @@ config KUSER_HELPERS
3772 See Documentation/arm/kernel_user_helpers.txt for details.
3773
3774 However, the fixed address nature of these helpers can be used
3775- by ROP (return orientated programming) authors when creating
3776+ by ROP (Return Oriented Programming) authors when creating
3777 exploits.
3778
3779 If all of the binaries and libraries which run on your platform
3780diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3781index 2c0c541..4585df9 100644
3782--- a/arch/arm/mm/alignment.c
3783+++ b/arch/arm/mm/alignment.c
3784@@ -216,10 +216,12 @@ union offset_union {
3785 #define __get16_unaligned_check(ins,val,addr) \
3786 do { \
3787 unsigned int err = 0, v, a = addr; \
3788+ pax_open_userland(); \
3789 __get8_unaligned_check(ins,v,a,err); \
3790 val = v << ((BE) ? 8 : 0); \
3791 __get8_unaligned_check(ins,v,a,err); \
3792 val |= v << ((BE) ? 0 : 8); \
3793+ pax_close_userland(); \
3794 if (err) \
3795 goto fault; \
3796 } while (0)
3797@@ -233,6 +235,7 @@ union offset_union {
3798 #define __get32_unaligned_check(ins,val,addr) \
3799 do { \
3800 unsigned int err = 0, v, a = addr; \
3801+ pax_open_userland(); \
3802 __get8_unaligned_check(ins,v,a,err); \
3803 val = v << ((BE) ? 24 : 0); \
3804 __get8_unaligned_check(ins,v,a,err); \
3805@@ -241,6 +244,7 @@ union offset_union {
3806 val |= v << ((BE) ? 8 : 16); \
3807 __get8_unaligned_check(ins,v,a,err); \
3808 val |= v << ((BE) ? 0 : 24); \
3809+ pax_close_userland(); \
3810 if (err) \
3811 goto fault; \
3812 } while (0)
3813@@ -254,6 +258,7 @@ union offset_union {
3814 #define __put16_unaligned_check(ins,val,addr) \
3815 do { \
3816 unsigned int err = 0, v = val, a = addr; \
3817+ pax_open_userland(); \
3818 __asm__( FIRST_BYTE_16 \
3819 ARM( "1: "ins" %1, [%2], #1\n" ) \
3820 THUMB( "1: "ins" %1, [%2]\n" ) \
3821@@ -273,6 +278,7 @@ union offset_union {
3822 " .popsection\n" \
3823 : "=r" (err), "=&r" (v), "=&r" (a) \
3824 : "0" (err), "1" (v), "2" (a)); \
3825+ pax_close_userland(); \
3826 if (err) \
3827 goto fault; \
3828 } while (0)
3829@@ -286,6 +292,7 @@ union offset_union {
3830 #define __put32_unaligned_check(ins,val,addr) \
3831 do { \
3832 unsigned int err = 0, v = val, a = addr; \
3833+ pax_open_userland(); \
3834 __asm__( FIRST_BYTE_32 \
3835 ARM( "1: "ins" %1, [%2], #1\n" ) \
3836 THUMB( "1: "ins" %1, [%2]\n" ) \
3837@@ -315,6 +322,7 @@ union offset_union {
3838 " .popsection\n" \
3839 : "=r" (err), "=&r" (v), "=&r" (a) \
3840 : "0" (err), "1" (v), "2" (a)); \
3841+ pax_close_userland(); \
3842 if (err) \
3843 goto fault; \
3844 } while (0)
3845diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3846index 5e65ca8..879e7b3 100644
3847--- a/arch/arm/mm/cache-l2x0.c
3848+++ b/arch/arm/mm/cache-l2x0.c
3849@@ -42,7 +42,7 @@ struct l2c_init_data {
3850 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
3851 void (*save)(void __iomem *);
3852 struct outer_cache_fns outer_cache;
3853-};
3854+} __do_const;
3855
3856 #define CACHE_LINE_SIZE 32
3857
3858diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3859index 845769e..4278fd7 100644
3860--- a/arch/arm/mm/context.c
3861+++ b/arch/arm/mm/context.c
3862@@ -43,7 +43,7 @@
3863 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3864
3865 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3866-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3867+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3868 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3869
3870 static DEFINE_PER_CPU(atomic64_t, active_asids);
3871@@ -178,7 +178,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3872 {
3873 static u32 cur_idx = 1;
3874 u64 asid = atomic64_read(&mm->context.id);
3875- u64 generation = atomic64_read(&asid_generation);
3876+ u64 generation = atomic64_read_unchecked(&asid_generation);
3877
3878 if (asid != 0) {
3879 /*
3880@@ -208,7 +208,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3881 */
3882 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
3883 if (asid == NUM_USER_ASIDS) {
3884- generation = atomic64_add_return(ASID_FIRST_VERSION,
3885+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3886 &asid_generation);
3887 flush_context(cpu);
3888 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3889@@ -240,14 +240,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3890 cpu_set_reserved_ttbr0();
3891
3892 asid = atomic64_read(&mm->context.id);
3893- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3894+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3895 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3896 goto switch_mm_fastpath;
3897
3898 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3899 /* Check that our ASID belongs to the current generation. */
3900 asid = atomic64_read(&mm->context.id);
3901- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3902+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3903 asid = new_context(mm, cpu);
3904 atomic64_set(&mm->context.id, asid);
3905 }
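
asid_generation changes type because, under PAX_REFCOUNT, the ordinary atomics are instrumented to trap on overflow; a generation counter is expected to wrap, so it moves to the _unchecked family (the arm64 hunk further down shows these aliased back to plain atomics where no instrumentation exists). A toy userspace model of the distinction, with an assert() standing in for the overflow trap; these are illustrative stand-ins, not the kernel's types.

#include <assert.h>
#include <limits.h>

typedef struct { long long counter; } atomic64_t;
typedef struct { long long counter; } atomic64_unchecked_t;

/* checked: models the PAX_REFCOUNT trap on signed overflow */
static long long atomic64_add_return(long long i, atomic64_t *v)
{
	assert(v->counter <= LLONG_MAX - i);
	return v->counter += i;
}

/* unchecked: wraps silently, which is fine for a generation counter */
static long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
{
	v->counter = (long long)((unsigned long long)v->counter + (unsigned long long)i);
	return v->counter;
}

int main(void)
{
	atomic64_unchecked_t gen = { LLONG_MAX };
	return atomic64_add_return_unchecked(1, &gen) == LLONG_MIN ? 0 : 1;
}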
3906diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3907index a982dc3..2d9f5f7 100644
3908--- a/arch/arm/mm/fault.c
3909+++ b/arch/arm/mm/fault.c
3910@@ -25,6 +25,7 @@
3911 #include <asm/system_misc.h>
3912 #include <asm/system_info.h>
3913 #include <asm/tlbflush.h>
3914+#include <asm/sections.h>
3915
3916 #include "fault.h"
3917
3918@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3919 if (fixup_exception(regs))
3920 return;
3921
3922+#ifdef CONFIG_PAX_MEMORY_UDEREF
3923+ if (addr < TASK_SIZE) {
3924+ if (current->signal->curr_ip)
3925+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3926+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3927+ else
3928+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3929+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3930+ }
3931+#endif
3932+
3933+#ifdef CONFIG_PAX_KERNEXEC
3934+ if ((fsr & FSR_WRITE) &&
3935+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3936+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3937+ {
3938+ if (current->signal->curr_ip)
3939+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3940+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3941+ else
3942+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3943+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3944+ }
3945+#endif
3946+
3947 /*
3948 * No handler, we'll have to terminate things with extreme prejudice.
3949 */
3950@@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3951 }
3952 #endif
3953
3954+#ifdef CONFIG_PAX_PAGEEXEC
3955+ if (fsr & FSR_LNX_PF) {
3956+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3957+ do_group_exit(SIGKILL);
3958+ }
3959+#endif
3960+
3961 tsk->thread.address = addr;
3962 tsk->thread.error_code = fsr;
3963 tsk->thread.trap_no = 14;
3964@@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3965 }
3966 #endif /* CONFIG_MMU */
3967
3968+#ifdef CONFIG_PAX_PAGEEXEC
3969+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3970+{
3971+ long i;
3972+
3973+ printk(KERN_ERR "PAX: bytes at PC: ");
3974+ for (i = 0; i < 20; i++) {
3975+ unsigned char c;
3976+ if (get_user(c, (__force unsigned char __user *)pc+i))
3977+ printk(KERN_CONT "?? ");
3978+ else
3979+ printk(KERN_CONT "%02x ", c);
3980+ }
3981+ printk("\n");
3982+
3983+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3984+ for (i = -1; i < 20; i++) {
3985+ unsigned long c;
3986+ if (get_user(c, (__force unsigned long __user *)sp+i))
3987+ printk(KERN_CONT "???????? ");
3988+ else
3989+ printk(KERN_CONT "%08lx ", c);
3990+ }
3991+ printk("\n");
3992+}
3993+#endif
3994+
3995 /*
3996 * First Level Translation Fault Handler
3997 *
3998@@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3999 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
4000 struct siginfo info;
4001
4002+#ifdef CONFIG_PAX_MEMORY_UDEREF
4003+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
4004+ if (current->signal->curr_ip)
4005+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4006+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4007+ else
4008+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4009+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4010+ goto die;
4011+ }
4012+#endif
4013+
4014 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4015 return;
4016
4017+die:
4018 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4019 inf->name, fsr, addr);
4020
4021@@ -573,15 +646,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4022 ifsr_info[nr].name = name;
4023 }
4024
4025+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4026+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4027+
4028 asmlinkage void __exception
4029 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4030 {
4031 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4032 struct siginfo info;
4033+ unsigned long pc = instruction_pointer(regs);
4034+
4035+ if (user_mode(regs)) {
4036+ unsigned long sigpage = current->mm->context.sigpage;
4037+
4038+ if (sigpage <= pc && pc < sigpage + 7*4) {
4039+ if (pc < sigpage + 3*4)
4040+ sys_sigreturn(regs);
4041+ else
4042+ sys_rt_sigreturn(regs);
4043+ return;
4044+ }
4045+ if (pc == 0xffff0f60UL) {
4046+ /*
4047+ * PaX: __kuser_cmpxchg64 emulation
4048+ */
4049+ // TODO
4050+ //regs->ARM_pc = regs->ARM_lr;
4051+ //return;
4052+ }
4053+ if (pc == 0xffff0fa0UL) {
4054+ /*
4055+ * PaX: __kuser_memory_barrier emulation
4056+ */
4057+ // dmb(); implied by the exception
4058+ regs->ARM_pc = regs->ARM_lr;
4059+ return;
4060+ }
4061+ if (pc == 0xffff0fc0UL) {
4062+ /*
4063+ * PaX: __kuser_cmpxchg emulation
4064+ */
4065+ // TODO
4066+ //long new;
4067+ //int op;
4068+
4069+ //op = FUTEX_OP_SET << 28;
4070+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4071+ //regs->ARM_r0 = old != new;
4072+ //regs->ARM_pc = regs->ARM_lr;
4073+ //return;
4074+ }
4075+ if (pc == 0xffff0fe0UL) {
4076+ /*
4077+ * PaX: __kuser_get_tls emulation
4078+ */
4079+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4080+ regs->ARM_pc = regs->ARM_lr;
4081+ return;
4082+ }
4083+ }
4084+
4085+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4086+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4087+ if (current->signal->curr_ip)
4088+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4089+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4090+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4091+ else
4092+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4093+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4094+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4095+ goto die;
4096+ }
4097+#endif
4098+
4099+#ifdef CONFIG_PAX_REFCOUNT
4100+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4101+#ifdef CONFIG_THUMB2_KERNEL
4102+ unsigned short bkpt;
4103+
4104+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
4105+#else
4106+ unsigned int bkpt;
4107+
4108+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4109+#endif
4110+ current->thread.error_code = ifsr;
4111+ current->thread.trap_no = 0;
4112+ pax_report_refcount_overflow(regs);
4113+ fixup_exception(regs);
4114+ return;
4115+ }
4116+ }
4117+#endif
4118
4119 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4120 return;
4121
4122+die:
4123 pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4124 inf->name, ifsr, addr);
4125
4126diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4127index cf08bdf..772656c 100644
4128--- a/arch/arm/mm/fault.h
4129+++ b/arch/arm/mm/fault.h
4130@@ -3,6 +3,7 @@
4131
4132 /*
4133 * Fault status register encodings. We steal bit 31 for our own purposes.
4134+ * Set when the FSR value is from an instruction fault.
4135 */
4136 #define FSR_LNX_PF (1 << 31)
4137 #define FSR_WRITE (1 << 11)
4138@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4139 }
4140 #endif
4141
4142+/* valid for LPAE and !LPAE */
4143+static inline int is_xn_fault(unsigned int fsr)
4144+{
4145+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4146+}
4147+
4148+static inline int is_domain_fault(unsigned int fsr)
4149+{
4150+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4151+}
4152+
4153 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4154 unsigned long search_exception_table(unsigned long addr);
4155
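
The two predicates match the architectural fault-status encodings: short-descriptor permission faults are FS 0b01101/0b01111 and LPAE permission faults 0b0011xx, all satisfying (fs & 0x3c) == 0xc, while short-descriptor domain faults are FS 0b01001/0b01011, matched by (fs & 0xD) == 0x9. A quick userspace self-check of the mask arithmetic, taking the already-extracted fs value (i.e. after fsr_fs()) directly:

#include <assert.h>

static int is_xn_fault(unsigned int fs)     { return (fs & 0x3c) == 0x0c; }
static int is_domain_fault(unsigned int fs) { return (fs & 0x0d) == 0x09; }

int main(void)
{
	assert(is_xn_fault(0x0d));	/* short-descriptor section permission fault */
	assert(is_xn_fault(0x0f));	/* short-descriptor page permission fault */
	assert(is_xn_fault(0x0c));	/* LPAE permission fault, any level */
	assert(is_domain_fault(0x09));	/* domain fault, section */
	assert(is_domain_fault(0x0b));	/* domain fault, page */
	assert(!is_domain_fault(0x05));	/* translation fault is neither */
	return 0;
}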
4156diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4157index 2495c8c..415b7fc 100644
4158--- a/arch/arm/mm/init.c
4159+++ b/arch/arm/mm/init.c
4160@@ -758,7 +758,46 @@ void free_tcmmem(void)
4161 {
4162 #ifdef CONFIG_HAVE_TCM
4163 extern char __tcm_start, __tcm_end;
4164+#endif
4165
4166+#ifdef CONFIG_PAX_KERNEXEC
4167+ unsigned long addr;
4168+ pgd_t *pgd;
4169+ pud_t *pud;
4170+ pmd_t *pmd;
4171+ int cpu_arch = cpu_architecture();
4172+ unsigned int cr = get_cr();
4173+
4174+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4175+ /* make page tables, etc before .text NX */
4176+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4177+ pgd = pgd_offset_k(addr);
4178+ pud = pud_offset(pgd, addr);
4179+ pmd = pmd_offset(pud, addr);
4180+ __section_update(pmd, addr, PMD_SECT_XN);
4181+ }
4182+ /* make init NX */
4183+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4184+ pgd = pgd_offset_k(addr);
4185+ pud = pud_offset(pgd, addr);
4186+ pmd = pmd_offset(pud, addr);
4187+ __section_update(pmd, addr, PMD_SECT_XN);
4188+ }
4189+ /* make kernel code/rodata RX */
4190+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4191+ pgd = pgd_offset_k(addr);
4192+ pud = pud_offset(pgd, addr);
4193+ pmd = pmd_offset(pud, addr);
4194+#ifdef CONFIG_ARM_LPAE
4195+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4196+#else
4197+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4198+#endif
4199+ }
4200+ }
4201+#endif
4202+
4203+#ifdef CONFIG_HAVE_TCM
4204 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4205 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4206 #endif
4207diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4208index d1e5ad7..84dcbf2 100644
4209--- a/arch/arm/mm/ioremap.c
4210+++ b/arch/arm/mm/ioremap.c
4211@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4212 unsigned int mtype;
4213
4214 if (cached)
4215- mtype = MT_MEMORY_RWX;
4216+ mtype = MT_MEMORY_RX;
4217 else
4218- mtype = MT_MEMORY_RWX_NONCACHED;
4219+ mtype = MT_MEMORY_RX_NONCACHED;
4220
4221 return __arm_ioremap_caller(phys_addr, size, mtype,
4222 __builtin_return_address(0));
4223diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4224index 5e85ed3..b10a7ed 100644
4225--- a/arch/arm/mm/mmap.c
4226+++ b/arch/arm/mm/mmap.c
4227@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4228 struct vm_area_struct *vma;
4229 int do_align = 0;
4230 int aliasing = cache_is_vipt_aliasing();
4231+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4232 struct vm_unmapped_area_info info;
4233
4234 /*
4235@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4236 if (len > TASK_SIZE)
4237 return -ENOMEM;
4238
4239+#ifdef CONFIG_PAX_RANDMMAP
4240+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4241+#endif
4242+
4243 if (addr) {
4244 if (do_align)
4245 addr = COLOUR_ALIGN(addr, pgoff);
4246@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4247 addr = PAGE_ALIGN(addr);
4248
4249 vma = find_vma(mm, addr);
4250- if (TASK_SIZE - len >= addr &&
4251- (!vma || addr + len <= vma->vm_start))
4252+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4253 return addr;
4254 }
4255
4256@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4257 info.high_limit = TASK_SIZE;
4258 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4259 info.align_offset = pgoff << PAGE_SHIFT;
4260+ info.threadstack_offset = offset;
4261 return vm_unmapped_area(&info);
4262 }
4263
4264@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4265 unsigned long addr = addr0;
4266 int do_align = 0;
4267 int aliasing = cache_is_vipt_aliasing();
4268+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4269 struct vm_unmapped_area_info info;
4270
4271 /*
4272@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4273 return addr;
4274 }
4275
4276+#ifdef CONFIG_PAX_RANDMMAP
4277+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4278+#endif
4279+
4280 /* requesting a specific address */
4281 if (addr) {
4282 if (do_align)
4283@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4284 else
4285 addr = PAGE_ALIGN(addr);
4286 vma = find_vma(mm, addr);
4287- if (TASK_SIZE - len >= addr &&
4288- (!vma || addr + len <= vma->vm_start))
4289+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4290 return addr;
4291 }
4292
4293@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4294 info.high_limit = mm->mmap_base;
4295 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4296 info.align_offset = pgoff << PAGE_SHIFT;
4297+ info.threadstack_offset = offset;
4298 addr = vm_unmapped_area(&info);
4299
4300 /*
4301@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4302 {
4303 unsigned long random_factor = 0UL;
4304
4305+#ifdef CONFIG_PAX_RANDMMAP
4306+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4307+#endif
4308+
4309 /* 8 bits of randomness in 20 address space bits */
4310 if ((current->flags & PF_RANDOMIZE) &&
4311 !(current->personality & ADDR_NO_RANDOMIZE))
4312@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4313
4314 if (mmap_is_legacy()) {
4315 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4316+
4317+#ifdef CONFIG_PAX_RANDMMAP
4318+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4319+ mm->mmap_base += mm->delta_mmap;
4320+#endif
4321+
4322 mm->get_unmapped_area = arch_get_unmapped_area;
4323 } else {
4324 mm->mmap_base = mmap_base(random_factor);
4325+
4326+#ifdef CONFIG_PAX_RANDMMAP
4327+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4328+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4329+#endif
4330+
4331 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4332 }
4333 }
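
The two MF_PAX_RANDMMAP adjustments pull the mmap base away from its neighbours: in the bottom-up (legacy) layout the base moves up by delta_mmap, and in the top-down layout it moves down by delta_mmap + delta_stack so the gap to the randomized stack grows as well. Worked numerically below with made-up deltas; the real values come from the PaX ELF loader elsewhere in the patch, not this hunk.

#include <stdio.h>

#define TASK_UNMAPPED_BASE 0x40000000UL

int main(void)
{
	unsigned long random_factor = 0x0ff00000UL;	/* up to 8 random bits << 20, as in the hunk */
	unsigned long delta_mmap    = 0x01230000UL;	/* hypothetical mm->delta_mmap */
	unsigned long delta_stack   = 0x00045000UL;	/* hypothetical mm->delta_stack */
	unsigned long top           = 0xbf000000UL;	/* stand-in for mmap_base() */

	/* legacy layout: base moves UP, away from the ELF image */
	printf("legacy  base %#lx\n", TASK_UNMAPPED_BASE + random_factor + delta_mmap);

	/* top-down layout: base moves DOWN, away from the stack */
	printf("topdown base %#lx\n", top - delta_mmap - delta_stack);
	return 0;
}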
4334diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4335index 4e6ef89..21c27f2 100644
4336--- a/arch/arm/mm/mmu.c
4337+++ b/arch/arm/mm/mmu.c
4338@@ -41,6 +41,22 @@
4339 #include "mm.h"
4340 #include "tcm.h"
4341
4342+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4343+void modify_domain(unsigned int dom, unsigned int type)
4344+{
4345+ struct thread_info *thread = current_thread_info();
4346+ unsigned int domain = thread->cpu_domain;
4347+ /*
4348+ * DOMAIN_MANAGER might be defined to some other value,
4349+ * use the arch-defined constant
4350+ */
4351+ domain &= ~domain_val(dom, 3);
4352+ thread->cpu_domain = domain | domain_val(dom, type);
4353+ set_domain(thread->cpu_domain);
4354+}
4355+EXPORT_SYMBOL(modify_domain);
4356+#endif
4357+
4358 /*
4359 * empty_zero_page is a special page that is used for
4360 * zero-initialized data and COW.
4361@@ -242,7 +258,15 @@ __setup("noalign", noalign_setup);
4362 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4363 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4364
4365-static struct mem_type mem_types[] = {
4366+#ifdef CONFIG_PAX_KERNEXEC
4367+#define L_PTE_KERNEXEC L_PTE_RDONLY
4368+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4369+#else
4370+#define L_PTE_KERNEXEC L_PTE_DIRTY
4371+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4372+#endif
4373+
4374+static struct mem_type mem_types[] __read_only = {
4375 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4376 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4377 L_PTE_SHARED,
4378@@ -271,19 +295,19 @@ static struct mem_type mem_types[] = {
4379 .prot_sect = PROT_SECT_DEVICE,
4380 .domain = DOMAIN_IO,
4381 },
4382- [MT_UNCACHED] = {
4383+ [MT_UNCACHED_RW] = {
4384 .prot_pte = PROT_PTE_DEVICE,
4385 .prot_l1 = PMD_TYPE_TABLE,
4386 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4387 .domain = DOMAIN_IO,
4388 },
4389- [MT_CACHECLEAN] = {
4390- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4391+ [MT_CACHECLEAN_RO] = {
4392+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4393 .domain = DOMAIN_KERNEL,
4394 },
4395 #ifndef CONFIG_ARM_LPAE
4396- [MT_MINICLEAN] = {
4397- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4398+ [MT_MINICLEAN_RO] = {
4399+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4400 .domain = DOMAIN_KERNEL,
4401 },
4402 #endif
4403@@ -291,15 +315,15 @@ static struct mem_type mem_types[] = {
4404 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4405 L_PTE_RDONLY,
4406 .prot_l1 = PMD_TYPE_TABLE,
4407- .domain = DOMAIN_USER,
4408+ .domain = DOMAIN_VECTORS,
4409 },
4410 [MT_HIGH_VECTORS] = {
4411 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4412 L_PTE_USER | L_PTE_RDONLY,
4413 .prot_l1 = PMD_TYPE_TABLE,
4414- .domain = DOMAIN_USER,
4415+ .domain = DOMAIN_VECTORS,
4416 },
4417- [MT_MEMORY_RWX] = {
4418+ [__MT_MEMORY_RWX] = {
4419 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4420 .prot_l1 = PMD_TYPE_TABLE,
4421 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4422@@ -312,17 +336,30 @@ static struct mem_type mem_types[] = {
4423 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4424 .domain = DOMAIN_KERNEL,
4425 },
4426- [MT_ROM] = {
4427- .prot_sect = PMD_TYPE_SECT,
4428+ [MT_MEMORY_RX] = {
4429+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4430+ .prot_l1 = PMD_TYPE_TABLE,
4431+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4432+ .domain = DOMAIN_KERNEL,
4433+ },
4434+ [MT_ROM_RX] = {
4435+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4436 .domain = DOMAIN_KERNEL,
4437 },
4438- [MT_MEMORY_RWX_NONCACHED] = {
4439+ [MT_MEMORY_RW_NONCACHED] = {
4440 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4441 L_PTE_MT_BUFFERABLE,
4442 .prot_l1 = PMD_TYPE_TABLE,
4443 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4444 .domain = DOMAIN_KERNEL,
4445 },
4446+ [MT_MEMORY_RX_NONCACHED] = {
4447+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4448+ L_PTE_MT_BUFFERABLE,
4449+ .prot_l1 = PMD_TYPE_TABLE,
4450+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4451+ .domain = DOMAIN_KERNEL,
4452+ },
4453 [MT_MEMORY_RW_DTCM] = {
4454 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4455 L_PTE_XN,
4456@@ -330,9 +367,10 @@ static struct mem_type mem_types[] = {
4457 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4458 .domain = DOMAIN_KERNEL,
4459 },
4460- [MT_MEMORY_RWX_ITCM] = {
4461- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4462+ [MT_MEMORY_RX_ITCM] = {
4463+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4464 .prot_l1 = PMD_TYPE_TABLE,
4465+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4466 .domain = DOMAIN_KERNEL,
4467 },
4468 [MT_MEMORY_RW_SO] = {
4469@@ -544,9 +582,14 @@ static void __init build_mem_type_table(void)
4470 * Mark cache clean areas and XIP ROM read only
4471 * from SVC mode and no access from userspace.
4472 */
4473- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4474- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4475- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4476+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4477+#ifdef CONFIG_PAX_KERNEXEC
4478+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4479+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4480+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4481+#endif
4482+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4483+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4484 #endif
4485
4486 /*
4487@@ -563,13 +606,17 @@ static void __init build_mem_type_table(void)
4488 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4489 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4490 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4491- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4492- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4493+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4494+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4495 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4496 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4497+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4498+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4499 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4500- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4501- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4502+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4503+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4504+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4505+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4506 }
4507 }
4508
4509@@ -580,15 +627,20 @@ static void __init build_mem_type_table(void)
4510 if (cpu_arch >= CPU_ARCH_ARMv6) {
4511 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4512 /* Non-cacheable Normal is XCB = 001 */
4513- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4514+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4515+ PMD_SECT_BUFFERED;
4516+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4517 PMD_SECT_BUFFERED;
4518 } else {
4519 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4520- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4521+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4522+ PMD_SECT_TEX(1);
4523+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4524 PMD_SECT_TEX(1);
4525 }
4526 } else {
4527- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4528+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4529+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4530 }
4531
4532 #ifdef CONFIG_ARM_LPAE
4533@@ -609,6 +661,8 @@ static void __init build_mem_type_table(void)
4534 user_pgprot |= PTE_EXT_PXN;
4535 #endif
4536
4537+ user_pgprot |= __supported_pte_mask;
4538+
4539 for (i = 0; i < 16; i++) {
4540 pteval_t v = pgprot_val(protection_map[i]);
4541 protection_map[i] = __pgprot(v | user_pgprot);
4542@@ -626,21 +680,24 @@ static void __init build_mem_type_table(void)
4543
4544 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4545 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4546- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4547- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4548+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4549+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4550 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4551 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4552+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4553+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4554 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4555- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4556- mem_types[MT_ROM].prot_sect |= cp->pmd;
4557+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4558+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4559+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4560
4561 switch (cp->pmd) {
4562 case PMD_SECT_WT:
4563- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4564+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4565 break;
4566 case PMD_SECT_WB:
4567 case PMD_SECT_WBWA:
4568- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4569+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4570 break;
4571 }
4572 pr_info("Memory policy: %sData cache %s\n",
4573@@ -854,7 +911,7 @@ static void __init create_mapping(struct map_desc *md)
4574 return;
4575 }
4576
4577- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4578+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4579 md->virtual >= PAGE_OFFSET &&
4580 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4581 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
4582@@ -1218,18 +1275,15 @@ void __init arm_mm_memblock_reserve(void)
4583 * called function. This means you can't use any function or debugging
4584 * method which may touch any device, otherwise the kernel _will_ crash.
4585 */
4586+
4587+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4588+
4589 static void __init devicemaps_init(const struct machine_desc *mdesc)
4590 {
4591 struct map_desc map;
4592 unsigned long addr;
4593- void *vectors;
4594
4595- /*
4596- * Allocate the vector page early.
4597- */
4598- vectors = early_alloc(PAGE_SIZE * 2);
4599-
4600- early_trap_init(vectors);
4601+ early_trap_init(&vectors);
4602
4603 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4604 pmd_clear(pmd_off_k(addr));
4605@@ -1242,7 +1296,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4606 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4607 map.virtual = MODULES_VADDR;
4608 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4609- map.type = MT_ROM;
4610+ map.type = MT_ROM_RX;
4611 create_mapping(&map);
4612 #endif
4613
4614@@ -1253,14 +1307,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4615 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4616 map.virtual = FLUSH_BASE;
4617 map.length = SZ_1M;
4618- map.type = MT_CACHECLEAN;
4619+ map.type = MT_CACHECLEAN_RO;
4620 create_mapping(&map);
4621 #endif
4622 #ifdef FLUSH_BASE_MINICACHE
4623 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4624 map.virtual = FLUSH_BASE_MINICACHE;
4625 map.length = SZ_1M;
4626- map.type = MT_MINICLEAN;
4627+ map.type = MT_MINICLEAN_RO;
4628 create_mapping(&map);
4629 #endif
4630
4631@@ -1269,7 +1323,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4632 * location (0xffff0000). If we aren't using high-vectors, also
4633 * create a mapping at the low-vectors virtual address.
4634 */
4635- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4636+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4637 map.virtual = 0xffff0000;
4638 map.length = PAGE_SIZE;
4639 #ifdef CONFIG_KUSER_HELPERS
4640@@ -1329,8 +1383,10 @@ static void __init kmap_init(void)
4641 static void __init map_lowmem(void)
4642 {
4643 struct memblock_region *reg;
4644+#ifndef CONFIG_PAX_KERNEXEC
4645 phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4646 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4647+#endif
4648
4649 /* Map all the lowmem memory banks. */
4650 for_each_memblock(memory, reg) {
4651@@ -1343,11 +1399,48 @@ static void __init map_lowmem(void)
4652 if (start >= end)
4653 break;
4654
4655+#ifdef CONFIG_PAX_KERNEXEC
4656+ map.pfn = __phys_to_pfn(start);
4657+ map.virtual = __phys_to_virt(start);
4658+ map.length = end - start;
4659+
4660+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4661+ struct map_desc kernel;
4662+ struct map_desc initmap;
4663+
4664+ /* when freeing initmem we will make this RW */
4665+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4666+ initmap.virtual = (unsigned long)__init_begin;
4667+ initmap.length = _sdata - __init_begin;
4668+ initmap.type = __MT_MEMORY_RWX;
4669+ create_mapping(&initmap);
4670+
4671+ /* when freeing initmem we will make this RX */
4672+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4673+ kernel.virtual = (unsigned long)_stext;
4674+ kernel.length = __init_begin - _stext;
4675+ kernel.type = __MT_MEMORY_RWX;
4676+ create_mapping(&kernel);
4677+
4678+ if (map.virtual < (unsigned long)_stext) {
4679+ map.length = (unsigned long)_stext - map.virtual;
4680+ map.type = __MT_MEMORY_RWX;
4681+ create_mapping(&map);
4682+ }
4683+
4684+ map.pfn = __phys_to_pfn(__pa(_sdata));
4685+ map.virtual = (unsigned long)_sdata;
4686+ map.length = end - __pa(_sdata);
4687+ }
4688+
4689+ map.type = MT_MEMORY_RW;
4690+ create_mapping(&map);
4691+#else
4692 if (end < kernel_x_start) {
4693 map.pfn = __phys_to_pfn(start);
4694 map.virtual = __phys_to_virt(start);
4695 map.length = end - start;
4696- map.type = MT_MEMORY_RWX;
4697+ map.type = __MT_MEMORY_RWX;
4698
4699 create_mapping(&map);
4700 } else if (start >= kernel_x_end) {
4701@@ -1371,7 +1464,7 @@ static void __init map_lowmem(void)
4702 map.pfn = __phys_to_pfn(kernel_x_start);
4703 map.virtual = __phys_to_virt(kernel_x_start);
4704 map.length = kernel_x_end - kernel_x_start;
4705- map.type = MT_MEMORY_RWX;
4706+ map.type = __MT_MEMORY_RWX;
4707
4708 create_mapping(&map);
4709
4710@@ -1384,6 +1477,7 @@ static void __init map_lowmem(void)
4711 create_mapping(&map);
4712 }
4713 }
4714+#endif
4715 }
4716 }
4717
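
The modify_domain() helper added at the top of this mmu.c hunk edits one 2-bit field of the cached DACR value: domain_val(dom, type) is (type) << (2 * dom), so masking with domain_val(dom, 3) clears the field before the new access type is ORed in. A userspace check of that bit manipulation, with set_domain() and the thread bookkeeping omitted:

#include <assert.h>

#define domain_val(dom, type) ((unsigned int)(type) << (2 * (dom)))

static unsigned int modify_domain(unsigned int dacr, unsigned int dom, unsigned int type)
{
	dacr &= ~domain_val(dom, 3);		/* clear the domain's 2-bit field */
	return dacr | domain_val(dom, type);	/* install the new access type */
}

int main(void)
{
	unsigned int dacr = domain_val(0, 1) | domain_val(1, 1);	/* both client */
	dacr = modify_domain(dacr, 1, 3);				/* domain 1 -> manager */
	assert(dacr == (domain_val(0, 1) | domain_val(1, 3)));
	return 0;
}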
4718diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
4719index e1268f9..a9755a7 100644
4720--- a/arch/arm/net/bpf_jit_32.c
4721+++ b/arch/arm/net/bpf_jit_32.c
4722@@ -20,6 +20,7 @@
4723 #include <asm/cacheflush.h>
4724 #include <asm/hwcap.h>
4725 #include <asm/opcodes.h>
4726+#include <asm/pgtable.h>
4727
4728 #include "bpf_jit_32.h"
4729
4730@@ -71,7 +72,11 @@ struct jit_ctx {
4731 #endif
4732 };
4733
4734+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
4735+int bpf_jit_enable __read_only;
4736+#else
4737 int bpf_jit_enable __read_mostly;
4738+#endif
4739
4740 static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
4741 {
4742@@ -178,8 +183,10 @@ static void jit_fill_hole(void *area, unsigned int size)
4743 {
4744 u32 *ptr;
4745 /* We are guaranteed to have aligned memory. */
4746+ pax_open_kernel();
4747 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
4748 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
4749+ pax_close_kernel();
4750 }
4751
4752 static void build_prologue(struct jit_ctx *ctx)
4753diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4754index 5b217f4..c23f40e 100644
4755--- a/arch/arm/plat-iop/setup.c
4756+++ b/arch/arm/plat-iop/setup.c
4757@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4758 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4759 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4760 .length = IOP3XX_PERIPHERAL_SIZE,
4761- .type = MT_UNCACHED,
4762+ .type = MT_UNCACHED_RW,
4763 },
4764 };
4765
4766diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4767index a5bc92d..0bb4730 100644
4768--- a/arch/arm/plat-omap/sram.c
4769+++ b/arch/arm/plat-omap/sram.c
4770@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4771 * Looks like we need to preserve some bootloader code at the
4772 * beginning of SRAM for jumping to flash for reboot to work...
4773 */
4774+ pax_open_kernel();
4775 memset_io(omap_sram_base + omap_sram_skip, 0,
4776 omap_sram_size - omap_sram_skip);
4777+ pax_close_kernel();
4778 }
4779diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4780index ce6d763..cfea917 100644
4781--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4782+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4783@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4784 int (*started)(unsigned ch);
4785 int (*flush)(unsigned ch);
4786 int (*stop)(unsigned ch);
4787-};
4788+} __no_const;
4789
4790 extern void *samsung_dmadev_get_ops(void);
4791 extern void *s3c_dma_get_ops(void);
4792diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
4793index 7047051..44e8675 100644
4794--- a/arch/arm64/include/asm/atomic.h
4795+++ b/arch/arm64/include/asm/atomic.h
4796@@ -252,5 +252,15 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
4797 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
4798 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
4799
4800+#define atomic64_read_unchecked(v) atomic64_read(v)
4801+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4802+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4803+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4804+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4805+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4806+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4807+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4808+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4809+
4810 #endif
4811 #endif
4812diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
4813index a5abb00..9cbca9a 100644
4814--- a/arch/arm64/include/asm/barrier.h
4815+++ b/arch/arm64/include/asm/barrier.h
4816@@ -44,7 +44,7 @@
4817 do { \
4818 compiletime_assert_atomic_type(*p); \
4819 barrier(); \
4820- ACCESS_ONCE(*p) = (v); \
4821+ ACCESS_ONCE_RW(*p) = (v); \
4822 } while (0)
4823
4824 #define smp_load_acquire(p) \
4825diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
4826index 4fde8c1..441f84f 100644
4827--- a/arch/arm64/include/asm/percpu.h
4828+++ b/arch/arm64/include/asm/percpu.h
4829@@ -135,16 +135,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
4830 {
4831 switch (size) {
4832 case 1:
4833- ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
4834+ ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
4835 break;
4836 case 2:
4837- ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
4838+ ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
4839 break;
4840 case 4:
4841- ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
4842+ ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
4843 break;
4844 case 8:
4845- ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
4846+ ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
4847 break;
4848 default:
4849 BUILD_BUG();
4850diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
4851index e20df38..027ede3 100644
4852--- a/arch/arm64/include/asm/pgalloc.h
4853+++ b/arch/arm64/include/asm/pgalloc.h
4854@@ -46,6 +46,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4855 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
4856 }
4857
4858+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4859+{
4860+ pud_populate(mm, pud, pmd);
4861+}
4862+
4863 #endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
4864
4865 #if CONFIG_ARM64_PGTABLE_LEVELS > 3
4866diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4867index 3bf8f4e..5dd5491 100644
4868--- a/arch/arm64/include/asm/uaccess.h
4869+++ b/arch/arm64/include/asm/uaccess.h
4870@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4871 flag; \
4872 })
4873
4874+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4875 #define access_ok(type, addr, size) __range_ok(addr, size)
4876 #define user_addr_max get_fs
4877
4878diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
4879index df34a70..5727a75 100644
4880--- a/arch/arm64/mm/dma-mapping.c
4881+++ b/arch/arm64/mm/dma-mapping.c
4882@@ -137,7 +137,7 @@ static void __dma_free_coherent(struct device *dev, size_t size,
4883 phys_to_page(paddr),
4884 size >> PAGE_SHIFT);
4885 if (!freed)
4886- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
4887+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
4888 }
4889
4890 static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
4891diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4892index c3a58a1..78fbf54 100644
4893--- a/arch/avr32/include/asm/cache.h
4894+++ b/arch/avr32/include/asm/cache.h
4895@@ -1,8 +1,10 @@
4896 #ifndef __ASM_AVR32_CACHE_H
4897 #define __ASM_AVR32_CACHE_H
4898
4899+#include <linux/const.h>
4900+
4901 #define L1_CACHE_SHIFT 5
4902-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4903+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4904
4905 /*
4906 * Memory returned by kmalloc() may be used for DMA, so we must make
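
The new <linux/const.h> include exists for _AC(): it pastes a C integer suffix onto the constant only when compiling C, so the same header stays usable from assembly, and L1_CACHE_BYTES becomes an unsigned long instead of a plain int. The standard definition from uapi/linux/const.h, for reference:

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler: bare constant */
#else
#define __AC(X, Y)	(X##Y)		/* C: paste the suffix on */
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	5
#define L1_CACHE_BYTES	(_AC(1, UL) << L1_CACHE_SHIFT)	/* 32UL, not int 32 */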
4907diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4908index d232888..87c8df1 100644
4909--- a/arch/avr32/include/asm/elf.h
4910+++ b/arch/avr32/include/asm/elf.h
4911@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4912 the loader. We need to make sure that it is out of the way of the program
4913 that it will "exec", and that there is sufficient room for the brk. */
4914
4915-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4916+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4917
4918+#ifdef CONFIG_PAX_ASLR
4919+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4920+
4921+#define PAX_DELTA_MMAP_LEN 15
4922+#define PAX_DELTA_STACK_LEN 15
4923+#endif
4924
4925 /* This yields a mask that user programs can use to figure out what
4926 instruction set this CPU supports. This could be done in user space,
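
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits folded into the respective base addresses; the consumer lives in the ELF loader part of this patch. Assuming the usual PaX formula (not shown in this hunk), 15 bits on avr32 spreads each base across 2^15 pages:

#define PAGE_SHIFT 12			/* 4 KiB pages on avr32 */
#define PAX_DELTA_MMAP_LEN 15

/* assumed shape: delta = (random & ((1 << len) - 1)) << PAGE_SHIFT */
static unsigned long pax_delta(unsigned long rnd, unsigned int len)
{
	return (rnd & ((1UL << len) - 1)) << PAGE_SHIFT;
}

int main(void)
{
	/* with 15 bits, the delta ranges over [0, 0x07fff000] in page steps */
	return pax_delta(0x7fff, PAX_DELTA_MMAP_LEN) == 0x07fff000UL ? 0 : 1;
}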
4927diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4928index 479330b..53717a8 100644
4929--- a/arch/avr32/include/asm/kmap_types.h
4930+++ b/arch/avr32/include/asm/kmap_types.h
4931@@ -2,9 +2,9 @@
4932 #define __ASM_AVR32_KMAP_TYPES_H
4933
4934 #ifdef CONFIG_DEBUG_HIGHMEM
4935-# define KM_TYPE_NR 29
4936+# define KM_TYPE_NR 30
4937 #else
4938-# define KM_TYPE_NR 14
4939+# define KM_TYPE_NR 15
4940 #endif
4941
4942 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4943diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4944index d223a8b..69c5210 100644
4945--- a/arch/avr32/mm/fault.c
4946+++ b/arch/avr32/mm/fault.c
4947@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4948
4949 int exception_trace = 1;
4950
4951+#ifdef CONFIG_PAX_PAGEEXEC
4952+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4953+{
4954+ unsigned long i;
4955+
4956+ printk(KERN_ERR "PAX: bytes at PC: ");
4957+ for (i = 0; i < 20; i++) {
4958+ unsigned char c;
4959+ if (get_user(c, (unsigned char *)pc+i))
4960+ printk(KERN_CONT "?? ");
4961+ else
4962+ printk(KERN_CONT "%02x ", c);
4963+ }
4964+ printk("\n");
4965+}
4966+#endif
4967+
4968 /*
4969 * This routine handles page faults. It determines the address and the
4970 * problem, and then passes it off to one of the appropriate routines.
4971@@ -178,6 +195,16 @@ bad_area:
4972 up_read(&mm->mmap_sem);
4973
4974 if (user_mode(regs)) {
4975+
4976+#ifdef CONFIG_PAX_PAGEEXEC
4977+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4978+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4979+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4980+ do_group_exit(SIGKILL);
4981+ }
4982+ }
4983+#endif
4984+
4985 if (exception_trace && printk_ratelimit())
4986 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4987 "sp %08lx ecr %lu\n",
4988diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4989index 568885a..f8008df 100644
4990--- a/arch/blackfin/include/asm/cache.h
4991+++ b/arch/blackfin/include/asm/cache.h
4992@@ -7,6 +7,7 @@
4993 #ifndef __ARCH_BLACKFIN_CACHE_H
4994 #define __ARCH_BLACKFIN_CACHE_H
4995
4996+#include <linux/const.h>
4997 #include <linux/linkage.h> /* for asmlinkage */
4998
4999 /*
5000@@ -14,7 +15,7 @@
5001 * Blackfin loads 32 bytes for cache
5002 */
5003 #define L1_CACHE_SHIFT 5
5004-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5005+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5006 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5007
5008 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5009diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
5010index aea2718..3639a60 100644
5011--- a/arch/cris/include/arch-v10/arch/cache.h
5012+++ b/arch/cris/include/arch-v10/arch/cache.h
5013@@ -1,8 +1,9 @@
5014 #ifndef _ASM_ARCH_CACHE_H
5015 #define _ASM_ARCH_CACHE_H
5016
5017+#include <linux/const.h>
5018 /* Etrax 100LX have 32-byte cache-lines. */
5019-#define L1_CACHE_BYTES 32
5020 #define L1_CACHE_SHIFT 5
5021+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5022
5023 #endif /* _ASM_ARCH_CACHE_H */
5024diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
5025index 7caf25d..ee65ac5 100644
5026--- a/arch/cris/include/arch-v32/arch/cache.h
5027+++ b/arch/cris/include/arch-v32/arch/cache.h
5028@@ -1,11 +1,12 @@
5029 #ifndef _ASM_CRIS_ARCH_CACHE_H
5030 #define _ASM_CRIS_ARCH_CACHE_H
5031
5032+#include <linux/const.h>
5033 #include <arch/hwregs/dma.h>
5034
5035 /* A cache-line is 32 bytes. */
5036-#define L1_CACHE_BYTES 32
5037 #define L1_CACHE_SHIFT 5
5038+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5039
5040 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5041
5042diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
5043index 102190a..5334cea 100644
5044--- a/arch/frv/include/asm/atomic.h
5045+++ b/arch/frv/include/asm/atomic.h
5046@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
5047 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
5048 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
5049
5050+#define atomic64_read_unchecked(v) atomic64_read(v)
5051+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5052+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5053+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5054+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5055+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5056+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5057+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5058+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5059+
5060 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5061 {
5062 int c, old;
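
These *_unchecked aliases are the opt-out arm of PaX REFCOUNT: on architectures like frv that get no overflow instrumentation, the unchecked ops collapse onto the plain ones. A sketch of the type side, following the shape (not the verbatim text) of the include/linux/types.h hunk of this patch:

#ifdef CONFIG_PAX_REFCOUNT
typedef struct {
	long long counter;
} atomic64_unchecked_t;
#else
typedef atomic64_t atomic64_unchecked_t;	/* checking disabled: same type */
#endif

/* Usage: a statistics counter whose wraparound is harmless opts out. */
static atomic64_unchecked_t rx_bytes;
static inline void account_rx(long long n)
{
	atomic64_add_unchecked(n, &rx_bytes);
}
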
5063diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5064index 2797163..c2a401df9 100644
5065--- a/arch/frv/include/asm/cache.h
5066+++ b/arch/frv/include/asm/cache.h
5067@@ -12,10 +12,11 @@
5068 #ifndef __ASM_CACHE_H
5069 #define __ASM_CACHE_H
5070
5071+#include <linux/const.h>
5072
5073 /* bytes per L1 cache line */
5074 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5075-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5076+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5077
5078 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5079 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5080diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5081index 43901f2..0d8b865 100644
5082--- a/arch/frv/include/asm/kmap_types.h
5083+++ b/arch/frv/include/asm/kmap_types.h
5084@@ -2,6 +2,6 @@
5085 #ifndef _ASM_KMAP_TYPES_H
5086 #define _ASM_KMAP_TYPES_H
5087
5088-#define KM_TYPE_NR 17
5089+#define KM_TYPE_NR 18
5090
5091 #endif
5092diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5093index 836f147..4cf23f5 100644
5094--- a/arch/frv/mm/elf-fdpic.c
5095+++ b/arch/frv/mm/elf-fdpic.c
5096@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5097 {
5098 struct vm_area_struct *vma;
5099 struct vm_unmapped_area_info info;
5100+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5101
5102 if (len > TASK_SIZE)
5103 return -ENOMEM;
5104@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5105 if (addr) {
5106 addr = PAGE_ALIGN(addr);
5107 vma = find_vma(current->mm, addr);
5108- if (TASK_SIZE - len >= addr &&
5109- (!vma || addr + len <= vma->vm_start))
5110+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5111 goto success;
5112 }
5113
5114@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5115 info.high_limit = (current->mm->start_stack - 0x00200000);
5116 info.align_mask = 0;
5117 info.align_offset = 0;
5118+ info.threadstack_offset = offset;
5119 addr = vm_unmapped_area(&info);
5120 if (!(addr & ~PAGE_MASK))
5121 goto success;
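
check_heap_stack_gap() itself lives in the mm/ part of the patch, outside this excerpt; a hedged sketch of the policy it enforces (simplified, name suffixed to mark it illustrative):

static int check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
				       unsigned long addr, unsigned long len,
				       unsigned long offset)
{
	if (!vma)
		return 1;			/* nothing mapped above */
	if (addr + len > vma->vm_start)
		return 0;			/* overlaps the next VMA */
	/* keep the randomized thread-stack gap in front of a stack VMA */
	if ((vma->vm_flags & VM_GROWSDOWN) && addr + len + offset > vma->vm_start)
		return 0;
	return 1;
}
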
5122diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5123index 69952c1..4fa2908 100644
5124--- a/arch/hexagon/include/asm/cache.h
5125+++ b/arch/hexagon/include/asm/cache.h
5126@@ -21,9 +21,11 @@
5127 #ifndef __ASM_CACHE_H
5128 #define __ASM_CACHE_H
5129
5130+#include <linux/const.h>
5131+
5132 /* Bytes per L1 cache line */
5133-#define L1_CACHE_SHIFT (5)
5134-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5135+#define L1_CACHE_SHIFT 5
5136+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5137
5138 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5139
5140diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5141index 074e52b..76afdac 100644
5142--- a/arch/ia64/Kconfig
5143+++ b/arch/ia64/Kconfig
5144@@ -548,6 +548,7 @@ source "drivers/sn/Kconfig"
5145 config KEXEC
5146 bool "kexec system call"
5147 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5148+ depends on !GRKERNSEC_KMEM
5149 help
5150 kexec is a system call that implements the ability to shutdown your
5151 current kernel, and to start another kernel. It is like a reboot
5152diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5153index 970d0bd..e750b9b 100644
5154--- a/arch/ia64/Makefile
5155+++ b/arch/ia64/Makefile
5156@@ -98,5 +98,6 @@ endef
5157 archprepare: make_nr_irqs_h FORCE
5158 PHONY += make_nr_irqs_h FORCE
5159
5160+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5161 make_nr_irqs_h: FORCE
5162 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5163diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5164index 0bf0350..2ad1957 100644
5165--- a/arch/ia64/include/asm/atomic.h
5166+++ b/arch/ia64/include/asm/atomic.h
5167@@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5168 #define atomic64_inc(v) atomic64_add(1, (v))
5169 #define atomic64_dec(v) atomic64_sub(1, (v))
5170
5171+#define atomic64_read_unchecked(v) atomic64_read(v)
5172+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5173+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5174+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5175+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5176+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5177+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5178+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5179+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5180+
5181 #endif /* _ASM_IA64_ATOMIC_H */
5182diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5183index f6769eb..1cdb590 100644
5184--- a/arch/ia64/include/asm/barrier.h
5185+++ b/arch/ia64/include/asm/barrier.h
5186@@ -66,7 +66,7 @@
5187 do { \
5188 compiletime_assert_atomic_type(*p); \
5189 barrier(); \
5190- ACCESS_ONCE(*p) = (v); \
5191+ ACCESS_ONCE_RW(*p) = (v); \
5192 } while (0)
5193
5194 #define smp_load_acquire(p) \
5195diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5196index 988254a..e1ee885 100644
5197--- a/arch/ia64/include/asm/cache.h
5198+++ b/arch/ia64/include/asm/cache.h
5199@@ -1,6 +1,7 @@
5200 #ifndef _ASM_IA64_CACHE_H
5201 #define _ASM_IA64_CACHE_H
5202
5203+#include <linux/const.h>
5204
5205 /*
5206 * Copyright (C) 1998-2000 Hewlett-Packard Co
5207@@ -9,7 +10,7 @@
5208
5209 /* Bytes per L1 (data) cache line. */
5210 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5211-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5212+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5213
5214 #ifdef CONFIG_SMP
5215 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5216diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5217index 5a83c5c..4d7f553 100644
5218--- a/arch/ia64/include/asm/elf.h
5219+++ b/arch/ia64/include/asm/elf.h
5220@@ -42,6 +42,13 @@
5221 */
5222 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5223
5224+#ifdef CONFIG_PAX_ASLR
5225+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5226+
5227+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5228+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5229+#endif
5230+
5231 #define PT_IA_64_UNWIND 0x70000001
5232
5233 /* IA-64 relocations: */
5234diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5235index 5767cdf..7462574 100644
5236--- a/arch/ia64/include/asm/pgalloc.h
5237+++ b/arch/ia64/include/asm/pgalloc.h
5238@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5239 pgd_val(*pgd_entry) = __pa(pud);
5240 }
5241
5242+static inline void
5243+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5244+{
5245+ pgd_populate(mm, pgd_entry, pud);
5246+}
5247+
5248 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5249 {
5250 return quicklist_alloc(0, GFP_KERNEL, NULL);
5251@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5252 pud_val(*pud_entry) = __pa(pmd);
5253 }
5254
5255+static inline void
5256+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5257+{
5258+ pud_populate(mm, pud_entry, pmd);
5259+}
5260+
5261 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5262 {
5263 return quicklist_alloc(0, GFP_KERNEL, NULL);
5264diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5265index 7935115..c0eca6a 100644
5266--- a/arch/ia64/include/asm/pgtable.h
5267+++ b/arch/ia64/include/asm/pgtable.h
5268@@ -12,7 +12,7 @@
5269 * David Mosberger-Tang <davidm@hpl.hp.com>
5270 */
5271
5272-
5273+#include <linux/const.h>
5274 #include <asm/mman.h>
5275 #include <asm/page.h>
5276 #include <asm/processor.h>
5277@@ -142,6 +142,17 @@
5278 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5279 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5280 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5281+
5282+#ifdef CONFIG_PAX_PAGEEXEC
5283+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5284+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5285+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5286+#else
5287+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5288+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5289+# define PAGE_COPY_NOEXEC PAGE_COPY
5290+#endif
5291+
5292 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5293 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5294 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5295diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5296index 45698cd..e8e2dbc 100644
5297--- a/arch/ia64/include/asm/spinlock.h
5298+++ b/arch/ia64/include/asm/spinlock.h
5299@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5300 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5301
5302 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5303- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5304+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5305 }
5306
5307 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5308diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5309index 103bedc..0210597 100644
5310--- a/arch/ia64/include/asm/uaccess.h
5311+++ b/arch/ia64/include/asm/uaccess.h
5312@@ -70,6 +70,7 @@
5313 && ((segment).seg == KERNEL_DS.seg \
5314 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5315 })
5316+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5317 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5318
5319 /*
5320@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5321 static inline unsigned long
5322 __copy_to_user (void __user *to, const void *from, unsigned long count)
5323 {
5324+ if (count > INT_MAX)
5325+ return count;
5326+
5327+ if (!__builtin_constant_p(count))
5328+ check_object_size(from, count, true);
5329+
5330 return __copy_user(to, (__force void __user *) from, count);
5331 }
5332
5333 static inline unsigned long
5334 __copy_from_user (void *to, const void __user *from, unsigned long count)
5335 {
5336+ if (count > INT_MAX)
5337+ return count;
5338+
5339+ if (!__builtin_constant_p(count))
5340+ check_object_size(to, count, false);
5341+
5342 return __copy_user((__force void __user *) to, from, count);
5343 }
5344
5345@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5346 ({ \
5347 void __user *__cu_to = (to); \
5348 const void *__cu_from = (from); \
5349- long __cu_len = (n); \
5350+ unsigned long __cu_len = (n); \
5351 \
5352- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5353+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5354+ if (!__builtin_constant_p(n)) \
5355+ check_object_size(__cu_from, __cu_len, true); \
5356 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5357+ } \
5358 __cu_len; \
5359 })
5360
5361@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5362 ({ \
5363 void *__cu_to = (to); \
5364 const void __user *__cu_from = (from); \
5365- long __cu_len = (n); \
5366+ unsigned long __cu_len = (n); \
5367 \
5368 __chk_user_ptr(__cu_from); \
5369- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5370+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5371+ if (!__builtin_constant_p(n)) \
5372+ check_object_size(__cu_to, __cu_len, false); \
5373 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5374+ } \
5375 __cu_len; \
5376 })
5377
5378diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5379index 29754aa..06d2838 100644
5380--- a/arch/ia64/kernel/module.c
5381+++ b/arch/ia64/kernel/module.c
5382@@ -492,15 +492,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5383 }
5384
5385 static inline int
5386+in_init_rx (const struct module *mod, uint64_t addr)
5387+{
5388+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5389+}
5390+
5391+static inline int
5392+in_init_rw (const struct module *mod, uint64_t addr)
5393+{
5394+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5395+}
5396+
5397+static inline int
5398 in_init (const struct module *mod, uint64_t addr)
5399 {
5400- return addr - (uint64_t) mod->module_init < mod->init_size;
5401+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5402+}
5403+
5404+static inline int
5405+in_core_rx (const struct module *mod, uint64_t addr)
5406+{
5407+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5408+}
5409+
5410+static inline int
5411+in_core_rw (const struct module *mod, uint64_t addr)
5412+{
5413+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5414 }
5415
5416 static inline int
5417 in_core (const struct module *mod, uint64_t addr)
5418 {
5419- return addr - (uint64_t) mod->module_core < mod->core_size;
5420+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5421 }
5422
5423 static inline int
5424@@ -683,7 +707,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5425 break;
5426
5427 case RV_BDREL:
5428- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5429+ if (in_init_rx(mod, val))
5430+ val -= (uint64_t) mod->module_init_rx;
5431+ else if (in_init_rw(mod, val))
5432+ val -= (uint64_t) mod->module_init_rw;
5433+ else if (in_core_rx(mod, val))
5434+ val -= (uint64_t) mod->module_core_rx;
5435+ else if (in_core_rw(mod, val))
5436+ val -= (uint64_t) mod->module_core_rw;
5437 break;
5438
5439 case RV_LTV:
5440@@ -818,15 +849,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5441 * addresses have been selected...
5442 */
5443 uint64_t gp;
5444- if (mod->core_size > MAX_LTOFF)
5445+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5446 /*
5447 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5448 * at the end of the module.
5449 */
5450- gp = mod->core_size - MAX_LTOFF / 2;
5451+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5452 else
5453- gp = mod->core_size / 2;
5454- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5455+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5456+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5457 mod->arch.gp = gp;
5458 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5459 }
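
All of the new in_*_rx/rw predicates rely on one unsigned-subtraction idiom, isolated here:

/* If addr < base the subtraction wraps around to a huge value, so a single
 * unsigned comparison covers both "below base" and "at or past base+size". */
static inline int in_region(uint64_t addr, uint64_t base, uint64_t size)
{
	return addr - base < size;
}
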
5460diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5461index c39c3cd..3c77738 100644
5462--- a/arch/ia64/kernel/palinfo.c
5463+++ b/arch/ia64/kernel/palinfo.c
5464@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5465 return NOTIFY_OK;
5466 }
5467
5468-static struct notifier_block __refdata palinfo_cpu_notifier =
5469+static struct notifier_block palinfo_cpu_notifier =
5470 {
5471 .notifier_call = palinfo_cpu_callback,
5472 .priority = 0,
5473diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5474index 41e33f8..65180b2a 100644
5475--- a/arch/ia64/kernel/sys_ia64.c
5476+++ b/arch/ia64/kernel/sys_ia64.c
5477@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5478 unsigned long align_mask = 0;
5479 struct mm_struct *mm = current->mm;
5480 struct vm_unmapped_area_info info;
5481+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5482
5483 if (len > RGN_MAP_LIMIT)
5484 return -ENOMEM;
5485@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5486 if (REGION_NUMBER(addr) == RGN_HPAGE)
5487 addr = 0;
5488 #endif
5489+
5490+#ifdef CONFIG_PAX_RANDMMAP
5491+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5492+ addr = mm->free_area_cache;
5493+ else
5494+#endif
5495+
5496 if (!addr)
5497 addr = TASK_UNMAPPED_BASE;
5498
5499@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5500 info.high_limit = TASK_SIZE;
5501 info.align_mask = align_mask;
5502 info.align_offset = 0;
5503+ info.threadstack_offset = offset;
5504 return vm_unmapped_area(&info);
5505 }
5506
5507diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5508index 84f8a52..7c76178 100644
5509--- a/arch/ia64/kernel/vmlinux.lds.S
5510+++ b/arch/ia64/kernel/vmlinux.lds.S
5511@@ -192,7 +192,7 @@ SECTIONS {
5512 /* Per-cpu data: */
5513 . = ALIGN(PERCPU_PAGE_SIZE);
5514 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5515- __phys_per_cpu_start = __per_cpu_load;
5516+ __phys_per_cpu_start = per_cpu_load;
5517 /*
5518 * ensure percpu data fits
5519 * into percpu page size
5520diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5521index ba5ba7a..36e9d3a 100644
5522--- a/arch/ia64/mm/fault.c
5523+++ b/arch/ia64/mm/fault.c
5524@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5525 return pte_present(pte);
5526 }
5527
5528+#ifdef CONFIG_PAX_PAGEEXEC
5529+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5530+{
5531+ unsigned long i;
5532+
5533+ printk(KERN_ERR "PAX: bytes at PC: ");
5534+ for (i = 0; i < 8; i++) {
5535+ unsigned int c;
5536+ if (get_user(c, (unsigned int *)pc+i))
5537+ printk(KERN_CONT "???????? ");
5538+ else
5539+ printk(KERN_CONT "%08x ", c);
5540+ }
5541+ printk("\n");
5542+}
5543+#endif
5544+
5545 # define VM_READ_BIT 0
5546 # define VM_WRITE_BIT 1
5547 # define VM_EXEC_BIT 2
5548@@ -151,8 +168,21 @@ retry:
5549 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5550 goto bad_area;
5551
5552- if ((vma->vm_flags & mask) != mask)
5553+ if ((vma->vm_flags & mask) != mask) {
5554+
5555+#ifdef CONFIG_PAX_PAGEEXEC
5556+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5557+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5558+ goto bad_area;
5559+
5560+ up_read(&mm->mmap_sem);
5561+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5562+ do_group_exit(SIGKILL);
5563+ }
5564+#endif
5565+
5566 goto bad_area;
5567+ }
5568
5569 /*
5570 * If for any reason at all we couldn't handle the fault, make
5571diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5572index 76069c1..c2aa816 100644
5573--- a/arch/ia64/mm/hugetlbpage.c
5574+++ b/arch/ia64/mm/hugetlbpage.c
5575@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5576 unsigned long pgoff, unsigned long flags)
5577 {
5578 struct vm_unmapped_area_info info;
5579+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5580
5581 if (len > RGN_MAP_LIMIT)
5582 return -ENOMEM;
5583@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5584 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5585 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5586 info.align_offset = 0;
5587+ info.threadstack_offset = offset;
5588 return vm_unmapped_area(&info);
5589 }
5590
5591diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5592index 6b33457..88b5124 100644
5593--- a/arch/ia64/mm/init.c
5594+++ b/arch/ia64/mm/init.c
5595@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5596 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5597 vma->vm_end = vma->vm_start + PAGE_SIZE;
5598 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5599+
5600+#ifdef CONFIG_PAX_PAGEEXEC
5601+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5602+ vma->vm_flags &= ~VM_EXEC;
5603+
5604+#ifdef CONFIG_PAX_MPROTECT
5605+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5606+ vma->vm_flags &= ~VM_MAYEXEC;
5607+#endif
5608+
5609+ }
5610+#endif
5611+
5612 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5613 down_write(&current->mm->mmap_sem);
5614 if (insert_vm_struct(current->mm, vma)) {
5615@@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
5616 gate_vma.vm_start = FIXADDR_USER_START;
5617 gate_vma.vm_end = FIXADDR_USER_END;
5618 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
5619- gate_vma.vm_page_prot = __P101;
5620+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
5621
5622 return 0;
5623 }
5624diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5625index 40b3ee98..8c2c112 100644
5626--- a/arch/m32r/include/asm/cache.h
5627+++ b/arch/m32r/include/asm/cache.h
5628@@ -1,8 +1,10 @@
5629 #ifndef _ASM_M32R_CACHE_H
5630 #define _ASM_M32R_CACHE_H
5631
5632+#include <linux/const.h>
5633+
5634 /* L1 cache line size */
5635 #define L1_CACHE_SHIFT 4
5636-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5637+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5638
5639 #endif /* _ASM_M32R_CACHE_H */
5640diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5641index 82abd15..d95ae5d 100644
5642--- a/arch/m32r/lib/usercopy.c
5643+++ b/arch/m32r/lib/usercopy.c
5644@@ -14,6 +14,9 @@
5645 unsigned long
5646 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5647 {
5648+ if ((long)n < 0)
5649+ return n;
5650+
5651 prefetch(from);
5652 if (access_ok(VERIFY_WRITE, to, n))
5653 __copy_user(to,from,n);
5654@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5655 unsigned long
5656 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5657 {
5658+ if ((long)n < 0)
5659+ return n;
5660+
5661 prefetchw(to);
5662 if (access_ok(VERIFY_READ, from, n))
5663 __copy_user_zeroing(to,from,n);
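
A short illustration of what the new (long)n < 0 guard catches:

static unsigned long demo_underflow_guard(unsigned long n)
{
	/* e.g. n = 4UL - 8UL == 0xfffffffc on 32-bit m32r: a length that
	 * underflowed.  Refuse it and report every byte as uncopied, which
	 * callers already treat as failure. */
	if ((long)n < 0)
		return n;
	return 0;	/* a sane length would proceed to __copy_user() */
}
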
5664diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5665index 0395c51..5f26031 100644
5666--- a/arch/m68k/include/asm/cache.h
5667+++ b/arch/m68k/include/asm/cache.h
5668@@ -4,9 +4,11 @@
5669 #ifndef __ARCH_M68K_CACHE_H
5670 #define __ARCH_M68K_CACHE_H
5671
5672+#include <linux/const.h>
5673+
5674 /* bytes per L1 cache line */
5675 #define L1_CACHE_SHIFT 4
5676-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5677+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5678
5679 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5680
5681diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5682index d703d8e..a8e2d70 100644
5683--- a/arch/metag/include/asm/barrier.h
5684+++ b/arch/metag/include/asm/barrier.h
5685@@ -90,7 +90,7 @@ static inline void fence(void)
5686 do { \
5687 compiletime_assert_atomic_type(*p); \
5688 smp_mb(); \
5689- ACCESS_ONCE(*p) = (v); \
5690+ ACCESS_ONCE_RW(*p) = (v); \
5691 } while (0)
5692
5693 #define smp_load_acquire(p) \
5694diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5695index 3c32075..ae0ae75 100644
5696--- a/arch/metag/mm/hugetlbpage.c
5697+++ b/arch/metag/mm/hugetlbpage.c
5698@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5699 info.high_limit = TASK_SIZE;
5700 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5701 info.align_offset = 0;
5702+ info.threadstack_offset = 0;
5703 return vm_unmapped_area(&info);
5704 }
5705
5706diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5707index 4efe96a..60e8699 100644
5708--- a/arch/microblaze/include/asm/cache.h
5709+++ b/arch/microblaze/include/asm/cache.h
5710@@ -13,11 +13,12 @@
5711 #ifndef _ASM_MICROBLAZE_CACHE_H
5712 #define _ASM_MICROBLAZE_CACHE_H
5713
5714+#include <linux/const.h>
5715 #include <asm/registers.h>
5716
5717 #define L1_CACHE_SHIFT 5
5718 /* word-granular cache in microblaze */
5719-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5720+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5721
5722 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5723
5724diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5725index 843713c..b6a87b9 100644
5726--- a/arch/mips/Kconfig
5727+++ b/arch/mips/Kconfig
5728@@ -2439,6 +2439,7 @@ source "kernel/Kconfig.preempt"
5729
5730 config KEXEC
5731 bool "Kexec system call"
5732+ depends on !GRKERNSEC_KMEM
5733 help
5734 kexec is a system call that implements the ability to shutdown your
5735 current kernel, and to start another kernel. It is like a reboot
5736diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5737index 3778655..1dff0a9 100644
5738--- a/arch/mips/cavium-octeon/dma-octeon.c
5739+++ b/arch/mips/cavium-octeon/dma-octeon.c
5740@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5741 if (dma_release_from_coherent(dev, order, vaddr))
5742 return;
5743
5744- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5745+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5746 }
5747
5748 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5749diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5750index 857da84..0fee5e2 100644
5751--- a/arch/mips/include/asm/atomic.h
5752+++ b/arch/mips/include/asm/atomic.h
5753@@ -22,15 +22,39 @@
5754 #include <asm/cmpxchg.h>
5755 #include <asm/war.h>
5756
5757+#ifdef CONFIG_GENERIC_ATOMIC64
5758+#include <asm-generic/atomic64.h>
5759+#endif
5760+
5761 #define ATOMIC_INIT(i) { (i) }
5762
5763+#ifdef CONFIG_64BIT
5764+#define _ASM_EXTABLE(from, to) \
5765+" .section __ex_table,\"a\"\n" \
5766+" .dword " #from ", " #to"\n" \
5767+" .previous\n"
5768+#else
5769+#define _ASM_EXTABLE(from, to) \
5770+" .section __ex_table,\"a\"\n" \
5771+" .word " #from ", " #to"\n" \
5772+" .previous\n"
5773+#endif
5774+
5775 /*
5776 * atomic_read - read atomic variable
5777 * @v: pointer of type atomic_t
5778 *
5779 * Atomically reads the value of @v.
5780 */
5781-#define atomic_read(v) ACCESS_ONCE((v)->counter)
5782+static inline int atomic_read(const atomic_t *v)
5783+{
5784+ return ACCESS_ONCE(v->counter);
5785+}
5786+
5787+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5788+{
5789+ return ACCESS_ONCE(v->counter);
5790+}
5791
5792 /*
5793 * atomic_set - set atomic variable
5794@@ -39,47 +63,77 @@
5795 *
5796 * Atomically sets the value of @v to @i.
5797 */
5798-#define atomic_set(v, i) ((v)->counter = (i))
5799+static inline void atomic_set(atomic_t *v, int i)
5800+{
5801+ v->counter = i;
5802+}
5803
5804-#define ATOMIC_OP(op, c_op, asm_op) \
5805-static __inline__ void atomic_##op(int i, atomic_t * v) \
5806+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5807+{
5808+ v->counter = i;
5809+}
5810+
5811+#ifdef CONFIG_PAX_REFCOUNT
5812+#define __OVERFLOW_POST \
5813+ " b 4f \n" \
5814+ " .set noreorder \n" \
5815+ "3: b 5f \n" \
5816+ " move %0, %1 \n" \
5817+ " .set reorder \n"
5818+#define __OVERFLOW_EXTABLE \
5819+ "3:\n" \
5820+ _ASM_EXTABLE(2b, 3b)
5821+#else
5822+#define __OVERFLOW_POST
5823+#define __OVERFLOW_EXTABLE
5824+#endif
5825+
5826+#define __ATOMIC_OP(op, suffix, asm_op, extable) \
5827+static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \
5828 { \
5829 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
5830 int temp; \
5831 \
5832 __asm__ __volatile__( \
5833- " .set arch=r4000 \n" \
5834- "1: ll %0, %1 # atomic_" #op " \n" \
5835- " " #asm_op " %0, %2 \n" \
5836+ " .set mips3 \n" \
5837+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5838+ "2: " #asm_op " %0, %2 \n" \
5839 " sc %0, %1 \n" \
5840 " beqzl %0, 1b \n" \
5841+ extable \
5842 " .set mips0 \n" \
5843 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5844 : "Ir" (i)); \
5845 } else if (kernel_uses_llsc) { \
5846 int temp; \
5847 \
5848- do { \
5849- __asm__ __volatile__( \
5850- " .set arch=r4000 \n" \
5851- " ll %0, %1 # atomic_" #op "\n" \
5852- " " #asm_op " %0, %2 \n" \
5853- " sc %0, %1 \n" \
5854- " .set mips0 \n" \
5855- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5856- : "Ir" (i)); \
5857- } while (unlikely(!temp)); \
5858+ __asm__ __volatile__( \
5859+ " .set mips3 \n" \
5860+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5861+ "2: " #asm_op " %0, %2 \n" \
5862+ " sc %0, %1 \n" \
5863+ " beqz %0, 1b \n" \
5864+ extable \
5865+ " .set mips0 \n" \
5866+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5867+ : "Ir" (i)); \
5868 } else { \
5869 unsigned long flags; \
5870 \
5871 raw_local_irq_save(flags); \
5872- v->counter c_op i; \
5873+ __asm__ __volatile__( \
5874+ "2: " #asm_op " %0, %1 \n" \
5875+ extable \
5876+ : "+r" (v->counter) : "Ir" (i)); \
5877 raw_local_irq_restore(flags); \
5878 } \
5879 }
5880
5881-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
5882-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5883+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, _unchecked, asm_op##u, ) \
5884+ __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE)
5885+
5886+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \
5887+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t * v) \
5888 { \
5889 int result; \
5890 \
5891@@ -89,12 +143,15 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5892 int temp; \
5893 \
5894 __asm__ __volatile__( \
5895- " .set arch=r4000 \n" \
5896- "1: ll %1, %2 # atomic_" #op "_return \n" \
5897- " " #asm_op " %0, %1, %3 \n" \
5898+ " .set mips3 \n" \
5899+ "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \
5900+ "2: " #asm_op " %0, %1, %3 \n" \
5901 " sc %0, %2 \n" \
5902 " beqzl %0, 1b \n" \
5903- " " #asm_op " %0, %1, %3 \n" \
5904+ post_op \
5905+ extable \
5906+ "4: " #asm_op " %0, %1, %3 \n" \
5907+ "5: \n" \
5908 " .set mips0 \n" \
5909 : "=&r" (result), "=&r" (temp), \
5910 "+" GCC_OFF12_ASM() (v->counter) \
5911@@ -102,26 +159,32 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5912 } else if (kernel_uses_llsc) { \
5913 int temp; \
5914 \
5915- do { \
5916- __asm__ __volatile__( \
5917- " .set arch=r4000 \n" \
5918- " ll %1, %2 # atomic_" #op "_return \n" \
5919- " " #asm_op " %0, %1, %3 \n" \
5920- " sc %0, %2 \n" \
5921- " .set mips0 \n" \
5922- : "=&r" (result), "=&r" (temp), \
5923- "+" GCC_OFF12_ASM() (v->counter) \
5924- : "Ir" (i)); \
5925- } while (unlikely(!result)); \
5926+ __asm__ __volatile__( \
5927+ " .set mips3 \n" \
5928+ "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \
5929+ "2: " #asm_op " %0, %1, %3 \n" \
5930+ " sc %0, %2 \n" \
5931+ post_op \
5932+ extable \
5933+ "4: " #asm_op " %0, %1, %3 \n" \
5934+ "5: \n" \
5935+ " .set mips0 \n" \
5936+ : "=&r" (result), "=&r" (temp), \
5937+ "+" GCC_OFF12_ASM() (v->counter) \
5938+ : "Ir" (i)); \
5939 \
5940- result = temp; result c_op i; \
5941 } else { \
5942 unsigned long flags; \
5943 \
5944 raw_local_irq_save(flags); \
5945- result = v->counter; \
5946- result c_op i; \
5947- v->counter = result; \
5948+ __asm__ __volatile__( \
5949+ " lw %0, %1 \n" \
5950+ "2: " #asm_op " %0, %1, %2 \n" \
5951+ " sw %0, %1 \n" \
5952+ "3: \n" \
5953+ extable \
5954+ : "=&r" (result), "+" GCC_OFF12_ASM() (v->counter) \
5955+ : "Ir" (i)); \
5956 raw_local_irq_restore(flags); \
5957 } \
5958 \
5959@@ -130,16 +194,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5960 return result; \
5961 }
5962
5963-#define ATOMIC_OPS(op, c_op, asm_op) \
5964- ATOMIC_OP(op, c_op, asm_op) \
5965- ATOMIC_OP_RETURN(op, c_op, asm_op)
5966+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , ) \
5967+ __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
5968
5969-ATOMIC_OPS(add, +=, addu)
5970-ATOMIC_OPS(sub, -=, subu)
5971+#define ATOMIC_OPS(op, asm_op) \
5972+ ATOMIC_OP(op, asm_op) \
5973+ ATOMIC_OP_RETURN(op, asm_op)
5974+
5975+ATOMIC_OPS(add, add)
5976+ATOMIC_OPS(sub, sub)
5977
5978 #undef ATOMIC_OPS
5979 #undef ATOMIC_OP_RETURN
5980+#undef __ATOMIC_OP_RETURN
5981 #undef ATOMIC_OP
5982+#undef __ATOMIC_OP
5983
5984 /*
5985 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
5986@@ -149,7 +218,7 @@ ATOMIC_OPS(sub, -=, subu)
5987 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5988 * The function returns the old value of @v minus @i.
5989 */
5990-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5991+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5992 {
5993 int result;
5994
5995@@ -208,8 +277,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5996 return result;
5997 }
5998
5999-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
6000-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
6001+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6002+{
6003+ return cmpxchg(&v->counter, old, new);
6004+}
6005+
6006+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
6007+ int new)
6008+{
6009+ return cmpxchg(&(v->counter), old, new);
6010+}
6011+
6012+static inline int atomic_xchg(atomic_t *v, int new)
6013+{
6014+ return xchg(&v->counter, new);
6015+}
6016+
6017+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6018+{
6019+ return xchg(&(v->counter), new);
6020+}
6021
6022 /**
6023 * __atomic_add_unless - add unless the number is a given value
6024@@ -237,6 +324,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6025
6026 #define atomic_dec_return(v) atomic_sub_return(1, (v))
6027 #define atomic_inc_return(v) atomic_add_return(1, (v))
6028+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6029+{
6030+ return atomic_add_return_unchecked(1, v);
6031+}
6032
6033 /*
6034 * atomic_sub_and_test - subtract value from variable and test result
6035@@ -258,6 +349,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6036 * other cases.
6037 */
6038 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6039+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6040+{
6041+ return atomic_add_return_unchecked(1, v) == 0;
6042+}
6043
6044 /*
6045 * atomic_dec_and_test - decrement by 1 and test
6046@@ -282,6 +377,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6047 * Atomically increments @v by 1.
6048 */
6049 #define atomic_inc(v) atomic_add(1, (v))
6050+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6051+{
6052+ atomic_add_unchecked(1, v);
6053+}
6054
6055 /*
6056 * atomic_dec - decrement and test
6057@@ -290,6 +389,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6058 * Atomically decrements @v by 1.
6059 */
6060 #define atomic_dec(v) atomic_sub(1, (v))
6061+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6062+{
6063+ atomic_sub_unchecked(1, v);
6064+}
6065
6066 /*
6067 * atomic_add_negative - add and test if negative
6068@@ -311,54 +414,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6069 * @v: pointer of type atomic64_t
6070 *
6071 */
6072-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
6073+static inline long atomic64_read(const atomic64_t *v)
6074+{
6075+ return ACCESS_ONCE(v->counter);
6076+}
6077+
6078+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6079+{
6080+ return ACCESS_ONCE(v->counter);
6081+}
6082
6083 /*
6084 * atomic64_set - set atomic variable
6085 * @v: pointer of type atomic64_t
6086 * @i: required value
6087 */
6088-#define atomic64_set(v, i) ((v)->counter = (i))
6089+static inline void atomic64_set(atomic64_t *v, long i)
6090+{
6091+ v->counter = i;
6092+}
6093
6094-#define ATOMIC64_OP(op, c_op, asm_op) \
6095-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
6096+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6097+{
6098+ v->counter = i;
6099+}
6100+
6101+#define __ATOMIC64_OP(op, suffix, asm_op, extable) \
6102+static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \
6103 { \
6104 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
6105 long temp; \
6106 \
6107 __asm__ __volatile__( \
6108- " .set arch=r4000 \n" \
6109- "1: lld %0, %1 # atomic64_" #op " \n" \
6110- " " #asm_op " %0, %2 \n" \
6111+ " .set mips3 \n" \
6112+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6113+ "2: " #asm_op " %0, %2 \n" \
6114 " scd %0, %1 \n" \
6115 " beqzl %0, 1b \n" \
6116+ extable \
6117 " .set mips0 \n" \
6118 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6119 : "Ir" (i)); \
6120 } else if (kernel_uses_llsc) { \
6121 long temp; \
6122 \
6123- do { \
6124- __asm__ __volatile__( \
6125- " .set arch=r4000 \n" \
6126- " lld %0, %1 # atomic64_" #op "\n" \
6127- " " #asm_op " %0, %2 \n" \
6128- " scd %0, %1 \n" \
6129- " .set mips0 \n" \
6130- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6131- : "Ir" (i)); \
6132- } while (unlikely(!temp)); \
6133+ __asm__ __volatile__( \
6134+ " .set mips3 \n" \
6135+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6136+ "2: " #asm_op " %0, %2 \n" \
6137+ " scd %0, %1 \n" \
6138+ " beqz %0, 1b \n" \
6139+ extable \
6140+ " .set mips0 \n" \
6141+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6142+ : "Ir" (i)); \
6143 } else { \
6144 unsigned long flags; \
6145 \
6146 raw_local_irq_save(flags); \
6147- v->counter c_op i; \
6148+ __asm__ __volatile__( \
6149+ "2: " #asm_op " %0, %1 \n" \
6150+ extable \
6151+ : "+" GCC_OFF12_ASM() (v->counter) : "Ir" (i)); \
6152 raw_local_irq_restore(flags); \
6153 } \
6154 }
6155
6156-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
6157-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6158+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, _unchecked, asm_op##u, ) \
6159+ __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE)
6160+
6161+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \
6162+static inline long atomic64_##op##_return##suffix(long i, atomic64##suffix##_t * v)\
6163 { \
6164 long result; \
6165 \
6166@@ -368,12 +494,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6167 long temp; \
6168 \
6169 __asm__ __volatile__( \
6170- " .set arch=r4000 \n" \
6171+ " .set mips3 \n" \
6172 "1: lld %1, %2 # atomic64_" #op "_return\n" \
6173- " " #asm_op " %0, %1, %3 \n" \
6174+ "2: " #asm_op " %0, %1, %3 \n" \
6175 " scd %0, %2 \n" \
6176 " beqzl %0, 1b \n" \
6177- " " #asm_op " %0, %1, %3 \n" \
6178+ post_op \
6179+ extable \
6180+ "4: " #asm_op " %0, %1, %3 \n" \
6181+ "5: \n" \
6182 " .set mips0 \n" \
6183 : "=&r" (result), "=&r" (temp), \
6184 "+" GCC_OFF12_ASM() (v->counter) \
6185@@ -381,27 +510,34 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6186 } else if (kernel_uses_llsc) { \
6187 long temp; \
6188 \
6189- do { \
6190- __asm__ __volatile__( \
6191- " .set arch=r4000 \n" \
6192- " lld %1, %2 # atomic64_" #op "_return\n" \
6193- " " #asm_op " %0, %1, %3 \n" \
6194- " scd %0, %2 \n" \
6195- " .set mips0 \n" \
6196- : "=&r" (result), "=&r" (temp), \
6197- "=" GCC_OFF12_ASM() (v->counter) \
6198- : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
6199- : "memory"); \
6200- } while (unlikely(!result)); \
6201+ __asm__ __volatile__( \
6202+ " .set mips3 \n" \
6203+ "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\
6204+ "2: " #asm_op " %0, %1, %3 \n" \
6205+ " scd %0, %2 \n" \
6206+ " beqz %0, 1b \n" \
6207+ post_op \
6208+ extable \
6209+ "4: " #asm_op " %0, %1, %3 \n" \
6210+ "5: \n" \
6211+ " .set mips0 \n" \
6212+ : "=&r" (result), "=&r" (temp), \
6213+ "=" GCC_OFF12_ASM() (v->counter) \
6214+ : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
6215+ : "memory"); \
6216 \
6217- result = temp; result c_op i; \
6218 } else { \
6219 unsigned long flags; \
6220 \
6221 raw_local_irq_save(flags); \
6222- result = v->counter; \
6223- result c_op i; \
6224- v->counter = result; \
6225+ __asm__ __volatile__( \
6226+ " ld %0, %1 \n" \
6227+ "2: " #asm_op " %0, %1, %2 \n" \
6228+ " sd %0, %1 \n" \
6229+ "3: \n" \
6230+ extable \
6231+ : "=&r" (result), "+" GCC_OFF12_ASM() (v->counter) \
6232+ : "Ir" (i)); \
6233 raw_local_irq_restore(flags); \
6234 } \
6235 \
6236@@ -410,16 +547,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6237 return result; \
6238 }
6239
6240-#define ATOMIC64_OPS(op, c_op, asm_op) \
6241- ATOMIC64_OP(op, c_op, asm_op) \
6242- ATOMIC64_OP_RETURN(op, c_op, asm_op)
6243+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , ) \
6244+ __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
6245
6246-ATOMIC64_OPS(add, +=, daddu)
6247-ATOMIC64_OPS(sub, -=, dsubu)
6248+#define ATOMIC64_OPS(op, asm_op) \
6249+ ATOMIC64_OP(op, asm_op) \
6250+ ATOMIC64_OP_RETURN(op, asm_op)
6251+
6252+ATOMIC64_OPS(add, dadd)
6253+ATOMIC64_OPS(sub, dsub)
6254
6255 #undef ATOMIC64_OPS
6256 #undef ATOMIC64_OP_RETURN
6257+#undef __ATOMIC64_OP_RETURN
6258 #undef ATOMIC64_OP
6259+#undef __ATOMIC64_OP
6260+#undef __OVERFLOW_EXTABLE
6261+#undef __OVERFLOW_POST
6262
6263 /*
6264 * atomic64_sub_if_positive - conditionally subtract integer from atomic
6265@@ -430,7 +574,7 @@ ATOMIC64_OPS(sub, -=, dsubu)
6266 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6267 * The function returns the old value of @v minus @i.
6268 */
6269-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6270+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6271 {
6272 long result;
6273
6274@@ -489,9 +633,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6275 return result;
6276 }
6277
6278-#define atomic64_cmpxchg(v, o, n) \
6279- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6280-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6281+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6282+{
6283+ return cmpxchg(&v->counter, old, new);
6284+}
6285+
6286+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6287+ long new)
6288+{
6289+ return cmpxchg(&(v->counter), old, new);
6290+}
6291+
6292+static inline long atomic64_xchg(atomic64_t *v, long new)
6293+{
6294+ return xchg(&v->counter, new);
6295+}
6296+
6297+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6298+{
6299+ return xchg(&(v->counter), new);
6300+}
6301
6302 /**
6303 * atomic64_add_unless - add unless the number is a given value
6304@@ -521,6 +682,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6305
6306 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6307 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6308+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6309
6310 /*
6311 * atomic64_sub_and_test - subtract value from variable and test result
6312@@ -542,6 +704,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6313 * other cases.
6314 */
6315 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6316+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6317
6318 /*
6319 * atomic64_dec_and_test - decrement by 1 and test
6320@@ -566,6 +729,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6321 * Atomically increments @v by 1.
6322 */
6323 #define atomic64_inc(v) atomic64_add(1, (v))
6324+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6325
6326 /*
6327 * atomic64_dec - decrement and test
6328@@ -574,6 +738,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6329 * Atomically decrements @v by 1.
6330 */
6331 #define atomic64_dec(v) atomic64_sub(1, (v))
6332+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6333
6334 /*
6335 * atomic64_add_negative - add and test if negative
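
Condensed view of what the checked half of the split above generates: the checked variants use the trapping add/dadd (signed-overflow exception) instead of addu/daddu, and the __ex_table entry lets the MIPS overflow handler land on a fixup instead of completing the store. A sketch of __ATOMIC_OP(add, , add, __OVERFLOW_EXTABLE) after expansion (32-bit ll/sc branch, constraints simplified):

static inline void atomic_add_checked_sketch(int i, atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"	.set	mips3				\n"
	"1:	ll	%0, %1	# load-linked		\n"
	"2:	add	%0, %2	# traps on overflow	\n"
	"	sc	%0, %1	# store-conditional	\n"
	"	beqz	%0, 1b	# retry if sc failed	\n"
	"3:						\n"
	"	.section __ex_table,\"a\"		\n"
	"	.word	2b, 3b	# overflow: skip store	\n"
	"	.previous				\n"
	"	.set	mips0				\n"
	: "=&r" (temp), "+m" (v->counter)
	: "Ir" (i));
}

On overflow the exception handler looks 2b up in __ex_table and resumes at 3b, so the counter keeps its old value; the REFCOUNT report path is invoked from the patch's exception-handler changes, which are not shown in this excerpt.
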
6336diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6337index 2b8bbbc..4556df6 100644
6338--- a/arch/mips/include/asm/barrier.h
6339+++ b/arch/mips/include/asm/barrier.h
6340@@ -133,7 +133,7 @@
6341 do { \
6342 compiletime_assert_atomic_type(*p); \
6343 smp_mb(); \
6344- ACCESS_ONCE(*p) = (v); \
6345+ ACCESS_ONCE_RW(*p) = (v); \
6346 } while (0)
6347
6348 #define smp_load_acquire(p) \
6349diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6350index b4db69f..8f3b093 100644
6351--- a/arch/mips/include/asm/cache.h
6352+++ b/arch/mips/include/asm/cache.h
6353@@ -9,10 +9,11 @@
6354 #ifndef _ASM_CACHE_H
6355 #define _ASM_CACHE_H
6356
6357+#include <linux/const.h>
6358 #include <kmalloc.h>
6359
6360 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6361-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6362+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6363
6364 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6365 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6366diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6367index eb4d95d..f2f7f93 100644
6368--- a/arch/mips/include/asm/elf.h
6369+++ b/arch/mips/include/asm/elf.h
6370@@ -405,15 +405,18 @@ extern const char *__elf_platform;
6371 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6372 #endif
6373
6374+#ifdef CONFIG_PAX_ASLR
6375+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6376+
6377+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6378+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6379+#endif
6380+
6381 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6382 struct linux_binprm;
6383 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6384 int uses_interp);
6385
6386-struct mm_struct;
6387-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6388-#define arch_randomize_brk arch_randomize_brk
6389-
6390 struct arch_elf_state {
6391 int fp_abi;
6392 int interp_fp_abi;
6393diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6394index c1f6afa..38cc6e9 100644
6395--- a/arch/mips/include/asm/exec.h
6396+++ b/arch/mips/include/asm/exec.h
6397@@ -12,6 +12,6 @@
6398 #ifndef _ASM_EXEC_H
6399 #define _ASM_EXEC_H
6400
6401-extern unsigned long arch_align_stack(unsigned long sp);
6402+#define arch_align_stack(x) ((x) & ~0xfUL)
6403
6404 #endif /* _ASM_EXEC_H */
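
Worked example of the replacement macro: it only rounds down to 16 bytes and deliberately adds no random slack, since PaX randomizes the stack base itself:

static unsigned long demo_align(void)
{
	return arch_align_stack(0x7fff1234UL);	/* 0x7fff1234 & ~0xfUL == 0x7fff1230 */
}
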
6405diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6406index 9e8ef59..1139d6b 100644
6407--- a/arch/mips/include/asm/hw_irq.h
6408+++ b/arch/mips/include/asm/hw_irq.h
6409@@ -10,7 +10,7 @@
6410
6411 #include <linux/atomic.h>
6412
6413-extern atomic_t irq_err_count;
6414+extern atomic_unchecked_t irq_err_count;
6415
6416 /*
6417 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6418diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6419index 46dfc3c..a16b13a 100644
6420--- a/arch/mips/include/asm/local.h
6421+++ b/arch/mips/include/asm/local.h
6422@@ -12,15 +12,25 @@ typedef struct
6423 atomic_long_t a;
6424 } local_t;
6425
6426+typedef struct {
6427+ atomic_long_unchecked_t a;
6428+} local_unchecked_t;
6429+
6430 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6431
6432 #define local_read(l) atomic_long_read(&(l)->a)
6433+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6434 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6435+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6436
6437 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6438+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6439 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6440+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6441 #define local_inc(l) atomic_long_inc(&(l)->a)
6442+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6443 #define local_dec(l) atomic_long_dec(&(l)->a)
6444+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6445
6446 /*
6447 * Same as above, but return the result value
6448@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6449 return result;
6450 }
6451
6452+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6453+{
6454+ unsigned long result;
6455+
6456+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6457+ unsigned long temp;
6458+
6459+ __asm__ __volatile__(
6460+ " .set mips3 \n"
6461+ "1:" __LL "%1, %2 # local_add_return \n"
6462+ " addu %0, %1, %3 \n"
6463+ __SC "%0, %2 \n"
6464+ " beqzl %0, 1b \n"
6465+ " addu %0, %1, %3 \n"
6466+ " .set mips0 \n"
6467+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6468+ : "Ir" (i), "m" (l->a.counter)
6469+ : "memory");
6470+ } else if (kernel_uses_llsc) {
6471+ unsigned long temp;
6472+
6473+ __asm__ __volatile__(
6474+ " .set mips3 \n"
6475+ "1:" __LL "%1, %2 # local_add_return \n"
6476+ " addu %0, %1, %3 \n"
6477+ __SC "%0, %2 \n"
6478+ " beqz %0, 1b \n"
6479+ " addu %0, %1, %3 \n"
6480+ " .set mips0 \n"
6481+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6482+ : "Ir" (i), "m" (l->a.counter)
6483+ : "memory");
6484+ } else {
6485+ unsigned long flags;
6486+
6487+ local_irq_save(flags);
6488+ result = l->a.counter;
6489+ result += i;
6490+ l->a.counter = result;
6491+ local_irq_restore(flags);
6492+ }
6493+
6494+ return result;
6495+}
6496+
6497 static __inline__ long local_sub_return(long i, local_t * l)
6498 {
6499 unsigned long result;
6500@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6501
6502 #define local_cmpxchg(l, o, n) \
6503 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6504+#define local_cmpxchg_unchecked(l, o, n) \
6505+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6506 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6507
6508 /**
6509diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6510index 154b70a..426ae3d 100644
6511--- a/arch/mips/include/asm/page.h
6512+++ b/arch/mips/include/asm/page.h
6513@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6514 #ifdef CONFIG_CPU_MIPS32
6515 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6516 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6517- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6518+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6519 #else
6520 typedef struct { unsigned long long pte; } pte_t;
6521 #define pte_val(x) ((x).pte)
6522diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6523index b336037..5b874cc 100644
6524--- a/arch/mips/include/asm/pgalloc.h
6525+++ b/arch/mips/include/asm/pgalloc.h
6526@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6527 {
6528 set_pud(pud, __pud((unsigned long)pmd));
6529 }
6530+
6531+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6532+{
6533+ pud_populate(mm, pud, pmd);
6534+}
6535 #endif
6536
6537 /*
6538diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6539index 845016d..3303268 100644
6540--- a/arch/mips/include/asm/pgtable.h
6541+++ b/arch/mips/include/asm/pgtable.h
6542@@ -20,6 +20,9 @@
6543 #include <asm/io.h>
6544 #include <asm/pgtable-bits.h>
6545
6546+#define ktla_ktva(addr) (addr)
6547+#define ktva_ktla(addr) (addr)
6548+
6549 struct mm_struct;
6550 struct vm_area_struct;
6551
6552diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6553index e4440f9..8fb0005 100644
6554--- a/arch/mips/include/asm/thread_info.h
6555+++ b/arch/mips/include/asm/thread_info.h
6556@@ -106,6 +106,9 @@ static inline struct thread_info *current_thread_info(void)
6557 #define TIF_SECCOMP 4 /* secure computing */
6558 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
6559 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
6560+/* li takes a 32bit immediate */
6561+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
6562+
6563 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
6564 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6565 #define TIF_NOHZ 19 /* in adaptive nohz mode */
6566@@ -141,14 +144,16 @@ static inline struct thread_info *current_thread_info(void)
6567 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
6568 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
6569 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6570+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6571
6572 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6573 _TIF_SYSCALL_AUDIT | \
6574- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6575+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
6576+ _TIF_GRSEC_SETXID)
6577
6578 /* work to do in syscall_trace_leave() */
6579 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6580- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6581+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6582
6583 /* work to do on interrupt/exception return */
6584 #define _TIF_WORK_MASK \
6585@@ -156,7 +161,7 @@ static inline struct thread_info *current_thread_info(void)
6586 /* work to do on any return to u-space */
6587 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6588 _TIF_WORK_SYSCALL_EXIT | \
6589- _TIF_SYSCALL_TRACEPOINT)
6590+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6591
6592 /*
6593 * We stash processor id into a COP0 register to retrieve it fast
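[Note on the thread_info hunks: they follow the standard kernel pattern in which each TIF_* constant is a bit number, each _TIF_* is the corresponding mask, and composite masks such as _TIF_WORK_SYSCALL_ENTRY decide whether syscall entry takes the slow, traced path. Adding _TIF_GRSEC_SETXID to those masks is what guarantees the credential worker runs. A self-contained sketch of the mechanism; ti_flags stands in for current_thread_info()->flags.]

#include <stdio.h>

#define TIF_SYSCALL_TRACE  0
#define TIF_GRSEC_SETXID   10

#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_GRSEC_SETXID  (1 << TIF_GRSEC_SETXID)

/* Entry work mask: any set bit here forces the slow syscall path. */
#define _TIF_WORK_SYSCALL_ENTRY (_TIF_SYSCALL_TRACE | _TIF_GRSEC_SETXID)

int main(void)
{
    unsigned long ti_flags = _TIF_GRSEC_SETXID; /* set by another thread */

    if (ti_flags & _TIF_WORK_SYSCALL_ENTRY)
        printf("slow path: credential update (or tracing) pending\n");
    return 0;
}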
6594diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6595index bf8b324..cec5705 100644
6596--- a/arch/mips/include/asm/uaccess.h
6597+++ b/arch/mips/include/asm/uaccess.h
6598@@ -130,6 +130,7 @@ extern u64 __ua_limit;
6599 __ok == 0; \
6600 })
6601
6602+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6603 #define access_ok(type, addr, size) \
6604 likely(__access_ok((addr), (size), __access_mask))
6605
6606diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6607index 1188e00..41cf144 100644
6608--- a/arch/mips/kernel/binfmt_elfn32.c
6609+++ b/arch/mips/kernel/binfmt_elfn32.c
6610@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6611 #undef ELF_ET_DYN_BASE
6612 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6613
6614+#ifdef CONFIG_PAX_ASLR
6615+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6616+
6617+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6618+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6619+#endif
6620+
6621 #include <asm/processor.h>
6622 #include <linux/module.h>
6623 #include <linux/elfcore.h>
6624diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6625index 9287678..f870e47 100644
6626--- a/arch/mips/kernel/binfmt_elfo32.c
6627+++ b/arch/mips/kernel/binfmt_elfo32.c
6628@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6629 #undef ELF_ET_DYN_BASE
6630 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6631
6632+#ifdef CONFIG_PAX_ASLR
6633+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6634+
6635+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6636+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6637+#endif
6638+
6639 #include <asm/processor.h>
6640
6641 #include <linux/module.h>
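[Note on the PAX_DELTA_*_LEN values in the two binfmt hunks above: they are bit counts of page-granular randomness, so the expressions encode a total randomized span. For a 32-bit task with 4 KB pages (PAGE_SHIFT = 12), 27 - PAGE_SHIFT gives 15 bits of randomness, which scaled back up by PAGE_SHIFT covers 2^27 bytes, or 128 MB; the 64-bit case (36 - PAGE_SHIFT) covers 64 GB. The sketch below follows PaX's usual delta derivation; how the length is consumed is defined elsewhere in the patch, and rand32() is a stand-in for the kernel's entropy source.]

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT     12
#define DELTA_MMAP_LEN (27 - PAGE_SHIFT)   /* 15 bits for a 32-bit task */

static uint32_t rand32(void) { return (uint32_t)rand(); }  /* stand-in */

int main(void)
{
    /* delta_mmap: a random page count masked to DELTA_MMAP_LEN bits,
     * then scaled to bytes, at most 2^27 bytes (128 MB) of shift. */
    uint64_t delta_mmap =
        ((uint64_t)rand32() & ((1UL << DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;

    printf("mmap base shifted by up to %lu MB; this run: %llu KB\n",
           (1UL << 27) >> 20, (unsigned long long)delta_mmap >> 10);
    return 0;
}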
6642diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
6643index a74ec3a..4f06f18 100644
6644--- a/arch/mips/kernel/i8259.c
6645+++ b/arch/mips/kernel/i8259.c
6646@@ -202,7 +202,7 @@ spurious_8259A_irq:
6647 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
6648 spurious_irq_mask |= irqmask;
6649 }
6650- atomic_inc(&irq_err_count);
6651+ atomic_inc_unchecked(&irq_err_count);
6652 /*
6653 * Theoretically we do not have to handle this IRQ,
6654 * but in Linux this does not cause problems and is
6655diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
6656index 44a1f79..2bd6aa3 100644
6657--- a/arch/mips/kernel/irq-gt641xx.c
6658+++ b/arch/mips/kernel/irq-gt641xx.c
6659@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
6660 }
6661 }
6662
6663- atomic_inc(&irq_err_count);
6664+ atomic_inc_unchecked(&irq_err_count);
6665 }
6666
6667 void __init gt641xx_irq_init(void)
6668diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6669index d2bfbc2..a8eacd2 100644
6670--- a/arch/mips/kernel/irq.c
6671+++ b/arch/mips/kernel/irq.c
6672@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
6673 printk("unexpected IRQ # %d\n", irq);
6674 }
6675
6676-atomic_t irq_err_count;
6677+atomic_unchecked_t irq_err_count;
6678
6679 int arch_show_interrupts(struct seq_file *p, int prec)
6680 {
6681- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6682+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6683 return 0;
6684 }
6685
6686 asmlinkage void spurious_interrupt(void)
6687 {
6688- atomic_inc(&irq_err_count);
6689+ atomic_inc_unchecked(&irq_err_count);
6690 }
6691
6692 void __init init_IRQ(void)
6693@@ -109,7 +109,10 @@ void __init init_IRQ(void)
6694 #endif
6695 }
6696
6697+
6698 #ifdef DEBUG_STACKOVERFLOW
6699+extern void gr_handle_kernel_exploit(void);
6700+
6701 static inline void check_stack_overflow(void)
6702 {
6703 unsigned long sp;
6704@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
6705 printk("do_IRQ: stack overflow: %ld\n",
6706 sp - sizeof(struct thread_info));
6707 dump_stack();
6708+ gr_handle_kernel_exploit();
6709 }
6710 }
6711 #else
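[Note on the atomic_unchecked_t conversions in the surrounding hunks: irq_err_count is a statistics counter that may legitimately wrap. Under PAX_REFCOUNT, plain atomic_t operations trap on signed overflow to catch reference-count bugs, so counters that are allowed to overflow must opt out via the _unchecked variants. When the instrumentation is inactive, the unchecked type simply mirrors atomic_t; below is a minimal sketch of that fallback shape, simplified from the per-architecture definitions. The instrumented side of the pairing is visible in the powerpc atomic.h hunk later in this patch.]

/* A minimal sketch, assuming no overflow instrumentation is active. */
typedef struct { int counter; } atomic_unchecked_t;

#define atomic_inc_unchecked(v)  __sync_fetch_and_add(&(v)->counter, 1)
#define atomic_read_unchecked(v) ((v)->counter)

static atomic_unchecked_t irq_err_count_sketch;

int main(void)
{
    atomic_inc_unchecked(&irq_err_count_sketch);   /* may wrap, by design */
    return atomic_read_unchecked(&irq_err_count_sketch) != 1;
}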
6712diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
6713index 0614717..002fa43 100644
6714--- a/arch/mips/kernel/pm-cps.c
6715+++ b/arch/mips/kernel/pm-cps.c
6716@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
6717 nc_core_ready_count = nc_addr;
6718
6719 /* Ensure ready_count is zero-initialised before the assembly runs */
6720- ACCESS_ONCE(*nc_core_ready_count) = 0;
6721+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
6722 coupled_barrier(&per_cpu(pm_barrier, core), online);
6723
6724 /* Run the generated entry code */
6725diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6726index 85bff5d..39bc202 100644
6727--- a/arch/mips/kernel/process.c
6728+++ b/arch/mips/kernel/process.c
6729@@ -534,18 +534,6 @@ out:
6730 return pc;
6731 }
6732
6733-/*
6734- * Don't forget that the stack pointer must be aligned on a 8 bytes
6735- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6736- */
6737-unsigned long arch_align_stack(unsigned long sp)
6738-{
6739- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6740- sp -= get_random_int() & ~PAGE_MASK;
6741-
6742- return sp & ALMASK;
6743-}
6744-
6745 static void arch_dump_stack(void *info)
6746 {
6747 struct pt_regs *regs;
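[Note on the arch_align_stack() removal above: deleting it removes the per-exec stack-pointer jitter. grsecurity folds stack randomization into the RANDMMAP deltas instead and replaces the helper with a pure alignment mask; the powerpc exec.h hunk later in this patch defines it as ((x) & ~0xfUL). The arithmetic is plain bit masking:]

#include <stdio.h>

int main(void)
{
    unsigned long sp = 0x7fff1234UL;

    /* Align down to 16 bytes: clear the low four bits. */
    printf("%#lx -> %#lx\n", sp, sp & ~0xfUL);  /* 0x7fff1234 -> 0x7fff1230 */
    return 0;
}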
6748diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6749index 5104528..950bbdc 100644
6750--- a/arch/mips/kernel/ptrace.c
6751+++ b/arch/mips/kernel/ptrace.c
6752@@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request,
6753 return ret;
6754 }
6755
6756+#ifdef CONFIG_GRKERNSEC_SETXID
6757+extern void gr_delayed_cred_worker(void);
6758+#endif
6759+
6760 /*
6761 * Notification of system call entry/exit
6762 * - triggered by current->work.syscall_trace
6763@@ -779,6 +783,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
6764 tracehook_report_syscall_entry(regs))
6765 ret = -1;
6766
6767+#ifdef CONFIG_GRKERNSEC_SETXID
6768+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6769+ gr_delayed_cred_worker();
6770+#endif
6771+
6772 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6773 trace_sys_enter(regs, regs->regs[2]);
6774
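[Note on the GRKERNSEC_SETXID hook above: when one thread of a process changes its credentials, its siblings cannot safely be updated mid-execution, so a TIF_GRSEC_SETXID flag is set on each of them and gr_delayed_cred_worker() applies the new credentials at the next syscall boundary. The model below is schematic: gr_delayed_cred_worker()'s internals are not part of this hunk, so the applied-change body is an assumption for illustration only.]

#include <stdbool.h>
#include <stdio.h>

/* Minimal model of deferred per-thread credential updates (names are
 * hypothetical; the patch's real logic lives in gr_delayed_cred_worker()). */
struct task {
    unsigned long flags;
    int uid, pending_uid;
};

#define TIF_GRSEC_SETXID 10

static bool test_and_clear_flag(struct task *t, int bit)
{
    bool was = t->flags & (1UL << bit);
    t->flags &= ~(1UL << bit);
    return was;
}

static void syscall_entry(struct task *t)
{
    if (test_and_clear_flag(t, TIF_GRSEC_SETXID))
        t->uid = t->pending_uid;   /* apply the deferred change */
}

int main(void)
{
    struct task t = { 1UL << TIF_GRSEC_SETXID, 1000, 0 };
    syscall_entry(&t);
    printf("uid now %d\n", t.uid);  /* 0 */
    return 0;
}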
6775diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
6776index 07fc524..b9d7f28 100644
6777--- a/arch/mips/kernel/reset.c
6778+++ b/arch/mips/kernel/reset.c
6779@@ -13,6 +13,7 @@
6780 #include <linux/reboot.h>
6781
6782 #include <asm/reboot.h>
6783+#include <asm/bug.h>
6784
6785 /*
6786 * Urgs ... Too many MIPS machines to handle this in a generic way.
6787@@ -29,16 +30,19 @@ void machine_restart(char *command)
6788 {
6789 if (_machine_restart)
6790 _machine_restart(command);
6791+ BUG();
6792 }
6793
6794 void machine_halt(void)
6795 {
6796 if (_machine_halt)
6797 _machine_halt();
6798+ BUG();
6799 }
6800
6801 void machine_power_off(void)
6802 {
6803 if (pm_power_off)
6804 pm_power_off();
6805+ BUG();
6806 }
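[Note on the BUG() calls above: they enforce that the reset paths never fall through. _machine_restart and friends are hooks a platform may leave NULL, or whose implementation may return, and silently continuing to run after a supposed halt would be worse than an oops. A compile-clean sketch of the guard pattern:]

/* Guard pattern: if the hook is missing or returns, stop hard. */
void machine_halt_sketch(void (*hook)(void))
{
    if (hook)
        hook();
    /* Reaching this point means the halt did not happen. */
    for (;;) ;   /* BUG() in the kernel; an unconditional stop here */
}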
6807diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
6808index 2242bdd..b284048 100644
6809--- a/arch/mips/kernel/sync-r4k.c
6810+++ b/arch/mips/kernel/sync-r4k.c
6811@@ -18,8 +18,8 @@
6812 #include <asm/mipsregs.h>
6813
6814 static atomic_t count_start_flag = ATOMIC_INIT(0);
6815-static atomic_t count_count_start = ATOMIC_INIT(0);
6816-static atomic_t count_count_stop = ATOMIC_INIT(0);
6817+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
6818+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
6819 static atomic_t count_reference = ATOMIC_INIT(0);
6820
6821 #define COUNTON 100
6822@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
6823
6824 for (i = 0; i < NR_LOOPS; i++) {
6825 /* slaves loop on '!= 2' */
6826- while (atomic_read(&count_count_start) != 1)
6827+ while (atomic_read_unchecked(&count_count_start) != 1)
6828 mb();
6829- atomic_set(&count_count_stop, 0);
6830+ atomic_set_unchecked(&count_count_stop, 0);
6831 smp_wmb();
6832
6833 /* this lets the slaves write their count register */
6834- atomic_inc(&count_count_start);
6835+ atomic_inc_unchecked(&count_count_start);
6836
6837 /*
6838 * Everyone initialises count in the last loop:
6839@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
6840 /*
6841 * Wait for all slaves to leave the synchronization point:
6842 */
6843- while (atomic_read(&count_count_stop) != 1)
6844+ while (atomic_read_unchecked(&count_count_stop) != 1)
6845 mb();
6846- atomic_set(&count_count_start, 0);
6847+ atomic_set_unchecked(&count_count_start, 0);
6848 smp_wmb();
6849- atomic_inc(&count_count_stop);
6850+ atomic_inc_unchecked(&count_count_stop);
6851 }
6852 /* Arrange for an interrupt in a short while */
6853 write_c0_compare(read_c0_count() + COUNTON);
6854@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
6855 initcount = atomic_read(&count_reference);
6856
6857 for (i = 0; i < NR_LOOPS; i++) {
6858- atomic_inc(&count_count_start);
6859- while (atomic_read(&count_count_start) != 2)
6860+ atomic_inc_unchecked(&count_count_start);
6861+ while (atomic_read_unchecked(&count_count_start) != 2)
6862 mb();
6863
6864 /*
6865@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
6866 if (i == NR_LOOPS-1)
6867 write_c0_count(initcount);
6868
6869- atomic_inc(&count_count_stop);
6870- while (atomic_read(&count_count_stop) != 2)
6871+ atomic_inc_unchecked(&count_count_stop);
6872+ while (atomic_read_unchecked(&count_count_stop) != 2)
6873 mb();
6874 }
6875 /* Arrange for an interrupt in a short while */
6876diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6877index c3b41e2..46c32e9 100644
6878--- a/arch/mips/kernel/traps.c
6879+++ b/arch/mips/kernel/traps.c
6880@@ -688,7 +688,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
6881 siginfo_t info;
6882
6883 prev_state = exception_enter();
6884- die_if_kernel("Integer overflow", regs);
6885+ if (unlikely(!user_mode(regs))) {
6886+
6887+#ifdef CONFIG_PAX_REFCOUNT
6888+ if (fixup_exception(regs)) {
6889+ pax_report_refcount_overflow(regs);
6890+ exception_exit(prev_state);
6891+ return;
6892+ }
6893+#endif
6894+
6895+ die("Integer overflow", regs);
6896+ }
6897
6898 info.si_code = FPE_INTOVF;
6899 info.si_signo = SIGFPE;
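[Note on the do_ov() hunk above: this is the handler half of PAX_REFCOUNT on MIPS. The instrumented atomic operations use trapping arithmetic, so a kernel-mode integer-overflow trap whose PC has an exception-table fixup is diagnosed as a reference-count overflow and reported, while any other kernel-mode overflow still dies. The decision flow, distilled into a sketch (not kernel code):]

enum action { REPORT_REFCOUNT, DIE, SEND_SIGFPE };

enum action classify_overflow(int user_mode, int has_fixup)
{
    if (!user_mode)
        return has_fixup ? REPORT_REFCOUNT : DIE;
    return SEND_SIGFPE;   /* user space gets the usual signal */
}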
6900diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
6901index 270bbd4..c01932a 100644
6902--- a/arch/mips/kvm/mips.c
6903+++ b/arch/mips/kvm/mips.c
6904@@ -815,7 +815,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
6905 return r;
6906 }
6907
6908-int kvm_arch_init(void *opaque)
6909+int kvm_arch_init(const void *opaque)
6910 {
6911 if (kvm_mips_callbacks) {
6912 kvm_err("kvm: module already exists\n");
6913diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6914index 70ab5d6..62940fe 100644
6915--- a/arch/mips/mm/fault.c
6916+++ b/arch/mips/mm/fault.c
6917@@ -28,6 +28,23 @@
6918 #include <asm/highmem.h> /* For VMALLOC_END */
6919 #include <linux/kdebug.h>
6920
6921+#ifdef CONFIG_PAX_PAGEEXEC
6922+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6923+{
6924+ unsigned long i;
6925+
6926+ printk(KERN_ERR "PAX: bytes at PC: ");
6927+ for (i = 0; i < 5; i++) {
6928+ unsigned int c;
6929+ if (get_user(c, (unsigned int *)pc+i))
6930+ printk(KERN_CONT "???????? ");
6931+ else
6932+ printk(KERN_CONT "%08x ", c);
6933+ }
6934+ printk("\n");
6935+}
6936+#endif
6937+
6938 /*
6939 * This routine handles page faults. It determines the address,
6940 * and the problem, and then passes it off to one of the appropriate
6941@@ -201,6 +218,14 @@ bad_area:
6942 bad_area_nosemaphore:
6943 /* User mode accesses just cause a SIGSEGV */
6944 if (user_mode(regs)) {
6945+
6946+#ifdef CONFIG_PAX_PAGEEXEC
6947+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
6948+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
6949+ do_group_exit(SIGKILL);
6950+ }
6951+#endif
6952+
6953 tsk->thread.cp0_badvaddr = address;
6954 tsk->thread.error_code = write;
6955 #if 0
6956diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
6957index f1baadd..5472dca 100644
6958--- a/arch/mips/mm/mmap.c
6959+++ b/arch/mips/mm/mmap.c
6960@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6961 struct vm_area_struct *vma;
6962 unsigned long addr = addr0;
6963 int do_color_align;
6964+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6965 struct vm_unmapped_area_info info;
6966
6967 if (unlikely(len > TASK_SIZE))
6968@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6969 do_color_align = 1;
6970
6971 /* requesting a specific address */
6972+
6973+#ifdef CONFIG_PAX_RANDMMAP
6974+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
6975+#endif
6976+
6977 if (addr) {
6978 if (do_color_align)
6979 addr = COLOUR_ALIGN(addr, pgoff);
6980@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6981 addr = PAGE_ALIGN(addr);
6982
6983 vma = find_vma(mm, addr);
6984- if (TASK_SIZE - len >= addr &&
6985- (!vma || addr + len <= vma->vm_start))
6986+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6987 return addr;
6988 }
6989
6990 info.length = len;
6991 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
6992 info.align_offset = pgoff << PAGE_SHIFT;
6993+ info.threadstack_offset = offset;
6994
6995 if (dir == DOWN) {
6996 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
6997@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6998 {
6999 unsigned long random_factor = 0UL;
7000
7001+#ifdef CONFIG_PAX_RANDMMAP
7002+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7003+#endif
7004+
7005 if (current->flags & PF_RANDOMIZE) {
7006 random_factor = get_random_int();
7007 random_factor = random_factor << PAGE_SHIFT;
7008@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7009
7010 if (mmap_is_legacy()) {
7011 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7012+
7013+#ifdef CONFIG_PAX_RANDMMAP
7014+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7015+ mm->mmap_base += mm->delta_mmap;
7016+#endif
7017+
7018 mm->get_unmapped_area = arch_get_unmapped_area;
7019 } else {
7020 mm->mmap_base = mmap_base(random_factor);
7021+
7022+#ifdef CONFIG_PAX_RANDMMAP
7023+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7024+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7025+#endif
7026+
7027 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7028 }
7029 }
7030
7031-static inline unsigned long brk_rnd(void)
7032-{
7033- unsigned long rnd = get_random_int();
7034-
7035- rnd = rnd << PAGE_SHIFT;
7036- /* 8MB for 32bit, 256MB for 64bit */
7037- if (TASK_IS_32BIT_ADDR)
7038- rnd = rnd & 0x7ffffful;
7039- else
7040- rnd = rnd & 0xffffffful;
7041-
7042- return rnd;
7043-}
7044-
7045-unsigned long arch_randomize_brk(struct mm_struct *mm)
7046-{
7047- unsigned long base = mm->brk;
7048- unsigned long ret;
7049-
7050- ret = PAGE_ALIGN(base + brk_rnd());
7051-
7052- if (ret < mm->brk)
7053- return mm->brk;
7054-
7055- return ret;
7056-}
7057-
7058 int __virt_addr_valid(const volatile void *kaddr)
7059 {
7060 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
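[Note on the mmap.c hunks above: the PaX deltas are layered on top of the stock randomization. The legacy layout pushes the base up by delta_mmap, the top-down layout pulls it down by delta_mmap + delta_stack, and caller-supplied hint addresses are ignored under RANDMMAP. A compact sketch of the base computation; the constants below are placeholders, not MIPS's real values.]

#include <stdio.h>

#define TASK_UNMAPPED_BASE 0x10000000UL

int main(void)
{
    unsigned long random_factor = 0x00350000UL; /* placeholder entropy */
    unsigned long delta_mmap    = 0x01200000UL; /* placeholder PaX delta */
    int randmmap = 1;

    unsigned long base = TASK_UNMAPPED_BASE + random_factor;
    if (randmmap)
        base += delta_mmap;        /* legacy (bottom-up) layout */

    printf("legacy mmap base: %#lx\n", base);
    return 0;
}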
7061diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
7062index d07e041..bedb72b 100644
7063--- a/arch/mips/pci/pci-octeon.c
7064+++ b/arch/mips/pci/pci-octeon.c
7065@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
7066
7067
7068 static struct pci_ops octeon_pci_ops = {
7069- octeon_read_config,
7070- octeon_write_config,
7071+ .read = octeon_read_config,
7072+ .write = octeon_write_config,
7073 };
7074
7075 static struct resource octeon_pci_mem_resource = {
7076diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
7077index 5e36c33..eb4a17b 100644
7078--- a/arch/mips/pci/pcie-octeon.c
7079+++ b/arch/mips/pci/pcie-octeon.c
7080@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
7081 }
7082
7083 static struct pci_ops octeon_pcie0_ops = {
7084- octeon_pcie0_read_config,
7085- octeon_pcie0_write_config,
7086+ .read = octeon_pcie0_read_config,
7087+ .write = octeon_pcie0_write_config,
7088 };
7089
7090 static struct resource octeon_pcie0_mem_resource = {
7091@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
7092 };
7093
7094 static struct pci_ops octeon_pcie1_ops = {
7095- octeon_pcie1_read_config,
7096- octeon_pcie1_write_config,
7097+ .read = octeon_pcie1_read_config,
7098+ .write = octeon_pcie1_write_config,
7099 };
7100
7101 static struct resource octeon_pcie1_mem_resource = {
7102@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
7103 };
7104
7105 static struct pci_ops octeon_dummy_ops = {
7106- octeon_dummy_read_config,
7107- octeon_dummy_write_config,
7108+ .read = octeon_dummy_read_config,
7109+ .write = octeon_dummy_write_config,
7110 };
7111
7112 static struct resource octeon_dummy_mem_resource = {
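[Note on the pci_ops conversions above: they are C hygiene with a security payoff. Positional initialization of an ops structure silently mis-binds callbacks if fields are ever reordered or new fields inserted, whereas C99 designated initializers bind by name; grsecurity's constify plugin also expects ops structures initialized this way so they can be placed in read-only memory. Side by side, with an illustrative struct:]

struct pci_ops_like {
    int (*read)(int reg);
    int (*write)(int reg, int val);
};

static int my_read(int reg)           { return reg; }
static int my_write(int reg, int val) { return reg + val; }

/* Fragile: breaks silently if the struct gains or reorders members. */
static const struct pci_ops_like positional = { my_read, my_write };

/* Robust: each callback bound by field name. */
static const struct pci_ops_like designated = {
    .read  = my_read,
    .write = my_write,
};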
7113diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7114index a2358b4..7cead4f 100644
7115--- a/arch/mips/sgi-ip27/ip27-nmi.c
7116+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7117@@ -187,9 +187,9 @@ void
7118 cont_nmi_dump(void)
7119 {
7120 #ifndef REAL_NMI_SIGNAL
7121- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7122+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7123
7124- atomic_inc(&nmied_cpus);
7125+ atomic_inc_unchecked(&nmied_cpus);
7126 #endif
7127 /*
7128 * Only allow 1 cpu to proceed
7129@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7130 udelay(10000);
7131 }
7132 #else
7133- while (atomic_read(&nmied_cpus) != num_online_cpus());
7134+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7135 #endif
7136
7137 /*
7138diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7139index a046b30..6799527 100644
7140--- a/arch/mips/sni/rm200.c
7141+++ b/arch/mips/sni/rm200.c
7142@@ -270,7 +270,7 @@ spurious_8259A_irq:
7143 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7144 spurious_irq_mask |= irqmask;
7145 }
7146- atomic_inc(&irq_err_count);
7147+ atomic_inc_unchecked(&irq_err_count);
7148 /*
7149 * Theoretically we do not have to handle this IRQ,
7150 * but in Linux this does not cause problems and is
7151diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7152index 41e873b..34d33a7 100644
7153--- a/arch/mips/vr41xx/common/icu.c
7154+++ b/arch/mips/vr41xx/common/icu.c
7155@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7156
7157 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7158
7159- atomic_inc(&irq_err_count);
7160+ atomic_inc_unchecked(&irq_err_count);
7161
7162 return -1;
7163 }
7164diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7165index ae0e4ee..e8f0692 100644
7166--- a/arch/mips/vr41xx/common/irq.c
7167+++ b/arch/mips/vr41xx/common/irq.c
7168@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7169 irq_cascade_t *cascade;
7170
7171 if (irq >= NR_IRQS) {
7172- atomic_inc(&irq_err_count);
7173+ atomic_inc_unchecked(&irq_err_count);
7174 return;
7175 }
7176
7177@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7178 ret = cascade->get_irq(irq);
7179 irq = ret;
7180 if (ret < 0)
7181- atomic_inc(&irq_err_count);
7182+ atomic_inc_unchecked(&irq_err_count);
7183 else
7184 irq_dispatch(irq);
7185 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7186diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7187index 967d144..db12197 100644
7188--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7189+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7190@@ -11,12 +11,14 @@
7191 #ifndef _ASM_PROC_CACHE_H
7192 #define _ASM_PROC_CACHE_H
7193
7194+#include <linux/const.h>
7195+
7196 /* L1 cache */
7197
7198 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7199 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7200-#define L1_CACHE_BYTES 16 /* bytes per entry */
7201 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7202+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7203 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7204
7205 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7206diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7207index bcb5df2..84fabd2 100644
7208--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7209+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7210@@ -16,13 +16,15 @@
7211 #ifndef _ASM_PROC_CACHE_H
7212 #define _ASM_PROC_CACHE_H
7213
7214+#include <linux/const.h>
7215+
7216 /*
7217 * L1 cache
7218 */
7219 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7220 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7221-#define L1_CACHE_BYTES 32 /* bytes per entry */
7222 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7223+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7224 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7225
7226 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7227diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7228index 4ce7a01..449202a 100644
7229--- a/arch/openrisc/include/asm/cache.h
7230+++ b/arch/openrisc/include/asm/cache.h
7231@@ -19,11 +19,13 @@
7232 #ifndef __ASM_OPENRISC_CACHE_H
7233 #define __ASM_OPENRISC_CACHE_H
7234
7235+#include <linux/const.h>
7236+
7237 /* FIXME: How can we replace these with values from the CPU...
7238 * they shouldn't be hard-coded!
7239 */
7240
7241-#define L1_CACHE_BYTES 16
7242 #define L1_CACHE_SHIFT 4
7243+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7244
7245 #endif /* __ASM_OPENRISC_CACHE_H */
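[Note on the recurring cache.h change (mn10300, openrisc, and parisc below): deriving L1_CACHE_BYTES from L1_CACHE_SHIFT replaces two constants that can drift apart with one source of truth, and _AC(1,UL) from <linux/const.h> keeps the expression valid both in C, where the UL suffix matters for width, and in assembly, where suffixes are not understood. _AC works roughly like this:]

/* Simplified shape of <linux/const.h>'s _AC() macro. */
#ifdef __ASSEMBLY__
# define _AC(X, Y)   X            /* assembler: bare constant */
#else
# define __AC(X, Y)  (X##Y)       /* C: paste the type suffix */
# define _AC(X, Y)   __AC(X, Y)
#endif

#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)   /* 32UL in C */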
7246diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7247index 226f8ca..9d9b87d 100644
7248--- a/arch/parisc/include/asm/atomic.h
7249+++ b/arch/parisc/include/asm/atomic.h
7250@@ -273,6 +273,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7251 return dec;
7252 }
7253
7254+#define atomic64_read_unchecked(v) atomic64_read(v)
7255+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7256+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7257+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7258+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7259+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7260+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7261+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7262+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7263+
7264 #endif /* !CONFIG_64BIT */
7265
7266
7267diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7268index 47f11c7..3420df2 100644
7269--- a/arch/parisc/include/asm/cache.h
7270+++ b/arch/parisc/include/asm/cache.h
7271@@ -5,6 +5,7 @@
7272 #ifndef __ARCH_PARISC_CACHE_H
7273 #define __ARCH_PARISC_CACHE_H
7274
7275+#include <linux/const.h>
7276
7277 /*
7278 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7279@@ -15,13 +16,13 @@
7280 * just ruin performance.
7281 */
7282 #ifdef CONFIG_PA20
7283-#define L1_CACHE_BYTES 64
7284 #define L1_CACHE_SHIFT 6
7285 #else
7286-#define L1_CACHE_BYTES 32
7287 #define L1_CACHE_SHIFT 5
7288 #endif
7289
7290+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7291+
7292 #ifndef __ASSEMBLY__
7293
7294 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7295diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7296index 3391d06..c23a2cc 100644
7297--- a/arch/parisc/include/asm/elf.h
7298+++ b/arch/parisc/include/asm/elf.h
7299@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7300
7301 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7302
7303+#ifdef CONFIG_PAX_ASLR
7304+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7305+
7306+#define PAX_DELTA_MMAP_LEN 16
7307+#define PAX_DELTA_STACK_LEN 16
7308+#endif
7309+
7310 /* This yields a mask that user programs can use to figure out what
7311 instruction set this CPU supports. This could be done in user space,
7312 but it's not easy, and we've already done it here. */
7313diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7314index f213f5b..0af3e8e 100644
7315--- a/arch/parisc/include/asm/pgalloc.h
7316+++ b/arch/parisc/include/asm/pgalloc.h
7317@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7318 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7319 }
7320
7321+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7322+{
7323+ pgd_populate(mm, pgd, pmd);
7324+}
7325+
7326 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7327 {
7328 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7329@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7330 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7331 #define pmd_free(mm, x) do { } while (0)
7332 #define pgd_populate(mm, pmd, pte) BUG()
7333+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7334
7335 #endif
7336
7337diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7338index 22b89d1..ce34230 100644
7339--- a/arch/parisc/include/asm/pgtable.h
7340+++ b/arch/parisc/include/asm/pgtable.h
7341@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7342 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7343 #define PAGE_COPY PAGE_EXECREAD
7344 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7345+
7346+#ifdef CONFIG_PAX_PAGEEXEC
7347+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7348+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7349+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7350+#else
7351+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7352+# define PAGE_COPY_NOEXEC PAGE_COPY
7353+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7354+#endif
7355+
7356 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7357 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7358 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7359diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7360index a5cb070..8604ddc 100644
7361--- a/arch/parisc/include/asm/uaccess.h
7362+++ b/arch/parisc/include/asm/uaccess.h
7363@@ -243,10 +243,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7364 const void __user *from,
7365 unsigned long n)
7366 {
7367- int sz = __compiletime_object_size(to);
7368+ size_t sz = __compiletime_object_size(to);
7369 int ret = -EFAULT;
7370
7371- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7372+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7373 ret = __copy_from_user(to, from, n);
7374 else
7375 copy_from_user_overflow();
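[Note on the copy_from_user() hunk above: it fixes a type mismatch. __compiletime_object_size() reports sizes as size_t and uses (size_t)-1 as the "size unknown" sentinel; holding that in an int truncates for objects larger than INT_MAX and makes the comparisons rely on signed/unsigned conversion. With size_t throughout, the sentinel test is exact:]

#include <stddef.h>
#include <stdio.h>

int main(void)
{
    size_t sz = (size_t)-1;   /* sentinel: object size unknown */
    size_t n  = 64;

    /* All-size_t comparison: exact sentinel test, no sign conversion. */
    if (sz == (size_t)-1 || sz >= n)
        printf("copy permitted (size unknown or object large enough)\n");
    return 0;
}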
7376diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7377index 5822e8e..bc5e638 100644
7378--- a/arch/parisc/kernel/module.c
7379+++ b/arch/parisc/kernel/module.c
7380@@ -98,16 +98,38 @@
7381
7382 /* three functions to determine where in the module core
7383 * or init pieces the location is */
7384+static inline int in_init_rx(struct module *me, void *loc)
7385+{
7386+ return (loc >= me->module_init_rx &&
7387+ loc < (me->module_init_rx + me->init_size_rx));
7388+}
7389+
7390+static inline int in_init_rw(struct module *me, void *loc)
7391+{
7392+ return (loc >= me->module_init_rw &&
7393+ loc < (me->module_init_rw + me->init_size_rw));
7394+}
7395+
7396 static inline int in_init(struct module *me, void *loc)
7397 {
7398- return (loc >= me->module_init &&
7399- loc <= (me->module_init + me->init_size));
7400+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7401+}
7402+
7403+static inline int in_core_rx(struct module *me, void *loc)
7404+{
7405+ return (loc >= me->module_core_rx &&
7406+ loc < (me->module_core_rx + me->core_size_rx));
7407+}
7408+
7409+static inline int in_core_rw(struct module *me, void *loc)
7410+{
7411+ return (loc >= me->module_core_rw &&
7412+ loc < (me->module_core_rw + me->core_size_rw));
7413 }
7414
7415 static inline int in_core(struct module *me, void *loc)
7416 {
7417- return (loc >= me->module_core &&
7418- loc <= (me->module_core + me->core_size));
7419+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7420 }
7421
7422 static inline int in_local(struct module *me, void *loc)
7423@@ -367,13 +389,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7424 }
7425
7426 /* align things a bit */
7427- me->core_size = ALIGN(me->core_size, 16);
7428- me->arch.got_offset = me->core_size;
7429- me->core_size += gots * sizeof(struct got_entry);
7430+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7431+ me->arch.got_offset = me->core_size_rw;
7432+ me->core_size_rw += gots * sizeof(struct got_entry);
7433
7434- me->core_size = ALIGN(me->core_size, 16);
7435- me->arch.fdesc_offset = me->core_size;
7436- me->core_size += fdescs * sizeof(Elf_Fdesc);
7437+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7438+ me->arch.fdesc_offset = me->core_size_rw;
7439+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7440
7441 me->arch.got_max = gots;
7442 me->arch.fdesc_max = fdescs;
7443@@ -391,7 +413,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7444
7445 BUG_ON(value == 0);
7446
7447- got = me->module_core + me->arch.got_offset;
7448+ got = me->module_core_rw + me->arch.got_offset;
7449 for (i = 0; got[i].addr; i++)
7450 if (got[i].addr == value)
7451 goto out;
7452@@ -409,7 +431,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7453 #ifdef CONFIG_64BIT
7454 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7455 {
7456- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7457+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7458
7459 if (!value) {
7460 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7461@@ -427,7 +449,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7462
7463 /* Create new one */
7464 fdesc->addr = value;
7465- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7466+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7467 return (Elf_Addr)fdesc;
7468 }
7469 #endif /* CONFIG_64BIT */
7470@@ -839,7 +861,7 @@ register_unwind_table(struct module *me,
7471
7472 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7473 end = table + sechdrs[me->arch.unwind_section].sh_size;
7474- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7475+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7476
7477 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7478 me->arch.unwind_section, table, end, gp);
7479diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7480index e1ffea2..46ed66e 100644
7481--- a/arch/parisc/kernel/sys_parisc.c
7482+++ b/arch/parisc/kernel/sys_parisc.c
7483@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7484 unsigned long task_size = TASK_SIZE;
7485 int do_color_align, last_mmap;
7486 struct vm_unmapped_area_info info;
7487+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7488
7489 if (len > task_size)
7490 return -ENOMEM;
7491@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7492 goto found_addr;
7493 }
7494
7495+#ifdef CONFIG_PAX_RANDMMAP
7496+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7497+#endif
7498+
7499 if (addr) {
7500 if (do_color_align && last_mmap)
7501 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7502@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7503 info.high_limit = mmap_upper_limit();
7504 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7505 info.align_offset = shared_align_offset(last_mmap, pgoff);
7506+ info.threadstack_offset = offset;
7507 addr = vm_unmapped_area(&info);
7508
7509 found_addr:
7510@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7511 unsigned long addr = addr0;
7512 int do_color_align, last_mmap;
7513 struct vm_unmapped_area_info info;
7514+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7515
7516 #ifdef CONFIG_64BIT
7517 /* This should only ever run for 32-bit processes. */
7518@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7519 }
7520
7521 /* requesting a specific address */
7522+#ifdef CONFIG_PAX_RANDMMAP
7523+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7524+#endif
7525+
7526 if (addr) {
7527 if (do_color_align && last_mmap)
7528 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7529@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7530 info.high_limit = mm->mmap_base;
7531 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7532 info.align_offset = shared_align_offset(last_mmap, pgoff);
7533+ info.threadstack_offset = offset;
7534 addr = vm_unmapped_area(&info);
7535 if (!(addr & ~PAGE_MASK))
7536 goto found_addr;
7537@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7538 mm->mmap_legacy_base = mmap_legacy_base();
7539 mm->mmap_base = mmap_upper_limit();
7540
7541+#ifdef CONFIG_PAX_RANDMMAP
7542+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
7543+ mm->mmap_legacy_base += mm->delta_mmap;
7544+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7545+ }
7546+#endif
7547+
7548 if (mmap_is_legacy()) {
7549 mm->mmap_base = mm->mmap_legacy_base;
7550 mm->get_unmapped_area = arch_get_unmapped_area;
7551diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7552index 47ee620..1107387 100644
7553--- a/arch/parisc/kernel/traps.c
7554+++ b/arch/parisc/kernel/traps.c
7555@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7556
7557 down_read(&current->mm->mmap_sem);
7558 vma = find_vma(current->mm,regs->iaoq[0]);
7559- if (vma && (regs->iaoq[0] >= vma->vm_start)
7560- && (vma->vm_flags & VM_EXEC)) {
7561-
7562+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7563 fault_address = regs->iaoq[0];
7564 fault_space = regs->iasq[0];
7565
7566diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7567index e5120e6..8ddb5cc 100644
7568--- a/arch/parisc/mm/fault.c
7569+++ b/arch/parisc/mm/fault.c
7570@@ -15,6 +15,7 @@
7571 #include <linux/sched.h>
7572 #include <linux/interrupt.h>
7573 #include <linux/module.h>
7574+#include <linux/unistd.h>
7575
7576 #include <asm/uaccess.h>
7577 #include <asm/traps.h>
7578@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
7579 static unsigned long
7580 parisc_acctyp(unsigned long code, unsigned int inst)
7581 {
7582- if (code == 6 || code == 16)
7583+ if (code == 6 || code == 7 || code == 16)
7584 return VM_EXEC;
7585
7586 switch (inst & 0xf0000000) {
7587@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7588 }
7589 #endif
7590
7591+#ifdef CONFIG_PAX_PAGEEXEC
7592+/*
7593+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7594+ *
7595+ * returns 1 when task should be killed
7596+ * 2 when rt_sigreturn trampoline was detected
7597+ * 3 when unpatched PLT trampoline was detected
7598+ */
7599+static int pax_handle_fetch_fault(struct pt_regs *regs)
7600+{
7601+
7602+#ifdef CONFIG_PAX_EMUPLT
7603+ int err;
7604+
7605+ do { /* PaX: unpatched PLT emulation */
7606+ unsigned int bl, depwi;
7607+
7608+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7609+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7610+
7611+ if (err)
7612+ break;
7613+
7614+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7615+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7616+
7617+ err = get_user(ldw, (unsigned int *)addr);
7618+ err |= get_user(bv, (unsigned int *)(addr+4));
7619+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7620+
7621+ if (err)
7622+ break;
7623+
7624+ if (ldw == 0x0E801096U &&
7625+ bv == 0xEAC0C000U &&
7626+ ldw2 == 0x0E881095U)
7627+ {
7628+ unsigned int resolver, map;
7629+
7630+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7631+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7632+ if (err)
7633+ break;
7634+
7635+ regs->gr[20] = instruction_pointer(regs)+8;
7636+ regs->gr[21] = map;
7637+ regs->gr[22] = resolver;
7638+ regs->iaoq[0] = resolver | 3UL;
7639+ regs->iaoq[1] = regs->iaoq[0] + 4;
7640+ return 3;
7641+ }
7642+ }
7643+ } while (0);
7644+#endif
7645+
7646+#ifdef CONFIG_PAX_EMUTRAMP
7647+
7648+#ifndef CONFIG_PAX_EMUSIGRT
7649+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7650+ return 1;
7651+#endif
7652+
7653+ do { /* PaX: rt_sigreturn emulation */
7654+ unsigned int ldi1, ldi2, bel, nop;
7655+
7656+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7657+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7658+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7659+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7660+
7661+ if (err)
7662+ break;
7663+
7664+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7665+ ldi2 == 0x3414015AU &&
7666+ bel == 0xE4008200U &&
7667+ nop == 0x08000240U)
7668+ {
7669+ regs->gr[25] = (ldi1 & 2) >> 1;
7670+ regs->gr[20] = __NR_rt_sigreturn;
7671+ regs->gr[31] = regs->iaoq[1] + 16;
7672+ regs->sr[0] = regs->iasq[1];
7673+ regs->iaoq[0] = 0x100UL;
7674+ regs->iaoq[1] = regs->iaoq[0] + 4;
7675+ regs->iasq[0] = regs->sr[2];
7676+ regs->iasq[1] = regs->sr[2];
7677+ return 2;
7678+ }
7679+ } while (0);
7680+#endif
7681+
7682+ return 1;
7683+}
7684+
7685+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7686+{
7687+ unsigned long i;
7688+
7689+ printk(KERN_ERR "PAX: bytes at PC: ");
7690+ for (i = 0; i < 5; i++) {
7691+ unsigned int c;
7692+ if (get_user(c, (unsigned int *)pc+i))
7693+ printk(KERN_CONT "???????? ");
7694+ else
7695+ printk(KERN_CONT "%08x ", c);
7696+ }
7697+ printk("\n");
7698+}
7699+#endif
7700+
7701 int fixup_exception(struct pt_regs *regs)
7702 {
7703 const struct exception_table_entry *fix;
7704@@ -234,8 +345,33 @@ retry:
7705
7706 good_area:
7707
7708- if ((vma->vm_flags & acc_type) != acc_type)
7709+ if ((vma->vm_flags & acc_type) != acc_type) {
7710+
7711+#ifdef CONFIG_PAX_PAGEEXEC
7712+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7713+ (address & ~3UL) == instruction_pointer(regs))
7714+ {
7715+ up_read(&mm->mmap_sem);
7716+ switch (pax_handle_fetch_fault(regs)) {
7717+
7718+#ifdef CONFIG_PAX_EMUPLT
7719+ case 3:
7720+ return;
7721+#endif
7722+
7723+#ifdef CONFIG_PAX_EMUTRAMP
7724+ case 2:
7725+ return;
7726+#endif
7727+
7728+ }
7729+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7730+ do_group_exit(SIGKILL);
7731+ }
7732+#endif
7733+
7734 goto bad_area;
7735+ }
7736
7737 /*
7738 * If for any reason at all we couldn't handle the fault, make
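[Note on the PAGEEXEC fetch-fault handler above: it is an instruction-pattern matcher. With PLT pages made non-executable, an attempted fetch from an unpatched PLT slot faults; the handler reads the faulting words with get_user(), compares them against the known bl/depwi (and ldw/bv/ldw2) sequences the parisc toolchain emits, and on a match emulates the jump to the lazy resolver instead of killing the task. The matching skeleton, stripped of the register bookkeeping:]

#include <stdint.h>
#include <stdio.h>

/* Opcode constants taken verbatim from the hunk above. */
#define BL_INSN    0xEA9F1FDDU
#define DEPWI_INSN 0xD6801C1EU

/* Sketch: match the two-word unpatched-PLT signature at pc. */
static int is_unpatched_plt(const uint32_t *pc)
{
    /* The kernel reads these via get_user(); plain loads suffice here. */
    return pc[0] == BL_INSN && pc[1] == DEPWI_INSN;
}

int main(void)
{
    uint32_t plt[2] = { BL_INSN, DEPWI_INSN };
    printf("match: %d\n", is_unpatched_plt(plt));   /* 1 */
    return 0;
}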
7739diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7740index a2a168e..e484682 100644
7741--- a/arch/powerpc/Kconfig
7742+++ b/arch/powerpc/Kconfig
7743@@ -408,6 +408,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
7744 config KEXEC
7745 bool "kexec system call"
7746 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7747+ depends on !GRKERNSEC_KMEM
7748 help
7749 kexec is a system call that implements the ability to shutdown your
7750 current kernel, and to start another kernel. It is like a reboot
7751diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7752index 512d278..d31fadd 100644
7753--- a/arch/powerpc/include/asm/atomic.h
7754+++ b/arch/powerpc/include/asm/atomic.h
7755@@ -12,6 +12,11 @@
7756
7757 #define ATOMIC_INIT(i) { (i) }
7758
7759+#define _ASM_EXTABLE(from, to) \
7760+" .section __ex_table,\"a\"\n" \
7761+ PPC_LONG" " #from ", " #to"\n" \
7762+" .previous\n"
7763+
7764 static __inline__ int atomic_read(const atomic_t *v)
7765 {
7766 int t;
7767@@ -21,39 +26,80 @@ static __inline__ int atomic_read(const atomic_t *v)
7768 return t;
7769 }
7770
7771+static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
7772+{
7773+ int t;
7774+
7775+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7776+
7777+ return t;
7778+}
7779+
7780 static __inline__ void atomic_set(atomic_t *v, int i)
7781 {
7782 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7783 }
7784
7785-#define ATOMIC_OP(op, asm_op) \
7786-static __inline__ void atomic_##op(int a, atomic_t *v) \
7787+static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7788+{
7789+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7790+}
7791+
7792+#ifdef CONFIG_PAX_REFCOUNT
7793+#define __REFCOUNT_OP(op) op##o.
7794+#define __OVERFLOW_PRE \
7795+ " mcrxr cr0\n"
7796+#define __OVERFLOW_POST \
7797+ " bf 4*cr0+so, 3f\n" \
7798+ "2: .long 0x00c00b00\n" \
7799+ "3:\n"
7800+#define __OVERFLOW_EXTABLE \
7801+ "\n4:\n"
7802+ _ASM_EXTABLE(2b, 4b)
7803+#else
7804+#define __REFCOUNT_OP(op) op
7805+#define __OVERFLOW_PRE
7806+#define __OVERFLOW_POST
7807+#define __OVERFLOW_EXTABLE
7808+#endif
7809+
7810+#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7811+static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v) \
7812 { \
7813 int t; \
7814 \
7815 __asm__ __volatile__( \
7816-"1: lwarx %0,0,%3 # atomic_" #op "\n" \
7817+"1: lwarx %0,0,%3 # atomic_" #op #suffix "\n" \
7818+ pre_op \
7819 #asm_op " %0,%2,%0\n" \
7820+ post_op \
7821 PPC405_ERR77(0,%3) \
7822 " stwcx. %0,0,%3 \n" \
7823 " bne- 1b\n" \
7824+ extable \
7825 : "=&r" (t), "+m" (v->counter) \
7826 : "r" (a), "r" (&v->counter) \
7827 : "cc"); \
7828 } \
7829
7830-#define ATOMIC_OP_RETURN(op, asm_op) \
7831-static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7832+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , ) \
7833+ __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7834+
7835+#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
7836+static inline int atomic_##op##_return##suffix(int a, atomic##suffix##_t *v)\
7837 { \
7838 int t; \
7839 \
7840 __asm__ __volatile__( \
7841 PPC_ATOMIC_ENTRY_BARRIER \
7842-"1: lwarx %0,0,%2 # atomic_" #op "_return\n" \
7843+"1: lwarx %0,0,%2 # atomic_" #op "_return" #suffix "\n" \
7844+ pre_op \
7845 #asm_op " %0,%1,%0\n" \
7846+ post_op \
7847 PPC405_ERR77(0,%2) \
7848 " stwcx. %0,0,%2 \n" \
7849 " bne- 1b\n" \
7850+ extable \
7851 PPC_ATOMIC_EXIT_BARRIER \
7852 : "=&r" (t) \
7853 : "r" (a), "r" (&v->counter) \
7854@@ -62,6 +108,9 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7855 return t; \
7856 }
7857
7858+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\
7859+ __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7860+
7861 #define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
7862
7863 ATOMIC_OPS(add, add)
7864@@ -69,42 +118,29 @@ ATOMIC_OPS(sub, subf)
7865
7866 #undef ATOMIC_OPS
7867 #undef ATOMIC_OP_RETURN
7868+#undef __ATOMIC_OP_RETURN
7869 #undef ATOMIC_OP
7870+#undef __ATOMIC_OP
7871
7872 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
7873
7874-static __inline__ void atomic_inc(atomic_t *v)
7875-{
7876- int t;
7877+/*
7878+ * atomic_inc - increment atomic variable
7879+ * @v: pointer of type atomic_t
7880+ *
7881+ * Atomically increments @v by 1
7882+ */
7883+#define atomic_inc(v) atomic_add(1, (v))
7884+#define atomic_inc_return(v) atomic_add_return(1, (v))
7885
7886- __asm__ __volatile__(
7887-"1: lwarx %0,0,%2 # atomic_inc\n\
7888- addic %0,%0,1\n"
7889- PPC405_ERR77(0,%2)
7890-" stwcx. %0,0,%2 \n\
7891- bne- 1b"
7892- : "=&r" (t), "+m" (v->counter)
7893- : "r" (&v->counter)
7894- : "cc", "xer");
7895+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7896+{
7897+ atomic_add_unchecked(1, v);
7898 }
7899
7900-static __inline__ int atomic_inc_return(atomic_t *v)
7901+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7902 {
7903- int t;
7904-
7905- __asm__ __volatile__(
7906- PPC_ATOMIC_ENTRY_BARRIER
7907-"1: lwarx %0,0,%1 # atomic_inc_return\n\
7908- addic %0,%0,1\n"
7909- PPC405_ERR77(0,%1)
7910-" stwcx. %0,0,%1 \n\
7911- bne- 1b"
7912- PPC_ATOMIC_EXIT_BARRIER
7913- : "=&r" (t)
7914- : "r" (&v->counter)
7915- : "cc", "xer", "memory");
7916-
7917- return t;
7918+ return atomic_add_return_unchecked(1, v);
7919 }
7920
7921 /*
7922@@ -117,43 +153,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
7923 */
7924 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7925
7926-static __inline__ void atomic_dec(atomic_t *v)
7927+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7928 {
7929- int t;
7930-
7931- __asm__ __volatile__(
7932-"1: lwarx %0,0,%2 # atomic_dec\n\
7933- addic %0,%0,-1\n"
7934- PPC405_ERR77(0,%2)\
7935-" stwcx. %0,0,%2\n\
7936- bne- 1b"
7937- : "=&r" (t), "+m" (v->counter)
7938- : "r" (&v->counter)
7939- : "cc", "xer");
7940+ return atomic_add_return_unchecked(1, v) == 0;
7941 }
7942
7943-static __inline__ int atomic_dec_return(atomic_t *v)
7944+/*
7945+ * atomic_dec - decrement atomic variable
7946+ * @v: pointer of type atomic_t
7947+ *
7948+ * Atomically decrements @v by 1
7949+ */
7950+#define atomic_dec(v) atomic_sub(1, (v))
7951+#define atomic_dec_return(v) atomic_sub_return(1, (v))
7952+
7953+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
7954 {
7955- int t;
7956-
7957- __asm__ __volatile__(
7958- PPC_ATOMIC_ENTRY_BARRIER
7959-"1: lwarx %0,0,%1 # atomic_dec_return\n\
7960- addic %0,%0,-1\n"
7961- PPC405_ERR77(0,%1)
7962-" stwcx. %0,0,%1\n\
7963- bne- 1b"
7964- PPC_ATOMIC_EXIT_BARRIER
7965- : "=&r" (t)
7966- : "r" (&v->counter)
7967- : "cc", "xer", "memory");
7968-
7969- return t;
7970+ atomic_sub_unchecked(1, v);
7971 }
7972
7973 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7974 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7975
7976+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7977+{
7978+ return cmpxchg(&(v->counter), old, new);
7979+}
7980+
7981+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7982+{
7983+ return xchg(&(v->counter), new);
7984+}
7985+
7986 /**
7987 * __atomic_add_unless - add unless the number is a given value
7988 * @v: pointer of type atomic_t
7989@@ -171,11 +202,27 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
7990 PPC_ATOMIC_ENTRY_BARRIER
7991 "1: lwarx %0,0,%1 # __atomic_add_unless\n\
7992 cmpw 0,%0,%3 \n\
7993- beq- 2f \n\
7994- add %0,%2,%0 \n"
7995+ beq- 2f \n"
7996+
7997+#ifdef CONFIG_PAX_REFCOUNT
7998+" mcrxr cr0\n"
7999+" addo. %0,%2,%0\n"
8000+" bf 4*cr0+so, 4f\n"
8001+"3:.long " "0x00c00b00""\n"
8002+"4:\n"
8003+#else
8004+ "add %0,%2,%0 \n"
8005+#endif
8006+
8007 PPC405_ERR77(0,%2)
8008 " stwcx. %0,0,%1 \n\
8009 bne- 1b \n"
8010+"5:"
8011+
8012+#ifdef CONFIG_PAX_REFCOUNT
8013+ _ASM_EXTABLE(3b, 5b)
8014+#endif
8015+
8016 PPC_ATOMIC_EXIT_BARRIER
8017 " subf %0,%2,%0 \n\
8018 2:"
8019@@ -248,6 +295,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
8020 }
8021 #define atomic_dec_if_positive atomic_dec_if_positive
8022
8023+#define smp_mb__before_atomic_dec() smp_mb()
8024+#define smp_mb__after_atomic_dec() smp_mb()
8025+#define smp_mb__before_atomic_inc() smp_mb()
8026+#define smp_mb__after_atomic_inc() smp_mb()
8027+
8028 #ifdef __powerpc64__
8029
8030 #define ATOMIC64_INIT(i) { (i) }
8031@@ -261,37 +313,60 @@ static __inline__ long atomic64_read(const atomic64_t *v)
8032 return t;
8033 }
8034
8035+static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8036+{
8037+ long t;
8038+
8039+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
8040+
8041+ return t;
8042+}
8043+
8044 static __inline__ void atomic64_set(atomic64_t *v, long i)
8045 {
8046 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8047 }
8048
8049-#define ATOMIC64_OP(op, asm_op) \
8050-static __inline__ void atomic64_##op(long a, atomic64_t *v) \
8051+static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8052+{
8053+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8054+}
8055+
8056+#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable) \
8057+static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\
8058 { \
8059 long t; \
8060 \
8061 __asm__ __volatile__( \
8062 "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
8063+ pre_op \
8064 #asm_op " %0,%2,%0\n" \
8065+ post_op \
8066 " stdcx. %0,0,%3 \n" \
8067 " bne- 1b\n" \
8068+ extable \
8069 : "=&r" (t), "+m" (v->counter) \
8070 : "r" (a), "r" (&v->counter) \
8071 : "cc"); \
8072 }
8073
8074-#define ATOMIC64_OP_RETURN(op, asm_op) \
8075-static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8076+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , ) \
8077+ __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8078+
8079+#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
8080+static inline long atomic64_##op##_return##suffix(long a, atomic64##suffix##_t *v)\
8081 { \
8082 long t; \
8083 \
8084 __asm__ __volatile__( \
8085 PPC_ATOMIC_ENTRY_BARRIER \
8086 "1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \
8087+ pre_op \
8088 #asm_op " %0,%1,%0\n" \
8089+ post_op \
8090 " stdcx. %0,0,%2 \n" \
8091 " bne- 1b\n" \
8092+ extable \
8093 PPC_ATOMIC_EXIT_BARRIER \
8094 : "=&r" (t) \
8095 : "r" (a), "r" (&v->counter) \
8096@@ -300,6 +375,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8097 return t; \
8098 }
8099
8100+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\
8101+ __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8102+
8103 #define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
8104
8105 ATOMIC64_OPS(add, add)
8106@@ -307,40 +385,33 @@ ATOMIC64_OPS(sub, subf)
8107
8108 #undef ATOMIC64_OPS
8109 #undef ATOMIC64_OP_RETURN
8110+#undef __ATOMIC64_OP_RETURN
8111 #undef ATOMIC64_OP
8112+#undef __ATOMIC64_OP
8113+#undef __OVERFLOW_EXTABLE
8114+#undef __OVERFLOW_POST
8115+#undef __OVERFLOW_PRE
8116+#undef __REFCOUNT_OP
8117
8118 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
8119
8120-static __inline__ void atomic64_inc(atomic64_t *v)
8121-{
8122- long t;
8123+/*
8124+ * atomic64_inc - increment atomic variable
8125+ * @v: pointer of type atomic64_t
8126+ *
8127+ * Atomically increments @v by 1
8128+ */
8129+#define atomic64_inc(v) atomic64_add(1, (v))
8130+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
8131
8132- __asm__ __volatile__(
8133-"1: ldarx %0,0,%2 # atomic64_inc\n\
8134- addic %0,%0,1\n\
8135- stdcx. %0,0,%2 \n\
8136- bne- 1b"
8137- : "=&r" (t), "+m" (v->counter)
8138- : "r" (&v->counter)
8139- : "cc", "xer");
8140+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8141+{
8142+ atomic64_add_unchecked(1, v);
8143 }
8144
8145-static __inline__ long atomic64_inc_return(atomic64_t *v)
8146+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8147 {
8148- long t;
8149-
8150- __asm__ __volatile__(
8151- PPC_ATOMIC_ENTRY_BARRIER
8152-"1: ldarx %0,0,%1 # atomic64_inc_return\n\
8153- addic %0,%0,1\n\
8154- stdcx. %0,0,%1 \n\
8155- bne- 1b"
8156- PPC_ATOMIC_EXIT_BARRIER
8157- : "=&r" (t)
8158- : "r" (&v->counter)
8159- : "cc", "xer", "memory");
8160-
8161- return t;
8162+ return atomic64_add_return_unchecked(1, v);
8163 }
8164
8165 /*
8166@@ -353,36 +424,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
8167 */
8168 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8169
8170-static __inline__ void atomic64_dec(atomic64_t *v)
8171+/*
8172+ * atomic64_dec - decrement atomic variable
8173+ * @v: pointer of type atomic64_t
8174+ *
8175+ * Atomically decrements @v by 1
8176+ */
8177+#define atomic64_dec(v) atomic64_sub(1, (v))
8178+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
8179+
8180+static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8181 {
8182- long t;
8183-
8184- __asm__ __volatile__(
8185-"1: ldarx %0,0,%2 # atomic64_dec\n\
8186- addic %0,%0,-1\n\
8187- stdcx. %0,0,%2\n\
8188- bne- 1b"
8189- : "=&r" (t), "+m" (v->counter)
8190- : "r" (&v->counter)
8191- : "cc", "xer");
8192-}
8193-
8194-static __inline__ long atomic64_dec_return(atomic64_t *v)
8195-{
8196- long t;
8197-
8198- __asm__ __volatile__(
8199- PPC_ATOMIC_ENTRY_BARRIER
8200-"1: ldarx %0,0,%1 # atomic64_dec_return\n\
8201- addic %0,%0,-1\n\
8202- stdcx. %0,0,%1\n\
8203- bne- 1b"
8204- PPC_ATOMIC_EXIT_BARRIER
8205- : "=&r" (t)
8206- : "r" (&v->counter)
8207- : "cc", "xer", "memory");
8208-
8209- return t;
8210+ atomic64_sub_unchecked(1, v);
8211 }
8212
8213 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
8214@@ -415,6 +468,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
8215 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8216 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8217
8218+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8219+{
8220+ return cmpxchg(&(v->counter), old, new);
8221+}
8222+
8223+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8224+{
8225+ return xchg(&(v->counter), new);
8226+}
8227+
8228 /**
8229 * atomic64_add_unless - add unless the number is a given value
8230 * @v: pointer of type atomic64_t
8231@@ -430,13 +493,29 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
8232
8233 __asm__ __volatile__ (
8234 PPC_ATOMIC_ENTRY_BARRIER
8235-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
8236+"1: ldarx %0,0,%1 # atomic64_add_unless\n\
8237 cmpd 0,%0,%3 \n\
8238- beq- 2f \n\
8239- add %0,%2,%0 \n"
8240+ beq- 2f \n"
8241+
8242+#ifdef CONFIG_PAX_REFCOUNT
8243+" mcrxr cr0\n"
8244+" addo. %0,%2,%0\n"
8245+" bf 4*cr0+so, 4f\n"
8246+"3:.long " "0x00c00b00""\n"
8247+"4:\n"
8248+#else
8249+ "add %0,%2,%0 \n"
8250+#endif
8251+
8252 " stdcx. %0,0,%1 \n\
8253 bne- 1b \n"
8254 PPC_ATOMIC_EXIT_BARRIER
8255+"5:"
8256+
8257+#ifdef CONFIG_PAX_REFCOUNT
8258+ _ASM_EXTABLE(3b, 5b)
8259+#endif
8260+
8261 " subf %0,%2,%0 \n\
8262 2:"
8263 : "=&r" (t)
8264diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8265index a3bf5be..e03ba81 100644
8266--- a/arch/powerpc/include/asm/barrier.h
8267+++ b/arch/powerpc/include/asm/barrier.h
8268@@ -76,7 +76,7 @@
8269 do { \
8270 compiletime_assert_atomic_type(*p); \
8271 smp_lwsync(); \
8272- ACCESS_ONCE(*p) = (v); \
8273+ ACCESS_ONCE_RW(*p) = (v); \
8274 } while (0)
8275
8276 #define smp_load_acquire(p) \
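[Note on the ACCESS_ONCE_RW conversion above: it exists because grsecurity hardens ACCESS_ONCE itself. The stock macro is redefined to go through a const-qualified volatile pointer so that stray writes through it become compile errors, and call sites that legitimately write, such as smp_store_release() here, are switched to the _RW variant. The approximate shape is below; this is simplified from the patch's compiler.h changes, which are not part of this excerpt.]

/* Simplified model of the two macros. */
#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))  /* read-only */
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))        /* writable  */

static int flag;

static int reader(void)   { return ACCESS_ONCE(flag); }
static void writer(int v) { ACCESS_ONCE_RW(flag) = v; }
/* ACCESS_ONCE(flag) = v;  would now fail: assignment to a const lvalue */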
8277diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8278index 34a05a1..a1f2c67 100644
8279--- a/arch/powerpc/include/asm/cache.h
8280+++ b/arch/powerpc/include/asm/cache.h
8281@@ -4,6 +4,7 @@
8282 #ifdef __KERNEL__
8283
8284 #include <asm/reg.h>
8285+#include <linux/const.h>
8286
8287 /* bytes per L1 cache line */
8288 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8289@@ -23,7 +24,7 @@
8290 #define L1_CACHE_SHIFT 7
8291 #endif
8292
8293-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8294+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8295
8296 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8297
8298diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8299index 57d289a..b36c98c 100644
8300--- a/arch/powerpc/include/asm/elf.h
8301+++ b/arch/powerpc/include/asm/elf.h
8302@@ -30,6 +30,18 @@
8303
8304 #define ELF_ET_DYN_BASE 0x20000000
8305
8306+#ifdef CONFIG_PAX_ASLR
8307+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8308+
8309+#ifdef __powerpc64__
8310+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8311+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8312+#else
8313+#define PAX_DELTA_MMAP_LEN 15
8314+#define PAX_DELTA_STACK_LEN 15
8315+#endif
8316+#endif
8317+
8318 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8319
8320 /*
8321@@ -128,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8322 (0x7ff >> (PAGE_SHIFT - 12)) : \
8323 (0x3ffff >> (PAGE_SHIFT - 12)))
8324
8325-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8326-#define arch_randomize_brk arch_randomize_brk
8327-
8328-
8329 #ifdef CONFIG_SPU_BASE
8330 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8331 #define NT_SPU 1
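
For scale on the constants above: PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are bit counts of page-granular randomization consumed elsewhere by PaX's ELF loader. The sketch below assumes the usual PaX recipe (mask a random long to LEN bits, shift by PAGE_SHIFT); pax_delta() and the printed span are illustrative, not code from this patch:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12
    #define PAX_DELTA_MMAP_LEN 28   /* ppc64, 64-bit task, per the hunk */

    static unsigned long pax_delta(unsigned long rnd, unsigned int len)
    {
        return (rnd & ((1UL << len) - 1)) << PAGE_SHIFT;
    }

    int main(void)
    {
        srandom(1);
        printf("delta %#lx of possible span %#lx\n",
               pax_delta((unsigned long)random(), PAX_DELTA_MMAP_LEN),
               1UL << (PAX_DELTA_MMAP_LEN + PAGE_SHIFT));
        return 0;
    }
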
8332diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8333index 8196e9c..d83a9f3 100644
8334--- a/arch/powerpc/include/asm/exec.h
8335+++ b/arch/powerpc/include/asm/exec.h
8336@@ -4,6 +4,6 @@
8337 #ifndef _ASM_POWERPC_EXEC_H
8338 #define _ASM_POWERPC_EXEC_H
8339
8340-extern unsigned long arch_align_stack(unsigned long sp);
8341+#define arch_align_stack(x) ((x) & ~0xfUL)
8342
8343 #endif /* _ASM_POWERPC_EXEC_H */
8344diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8345index 5acabbd..7ea14fa 100644
8346--- a/arch/powerpc/include/asm/kmap_types.h
8347+++ b/arch/powerpc/include/asm/kmap_types.h
8348@@ -10,7 +10,7 @@
8349 * 2 of the License, or (at your option) any later version.
8350 */
8351
8352-#define KM_TYPE_NR 16
8353+#define KM_TYPE_NR 17
8354
8355 #endif /* __KERNEL__ */
8356 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8357diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8358index b8da913..c02b593 100644
8359--- a/arch/powerpc/include/asm/local.h
8360+++ b/arch/powerpc/include/asm/local.h
8361@@ -9,21 +9,65 @@ typedef struct
8362 atomic_long_t a;
8363 } local_t;
8364
8365+typedef struct
8366+{
8367+ atomic_long_unchecked_t a;
8368+} local_unchecked_t;
8369+
8370 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8371
8372 #define local_read(l) atomic_long_read(&(l)->a)
8373+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8374 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8375+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8376
8377 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8378+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8379 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8380+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8381 #define local_inc(l) atomic_long_inc(&(l)->a)
8382+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8383 #define local_dec(l) atomic_long_dec(&(l)->a)
8384+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8385
8386 static __inline__ long local_add_return(long a, local_t *l)
8387 {
8388 long t;
8389
8390 __asm__ __volatile__(
8391+"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
8392+
8393+#ifdef CONFIG_PAX_REFCOUNT
8394+" mcrxr cr0\n"
8395+" addo. %0,%1,%0\n"
8396+" bf 4*cr0+so, 3f\n"
8397+"2:.long " "0x00c00b00""\n"
8398+#else
8399+" add %0,%1,%0\n"
8400+#endif
8401+
8402+"3:\n"
8403+ PPC405_ERR77(0,%2)
8404+ PPC_STLCX "%0,0,%2 \n\
8405+ bne- 1b"
8406+
8407+#ifdef CONFIG_PAX_REFCOUNT
8408+"\n4:\n"
8409+ _ASM_EXTABLE(2b, 4b)
8410+#endif
8411+
8412+ : "=&r" (t)
8413+ : "r" (a), "r" (&(l->a.counter))
8414+ : "cc", "memory");
8415+
8416+ return t;
8417+}
8418+
8419+static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
8420+{
8421+ long t;
8422+
8423+ __asm__ __volatile__(
8424 "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
8425 add %0,%1,%0\n"
8426 PPC405_ERR77(0,%2)
8427@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
8428
8429 #define local_cmpxchg(l, o, n) \
8430 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8431+#define local_cmpxchg_unchecked(l, o, n) \
8432+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8433 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8434
8435 /**
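
The shape of this hunk recurs throughout the patch: each trapping counter type gains an _unchecked twin so counters that are allowed to wrap (statistics and similar) opt out of overflow detection explicitly, and since the twin is a distinct struct rather than a typedef alias, handing one flavor to the other's accessors fails to compile. A minimal model of that design choice, with illustrative names:

    /* Distinct struct types make checked/unchecked mix-ups a compile-time
     * incompatible-pointer error instead of a silent policy change. */
    typedef struct { long a; } local_model_t;            /* traps on overflow */
    typedef struct { long a; } local_unchecked_model_t;  /* allowed to wrap */

    static long local_model_read(local_model_t *l) { return l->a; }

    int main(void)
    {
        local_model_t refs = { 1 };
        local_unchecked_model_t stats = { 0 };

        (void)stats;
        /* local_model_read(&stats);  <- rejected: incompatible pointer type */
        return (int)local_model_read(&refs);
    }
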
8436diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8437index 8565c25..2865190 100644
8438--- a/arch/powerpc/include/asm/mman.h
8439+++ b/arch/powerpc/include/asm/mman.h
8440@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8441 }
8442 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8443
8444-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8445+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8446 {
8447 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8448 }
8449diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8450index 69c0598..2c56964 100644
8451--- a/arch/powerpc/include/asm/page.h
8452+++ b/arch/powerpc/include/asm/page.h
8453@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
8454 * and needs to be executable. This means the whole heap ends
8455 * up being executable.
8456 */
8457-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8458- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8459+#define VM_DATA_DEFAULT_FLAGS32 \
8460+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8461+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8462
8463 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8464 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8465@@ -256,6 +257,9 @@ extern long long virt_phys_offset;
8466 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8467 #endif
8468
8469+#define ktla_ktva(addr) (addr)
8470+#define ktva_ktla(addr) (addr)
8471+
8472 #ifndef CONFIG_PPC_BOOK3S_64
8473 /*
8474 * Use the top bit of the higher-level page table entries to indicate whether
8475diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8476index d908a46..3753f71 100644
8477--- a/arch/powerpc/include/asm/page_64.h
8478+++ b/arch/powerpc/include/asm/page_64.h
8479@@ -172,15 +172,18 @@ do { \
8480 * stack by default, so in the absence of a PT_GNU_STACK program header
8481 * we turn execute permission off.
8482 */
8483-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8484- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8485+#define VM_STACK_DEFAULT_FLAGS32 \
8486+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8487+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8488
8489 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8490 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8491
8492+#ifndef CONFIG_PAX_PAGEEXEC
8493 #define VM_STACK_DEFAULT_FLAGS \
8494 (is_32bit_task() ? \
8495 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8496+#endif
8497
8498 #include <asm-generic/getorder.h>
8499
8500diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8501index 4b0be20..c15a27d 100644
8502--- a/arch/powerpc/include/asm/pgalloc-64.h
8503+++ b/arch/powerpc/include/asm/pgalloc-64.h
8504@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8505 #ifndef CONFIG_PPC_64K_PAGES
8506
8507 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8508+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8509
8510 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8511 {
8512@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8513 pud_set(pud, (unsigned long)pmd);
8514 }
8515
8516+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8517+{
8518+ pud_populate(mm, pud, pmd);
8519+}
8520+
8521 #define pmd_populate(mm, pmd, pte_page) \
8522 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8523 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8524@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8525 #endif
8526
8527 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8528+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8529
8530 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8531 pte_t *pte)
8532diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8533index a8805fe..6d69617 100644
8534--- a/arch/powerpc/include/asm/pgtable.h
8535+++ b/arch/powerpc/include/asm/pgtable.h
8536@@ -2,6 +2,7 @@
8537 #define _ASM_POWERPC_PGTABLE_H
8538 #ifdef __KERNEL__
8539
8540+#include <linux/const.h>
8541 #ifndef __ASSEMBLY__
8542 #include <linux/mmdebug.h>
8543 #include <linux/mmzone.h>
8544diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8545index 4aad413..85d86bf 100644
8546--- a/arch/powerpc/include/asm/pte-hash32.h
8547+++ b/arch/powerpc/include/asm/pte-hash32.h
8548@@ -21,6 +21,7 @@
8549 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
8550 #define _PAGE_USER 0x004 /* usermode access allowed */
8551 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8552+#define _PAGE_EXEC _PAGE_GUARDED
8553 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8554 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8555 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
8556diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8557index 1c874fb..e8480a4 100644
8558--- a/arch/powerpc/include/asm/reg.h
8559+++ b/arch/powerpc/include/asm/reg.h
8560@@ -253,6 +253,7 @@
8561 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8562 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8563 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8564+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8565 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8566 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8567 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8568diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8569index 5a6614a..d89995d1 100644
8570--- a/arch/powerpc/include/asm/smp.h
8571+++ b/arch/powerpc/include/asm/smp.h
8572@@ -51,7 +51,7 @@ struct smp_ops_t {
8573 int (*cpu_disable)(void);
8574 void (*cpu_die)(unsigned int nr);
8575 int (*cpu_bootable)(unsigned int nr);
8576-};
8577+} __no_const;
8578
8579 extern void smp_send_debugger_break(void);
8580 extern void start_secondary_resume(void);
8581diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
8582index 4dbe072..b803275 100644
8583--- a/arch/powerpc/include/asm/spinlock.h
8584+++ b/arch/powerpc/include/asm/spinlock.h
8585@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
8586 __asm__ __volatile__(
8587 "1: " PPC_LWARX(%0,0,%1,1) "\n"
8588 __DO_SIGN_EXTEND
8589-" addic. %0,%0,1\n\
8590- ble- 2f\n"
8591+
8592+#ifdef CONFIG_PAX_REFCOUNT
8593+" mcrxr cr0\n"
8594+" addico. %0,%0,1\n"
8595+" bf 4*cr0+so, 3f\n"
8596+"2:.long " "0x00c00b00""\n"
8597+#else
8598+" addic. %0,%0,1\n"
8599+#endif
8600+
8601+"3:\n"
8602+ "ble- 4f\n"
8603 PPC405_ERR77(0,%1)
8604 " stwcx. %0,0,%1\n\
8605 bne- 1b\n"
8606 PPC_ACQUIRE_BARRIER
8607-"2:" : "=&r" (tmp)
8608+"4:"
8609+
8610+#ifdef CONFIG_PAX_REFCOUNT
8611+ _ASM_EXTABLE(2b,4b)
8612+#endif
8613+
8614+ : "=&r" (tmp)
8615 : "r" (&rw->lock)
8616 : "cr0", "xer", "memory");
8617
8618@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
8619 __asm__ __volatile__(
8620 "# read_unlock\n\t"
8621 PPC_RELEASE_BARRIER
8622-"1: lwarx %0,0,%1\n\
8623- addic %0,%0,-1\n"
8624+"1: lwarx %0,0,%1\n"
8625+
8626+#ifdef CONFIG_PAX_REFCOUNT
8627+" mcrxr cr0\n"
8628+" addico. %0,%0,-1\n"
8629+" bf 4*cr0+so, 3f\n"
8630+"2:.long " "0x00c00b00""\n"
8631+#else
8632+" addic. %0,%0,-1\n"
8633+#endif
8634+
8635+"3:\n"
8636 PPC405_ERR77(0,%1)
8637 " stwcx. %0,0,%1\n\
8638 bne- 1b"
8639+
8640+#ifdef CONFIG_PAX_REFCOUNT
8641+"\n4:\n"
8642+ _ASM_EXTABLE(2b, 4b)
8643+#endif
8644+
8645 : "=&r"(tmp)
8646 : "r"(&rw->lock)
8647 : "cr0", "xer", "memory");
8648diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8649index 0be6c68..9c3c6ee 100644
8650--- a/arch/powerpc/include/asm/thread_info.h
8651+++ b/arch/powerpc/include/asm/thread_info.h
8652@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void)
8653 #if defined(CONFIG_PPC64)
8654 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8655 #endif
8656+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
8657+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
8658
8659 /* as above, but as bit values */
8660 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8661@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void)
8662 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8663 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8664 #define _TIF_NOHZ (1<<TIF_NOHZ)
8665+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8666 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8667 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8668- _TIF_NOHZ)
8669+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
8670
8671 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8672 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
8673diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8674index a0c071d..49cdc7f 100644
8675--- a/arch/powerpc/include/asm/uaccess.h
8676+++ b/arch/powerpc/include/asm/uaccess.h
8677@@ -58,6 +58,7 @@
8678
8679 #endif
8680
8681+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8682 #define access_ok(type, addr, size) \
8683 (__chk_user_ptr(addr), \
8684 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8685@@ -318,52 +319,6 @@ do { \
8686 extern unsigned long __copy_tofrom_user(void __user *to,
8687 const void __user *from, unsigned long size);
8688
8689-#ifndef __powerpc64__
8690-
8691-static inline unsigned long copy_from_user(void *to,
8692- const void __user *from, unsigned long n)
8693-{
8694- unsigned long over;
8695-
8696- if (access_ok(VERIFY_READ, from, n))
8697- return __copy_tofrom_user((__force void __user *)to, from, n);
8698- if ((unsigned long)from < TASK_SIZE) {
8699- over = (unsigned long)from + n - TASK_SIZE;
8700- return __copy_tofrom_user((__force void __user *)to, from,
8701- n - over) + over;
8702- }
8703- return n;
8704-}
8705-
8706-static inline unsigned long copy_to_user(void __user *to,
8707- const void *from, unsigned long n)
8708-{
8709- unsigned long over;
8710-
8711- if (access_ok(VERIFY_WRITE, to, n))
8712- return __copy_tofrom_user(to, (__force void __user *)from, n);
8713- if ((unsigned long)to < TASK_SIZE) {
8714- over = (unsigned long)to + n - TASK_SIZE;
8715- return __copy_tofrom_user(to, (__force void __user *)from,
8716- n - over) + over;
8717- }
8718- return n;
8719-}
8720-
8721-#else /* __powerpc64__ */
8722-
8723-#define __copy_in_user(to, from, size) \
8724- __copy_tofrom_user((to), (from), (size))
8725-
8726-extern unsigned long copy_from_user(void *to, const void __user *from,
8727- unsigned long n);
8728-extern unsigned long copy_to_user(void __user *to, const void *from,
8729- unsigned long n);
8730-extern unsigned long copy_in_user(void __user *to, const void __user *from,
8731- unsigned long n);
8732-
8733-#endif /* __powerpc64__ */
8734-
8735 static inline unsigned long __copy_from_user_inatomic(void *to,
8736 const void __user *from, unsigned long n)
8737 {
8738@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8739 if (ret == 0)
8740 return 0;
8741 }
8742+
8743+ if (!__builtin_constant_p(n))
8744+ check_object_size(to, n, false);
8745+
8746 return __copy_tofrom_user((__force void __user *)to, from, n);
8747 }
8748
8749@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8750 if (ret == 0)
8751 return 0;
8752 }
8753+
8754+ if (!__builtin_constant_p(n))
8755+ check_object_size(from, n, true);
8756+
8757 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8758 }
8759
8760@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8761 return __copy_to_user_inatomic(to, from, size);
8762 }
8763
8764+#ifndef __powerpc64__
8765+
8766+static inline unsigned long __must_check copy_from_user(void *to,
8767+ const void __user *from, unsigned long n)
8768+{
8769+ unsigned long over;
8770+
8771+ if ((long)n < 0)
8772+ return n;
8773+
8774+ if (access_ok(VERIFY_READ, from, n)) {
8775+ if (!__builtin_constant_p(n))
8776+ check_object_size(to, n, false);
8777+ return __copy_tofrom_user((__force void __user *)to, from, n);
8778+ }
8779+ if ((unsigned long)from < TASK_SIZE) {
8780+ over = (unsigned long)from + n - TASK_SIZE;
8781+ if (!__builtin_constant_p(n - over))
8782+ check_object_size(to, n - over, false);
8783+ return __copy_tofrom_user((__force void __user *)to, from,
8784+ n - over) + over;
8785+ }
8786+ return n;
8787+}
8788+
8789+static inline unsigned long __must_check copy_to_user(void __user *to,
8790+ const void *from, unsigned long n)
8791+{
8792+ unsigned long over;
8793+
8794+ if ((long)n < 0)
8795+ return n;
8796+
8797+ if (access_ok(VERIFY_WRITE, to, n)) {
8798+ if (!__builtin_constant_p(n))
8799+ check_object_size(from, n, true);
8800+ return __copy_tofrom_user(to, (__force void __user *)from, n);
8801+ }
8802+ if ((unsigned long)to < TASK_SIZE) {
8803+ over = (unsigned long)to + n - TASK_SIZE;
8804+ if (!__builtin_constant_p(n - over))
8805+ check_object_size(from, n - over, true);
8806+ return __copy_tofrom_user(to, (__force void __user *)from,
8807+ n - over) + over;
8808+ }
8809+ return n;
8810+}
8811+
8812+#else /* __powerpc64__ */
8813+
8814+#define __copy_in_user(to, from, size) \
8815+ __copy_tofrom_user((to), (from), (size))
8816+
8817+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8818+{
8819+ if ((long)n < 0 || n > INT_MAX)
8820+ return n;
8821+
8822+ if (!__builtin_constant_p(n))
8823+ check_object_size(to, n, false);
8824+
8825+ if (likely(access_ok(VERIFY_READ, from, n)))
8826+ n = __copy_from_user(to, from, n);
8827+ else
8828+ memset(to, 0, n);
8829+ return n;
8830+}
8831+
8832+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8833+{
8834+ if ((long)n < 0 || n > INT_MAX)
8835+ return n;
8836+
8837+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
8838+ if (!__builtin_constant_p(n))
8839+ check_object_size(from, n, true);
8840+ n = __copy_to_user(to, from, n);
8841+ }
8842+ return n;
8843+}
8844+
8845+extern unsigned long copy_in_user(void __user *to, const void __user *from,
8846+ unsigned long n);
8847+
8848+#endif /* __powerpc64__ */
8849+
8850 extern unsigned long __clear_user(void __user *addr, unsigned long size);
8851
8852 static inline unsigned long clear_user(void __user *addr, unsigned long size)
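
Net effect of the uaccess.h reshuffle: the copy helpers move below the __copy_* definitions and gain three checks before any bytes move, namely a sign test on n so an underflowed size fails closed, check_object_size() when n is not a compile-time constant (the CONFIG_PAX_USERCOPY bounds check), and, on the 64-bit read side, zeroing of the destination when access_ok() fails so stale kernel bytes cannot leak. A userspace model of that read path, with fake_* stand-ins for the kernel helpers:

    #include <limits.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-ins for the kernel helpers; only ordering/failure modes matter. */
    static int fake_access_ok(const void *p, unsigned long n)
    {
        return p != NULL && n <= 4096;
    }
    static void fake_check_object_size(const void *p, unsigned long n)
    {
        (void)p; (void)n;    /* kernel: slab/stack bounds check (USERCOPY) */
    }

    static unsigned long model_copy_from_user(void *to, const void *from,
                                              unsigned long n)
    {
        if ((long)n < 0 || n > INT_MAX)    /* underflowed size: fail closed */
            return n;
        if (!__builtin_constant_p(n))
            fake_check_object_size(to, n);
        if (fake_access_ok(from, n)) {
            memcpy(to, from, n);           /* kernel: __copy_from_user() */
            return 0;
        }
        memset(to, 0, n);                  /* never expose stale kernel bytes */
        return n;
    }

    int main(void)
    {
        char dst[8] = "XXXXXXX";

        printf("%lu %s\n", model_copy_from_user(dst, "hi", 3), dst);
        printf("%lu\n", model_copy_from_user(dst, NULL, (unsigned long)-1));
        return 0;
    }
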
8853diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
8854index 502cf69..53936a1 100644
8855--- a/arch/powerpc/kernel/Makefile
8856+++ b/arch/powerpc/kernel/Makefile
8857@@ -15,6 +15,11 @@ CFLAGS_prom_init.o += -fPIC
8858 CFLAGS_btext.o += -fPIC
8859 endif
8860
8861+CFLAGS_REMOVE_cputable.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8862+CFLAGS_REMOVE_prom_init.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8863+CFLAGS_REMOVE_btext.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8864+CFLAGS_REMOVE_prom.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8865+
8866 ifdef CONFIG_FUNCTION_TRACER
8867 # Do not trace early boot code
8868 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
8869@@ -27,6 +32,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
8870 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
8871 endif
8872
8873+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8874+
8875 obj-y := cputable.o ptrace.o syscalls.o \
8876 irq.o align.o signal_32.o pmc.o vdso.o \
8877 process.o systbl.o idle.o \
8878diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
8879index 3e68d1c..72a5ee6 100644
8880--- a/arch/powerpc/kernel/exceptions-64e.S
8881+++ b/arch/powerpc/kernel/exceptions-64e.S
8882@@ -1010,6 +1010,7 @@ storage_fault_common:
8883 std r14,_DAR(r1)
8884 std r15,_DSISR(r1)
8885 addi r3,r1,STACK_FRAME_OVERHEAD
8886+ bl save_nvgprs
8887 mr r4,r14
8888 mr r5,r15
8889 ld r14,PACA_EXGEN+EX_R14(r13)
8890@@ -1018,8 +1019,7 @@ storage_fault_common:
8891 cmpdi r3,0
8892 bne- 1f
8893 b ret_from_except_lite
8894-1: bl save_nvgprs
8895- mr r5,r3
8896+1: mr r5,r3
8897 addi r3,r1,STACK_FRAME_OVERHEAD
8898 ld r4,_DAR(r1)
8899 bl bad_page_fault
8900diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8901index 9519e6b..13f6c38 100644
8902--- a/arch/powerpc/kernel/exceptions-64s.S
8903+++ b/arch/powerpc/kernel/exceptions-64s.S
8904@@ -1599,10 +1599,10 @@ handle_page_fault:
8905 11: ld r4,_DAR(r1)
8906 ld r5,_DSISR(r1)
8907 addi r3,r1,STACK_FRAME_OVERHEAD
8908+ bl save_nvgprs
8909 bl do_page_fault
8910 cmpdi r3,0
8911 beq+ 12f
8912- bl save_nvgprs
8913 mr r5,r3
8914 addi r3,r1,STACK_FRAME_OVERHEAD
8915 lwz r4,_DAR(r1)
8916diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
8917index 4509603..cdb491f 100644
8918--- a/arch/powerpc/kernel/irq.c
8919+++ b/arch/powerpc/kernel/irq.c
8920@@ -460,6 +460,8 @@ void migrate_irqs(void)
8921 }
8922 #endif
8923
8924+extern void gr_handle_kernel_exploit(void);
8925+
8926 static inline void check_stack_overflow(void)
8927 {
8928 #ifdef CONFIG_DEBUG_STACKOVERFLOW
8929@@ -472,6 +474,7 @@ static inline void check_stack_overflow(void)
8930 pr_err("do_IRQ: stack overflow: %ld\n",
8931 sp - sizeof(struct thread_info));
8932 dump_stack();
8933+ gr_handle_kernel_exploit();
8934 }
8935 #endif
8936 }
8937diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8938index c94d2e0..992a9ce 100644
8939--- a/arch/powerpc/kernel/module_32.c
8940+++ b/arch/powerpc/kernel/module_32.c
8941@@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8942 me->arch.core_plt_section = i;
8943 }
8944 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8945- pr_err("Module doesn't contain .plt or .init.plt sections.\n");
8946+ pr_err("Module $s doesn't contain .plt or .init.plt sections.\n", me->name);
8947 return -ENOEXEC;
8948 }
8949
8950@@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location,
8951
8952 pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8953 /* Init, or core PLT? */
8954- if (location >= mod->module_core
8955- && location < mod->module_core + mod->core_size)
8956+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8957+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8958 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8959- else
8960+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8961+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8962 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8963+ else {
8964+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8965+ return ~0UL;
8966+ }
8967
8968 /* Find this entry, or if that fails, the next avail. entry */
8969 while (entry->jump[0]) {
8970@@ -296,7 +301,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
8971 }
8972 #ifdef CONFIG_DYNAMIC_FTRACE
8973 module->arch.tramp =
8974- do_plt_call(module->module_core,
8975+ do_plt_call(module->module_core_rx,
8976 (unsigned long)ftrace_caller,
8977 sechdrs, module);
8978 #endif
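
do_plt_call() previously assumed any address outside the core region belonged to init; with the module image split into RX text and RW data halves there are four ranges to test, and an address in none of them is now reported and rejected instead of silently treated as init. A sketch of that classification, with made-up region values:

    #include <stdio.h>

    struct region { unsigned long base, size; };   /* illustrative layout */

    static int in_region(unsigned long a, struct region r)
    {
        return a >= r.base && a < r.base + r.size;
    }

    enum plt_kind { PLT_CORE, PLT_INIT, PLT_BAD };

    static enum plt_kind classify(unsigned long loc,
                                  struct region core_rx, struct region core_rw,
                                  struct region init_rx, struct region init_rw)
    {
        if (in_region(loc, core_rx) || in_region(loc, core_rw))
            return PLT_CORE;
        if (in_region(loc, init_rx) || in_region(loc, init_rw))
            return PLT_INIT;
        return PLT_BAD;    /* the patch logs and returns ~0UL here */
    }

    int main(void)
    {
        struct region crx = { 0x1000, 0x1000 }, crw = { 0x3000, 0x1000 };
        struct region irx = { 0x5000, 0x1000 }, irw = { 0x7000, 0x1000 };

        printf("%d %d %d\n",
               classify(0x1800, crx, crw, irx, irw),   /* 0: core */
               classify(0x5800, crx, crw, irx, irw),   /* 1: init */
               classify(0x9000, crx, crw, irx, irw));  /* 2: bad */
        return 0;
    }
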
8979diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8980index b4cc7be..1fe8bb3 100644
8981--- a/arch/powerpc/kernel/process.c
8982+++ b/arch/powerpc/kernel/process.c
8983@@ -1036,8 +1036,8 @@ void show_regs(struct pt_regs * regs)
8984 * Lookup NIP late so we have the best change of getting the
8985 * above info out without failing
8986 */
8987- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
8988- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
8989+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
8990+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
8991 #endif
8992 show_stack(current, (unsigned long *) regs->gpr[1]);
8993 if (!user_mode(regs))
8994@@ -1549,10 +1549,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8995 newsp = stack[0];
8996 ip = stack[STACK_FRAME_LR_SAVE];
8997 if (!firstframe || ip != lr) {
8998- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
8999+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
9000 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
9001 if ((ip == rth) && curr_frame >= 0) {
9002- printk(" (%pS)",
9003+ printk(" (%pA)",
9004 (void *)current->ret_stack[curr_frame].ret);
9005 curr_frame--;
9006 }
9007@@ -1572,7 +1572,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
9008 struct pt_regs *regs = (struct pt_regs *)
9009 (sp + STACK_FRAME_OVERHEAD);
9010 lr = regs->link;
9011- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
9012+ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
9013 regs->trap, (void *)regs->nip, (void *)lr);
9014 firstframe = 1;
9015 }
9016@@ -1608,49 +1608,3 @@ void notrace __ppc64_runlatch_off(void)
9017 mtspr(SPRN_CTRLT, ctrl);
9018 }
9019 #endif /* CONFIG_PPC64 */
9020-
9021-unsigned long arch_align_stack(unsigned long sp)
9022-{
9023- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9024- sp -= get_random_int() & ~PAGE_MASK;
9025- return sp & ~0xf;
9026-}
9027-
9028-static inline unsigned long brk_rnd(void)
9029-{
9030- unsigned long rnd = 0;
9031-
9032- /* 8MB for 32bit, 1GB for 64bit */
9033- if (is_32bit_task())
9034- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
9035- else
9036- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
9037-
9038- return rnd << PAGE_SHIFT;
9039-}
9040-
9041-unsigned long arch_randomize_brk(struct mm_struct *mm)
9042-{
9043- unsigned long base = mm->brk;
9044- unsigned long ret;
9045-
9046-#ifdef CONFIG_PPC_STD_MMU_64
9047- /*
9048- * If we are using 1TB segments and we are allowed to randomise
9049- * the heap, we can put it above 1TB so it is backed by a 1TB
9050- * segment. Otherwise the heap will be in the bottom 1TB
9051- * which always uses 256MB segments and this may result in a
9052- * performance penalty.
9053- */
9054- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
9055- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
9056-#endif
9057-
9058- ret = PAGE_ALIGN(base + brk_rnd());
9059-
9060- if (ret < mm->brk)
9061- return mm->brk;
9062-
9063- return ret;
9064-}
9065-
9066diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9067index f21897b..28c0428 100644
9068--- a/arch/powerpc/kernel/ptrace.c
9069+++ b/arch/powerpc/kernel/ptrace.c
9070@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9071 return ret;
9072 }
9073
9074+#ifdef CONFIG_GRKERNSEC_SETXID
9075+extern void gr_delayed_cred_worker(void);
9076+#endif
9077+
9078 /*
9079 * We must return the syscall number to actually look up in the table.
9080 * This can be -1L to skip running any syscall at all.
9081@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9082
9083 secure_computing_strict(regs->gpr[0]);
9084
9085+#ifdef CONFIG_GRKERNSEC_SETXID
9086+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9087+ gr_delayed_cred_worker();
9088+#endif
9089+
9090 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9091 tracehook_report_syscall_entry(regs))
9092 /*
9093@@ -1805,6 +1814,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9094 {
9095 int step;
9096
9097+#ifdef CONFIG_GRKERNSEC_SETXID
9098+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9099+ gr_delayed_cred_worker();
9100+#endif
9101+
9102 audit_syscall_exit(regs);
9103
9104 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9105diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9106index b171001..4ac7ac5 100644
9107--- a/arch/powerpc/kernel/signal_32.c
9108+++ b/arch/powerpc/kernel/signal_32.c
9109@@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
9110 /* Save user registers on the stack */
9111 frame = &rt_sf->uc.uc_mcontext;
9112 addr = frame;
9113- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9114+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9115 sigret = 0;
9116 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9117 } else {
9118diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9119index 2cb0c94..c0c0bc9 100644
9120--- a/arch/powerpc/kernel/signal_64.c
9121+++ b/arch/powerpc/kernel/signal_64.c
9122@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
9123 current->thread.fp_state.fpscr = 0;
9124
9125 /* Set up to return from userspace. */
9126- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9127+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9128 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9129 } else {
9130 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9131diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9132index e6595b7..24bde6e 100644
9133--- a/arch/powerpc/kernel/traps.c
9134+++ b/arch/powerpc/kernel/traps.c
9135@@ -36,6 +36,7 @@
9136 #include <linux/debugfs.h>
9137 #include <linux/ratelimit.h>
9138 #include <linux/context_tracking.h>
9139+#include <linux/uaccess.h>
9140
9141 #include <asm/emulated_ops.h>
9142 #include <asm/pgtable.h>
9143@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9144 return flags;
9145 }
9146
9147+extern void gr_handle_kernel_exploit(void);
9148+
9149 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9150 int signr)
9151 {
9152@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9153 panic("Fatal exception in interrupt");
9154 if (panic_on_oops)
9155 panic("Fatal exception");
9156+
9157+ gr_handle_kernel_exploit();
9158+
9159 do_exit(signr);
9160 }
9161
9162@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
9163 enum ctx_state prev_state = exception_enter();
9164 unsigned int reason = get_reason(regs);
9165
9166+#ifdef CONFIG_PAX_REFCOUNT
9167+ unsigned int bkpt;
9168+ const struct exception_table_entry *entry;
9169+
9170+ if (reason & REASON_ILLEGAL) {
9171+ /* Check if PaX bad instruction */
9172+ if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
9173+ current->thread.trap_nr = 0;
9174+ pax_report_refcount_overflow(regs);
9175+ /* fixup_exception() for PowerPC does not exist, simulate its job */
9176+ if ((entry = search_exception_tables(regs->nip)) != NULL) {
9177+ regs->nip = entry->fixup;
9178+ return;
9179+ }
9180+ /* fixup_exception() could not handle */
9181+ goto bail;
9182+ }
9183+ }
9184+#endif
9185+
9186 /* We can now get here via a FP Unavailable exception if the core
9187 * has no FPU, in that case the reason flags will be 0 */
9188
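
Because powerpc has no fixup_exception(), the handler above open-codes it: probe the faulting word, and when it is the planted 0x00c00b00, report the overflow and resume at the fixup address that the _ASM_EXTABLE(3b, 5b) entries from the earlier asm hunks recorded for regs->nip. A linear-search model of that lookup; the real search_exception_tables() is a sorted binary search, and these addresses are fabricated:

    #include <stddef.h>
    #include <stdio.h>

    struct exception_table_entry { unsigned long insn, fixup; };

    static const struct exception_table_entry extable[] = {
        { 0xc000000000001000UL, 0xc000000000001010UL },   /* fabricated */
    };

    static const struct exception_table_entry *
    search_extable_model(unsigned long nip)
    {
        size_t i;

        for (i = 0; i < sizeof(extable) / sizeof(extable[0]); i++)
            if (extable[i].insn == nip)
                return &extable[i];
        return NULL;                    /* patch: "goto bail", i.e. oops */
    }

    int main(void)
    {
        const struct exception_table_entry *e =
            search_extable_model(0xc000000000001000UL);

        if (e)
            printf("resume at %#lx\n", e->fixup);  /* regs->nip = e->fixup */
        return 0;
    }
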
9189diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9190index 305eb0d..accc5b40 100644
9191--- a/arch/powerpc/kernel/vdso.c
9192+++ b/arch/powerpc/kernel/vdso.c
9193@@ -34,6 +34,7 @@
9194 #include <asm/vdso.h>
9195 #include <asm/vdso_datapage.h>
9196 #include <asm/setup.h>
9197+#include <asm/mman.h>
9198
9199 #undef DEBUG
9200
9201@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9202 vdso_base = VDSO32_MBASE;
9203 #endif
9204
9205- current->mm->context.vdso_base = 0;
9206+ current->mm->context.vdso_base = ~0UL;
9207
9208 /* vDSO has a problem and was disabled, just don't "enable" it for the
9209 * process
9210@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9211 vdso_base = get_unmapped_area(NULL, vdso_base,
9212 (vdso_pages << PAGE_SHIFT) +
9213 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9214- 0, 0);
9215+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9216 if (IS_ERR_VALUE(vdso_base)) {
9217 rc = vdso_base;
9218 goto fail_mmapsem;
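
The vdso_base change is a sentinel swap: once placement is randomized, a low base is at least conceivable, so 0 stops being a trustworthy "no vdso" marker, whereas ~0UL can never be a valid page-aligned mapping base; the signal_32.c and signal_64.c hunks earlier test the same sentinel before using the vdso trampoline. In miniature (VDSO_UNMAPPED is an illustrative name):

    #include <stdio.h>

    #define VDSO_UNMAPPED (~0UL)   /* stands in for the ~0UL sentinel */

    static const char *trampoline(unsigned long vdso_base)
    {
        return vdso_base != VDSO_UNMAPPED ? "vdso trampoline"
                                          : "stack trampoline";
    }

    int main(void)
    {
        printf("%s\n", trampoline(VDSO_UNMAPPED));  /* vdso disabled/failed */
        printf("%s\n", trampoline(0x100000UL));     /* mapped, even if low */
        return 0;
    }
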
9219diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9220index c45eaab..5f41b57 100644
9221--- a/arch/powerpc/kvm/powerpc.c
9222+++ b/arch/powerpc/kvm/powerpc.c
9223@@ -1403,7 +1403,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9224 }
9225 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9226
9227-int kvm_arch_init(void *opaque)
9228+int kvm_arch_init(const void *opaque)
9229 {
9230 return 0;
9231 }
9232diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9233index 5eea6f3..5d10396 100644
9234--- a/arch/powerpc/lib/usercopy_64.c
9235+++ b/arch/powerpc/lib/usercopy_64.c
9236@@ -9,22 +9,6 @@
9237 #include <linux/module.h>
9238 #include <asm/uaccess.h>
9239
9240-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9241-{
9242- if (likely(access_ok(VERIFY_READ, from, n)))
9243- n = __copy_from_user(to, from, n);
9244- else
9245- memset(to, 0, n);
9246- return n;
9247-}
9248-
9249-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9250-{
9251- if (likely(access_ok(VERIFY_WRITE, to, n)))
9252- n = __copy_to_user(to, from, n);
9253- return n;
9254-}
9255-
9256 unsigned long copy_in_user(void __user *to, const void __user *from,
9257 unsigned long n)
9258 {
9259@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9260 return n;
9261 }
9262
9263-EXPORT_SYMBOL(copy_from_user);
9264-EXPORT_SYMBOL(copy_to_user);
9265 EXPORT_SYMBOL(copy_in_user);
9266
9267diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9268index 6154b0a..4de2b19 100644
9269--- a/arch/powerpc/mm/fault.c
9270+++ b/arch/powerpc/mm/fault.c
9271@@ -33,6 +33,10 @@
9272 #include <linux/ratelimit.h>
9273 #include <linux/context_tracking.h>
9274 #include <linux/hugetlb.h>
9275+#include <linux/slab.h>
9276+#include <linux/pagemap.h>
9277+#include <linux/compiler.h>
9278+#include <linux/unistd.h>
9279
9280 #include <asm/firmware.h>
9281 #include <asm/page.h>
9282@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9283 }
9284 #endif
9285
9286+#ifdef CONFIG_PAX_PAGEEXEC
9287+/*
9288+ * PaX: decide what to do with offenders (regs->nip = fault address)
9289+ *
9290+ * returns 1 when task should be killed
9291+ */
9292+static int pax_handle_fetch_fault(struct pt_regs *regs)
9293+{
9294+ return 1;
9295+}
9296+
9297+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9298+{
9299+ unsigned long i;
9300+
9301+ printk(KERN_ERR "PAX: bytes at PC: ");
9302+ for (i = 0; i < 5; i++) {
9303+ unsigned int c;
9304+ if (get_user(c, (unsigned int __user *)pc+i))
9305+ printk(KERN_CONT "???????? ");
9306+ else
9307+ printk(KERN_CONT "%08x ", c);
9308+ }
9309+ printk("\n");
9310+}
9311+#endif
9312+
9313 /*
9314 * Check whether the instruction at regs->nip is a store using
9315 * an update addressing form which will update r1.
9316@@ -227,7 +258,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9317 * indicate errors in DSISR but can validly be set in SRR1.
9318 */
9319 if (trap == 0x400)
9320- error_code &= 0x48200000;
9321+ error_code &= 0x58200000;
9322 else
9323 is_write = error_code & DSISR_ISSTORE;
9324 #else
9325@@ -383,7 +414,7 @@ good_area:
9326 * "undefined". Of those that can be set, this is the only
9327 * one which seems bad.
9328 */
9329- if (error_code & 0x10000000)
9330+ if (error_code & DSISR_GUARDED)
9331 /* Guarded storage error. */
9332 goto bad_area;
9333 #endif /* CONFIG_8xx */
9334@@ -398,7 +429,7 @@ good_area:
9335 * processors use the same I/D cache coherency mechanism
9336 * as embedded.
9337 */
9338- if (error_code & DSISR_PROTFAULT)
9339+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
9340 goto bad_area;
9341 #endif /* CONFIG_PPC_STD_MMU */
9342
9343@@ -490,6 +521,23 @@ bad_area:
9344 bad_area_nosemaphore:
9345 /* User mode accesses cause a SIGSEGV */
9346 if (user_mode(regs)) {
9347+
9348+#ifdef CONFIG_PAX_PAGEEXEC
9349+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9350+#ifdef CONFIG_PPC_STD_MMU
9351+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9352+#else
9353+ if (is_exec && regs->nip == address) {
9354+#endif
9355+ switch (pax_handle_fetch_fault(regs)) {
9356+ }
9357+
9358+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9359+ do_group_exit(SIGKILL);
9360+ }
9361+ }
9362+#endif
9363+
9364 _exception(SIGSEGV, regs, code, address);
9365 goto bail;
9366 }
9367diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9368index cb8bdbe..cde4bc7 100644
9369--- a/arch/powerpc/mm/mmap.c
9370+++ b/arch/powerpc/mm/mmap.c
9371@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9372 return sysctl_legacy_va_layout;
9373 }
9374
9375-static unsigned long mmap_rnd(void)
9376+static unsigned long mmap_rnd(struct mm_struct *mm)
9377 {
9378 unsigned long rnd = 0;
9379
9380+#ifdef CONFIG_PAX_RANDMMAP
9381+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9382+#endif
9383+
9384 if (current->flags & PF_RANDOMIZE) {
9385 /* 8MB for 32bit, 1GB for 64bit */
9386 if (is_32bit_task())
9387@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9388 return rnd << PAGE_SHIFT;
9389 }
9390
9391-static inline unsigned long mmap_base(void)
9392+static inline unsigned long mmap_base(struct mm_struct *mm)
9393 {
9394 unsigned long gap = rlimit(RLIMIT_STACK);
9395
9396@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9397 else if (gap > MAX_GAP)
9398 gap = MAX_GAP;
9399
9400- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9401+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9402 }
9403
9404 /*
9405@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9406 */
9407 if (mmap_is_legacy()) {
9408 mm->mmap_base = TASK_UNMAPPED_BASE;
9409+
9410+#ifdef CONFIG_PAX_RANDMMAP
9411+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9412+ mm->mmap_base += mm->delta_mmap;
9413+#endif
9414+
9415 mm->get_unmapped_area = arch_get_unmapped_area;
9416 } else {
9417- mm->mmap_base = mmap_base();
9418+ mm->mmap_base = mmap_base(mm);
9419+
9420+#ifdef CONFIG_PAX_RANDMMAP
9421+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9422+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9423+#endif
9424+
9425 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9426 }
9427 }
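
So under MF_PAX_RANDMMAP the bottom-up base is pushed up by delta_mmap and the top-down base pulled down by delta_mmap plus delta_stack, keeping the randomized stack gap clear of mappings. A condensed model of the layout decision (field names mirror the patch, the numeric values are invented):

    #include <stdio.h>

    struct mm_model {                 /* field names mirror the patch */
        unsigned long mmap_base, delta_mmap, delta_stack;
        int randmmap, legacy;
    };

    static void pick_layout_model(struct mm_model *mm,
                                  unsigned long bottomup_base,
                                  unsigned long topdown_base)
    {
        if (mm->legacy) {
            mm->mmap_base = bottomup_base;
            if (mm->randmmap)
                mm->mmap_base += mm->delta_mmap;   /* bottom-up: shift up */
        } else {
            mm->mmap_base = topdown_base;
            if (mm->randmmap)                      /* top-down: shift down, */
                mm->mmap_base -= mm->delta_mmap +  /* clearing the random */
                                 mm->delta_stack;  /* stack gap */
        }
    }

    int main(void)
    {
        struct mm_model mm = { 0, 0x2000000UL, 0x1000000UL, 1, 0 };

        pick_layout_model(&mm, 0x10000000UL, 0x7fff00000000UL);
        printf("mmap_base = %#lx\n", mm.mmap_base);
        return 0;
    }
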
9428diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9429index ded0ea1..f213a9b 100644
9430--- a/arch/powerpc/mm/slice.c
9431+++ b/arch/powerpc/mm/slice.c
9432@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9433 if ((mm->task_size - len) < addr)
9434 return 0;
9435 vma = find_vma(mm, addr);
9436- return (!vma || (addr + len) <= vma->vm_start);
9437+ return check_heap_stack_gap(vma, addr, len, 0);
9438 }
9439
9440 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9441@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9442 info.align_offset = 0;
9443
9444 addr = TASK_UNMAPPED_BASE;
9445+
9446+#ifdef CONFIG_PAX_RANDMMAP
9447+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9448+ addr += mm->delta_mmap;
9449+#endif
9450+
9451 while (addr < TASK_SIZE) {
9452 info.low_limit = addr;
9453 if (!slice_scan_available(addr, available, 1, &addr))
9454@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9455 if (fixed && addr > (mm->task_size - len))
9456 return -ENOMEM;
9457
9458+#ifdef CONFIG_PAX_RANDMMAP
9459+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9460+ addr = 0;
9461+#endif
9462+
9463 /* If hint, make sure it matches our alignment restrictions */
9464 if (!fixed && addr) {
9465 addr = _ALIGN_UP(addr, 1ul << pshift);
9466diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9467index f223875..94170e4 100644
9468--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9469+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9470@@ -399,8 +399,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
9471 }
9472
9473 static struct pci_ops scc_pciex_pci_ops = {
9474- scc_pciex_read_config,
9475- scc_pciex_write_config,
9476+ .read = scc_pciex_read_config,
9477+ .write = scc_pciex_write_config,
9478 };
9479
9480 static void pciex_clear_intr_all(unsigned int __iomem *base)
9481diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9482index d966bbe..372124a 100644
9483--- a/arch/powerpc/platforms/cell/spufs/file.c
9484+++ b/arch/powerpc/platforms/cell/spufs/file.c
9485@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9486 return VM_FAULT_NOPAGE;
9487 }
9488
9489-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9490+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9491 unsigned long address,
9492- void *buf, int len, int write)
9493+ void *buf, size_t len, int write)
9494 {
9495 struct spu_context *ctx = vma->vm_file->private_data;
9496 unsigned long offset = address - vma->vm_start;
9497diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9498index fa934fe..c296056 100644
9499--- a/arch/s390/include/asm/atomic.h
9500+++ b/arch/s390/include/asm/atomic.h
9501@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9502 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9503 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9504
9505+#define atomic64_read_unchecked(v) atomic64_read(v)
9506+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9507+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9508+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9509+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9510+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9511+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9512+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9513+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9514+
9515 #endif /* __ARCH_S390_ATOMIC__ */
9516diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
9517index 8d72471..5322500 100644
9518--- a/arch/s390/include/asm/barrier.h
9519+++ b/arch/s390/include/asm/barrier.h
9520@@ -42,7 +42,7 @@
9521 do { \
9522 compiletime_assert_atomic_type(*p); \
9523 barrier(); \
9524- ACCESS_ONCE(*p) = (v); \
9525+ ACCESS_ONCE_RW(*p) = (v); \
9526 } while (0)
9527
9528 #define smp_load_acquire(p) \
9529diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9530index 4d7ccac..d03d0ad 100644
9531--- a/arch/s390/include/asm/cache.h
9532+++ b/arch/s390/include/asm/cache.h
9533@@ -9,8 +9,10 @@
9534 #ifndef __ARCH_S390_CACHE_H
9535 #define __ARCH_S390_CACHE_H
9536
9537-#define L1_CACHE_BYTES 256
9538+#include <linux/const.h>
9539+
9540 #define L1_CACHE_SHIFT 8
9541+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9542 #define NET_SKB_PAD 32
9543
9544 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9545diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9546index f6e43d3..5f57681 100644
9547--- a/arch/s390/include/asm/elf.h
9548+++ b/arch/s390/include/asm/elf.h
9549@@ -163,8 +163,14 @@ extern unsigned int vdso_enabled;
9550 the loader. We need to make sure that it is out of the way of the program
9551 that it will "exec", and that there is sufficient room for the brk. */
9552
9553-extern unsigned long randomize_et_dyn(unsigned long base);
9554-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
9555+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
9556+
9557+#ifdef CONFIG_PAX_ASLR
9558+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9559+
9560+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9561+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9562+#endif
9563
9564 /* This yields a mask that user programs can use to figure out what
9565 instruction set this CPU supports. */
9566@@ -223,9 +229,6 @@ struct linux_binprm;
9567 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
9568 int arch_setup_additional_pages(struct linux_binprm *, int);
9569
9570-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9571-#define arch_randomize_brk arch_randomize_brk
9572-
9573 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
9574
9575 #endif
9576diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9577index c4a93d6..4d2a9b4 100644
9578--- a/arch/s390/include/asm/exec.h
9579+++ b/arch/s390/include/asm/exec.h
9580@@ -7,6 +7,6 @@
9581 #ifndef __ASM_EXEC_H
9582 #define __ASM_EXEC_H
9583
9584-extern unsigned long arch_align_stack(unsigned long sp);
9585+#define arch_align_stack(x) ((x) & ~0xfUL)
9586
9587 #endif /* __ASM_EXEC_H */
9588diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9589index cd4c68e..6764641 100644
9590--- a/arch/s390/include/asm/uaccess.h
9591+++ b/arch/s390/include/asm/uaccess.h
9592@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9593 __range_ok((unsigned long)(addr), (size)); \
9594 })
9595
9596+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9597 #define access_ok(type, addr, size) __access_ok(addr, size)
9598
9599 /*
9600@@ -275,6 +276,10 @@ static inline unsigned long __must_check
9601 copy_to_user(void __user *to, const void *from, unsigned long n)
9602 {
9603 might_fault();
9604+
9605+ if ((long)n < 0)
9606+ return n;
9607+
9608 return __copy_to_user(to, from, n);
9609 }
9610
9611@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9612 static inline unsigned long __must_check
9613 copy_from_user(void *to, const void __user *from, unsigned long n)
9614 {
9615- unsigned int sz = __compiletime_object_size(to);
9616+ size_t sz = __compiletime_object_size(to);
9617
9618 might_fault();
9619- if (unlikely(sz != -1 && sz < n)) {
9620+
9621+ if ((long)n < 0)
9622+ return n;
9623+
9624+ if (unlikely(sz != (size_t)-1 && sz < n)) {
9625 copy_from_user_overflow();
9626 return n;
9627 }
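
The widening of sz above is about __compiletime_object_size(), the kernel's wrapper for GCC's __builtin_object_size(ptr, 0): its "size unknown" answer is (size_t)-1, and keeping the sentinel at native width avoids relying on the unsigned-int truncation round-trip. A standalone demonstration (compile with -O2; the builtin mostly gives up at -O0):

    #include <stdio.h>

    #define compiletime_object_size(p) __builtin_object_size(p, 0)

    static __attribute__((noinline)) long probe(void *p)
    {
        return (long)compiletime_object_size(p);   /* origin hidden: -1 */
    }

    int main(void)
    {
        char buf[16];

        printf("known:   %ld\n", (long)compiletime_object_size(buf)); /* 16 */
        printf("unknown: %ld\n", probe(buf));                         /* -1 */
        return 0;
    }
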
9628diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9629index 409d152..d90d368 100644
9630--- a/arch/s390/kernel/module.c
9631+++ b/arch/s390/kernel/module.c
9632@@ -165,11 +165,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9633
9634 /* Increase core size by size of got & plt and set start
9635 offsets for got and plt. */
9636- me->core_size = ALIGN(me->core_size, 4);
9637- me->arch.got_offset = me->core_size;
9638- me->core_size += me->arch.got_size;
9639- me->arch.plt_offset = me->core_size;
9640- me->core_size += me->arch.plt_size;
9641+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
9642+ me->arch.got_offset = me->core_size_rw;
9643+ me->core_size_rw += me->arch.got_size;
9644+ me->arch.plt_offset = me->core_size_rx;
9645+ me->core_size_rx += me->arch.plt_size;
9646 return 0;
9647 }
9648
9649@@ -285,7 +285,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9650 if (info->got_initialized == 0) {
9651 Elf_Addr *gotent;
9652
9653- gotent = me->module_core + me->arch.got_offset +
9654+ gotent = me->module_core_rw + me->arch.got_offset +
9655 info->got_offset;
9656 *gotent = val;
9657 info->got_initialized = 1;
9658@@ -308,7 +308,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9659 rc = apply_rela_bits(loc, val, 0, 64, 0);
9660 else if (r_type == R_390_GOTENT ||
9661 r_type == R_390_GOTPLTENT) {
9662- val += (Elf_Addr) me->module_core - loc;
9663+ val += (Elf_Addr) me->module_core_rw - loc;
9664 rc = apply_rela_bits(loc, val, 1, 32, 1);
9665 }
9666 break;
9667@@ -321,7 +321,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9668 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9669 if (info->plt_initialized == 0) {
9670 unsigned int *ip;
9671- ip = me->module_core + me->arch.plt_offset +
9672+ ip = me->module_core_rx + me->arch.plt_offset +
9673 info->plt_offset;
9674 #ifndef CONFIG_64BIT
9675 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
9676@@ -346,7 +346,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9677 val - loc + 0xffffUL < 0x1ffffeUL) ||
9678 (r_type == R_390_PLT32DBL &&
9679 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9680- val = (Elf_Addr) me->module_core +
9681+ val = (Elf_Addr) me->module_core_rx +
9682 me->arch.plt_offset +
9683 info->plt_offset;
9684 val += rela->r_addend - loc;
9685@@ -368,7 +368,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9686 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9687 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9688 val = val + rela->r_addend -
9689- ((Elf_Addr) me->module_core + me->arch.got_offset);
9690+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9691 if (r_type == R_390_GOTOFF16)
9692 rc = apply_rela_bits(loc, val, 0, 16, 0);
9693 else if (r_type == R_390_GOTOFF32)
9694@@ -378,7 +378,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9695 break;
9696 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9697 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9698- val = (Elf_Addr) me->module_core + me->arch.got_offset +
9699+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9700 rela->r_addend - loc;
9701 if (r_type == R_390_GOTPC)
9702 rc = apply_rela_bits(loc, val, 1, 32, 0);
9703diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9704index aa7a839..6c2a916 100644
9705--- a/arch/s390/kernel/process.c
9706+++ b/arch/s390/kernel/process.c
9707@@ -219,37 +219,3 @@ unsigned long get_wchan(struct task_struct *p)
9708 }
9709 return 0;
9710 }
9711-
9712-unsigned long arch_align_stack(unsigned long sp)
9713-{
9714- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9715- sp -= get_random_int() & ~PAGE_MASK;
9716- return sp & ~0xf;
9717-}
9718-
9719-static inline unsigned long brk_rnd(void)
9720-{
9721- /* 8MB for 32bit, 1GB for 64bit */
9722- if (is_32bit_task())
9723- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9724- else
9725- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9726-}
9727-
9728-unsigned long arch_randomize_brk(struct mm_struct *mm)
9729-{
9730- unsigned long ret;
9731-
9732- ret = PAGE_ALIGN(mm->brk + brk_rnd());
9733- return (ret > mm->brk) ? ret : mm->brk;
9734-}
9735-
9736-unsigned long randomize_et_dyn(unsigned long base)
9737-{
9738- unsigned long ret;
9739-
9740- if (!(current->flags & PF_RANDOMIZE))
9741- return base;
9742- ret = PAGE_ALIGN(base + brk_rnd());
9743- return (ret > base) ? ret : base;
9744-}
9745diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9746index 9b436c2..54fbf0a 100644
9747--- a/arch/s390/mm/mmap.c
9748+++ b/arch/s390/mm/mmap.c
9749@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9750 */
9751 if (mmap_is_legacy()) {
9752 mm->mmap_base = mmap_base_legacy();
9753+
9754+#ifdef CONFIG_PAX_RANDMMAP
9755+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9756+ mm->mmap_base += mm->delta_mmap;
9757+#endif
9758+
9759 mm->get_unmapped_area = arch_get_unmapped_area;
9760 } else {
9761 mm->mmap_base = mmap_base();
9762+
9763+#ifdef CONFIG_PAX_RANDMMAP
9764+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9765+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9766+#endif
9767+
9768 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9769 }
9770 }
9771@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9772 */
9773 if (mmap_is_legacy()) {
9774 mm->mmap_base = mmap_base_legacy();
9775+
9776+#ifdef CONFIG_PAX_RANDMMAP
9777+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9778+ mm->mmap_base += mm->delta_mmap;
9779+#endif
9780+
9781 mm->get_unmapped_area = s390_get_unmapped_area;
9782 } else {
9783 mm->mmap_base = mmap_base();
9784+
9785+#ifdef CONFIG_PAX_RANDMMAP
9786+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9787+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9788+#endif
9789+
9790 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9791 }
9792 }
9793diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9794index ae3d59f..f65f075 100644
9795--- a/arch/score/include/asm/cache.h
9796+++ b/arch/score/include/asm/cache.h
9797@@ -1,7 +1,9 @@
9798 #ifndef _ASM_SCORE_CACHE_H
9799 #define _ASM_SCORE_CACHE_H
9800
9801+#include <linux/const.h>
9802+
9803 #define L1_CACHE_SHIFT 4
9804-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9805+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9806
9807 #endif /* _ASM_SCORE_CACHE_H */
9808diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9809index f9f3cd5..58ff438 100644
9810--- a/arch/score/include/asm/exec.h
9811+++ b/arch/score/include/asm/exec.h
9812@@ -1,6 +1,6 @@
9813 #ifndef _ASM_SCORE_EXEC_H
9814 #define _ASM_SCORE_EXEC_H
9815
9816-extern unsigned long arch_align_stack(unsigned long sp);
9817+#define arch_align_stack(x) (x)
9818
9819 #endif /* _ASM_SCORE_EXEC_H */
9820diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9821index a1519ad3..e8ac1ff 100644
9822--- a/arch/score/kernel/process.c
9823+++ b/arch/score/kernel/process.c
9824@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9825
9826 return task_pt_regs(task)->cp0_epc;
9827 }
9828-
9829-unsigned long arch_align_stack(unsigned long sp)
9830-{
9831- return sp;
9832-}
9833diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9834index ef9e555..331bd29 100644
9835--- a/arch/sh/include/asm/cache.h
9836+++ b/arch/sh/include/asm/cache.h
9837@@ -9,10 +9,11 @@
9838 #define __ASM_SH_CACHE_H
9839 #ifdef __KERNEL__
9840
9841+#include <linux/const.h>
9842 #include <linux/init.h>
9843 #include <cpu/cache.h>
9844
9845-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9846+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9847
9848 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9849
9850diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9851index 6777177..cb5e44f 100644
9852--- a/arch/sh/mm/mmap.c
9853+++ b/arch/sh/mm/mmap.c
9854@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9855 struct mm_struct *mm = current->mm;
9856 struct vm_area_struct *vma;
9857 int do_colour_align;
9858+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9859 struct vm_unmapped_area_info info;
9860
9861 if (flags & MAP_FIXED) {
9862@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9863 if (filp || (flags & MAP_SHARED))
9864 do_colour_align = 1;
9865
9866+#ifdef CONFIG_PAX_RANDMMAP
9867+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9868+#endif
9869+
9870 if (addr) {
9871 if (do_colour_align)
9872 addr = COLOUR_ALIGN(addr, pgoff);
9873@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9874 addr = PAGE_ALIGN(addr);
9875
9876 vma = find_vma(mm, addr);
9877- if (TASK_SIZE - len >= addr &&
9878- (!vma || addr + len <= vma->vm_start))
9879+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9880 return addr;
9881 }
9882
9883 info.flags = 0;
9884 info.length = len;
9885- info.low_limit = TASK_UNMAPPED_BASE;
9886+ info.low_limit = mm->mmap_base;
9887 info.high_limit = TASK_SIZE;
9888 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
9889 info.align_offset = pgoff << PAGE_SHIFT;
9890@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9891 struct mm_struct *mm = current->mm;
9892 unsigned long addr = addr0;
9893 int do_colour_align;
9894+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9895 struct vm_unmapped_area_info info;
9896
9897 if (flags & MAP_FIXED) {
9898@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9899 if (filp || (flags & MAP_SHARED))
9900 do_colour_align = 1;
9901
9902+#ifdef CONFIG_PAX_RANDMMAP
9903+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9904+#endif
9905+
9906 /* requesting a specific address */
9907 if (addr) {
9908 if (do_colour_align)
9909@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9910 addr = PAGE_ALIGN(addr);
9911
9912 vma = find_vma(mm, addr);
9913- if (TASK_SIZE - len >= addr &&
9914- (!vma || addr + len <= vma->vm_start))
9915+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9916 return addr;
9917 }
9918
9919@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9920 VM_BUG_ON(addr != -ENOMEM);
9921 info.flags = 0;
9922 info.low_limit = TASK_UNMAPPED_BASE;
9923+
9924+#ifdef CONFIG_PAX_RANDMMAP
9925+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9926+ info.low_limit += mm->delta_mmap;
9927+#endif
9928+
9929 info.high_limit = TASK_SIZE;
9930 addr = vm_unmapped_area(&info);
9931 }
9932diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
9933index 4082749..fd97781 100644
9934--- a/arch/sparc/include/asm/atomic_64.h
9935+++ b/arch/sparc/include/asm/atomic_64.h
9936@@ -15,18 +15,38 @@
9937 #define ATOMIC64_INIT(i) { (i) }
9938
9939 #define atomic_read(v) ACCESS_ONCE((v)->counter)
9940+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9941+{
9942+ return ACCESS_ONCE(v->counter);
9943+}
9944 #define atomic64_read(v) ACCESS_ONCE((v)->counter)
9945+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9946+{
9947+ return ACCESS_ONCE(v->counter);
9948+}
9949
9950 #define atomic_set(v, i) (((v)->counter) = i)
9951+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9952+{
9953+ v->counter = i;
9954+}
9955 #define atomic64_set(v, i) (((v)->counter) = i)
9956+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9957+{
9958+ v->counter = i;
9959+}
9960
9961-#define ATOMIC_OP(op) \
9962-void atomic_##op(int, atomic_t *); \
9963-void atomic64_##op(long, atomic64_t *);
9964+#define __ATOMIC_OP(op, suffix) \
9965+void atomic_##op##suffix(int, atomic##suffix##_t *); \
9966+void atomic64_##op##suffix(long, atomic64##suffix##_t *);
9967
9968-#define ATOMIC_OP_RETURN(op) \
9969-int atomic_##op##_return(int, atomic_t *); \
9970-long atomic64_##op##_return(long, atomic64_t *);
9971+#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)
9972+
9973+#define __ATOMIC_OP_RETURN(op, suffix) \
9974+int atomic_##op##_return##suffix(int, atomic##suffix##_t *); \
9975+long atomic64_##op##_return##suffix(long, atomic64##suffix##_t *);
9976+
9977+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked)
9978
9979 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
9980
9981@@ -35,13 +55,23 @@ ATOMIC_OPS(sub)
9982
9983 #undef ATOMIC_OPS
9984 #undef ATOMIC_OP_RETURN
9985+#undef __ATOMIC_OP_RETURN
9986 #undef ATOMIC_OP
9987+#undef __ATOMIC_OP
9988
9989 #define atomic_dec_return(v) atomic_sub_return(1, v)
9990 #define atomic64_dec_return(v) atomic64_sub_return(1, v)
9991
9992 #define atomic_inc_return(v) atomic_add_return(1, v)
9993+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9994+{
9995+ return atomic_add_return_unchecked(1, v);
9996+}
9997 #define atomic64_inc_return(v) atomic64_add_return(1, v)
9998+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9999+{
10000+ return atomic64_add_return_unchecked(1, v);
10001+}
10002
10003 /*
10004 * atomic_inc_and_test - increment and test
10005@@ -52,6 +82,10 @@ ATOMIC_OPS(sub)
10006 * other cases.
10007 */
10008 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
10009+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
10010+{
10011+ return atomic_inc_return_unchecked(v) == 0;
10012+}
10013 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
10014
10015 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
10016@@ -61,25 +95,60 @@ ATOMIC_OPS(sub)
10017 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
10018
10019 #define atomic_inc(v) atomic_add(1, v)
10020+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
10021+{
10022+ atomic_add_unchecked(1, v);
10023+}
10024 #define atomic64_inc(v) atomic64_add(1, v)
10025+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
10026+{
10027+ atomic64_add_unchecked(1, v);
10028+}
10029
10030 #define atomic_dec(v) atomic_sub(1, v)
10031+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
10032+{
10033+ atomic_sub_unchecked(1, v);
10034+}
10035 #define atomic64_dec(v) atomic64_sub(1, v)
10036+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
10037+{
10038+ atomic64_sub_unchecked(1, v);
10039+}
10040
10041 #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
10042 #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
10043
10044 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
10045+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
10046+{
10047+ return cmpxchg(&v->counter, old, new);
10048+}
10049 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
10050+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10051+{
10052+ return xchg(&v->counter, new);
10053+}
10054
10055 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10056 {
10057- int c, old;
10058+ int c, old, new;
10059 c = atomic_read(v);
10060 for (;;) {
10061- if (unlikely(c == (u)))
10062+ if (unlikely(c == u))
10063 break;
10064- old = atomic_cmpxchg((v), c, c + (a));
10065+
10066+ asm volatile("addcc %2, %0, %0\n"
10067+
10068+#ifdef CONFIG_PAX_REFCOUNT
10069+ "tvs %%icc, 6\n"
10070+#endif
10071+
10072+ : "=r" (new)
10073+ : "0" (c), "ir" (a)
10074+ : "cc");
10075+
10076+ old = atomic_cmpxchg(v, c, new);
10077 if (likely(old == c))
10078 break;
10079 c = old;
10080@@ -90,20 +159,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10081 #define atomic64_cmpxchg(v, o, n) \
10082 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
10083 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
10084+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10085+{
10086+ return xchg(&v->counter, new);
10087+}
10088
10089 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10090 {
10091- long c, old;
10092+ long c, old, new;
10093 c = atomic64_read(v);
10094 for (;;) {
10095- if (unlikely(c == (u)))
10096+ if (unlikely(c == u))
10097 break;
10098- old = atomic64_cmpxchg((v), c, c + (a));
10099+
10100+ asm volatile("addcc %2, %0, %0\n"
10101+
10102+#ifdef CONFIG_PAX_REFCOUNT
10103+ "tvs %%xcc, 6\n"
10104+#endif
10105+
10106+ : "=r" (new)
10107+ : "0" (c), "ir" (a)
10108+ : "cc");
10109+
10110+ old = atomic64_cmpxchg(v, c, new);
10111 if (likely(old == c))
10112 break;
10113 c = old;
10114 }
10115- return c != (u);
10116+ return c != u;
10117 }
10118
10119 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
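
Note on the atomic_64.h hunk above: this is the core of the sparc64 PAX_REFCOUNT
split. Overflow-trapping "tvs %icc, 6" instructions are woven into the atomic
paths (directly in __atomic_add_unless() here, and via the __REFCOUNT_OP macros
in the atomic_64.S hunk further down), while the new *_unchecked variants keep
plain wrapping arithmetic for counters that may legitimately overflow. A minimal
usage sketch, assuming the atomic_unchecked_t typedef that this patch adds to
the generic headers elsewhere:

	static atomic_t obj_refs = ATOMIC_INIT(1);		/* overflow-checked  */
	static atomic_unchecked_t stat_hits = ATOMIC_INIT(0);	/* may wrap silently */

	static void obj_get(void)
	{
		atomic_inc(&obj_refs);			/* checked increment */
		atomic_inc_unchecked(&stat_hits);	/* plain increment   */
	}

The dcpage_flushes counters converted in asm/setup.h and smp_64.c below are
exactly this second kind: pure statistics where wrap-around is harmless.
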
10120diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10121index 7664894..45a974b 100644
10122--- a/arch/sparc/include/asm/barrier_64.h
10123+++ b/arch/sparc/include/asm/barrier_64.h
10124@@ -60,7 +60,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10125 do { \
10126 compiletime_assert_atomic_type(*p); \
10127 barrier(); \
10128- ACCESS_ONCE(*p) = (v); \
10129+ ACCESS_ONCE_RW(*p) = (v); \
10130 } while (0)
10131
10132 #define smp_load_acquire(p) \
10133diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10134index 5bb6991..5c2132e 100644
10135--- a/arch/sparc/include/asm/cache.h
10136+++ b/arch/sparc/include/asm/cache.h
10137@@ -7,10 +7,12 @@
10138 #ifndef _SPARC_CACHE_H
10139 #define _SPARC_CACHE_H
10140
10141+#include <linux/const.h>
10142+
10143 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10144
10145 #define L1_CACHE_SHIFT 5
10146-#define L1_CACHE_BYTES 32
10147+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10148
10149 #ifdef CONFIG_SPARC32
10150 #define SMP_CACHE_BYTES_SHIFT 5
10151diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10152index a24e41f..47677ff 100644
10153--- a/arch/sparc/include/asm/elf_32.h
10154+++ b/arch/sparc/include/asm/elf_32.h
10155@@ -114,6 +114,13 @@ typedef struct {
10156
10157 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10158
10159+#ifdef CONFIG_PAX_ASLR
10160+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10161+
10162+#define PAX_DELTA_MMAP_LEN 16
10163+#define PAX_DELTA_STACK_LEN 16
10164+#endif
10165+
10166 /* This yields a mask that user programs can use to figure out what
10167 instruction set this cpu supports. This can NOT be done in userspace
10168 on Sparc. */
10169diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10170index 370ca1e..d4f4a98 100644
10171--- a/arch/sparc/include/asm/elf_64.h
10172+++ b/arch/sparc/include/asm/elf_64.h
10173@@ -189,6 +189,13 @@ typedef struct {
10174 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10175 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10176
10177+#ifdef CONFIG_PAX_ASLR
10178+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10179+
10180+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10181+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10182+#endif
10183+
10184 extern unsigned long sparc64_elf_hwcap;
10185 #define ELF_HWCAP sparc64_elf_hwcap
10186
10187diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10188index a3890da..f6a408e 100644
10189--- a/arch/sparc/include/asm/pgalloc_32.h
10190+++ b/arch/sparc/include/asm/pgalloc_32.h
10191@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10192 }
10193
10194 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10195+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10196
10197 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10198 unsigned long address)
10199diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10200index 5e31871..13469c6 100644
10201--- a/arch/sparc/include/asm/pgalloc_64.h
10202+++ b/arch/sparc/include/asm/pgalloc_64.h
10203@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
10204 }
10205
10206 #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
10207+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10208
10209 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
10210 {
10211@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
10212 }
10213
10214 #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
10215+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10216
10217 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
10218 {
10219diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10220index 59ba6f6..4518128 100644
10221--- a/arch/sparc/include/asm/pgtable.h
10222+++ b/arch/sparc/include/asm/pgtable.h
10223@@ -5,4 +5,8 @@
10224 #else
10225 #include <asm/pgtable_32.h>
10226 #endif
10227+
10228+#define ktla_ktva(addr) (addr)
10229+#define ktva_ktla(addr) (addr)
10230+
10231 #endif
10232diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10233index b9b91ae..950b91e 100644
10234--- a/arch/sparc/include/asm/pgtable_32.h
10235+++ b/arch/sparc/include/asm/pgtable_32.h
10236@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10237 #define PAGE_SHARED SRMMU_PAGE_SHARED
10238 #define PAGE_COPY SRMMU_PAGE_COPY
10239 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10240+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10241+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10242+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10243 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10244
10245 /* Top-level page directory - dummy used by init-mm.
10246@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10247
10248 /* xwr */
10249 #define __P000 PAGE_NONE
10250-#define __P001 PAGE_READONLY
10251-#define __P010 PAGE_COPY
10252-#define __P011 PAGE_COPY
10253+#define __P001 PAGE_READONLY_NOEXEC
10254+#define __P010 PAGE_COPY_NOEXEC
10255+#define __P011 PAGE_COPY_NOEXEC
10256 #define __P100 PAGE_READONLY
10257 #define __P101 PAGE_READONLY
10258 #define __P110 PAGE_COPY
10259 #define __P111 PAGE_COPY
10260
10261 #define __S000 PAGE_NONE
10262-#define __S001 PAGE_READONLY
10263-#define __S010 PAGE_SHARED
10264-#define __S011 PAGE_SHARED
10265+#define __S001 PAGE_READONLY_NOEXEC
10266+#define __S010 PAGE_SHARED_NOEXEC
10267+#define __S011 PAGE_SHARED_NOEXEC
10268 #define __S100 PAGE_READONLY
10269 #define __S101 PAGE_READONLY
10270 #define __S110 PAGE_SHARED
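
The __Pxxx/__Sxxx macros above form the protection map that common mm code
indexes with the read/write/execute/shared permission bits, so this hunk makes
every mapping that was not requested executable resolve to one of the new
*_NOEXEC descriptors; per the pgtsrmmu.h hunk just below, those are the
original page protections minus SRMMU_EXEC. For context, this is roughly how
the table is consumed upstream (cf. vm_get_page_prot() in mm/mmap.c):

	/* a private PROT_READ|PROT_WRITE mapping indexes __P011, which now
	 * resolves to SRMMU_PAGE_COPY_NOEXEC, i.e. no execute permission */
	pgprot_t prot = protection_map[(VM_READ | VM_WRITE) &
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
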
10271diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10272index 79da178..c2eede8 100644
10273--- a/arch/sparc/include/asm/pgtsrmmu.h
10274+++ b/arch/sparc/include/asm/pgtsrmmu.h
10275@@ -115,6 +115,11 @@
10276 SRMMU_EXEC | SRMMU_REF)
10277 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10278 SRMMU_EXEC | SRMMU_REF)
10279+
10280+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10281+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10282+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10283+
10284 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10285 SRMMU_DIRTY | SRMMU_REF)
10286
10287diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10288index 29d64b1..4272fe8 100644
10289--- a/arch/sparc/include/asm/setup.h
10290+++ b/arch/sparc/include/asm/setup.h
10291@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10292 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10293
10294 /* init_64.c */
10295-extern atomic_t dcpage_flushes;
10296-extern atomic_t dcpage_flushes_xcall;
10297+extern atomic_unchecked_t dcpage_flushes;
10298+extern atomic_unchecked_t dcpage_flushes_xcall;
10299
10300 extern int sysctl_tsb_ratio;
10301 #endif
10302diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10303index 9689176..63c18ea 100644
10304--- a/arch/sparc/include/asm/spinlock_64.h
10305+++ b/arch/sparc/include/asm/spinlock_64.h
10306@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10307
10308 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10309
10310-static void inline arch_read_lock(arch_rwlock_t *lock)
10311+static inline void arch_read_lock(arch_rwlock_t *lock)
10312 {
10313 unsigned long tmp1, tmp2;
10314
10315 __asm__ __volatile__ (
10316 "1: ldsw [%2], %0\n"
10317 " brlz,pn %0, 2f\n"
10318-"4: add %0, 1, %1\n"
10319+"4: addcc %0, 1, %1\n"
10320+
10321+#ifdef CONFIG_PAX_REFCOUNT
10322+" tvs %%icc, 6\n"
10323+#endif
10324+
10325 " cas [%2], %0, %1\n"
10326 " cmp %0, %1\n"
10327 " bne,pn %%icc, 1b\n"
10328@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10329 " .previous"
10330 : "=&r" (tmp1), "=&r" (tmp2)
10331 : "r" (lock)
10332- : "memory");
10333+ : "memory", "cc");
10334 }
10335
10336-static int inline arch_read_trylock(arch_rwlock_t *lock)
10337+static inline int arch_read_trylock(arch_rwlock_t *lock)
10338 {
10339 int tmp1, tmp2;
10340
10341@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10342 "1: ldsw [%2], %0\n"
10343 " brlz,a,pn %0, 2f\n"
10344 " mov 0, %0\n"
10345-" add %0, 1, %1\n"
10346+" addcc %0, 1, %1\n"
10347+
10348+#ifdef CONFIG_PAX_REFCOUNT
10349+" tvs %%icc, 6\n"
10350+#endif
10351+
10352 " cas [%2], %0, %1\n"
10353 " cmp %0, %1\n"
10354 " bne,pn %%icc, 1b\n"
10355@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10356 return tmp1;
10357 }
10358
10359-static void inline arch_read_unlock(arch_rwlock_t *lock)
10360+static inline void arch_read_unlock(arch_rwlock_t *lock)
10361 {
10362 unsigned long tmp1, tmp2;
10363
10364 __asm__ __volatile__(
10365 "1: lduw [%2], %0\n"
10366-" sub %0, 1, %1\n"
10367+" subcc %0, 1, %1\n"
10368+
10369+#ifdef CONFIG_PAX_REFCOUNT
10370+" tvs %%icc, 6\n"
10371+#endif
10372+
10373 " cas [%2], %0, %1\n"
10374 " cmp %0, %1\n"
10375 " bne,pn %%xcc, 1b\n"
10376@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10377 : "memory");
10378 }
10379
10380-static void inline arch_write_lock(arch_rwlock_t *lock)
10381+static inline void arch_write_lock(arch_rwlock_t *lock)
10382 {
10383 unsigned long mask, tmp1, tmp2;
10384
10385@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10386 : "memory");
10387 }
10388
10389-static void inline arch_write_unlock(arch_rwlock_t *lock)
10390+static inline void arch_write_unlock(arch_rwlock_t *lock)
10391 {
10392 __asm__ __volatile__(
10393 " stw %%g0, [%0]"
10394@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10395 : "memory");
10396 }
10397
10398-static int inline arch_write_trylock(arch_rwlock_t *lock)
10399+static inline int arch_write_trylock(arch_rwlock_t *lock)
10400 {
10401 unsigned long mask, tmp1, tmp2, result;
10402
10403diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10404index 025c984..a216504 100644
10405--- a/arch/sparc/include/asm/thread_info_32.h
10406+++ b/arch/sparc/include/asm/thread_info_32.h
10407@@ -49,6 +49,8 @@ struct thread_info {
10408 unsigned long w_saved;
10409
10410 struct restart_block restart_block;
10411+
10412+ unsigned long lowest_stack;
10413 };
10414
10415 /*
10416diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10417index 798f027..b009941 100644
10418--- a/arch/sparc/include/asm/thread_info_64.h
10419+++ b/arch/sparc/include/asm/thread_info_64.h
10420@@ -63,6 +63,8 @@ struct thread_info {
10421 struct pt_regs *kern_una_regs;
10422 unsigned int kern_una_insn;
10423
10424+ unsigned long lowest_stack;
10425+
10426 unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
10427 __attribute__ ((aligned(64)));
10428 };
10429@@ -190,12 +192,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10430 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10431 /* flag bit 4 is available */
10432 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10433-/* flag bit 6 is available */
10434+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10435 #define TIF_32BIT 7 /* 32-bit binary */
10436 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10437 #define TIF_SECCOMP 9 /* secure computing */
10438 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10439 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10440+
10441 /* NOTE: Thread flags >= 12 should be ones we have no interest
10442 * in using in assembly, else we can't use the mask as
10443 * an immediate value in instructions such as andcc.
10444@@ -215,12 +218,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
10445 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10446 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10447 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10448+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10449
10450 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10451 _TIF_DO_NOTIFY_RESUME_MASK | \
10452 _TIF_NEED_RESCHED)
10453 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10454
10455+#define _TIF_WORK_SYSCALL \
10456+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10457+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10458+
10459 #define is_32bit_task() (test_thread_flag(TIF_32BIT))
10460
10461 /*
10462diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
10463index bd56c28..4b63d83 100644
10464--- a/arch/sparc/include/asm/uaccess.h
10465+++ b/arch/sparc/include/asm/uaccess.h
10466@@ -1,5 +1,6 @@
10467 #ifndef ___ASM_SPARC_UACCESS_H
10468 #define ___ASM_SPARC_UACCESS_H
10469+
10470 #if defined(__sparc__) && defined(__arch64__)
10471 #include <asm/uaccess_64.h>
10472 #else
10473diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
10474index 9634d08..020b2dc 100644
10475--- a/arch/sparc/include/asm/uaccess_32.h
10476+++ b/arch/sparc/include/asm/uaccess_32.h
10477@@ -47,6 +47,7 @@
10478 #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
10479 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
10480 #define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
10481+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
10482 #define access_ok(type, addr, size) \
10483 ({ (void)(type); __access_ok((unsigned long)(addr), size); })
10484
10485@@ -250,27 +251,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
10486
10487 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
10488 {
10489- if (n && __access_ok((unsigned long) to, n))
10490+ if ((long)n < 0)
10491+ return n;
10492+
10493+ if (n && __access_ok((unsigned long) to, n)) {
10494+ if (!__builtin_constant_p(n))
10495+ check_object_size(from, n, true);
10496 return __copy_user(to, (__force void __user *) from, n);
10497- else
10498+ } else
10499 return n;
10500 }
10501
10502 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
10503 {
10504+ if ((long)n < 0)
10505+ return n;
10506+
10507+ if (!__builtin_constant_p(n))
10508+ check_object_size(from, n, true);
10509+
10510 return __copy_user(to, (__force void __user *) from, n);
10511 }
10512
10513 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
10514 {
10515- if (n && __access_ok((unsigned long) from, n))
10516+ if ((long)n < 0)
10517+ return n;
10518+
10519+ if (n && __access_ok((unsigned long) from, n)) {
10520+ if (!__builtin_constant_p(n))
10521+ check_object_size(to, n, false);
10522 return __copy_user((__force void __user *) to, from, n);
10523- else
10524+ } else
10525 return n;
10526 }
10527
10528 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10529 {
10530+ if ((long)n < 0)
10531+ return n;
10532+
10533 return __copy_user((__force void __user *) to, from, n);
10534 }
10535
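
Two independent guards are added to each copy helper above: a sign check that
refuses a length whose top bit is set (the signature of an unsigned underflow
in the caller), and, for lengths that are not compile-time constants, a
check_object_size() call -- the PAX_USERCOPY bounds check added elsewhere in
this patch, which verifies that the kernel-side buffer really spans n bytes.
A hypothetical caller bug that both guards are aimed at (read_header() and its
lengths are made up for illustration):

	static int read_header(void __user *ubuf, size_t user_len)
	{
		char buf[64];
		size_t n = user_len - 8;	/* underflows when user_len < 8 */

		/* pre-patch this attempts a ~4GB copy that overruns buf[];
		 * post-patch the (long)n < 0 guard makes copy_from_user()
		 * fail fast, and check_object_size() would reject n > 64 */
		if (copy_from_user(buf, ubuf, n))
			return -EFAULT;
		return 0;
	}
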
10536diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10537index c990a5e..7384856 100644
10538--- a/arch/sparc/include/asm/uaccess_64.h
10539+++ b/arch/sparc/include/asm/uaccess_64.h
10540@@ -10,6 +10,7 @@
10541 #include <linux/compiler.h>
10542 #include <linux/string.h>
10543 #include <linux/thread_info.h>
10544+#include <linux/kernel.h>
10545 #include <asm/asi.h>
10546 #include <asm/spitfire.h>
10547 #include <asm-generic/uaccess-unaligned.h>
10548@@ -54,6 +55,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size)
10549 return 1;
10550 }
10551
10552+static inline int access_ok_noprefault(int type, const void __user * addr, unsigned long size)
10553+{
10554+ return 1;
10555+}
10556+
10557 static inline int access_ok(int type, const void __user * addr, unsigned long size)
10558 {
10559 return 1;
10560@@ -214,8 +220,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
10561 static inline unsigned long __must_check
10562 copy_from_user(void *to, const void __user *from, unsigned long size)
10563 {
10564- unsigned long ret = ___copy_from_user(to, from, size);
10565+ unsigned long ret;
10566
10567+ if ((long)size < 0 || size > INT_MAX)
10568+ return size;
10569+
10570+ if (!__builtin_constant_p(size))
10571+ check_object_size(to, size, false);
10572+
10573+ ret = ___copy_from_user(to, from, size);
10574 if (unlikely(ret))
10575 ret = copy_from_user_fixup(to, from, size);
10576
10577@@ -231,8 +244,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
10578 static inline unsigned long __must_check
10579 copy_to_user(void __user *to, const void *from, unsigned long size)
10580 {
10581- unsigned long ret = ___copy_to_user(to, from, size);
10582+ unsigned long ret;
10583
10584+ if ((long)size < 0 || size > INT_MAX)
10585+ return size;
10586+
10587+ if (!__builtin_constant_p(size))
10588+ check_object_size(from, size, true);
10589+
10590+ ret = ___copy_to_user(to, from, size);
10591 if (unlikely(ret))
10592 ret = copy_to_user_fixup(to, from, size);
10593 return ret;
10594diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10595index 7cf9c6e..6206648 100644
10596--- a/arch/sparc/kernel/Makefile
10597+++ b/arch/sparc/kernel/Makefile
10598@@ -4,7 +4,7 @@
10599 #
10600
10601 asflags-y := -ansi
10602-ccflags-y := -Werror
10603+#ccflags-y := -Werror
10604
10605 extra-y := head_$(BITS).o
10606
10607diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10608index 50e7b62..79fae35 100644
10609--- a/arch/sparc/kernel/process_32.c
10610+++ b/arch/sparc/kernel/process_32.c
10611@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
10612
10613 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10614 r->psr, r->pc, r->npc, r->y, print_tainted());
10615- printk("PC: <%pS>\n", (void *) r->pc);
10616+ printk("PC: <%pA>\n", (void *) r->pc);
10617 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10618 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10619 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10620 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10621 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10622 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10623- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10624+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10625
10626 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10627 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10628@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10629 rw = (struct reg_window32 *) fp;
10630 pc = rw->ins[7];
10631 printk("[%08lx : ", pc);
10632- printk("%pS ] ", (void *) pc);
10633+ printk("%pA ] ", (void *) pc);
10634 fp = rw->ins[6];
10635 } while (++count < 16);
10636 printk("\n");
10637diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10638index 46a5964..a35c62c 100644
10639--- a/arch/sparc/kernel/process_64.c
10640+++ b/arch/sparc/kernel/process_64.c
10641@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
10642 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10643 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10644 if (regs->tstate & TSTATE_PRIV)
10645- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10646+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10647 }
10648
10649 void show_regs(struct pt_regs *regs)
10650@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
10651
10652 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10653 regs->tpc, regs->tnpc, regs->y, print_tainted());
10654- printk("TPC: <%pS>\n", (void *) regs->tpc);
10655+ printk("TPC: <%pA>\n", (void *) regs->tpc);
10656 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10657 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10658 regs->u_regs[3]);
10659@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
10660 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10661 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10662 regs->u_regs[15]);
10663- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10664+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10665 show_regwindow(regs);
10666 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10667 }
10668@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
10669 ((tp && tp->task) ? tp->task->pid : -1));
10670
10671 if (gp->tstate & TSTATE_PRIV) {
10672- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10673+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10674 (void *) gp->tpc,
10675 (void *) gp->o7,
10676 (void *) gp->i7,
10677diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10678index 79cc0d1..ec62734 100644
10679--- a/arch/sparc/kernel/prom_common.c
10680+++ b/arch/sparc/kernel/prom_common.c
10681@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10682
10683 unsigned int prom_early_allocated __initdata;
10684
10685-static struct of_pdt_ops prom_sparc_ops __initdata = {
10686+static struct of_pdt_ops prom_sparc_ops __initconst = {
10687 .nextprop = prom_common_nextprop,
10688 .getproplen = prom_getproplen,
10689 .getproperty = prom_getproperty,
10690diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10691index 9ddc492..27a5619 100644
10692--- a/arch/sparc/kernel/ptrace_64.c
10693+++ b/arch/sparc/kernel/ptrace_64.c
10694@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10695 return ret;
10696 }
10697
10698+#ifdef CONFIG_GRKERNSEC_SETXID
10699+extern void gr_delayed_cred_worker(void);
10700+#endif
10701+
10702 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10703 {
10704 int ret = 0;
10705@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10706 if (test_thread_flag(TIF_NOHZ))
10707 user_exit();
10708
10709+#ifdef CONFIG_GRKERNSEC_SETXID
10710+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10711+ gr_delayed_cred_worker();
10712+#endif
10713+
10714 if (test_thread_flag(TIF_SYSCALL_TRACE))
10715 ret = tracehook_report_syscall_entry(regs);
10716
10717@@ -1088,6 +1097,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10718 if (test_thread_flag(TIF_NOHZ))
10719 user_exit();
10720
10721+#ifdef CONFIG_GRKERNSEC_SETXID
10722+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10723+ gr_delayed_cred_worker();
10724+#endif
10725+
10726 audit_syscall_exit(regs);
10727
10728 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
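
TIF_GRSEC_SETXID (claimed from spare flag bit 6 in the thread_info_64.h hunk
above) marks a thread whose credentials were changed by a sibling; both trace
hooks now test-and-clear it and run gr_delayed_cred_worker() so the new uids
take effect at a syscall boundary rather than mid-syscall. The flagging side
lives in the grsecurity core added elsewhere in the patch; a hypothetical
sketch of its shape (all names beyond TIF_GRSEC_SETXID are assumptions):

	static void mark_siblings_setxid(struct task_struct *leader)
	{
		struct task_struct *t = leader;

		/* assumed: tag every thread in the group so it picks the
		 * new credentials up at its next syscall trace hook */
		read_lock(&tasklist_lock);
		do {
			set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
		} while_each_thread(leader, t);
		read_unlock(&tasklist_lock);
	}
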
10729diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10730index da6f1a7..e5dea8f 100644
10731--- a/arch/sparc/kernel/smp_64.c
10732+++ b/arch/sparc/kernel/smp_64.c
10733@@ -887,7 +887,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10734 return;
10735
10736 #ifdef CONFIG_DEBUG_DCFLUSH
10737- atomic_inc(&dcpage_flushes);
10738+ atomic_inc_unchecked(&dcpage_flushes);
10739 #endif
10740
10741 this_cpu = get_cpu();
10742@@ -911,7 +911,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10743 xcall_deliver(data0, __pa(pg_addr),
10744 (u64) pg_addr, cpumask_of(cpu));
10745 #ifdef CONFIG_DEBUG_DCFLUSH
10746- atomic_inc(&dcpage_flushes_xcall);
10747+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10748 #endif
10749 }
10750 }
10751@@ -930,7 +930,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10752 preempt_disable();
10753
10754 #ifdef CONFIG_DEBUG_DCFLUSH
10755- atomic_inc(&dcpage_flushes);
10756+ atomic_inc_unchecked(&dcpage_flushes);
10757 #endif
10758 data0 = 0;
10759 pg_addr = page_address(page);
10760@@ -947,7 +947,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10761 xcall_deliver(data0, __pa(pg_addr),
10762 (u64) pg_addr, cpu_online_mask);
10763 #ifdef CONFIG_DEBUG_DCFLUSH
10764- atomic_inc(&dcpage_flushes_xcall);
10765+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10766 #endif
10767 }
10768 __local_flush_dcache_page(page);
10769diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10770index 646988d..b88905f 100644
10771--- a/arch/sparc/kernel/sys_sparc_32.c
10772+++ b/arch/sparc/kernel/sys_sparc_32.c
10773@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10774 if (len > TASK_SIZE - PAGE_SIZE)
10775 return -ENOMEM;
10776 if (!addr)
10777- addr = TASK_UNMAPPED_BASE;
10778+ addr = current->mm->mmap_base;
10779
10780 info.flags = 0;
10781 info.length = len;
10782diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10783index 30e7ddb..266a3b0 100644
10784--- a/arch/sparc/kernel/sys_sparc_64.c
10785+++ b/arch/sparc/kernel/sys_sparc_64.c
10786@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10787 struct vm_area_struct * vma;
10788 unsigned long task_size = TASK_SIZE;
10789 int do_color_align;
10790+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10791 struct vm_unmapped_area_info info;
10792
10793 if (flags & MAP_FIXED) {
10794 /* We do not accept a shared mapping if it would violate
10795 * cache aliasing constraints.
10796 */
10797- if ((flags & MAP_SHARED) &&
10798+ if ((filp || (flags & MAP_SHARED)) &&
10799 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10800 return -EINVAL;
10801 return addr;
10802@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10803 if (filp || (flags & MAP_SHARED))
10804 do_color_align = 1;
10805
10806+#ifdef CONFIG_PAX_RANDMMAP
10807+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10808+#endif
10809+
10810 if (addr) {
10811 if (do_color_align)
10812 addr = COLOR_ALIGN(addr, pgoff);
10813@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10814 addr = PAGE_ALIGN(addr);
10815
10816 vma = find_vma(mm, addr);
10817- if (task_size - len >= addr &&
10818- (!vma || addr + len <= vma->vm_start))
10819+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10820 return addr;
10821 }
10822
10823 info.flags = 0;
10824 info.length = len;
10825- info.low_limit = TASK_UNMAPPED_BASE;
10826+ info.low_limit = mm->mmap_base;
10827 info.high_limit = min(task_size, VA_EXCLUDE_START);
10828 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10829 info.align_offset = pgoff << PAGE_SHIFT;
10830+ info.threadstack_offset = offset;
10831 addr = vm_unmapped_area(&info);
10832
10833 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10834 VM_BUG_ON(addr != -ENOMEM);
10835 info.low_limit = VA_EXCLUDE_END;
10836+
10837+#ifdef CONFIG_PAX_RANDMMAP
10838+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10839+ info.low_limit += mm->delta_mmap;
10840+#endif
10841+
10842 info.high_limit = task_size;
10843 addr = vm_unmapped_area(&info);
10844 }
10845@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10846 unsigned long task_size = STACK_TOP32;
10847 unsigned long addr = addr0;
10848 int do_color_align;
10849+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10850 struct vm_unmapped_area_info info;
10851
10852 /* This should only ever run for 32-bit processes. */
10853@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10854 /* We do not accept a shared mapping if it would violate
10855 * cache aliasing constraints.
10856 */
10857- if ((flags & MAP_SHARED) &&
10858+ if ((filp || (flags & MAP_SHARED)) &&
10859 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10860 return -EINVAL;
10861 return addr;
10862@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10863 if (filp || (flags & MAP_SHARED))
10864 do_color_align = 1;
10865
10866+#ifdef CONFIG_PAX_RANDMMAP
10867+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10868+#endif
10869+
10870 /* requesting a specific address */
10871 if (addr) {
10872 if (do_color_align)
10873@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10874 addr = PAGE_ALIGN(addr);
10875
10876 vma = find_vma(mm, addr);
10877- if (task_size - len >= addr &&
10878- (!vma || addr + len <= vma->vm_start))
10879+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10880 return addr;
10881 }
10882
10883@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10884 info.high_limit = mm->mmap_base;
10885 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10886 info.align_offset = pgoff << PAGE_SHIFT;
10887+ info.threadstack_offset = offset;
10888 addr = vm_unmapped_area(&info);
10889
10890 /*
10891@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10892 VM_BUG_ON(addr != -ENOMEM);
10893 info.flags = 0;
10894 info.low_limit = TASK_UNMAPPED_BASE;
10895+
10896+#ifdef CONFIG_PAX_RANDMMAP
10897+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10898+ info.low_limit += mm->delta_mmap;
10899+#endif
10900+
10901 info.high_limit = STACK_TOP32;
10902 addr = vm_unmapped_area(&info);
10903 }
10904@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
10905 EXPORT_SYMBOL(get_fb_unmapped_area);
10906
10907 /* Essentially the same as PowerPC. */
10908-static unsigned long mmap_rnd(void)
10909+static unsigned long mmap_rnd(struct mm_struct *mm)
10910 {
10911 unsigned long rnd = 0UL;
10912
10913+#ifdef CONFIG_PAX_RANDMMAP
10914+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10915+#endif
10916+
10917 if (current->flags & PF_RANDOMIZE) {
10918 unsigned long val = get_random_int();
10919 if (test_thread_flag(TIF_32BIT))
10920@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
10921
10922 void arch_pick_mmap_layout(struct mm_struct *mm)
10923 {
10924- unsigned long random_factor = mmap_rnd();
10925+ unsigned long random_factor = mmap_rnd(mm);
10926 unsigned long gap;
10927
10928 /*
10929@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10930 gap == RLIM_INFINITY ||
10931 sysctl_legacy_va_layout) {
10932 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
10933+
10934+#ifdef CONFIG_PAX_RANDMMAP
10935+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10936+ mm->mmap_base += mm->delta_mmap;
10937+#endif
10938+
10939 mm->get_unmapped_area = arch_get_unmapped_area;
10940 } else {
10941 /* We know it's 32-bit */
10942@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10943 gap = (task_size / 6 * 5);
10944
10945 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
10946+
10947+#ifdef CONFIG_PAX_RANDMMAP
10948+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10949+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10950+#endif
10951+
10952 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10953 }
10954 }
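
With MF_PAX_RANDMMAP set, the stock mmap_rnd() randomisation is suppressed
(the early "if (!(mm->pax_flags & MF_PAX_RANDMMAP))" guard skips it) and the
layout is shifted by mm->delta_mmap/mm->delta_stack instead, whose widths are
the PAX_DELTA_*_LEN values defined in the elf_64.h hunk above (28/29 bits of
entropy for 64-bit tasks, 14/15 for 32-bit compat). A sketch of how such a
delta is plausibly derived -- the real derivation is in this patch's ELF
loader changes, so treat this as an assumption:

	static unsigned long pax_delta_mmap_sketch(void)
	{
		unsigned long bits = test_thread_flag(TIF_32BIT) ? 14 : 28;

		/* page-aligned random offset of up to 2^bits pages */
		return ((unsigned long)get_random_int() &
			((1UL << bits) - 1)) << PAGE_SHIFT;
	}
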
10955diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
10956index bb00089..e0ea580 100644
10957--- a/arch/sparc/kernel/syscalls.S
10958+++ b/arch/sparc/kernel/syscalls.S
10959@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
10960 #endif
10961 .align 32
10962 1: ldx [%g6 + TI_FLAGS], %l5
10963- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10964+ andcc %l5, _TIF_WORK_SYSCALL, %g0
10965 be,pt %icc, rtrap
10966 nop
10967 call syscall_trace_leave
10968@@ -194,7 +194,7 @@ linux_sparc_syscall32:
10969
10970 srl %i3, 0, %o3 ! IEU0
10971 srl %i2, 0, %o2 ! IEU0 Group
10972- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10973+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10974 bne,pn %icc, linux_syscall_trace32 ! CTI
10975 mov %i0, %l5 ! IEU1
10976 5: call %l7 ! CTI Group brk forced
10977@@ -218,7 +218,7 @@ linux_sparc_syscall:
10978
10979 mov %i3, %o3 ! IEU1
10980 mov %i4, %o4 ! IEU0 Group
10981- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10982+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10983 bne,pn %icc, linux_syscall_trace ! CTI Group
10984 mov %i0, %l5 ! IEU0
10985 2: call %l7 ! CTI Group brk forced
10986@@ -233,7 +233,7 @@ ret_sys_call:
10987
10988 cmp %o0, -ERESTART_RESTARTBLOCK
10989 bgeu,pn %xcc, 1f
10990- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10991+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10992 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
10993
10994 2:
10995diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
10996index 6fd386c5..6907d81 100644
10997--- a/arch/sparc/kernel/traps_32.c
10998+++ b/arch/sparc/kernel/traps_32.c
10999@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
11000 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
11001 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
11002
11003+extern void gr_handle_kernel_exploit(void);
11004+
11005 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11006 {
11007 static int die_counter;
11008@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11009 count++ < 30 &&
11010 (((unsigned long) rw) >= PAGE_OFFSET) &&
11011 !(((unsigned long) rw) & 0x7)) {
11012- printk("Caller[%08lx]: %pS\n", rw->ins[7],
11013+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
11014 (void *) rw->ins[7]);
11015 rw = (struct reg_window32 *)rw->ins[6];
11016 }
11017 }
11018 printk("Instruction DUMP:");
11019 instruction_dump ((unsigned long *) regs->pc);
11020- if(regs->psr & PSR_PS)
11021+ if(regs->psr & PSR_PS) {
11022+ gr_handle_kernel_exploit();
11023 do_exit(SIGKILL);
11024+ }
11025 do_exit(SIGSEGV);
11026 }
11027
11028diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
11029index 981a769..d906eda 100644
11030--- a/arch/sparc/kernel/traps_64.c
11031+++ b/arch/sparc/kernel/traps_64.c
11032@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
11033 i + 1,
11034 p->trapstack[i].tstate, p->trapstack[i].tpc,
11035 p->trapstack[i].tnpc, p->trapstack[i].tt);
11036- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
11037+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
11038 }
11039 }
11040
11041@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
11042
11043 lvl -= 0x100;
11044 if (regs->tstate & TSTATE_PRIV) {
11045+
11046+#ifdef CONFIG_PAX_REFCOUNT
11047+ if (lvl == 6)
11048+ pax_report_refcount_overflow(regs);
11049+#endif
11050+
11051 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
11052 die_if_kernel(buffer, regs);
11053 }
11054@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
11055 void bad_trap_tl1(struct pt_regs *regs, long lvl)
11056 {
11057 char buffer[32];
11058-
11059+
11060 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
11061 0, lvl, SIGTRAP) == NOTIFY_STOP)
11062 return;
11063
11064+#ifdef CONFIG_PAX_REFCOUNT
11065+ if (lvl == 6)
11066+ pax_report_refcount_overflow(regs);
11067+#endif
11068+
11069 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
11070
11071 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
11072@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
11073 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
11074 printk("%s" "ERROR(%d): ",
11075 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
11076- printk("TPC<%pS>\n", (void *) regs->tpc);
11077+ printk("TPC<%pA>\n", (void *) regs->tpc);
11078 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
11079 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
11080 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
11081@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11082 smp_processor_id(),
11083 (type & 0x1) ? 'I' : 'D',
11084 regs->tpc);
11085- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11086+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11087 panic("Irrecoverable Cheetah+ parity error.");
11088 }
11089
11090@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11091 smp_processor_id(),
11092 (type & 0x1) ? 'I' : 'D',
11093 regs->tpc);
11094- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11095+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11096 }
11097
11098 struct sun4v_error_entry {
11099@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11100 /*0x38*/u64 reserved_5;
11101 };
11102
11103-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11104-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11105+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11106+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11107
11108 static const char *sun4v_err_type_to_str(u8 type)
11109 {
11110@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11111 }
11112
11113 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11114- int cpu, const char *pfx, atomic_t *ocnt)
11115+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11116 {
11117 u64 *raw_ptr = (u64 *) ent;
11118 u32 attrs;
11119@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11120
11121 show_regs(regs);
11122
11123- if ((cnt = atomic_read(ocnt)) != 0) {
11124- atomic_set(ocnt, 0);
11125+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11126+ atomic_set_unchecked(ocnt, 0);
11127 wmb();
11128 printk("%s: Queue overflowed %d times.\n",
11129 pfx, cnt);
11130@@ -2048,7 +2059,7 @@ out:
11131 */
11132 void sun4v_resum_overflow(struct pt_regs *regs)
11133 {
11134- atomic_inc(&sun4v_resum_oflow_cnt);
11135+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11136 }
11137
11138 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11139@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11140 /* XXX Actually even this can make not that much sense. Perhaps
11141 * XXX we should just pull the plug and panic directly from here?
11142 */
11143- atomic_inc(&sun4v_nonresum_oflow_cnt);
11144+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11145 }
11146
11147 static void sun4v_tlb_error(struct pt_regs *regs)
11148@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11149
11150 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11151 regs->tpc, tl);
11152- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11153+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11154 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11155- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11156+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11157 (void *) regs->u_regs[UREG_I7]);
11158 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11159 "pte[%lx] error[%lx]\n",
11160@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11161
11162 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11163 regs->tpc, tl);
11164- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11165+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11166 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11167- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11168+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11169 (void *) regs->u_regs[UREG_I7]);
11170 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11171 "pte[%lx] error[%lx]\n",
11172@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11173 fp = (unsigned long)sf->fp + STACK_BIAS;
11174 }
11175
11176- printk(" [%016lx] %pS\n", pc, (void *) pc);
11177+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11178 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11179 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11180 int index = tsk->curr_ret_stack;
11181 if (tsk->ret_stack && index >= graph) {
11182 pc = tsk->ret_stack[index - graph].ret;
11183- printk(" [%016lx] %pS\n", pc, (void *) pc);
11184+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11185 graph++;
11186 }
11187 }
11188@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11189 return (struct reg_window *) (fp + STACK_BIAS);
11190 }
11191
11192+extern void gr_handle_kernel_exploit(void);
11193+
11194 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11195 {
11196 static int die_counter;
11197@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11198 while (rw &&
11199 count++ < 30 &&
11200 kstack_valid(tp, (unsigned long) rw)) {
11201- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11202+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11203 (void *) rw->ins[7]);
11204
11205 rw = kernel_stack_up(rw);
11206@@ -2427,8 +2440,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11207 }
11208 user_instruction_dump ((unsigned int __user *) regs->tpc);
11209 }
11210- if (regs->tstate & TSTATE_PRIV)
11211+ if (regs->tstate & TSTATE_PRIV) {
11212+ gr_handle_kernel_exploit();
11213 do_exit(SIGKILL);
11214+ }
11215 do_exit(SIGSEGV);
11216 }
11217 EXPORT_SYMBOL(die_if_kernel);
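
Both die_if_kernel() flavours now call gr_handle_kernel_exploit() before the
fatal do_exit() whenever the dying context was privileged (PSR_PS on sparc32,
TSTATE_PRIV here), and bad_trap()/bad_trap_tl1() divert trap level 6 to
pax_report_refcount_overflow(), closing the loop with the tvs instructions
added to the atomic paths. gr_handle_kernel_exploit() itself is defined in the
grsecurity core elsewhere in this patch; its assumed behaviour
(GRKERNSEC_KERN_LOCKOUT) is roughly:

	void gr_handle_kernel_exploit(void)
	{
		/* assumed: a kernel oops is treated as a possible exploit
		 * attempt; root crashing the kernel halts the box, anyone
		 * else has the offending uid locked out */
		if (uid_eq(current_uid(), GLOBAL_ROOT_UID))
			panic("grsec: kernel crash caused by root");
		/* ... otherwise ban current_uid() and kill its processes ... */
	}
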
11218diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11219index 62098a8..547ab2c 100644
11220--- a/arch/sparc/kernel/unaligned_64.c
11221+++ b/arch/sparc/kernel/unaligned_64.c
11222@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11223 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11224
11225 if (__ratelimit(&ratelimit)) {
11226- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11227+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11228 regs->tpc, (void *) regs->tpc);
11229 }
11230 }
11231diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11232index 3269b02..64f5231 100644
11233--- a/arch/sparc/lib/Makefile
11234+++ b/arch/sparc/lib/Makefile
11235@@ -2,7 +2,7 @@
11236 #
11237
11238 asflags-y := -ansi -DST_DIV0=0x02
11239-ccflags-y := -Werror
11240+#ccflags-y := -Werror
11241
11242 lib-$(CONFIG_SPARC32) += ashrdi3.o
11243 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11244diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11245index 05dac43..76f8ed4 100644
11246--- a/arch/sparc/lib/atomic_64.S
11247+++ b/arch/sparc/lib/atomic_64.S
11248@@ -15,11 +15,22 @@
11249 * a value and does the barriers.
11250 */
11251
11252-#define ATOMIC_OP(op) \
11253-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11254+#ifdef CONFIG_PAX_REFCOUNT
11255+#define __REFCOUNT_OP(op) op##cc
11256+#define __OVERFLOW_IOP tvs %icc, 6;
11257+#define __OVERFLOW_XOP tvs %xcc, 6;
11258+#else
11259+#define __REFCOUNT_OP(op) op
11260+#define __OVERFLOW_IOP
11261+#define __OVERFLOW_XOP
11262+#endif
11263+
11264+#define __ATOMIC_OP(op, suffix, asm_op, post_op) \
11265+ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11266 BACKOFF_SETUP(%o2); \
11267 1: lduw [%o1], %g1; \
11268- op %g1, %o0, %g7; \
11269+ asm_op %g1, %o0, %g7; \
11270+ post_op \
11271 cas [%o1], %g1, %g7; \
11272 cmp %g1, %g7; \
11273 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11274@@ -29,11 +40,15 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11275 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11276 ENDPROC(atomic_##op); \
11277
11278-#define ATOMIC_OP_RETURN(op) \
11279-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11280+#define ATOMIC_OP(op) __ATOMIC_OP(op, , op, ) \
11281+ __ATOMIC_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11282+
11283+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op) \
11284+ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11285 BACKOFF_SETUP(%o2); \
11286 1: lduw [%o1], %g1; \
11287- op %g1, %o0, %g7; \
11288+ asm_op %g1, %o0, %g7; \
11289+ post_op \
11290 cas [%o1], %g1, %g7; \
11291 cmp %g1, %g7; \
11292 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11293@@ -43,6 +58,9 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11294 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11295 ENDPROC(atomic_##op##_return);
11296
11297+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , op, ) \
11298+ __ATOMIC_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11299+
11300 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11301
11302 ATOMIC_OPS(add)
11303@@ -50,13 +68,16 @@ ATOMIC_OPS(sub)
11304
11305 #undef ATOMIC_OPS
11306 #undef ATOMIC_OP_RETURN
11307+#undef __ATOMIC_OP_RETURN
11308 #undef ATOMIC_OP
11309+#undef __ATOMIC_OP
11310
11311-#define ATOMIC64_OP(op) \
11312-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11313+#define __ATOMIC64_OP(op, suffix, asm_op, post_op) \
11314+ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11315 BACKOFF_SETUP(%o2); \
11316 1: ldx [%o1], %g1; \
11317- op %g1, %o0, %g7; \
11318+ asm_op %g1, %o0, %g7; \
11319+ post_op \
11320 casx [%o1], %g1, %g7; \
11321 cmp %g1, %g7; \
11322 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11323@@ -66,11 +87,15 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11324 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11325 ENDPROC(atomic64_##op); \
11326
11327-#define ATOMIC64_OP_RETURN(op) \
11328-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11329+#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , op, ) \
11330+ __ATOMIC64_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11331+
11332+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op) \
11333+ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11334 BACKOFF_SETUP(%o2); \
11335 1: ldx [%o1], %g1; \
11336- op %g1, %o0, %g7; \
11337+ asm_op %g1, %o0, %g7; \
11338+ post_op \
11339 casx [%o1], %g1, %g7; \
11340 cmp %g1, %g7; \
11341 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11342@@ -80,6 +105,9 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11343 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11344 ENDPROC(atomic64_##op##_return);
11345
11346+#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , op, ) \
11347+ __ATOMIC64_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11348+
11349 #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
11350
11351 ATOMIC64_OPS(add)
11352@@ -87,7 +115,12 @@ ATOMIC64_OPS(sub)
11353
11354 #undef ATOMIC64_OPS
11355 #undef ATOMIC64_OP_RETURN
11356+#undef __ATOMIC64_OP_RETURN
11357 #undef ATOMIC64_OP
11358+#undef __ATOMIC64_OP
11359+#undef __OVERFLOW_XOP
11360+#undef __OVERFLOW_IOP
11361+#undef __REFCOUNT_OP
11362
11363 ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
11364 BACKOFF_SETUP(%o2)
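
The assembler implementations are regenerated through the same two-entry
scheme as the header: each ATOMIC*_OP/ATOMIC*_OP_RETURN now expands twice, and
under CONFIG_PAX_REFCOUNT __REFCOUNT_OP(op) turns into the cc-setting "op##cc"
form with a trap-on-overflow appended (tvs %icc, 6 for 32-bit values, %xcc for
64-bit). The C-level shape of that pair, mirroring the __atomic_add_unless()
hunk earlier in this patch (sketch only):

	static inline int add_trap_on_overflow(int a, int b)
	{
		int sum;

		asm volatile("addcc %1, %2, %0\n"
	#ifdef CONFIG_PAX_REFCOUNT
			     "tvs %%icc, 6\n" /* signed overflow -> trap 0x106 */
	#endif
			     : "=r" (sum)
			     : "r" (a), "ir" (b)
			     : "cc");
		return sum;
	}
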
11365diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11366index 1d649a9..fbc5bfc 100644
11367--- a/arch/sparc/lib/ksyms.c
11368+++ b/arch/sparc/lib/ksyms.c
11369@@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
11370 /* Atomic counter implementation. */
11371 #define ATOMIC_OP(op) \
11372 EXPORT_SYMBOL(atomic_##op); \
11373-EXPORT_SYMBOL(atomic64_##op);
11374+EXPORT_SYMBOL(atomic_##op##_unchecked); \
11375+EXPORT_SYMBOL(atomic64_##op); \
11376+EXPORT_SYMBOL(atomic64_##op##_unchecked);
11377
11378 #define ATOMIC_OP_RETURN(op) \
11379 EXPORT_SYMBOL(atomic_##op##_return); \
11380@@ -110,6 +112,8 @@ EXPORT_SYMBOL(atomic64_##op##_return);
11381 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11382
11383 ATOMIC_OPS(add)
11384+EXPORT_SYMBOL(atomic_add_return_unchecked);
11385+EXPORT_SYMBOL(atomic64_add_return_unchecked);
11386 ATOMIC_OPS(sub)
11387
11388 #undef ATOMIC_OPS
11389diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
11390index 30c3ecc..736f015 100644
11391--- a/arch/sparc/mm/Makefile
11392+++ b/arch/sparc/mm/Makefile
11393@@ -2,7 +2,7 @@
11394 #
11395
11396 asflags-y := -ansi
11397-ccflags-y := -Werror
11398+#ccflags-y := -Werror
11399
11400 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
11401 obj-y += fault_$(BITS).o
11402diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
11403index 70d8171..274c6c0 100644
11404--- a/arch/sparc/mm/fault_32.c
11405+++ b/arch/sparc/mm/fault_32.c
11406@@ -21,6 +21,9 @@
11407 #include <linux/perf_event.h>
11408 #include <linux/interrupt.h>
11409 #include <linux/kdebug.h>
11410+#include <linux/slab.h>
11411+#include <linux/pagemap.h>
11412+#include <linux/compiler.h>
11413
11414 #include <asm/page.h>
11415 #include <asm/pgtable.h>
11416@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11417 return safe_compute_effective_address(regs, insn);
11418 }
11419
11420+#ifdef CONFIG_PAX_PAGEEXEC
11421+#ifdef CONFIG_PAX_DLRESOLVE
11422+static void pax_emuplt_close(struct vm_area_struct *vma)
11423+{
11424+ vma->vm_mm->call_dl_resolve = 0UL;
11425+}
11426+
11427+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11428+{
11429+ unsigned int *kaddr;
11430+
11431+ vmf->page = alloc_page(GFP_HIGHUSER);
11432+ if (!vmf->page)
11433+ return VM_FAULT_OOM;
11434+
11435+ kaddr = kmap(vmf->page);
11436+ memset(kaddr, 0, PAGE_SIZE);
11437+ kaddr[0] = 0x9DE3BFA8U; /* save */
11438+ flush_dcache_page(vmf->page);
11439+ kunmap(vmf->page);
11440+ return VM_FAULT_MAJOR;
11441+}
11442+
11443+static const struct vm_operations_struct pax_vm_ops = {
11444+ .close = pax_emuplt_close,
11445+ .fault = pax_emuplt_fault
11446+};
11447+
11448+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11449+{
11450+ int ret;
11451+
11452+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11453+ vma->vm_mm = current->mm;
11454+ vma->vm_start = addr;
11455+ vma->vm_end = addr + PAGE_SIZE;
11456+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11457+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11458+ vma->vm_ops = &pax_vm_ops;
11459+
11460+ ret = insert_vm_struct(current->mm, vma);
11461+ if (ret)
11462+ return ret;
11463+
11464+ ++current->mm->total_vm;
11465+ return 0;
11466+}
11467+#endif
11468+
11469+/*
11470+ * PaX: decide what to do with offenders (regs->pc = fault address)
11471+ *
11472+ * returns 1 when task should be killed
11473+ * 2 when patched PLT trampoline was detected
11474+ * 3 when unpatched PLT trampoline was detected
11475+ */
11476+static int pax_handle_fetch_fault(struct pt_regs *regs)
11477+{
11478+
11479+#ifdef CONFIG_PAX_EMUPLT
11480+ int err;
11481+
11482+ do { /* PaX: patched PLT emulation #1 */
11483+ unsigned int sethi1, sethi2, jmpl;
11484+
11485+ err = get_user(sethi1, (unsigned int *)regs->pc);
11486+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11487+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11488+
11489+ if (err)
11490+ break;
11491+
11492+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11493+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11494+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11495+ {
11496+ unsigned int addr;
11497+
11498+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11499+ addr = regs->u_regs[UREG_G1];
11500+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11501+ regs->pc = addr;
11502+ regs->npc = addr+4;
11503+ return 2;
11504+ }
11505+ } while (0);
11506+
11507+ do { /* PaX: patched PLT emulation #2 */
11508+ unsigned int ba;
11509+
11510+ err = get_user(ba, (unsigned int *)regs->pc);
11511+
11512+ if (err)
11513+ break;
11514+
11515+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11516+ unsigned int addr;
11517+
11518+ if ((ba & 0xFFC00000U) == 0x30800000U)
11519+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11520+ else
11521+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11522+ regs->pc = addr;
11523+ regs->npc = addr+4;
11524+ return 2;
11525+ }
11526+ } while (0);
11527+
11528+ do { /* PaX: patched PLT emulation #3 */
11529+ unsigned int sethi, bajmpl, nop;
11530+
11531+ err = get_user(sethi, (unsigned int *)regs->pc);
11532+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11533+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11534+
11535+ if (err)
11536+ break;
11537+
11538+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11539+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11540+ nop == 0x01000000U)
11541+ {
11542+ unsigned int addr;
11543+
11544+ addr = (sethi & 0x003FFFFFU) << 10;
11545+ regs->u_regs[UREG_G1] = addr;
11546+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11547+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11548+ else
11549+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11550+ regs->pc = addr;
11551+ regs->npc = addr+4;
11552+ return 2;
11553+ }
11554+ } while (0);
11555+
11556+ do { /* PaX: unpatched PLT emulation step 1 */
11557+ unsigned int sethi, ba, nop;
11558+
11559+ err = get_user(sethi, (unsigned int *)regs->pc);
11560+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
11561+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11562+
11563+ if (err)
11564+ break;
11565+
11566+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11567+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11568+ nop == 0x01000000U)
11569+ {
11570+ unsigned int addr, save, call;
11571+
11572+ if ((ba & 0xFFC00000U) == 0x30800000U)
11573+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11574+ else
11575+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11576+
11577+ err = get_user(save, (unsigned int *)addr);
11578+ err |= get_user(call, (unsigned int *)(addr+4));
11579+ err |= get_user(nop, (unsigned int *)(addr+8));
11580+ if (err)
11581+ break;
11582+
11583+#ifdef CONFIG_PAX_DLRESOLVE
11584+ if (save == 0x9DE3BFA8U &&
11585+ (call & 0xC0000000U) == 0x40000000U &&
11586+ nop == 0x01000000U)
11587+ {
11588+ struct vm_area_struct *vma;
11589+ unsigned long call_dl_resolve;
11590+
11591+ down_read(&current->mm->mmap_sem);
11592+ call_dl_resolve = current->mm->call_dl_resolve;
11593+ up_read(&current->mm->mmap_sem);
11594+ if (likely(call_dl_resolve))
11595+ goto emulate;
11596+
11597+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11598+
11599+ down_write(&current->mm->mmap_sem);
11600+ if (current->mm->call_dl_resolve) {
11601+ call_dl_resolve = current->mm->call_dl_resolve;
11602+ up_write(&current->mm->mmap_sem);
11603+ if (vma)
11604+ kmem_cache_free(vm_area_cachep, vma);
11605+ goto emulate;
11606+ }
11607+
11608+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11609+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11610+ up_write(&current->mm->mmap_sem);
11611+ if (vma)
11612+ kmem_cache_free(vm_area_cachep, vma);
11613+ return 1;
11614+ }
11615+
11616+ if (pax_insert_vma(vma, call_dl_resolve)) {
11617+ up_write(&current->mm->mmap_sem);
11618+ kmem_cache_free(vm_area_cachep, vma);
11619+ return 1;
11620+ }
11621+
11622+ current->mm->call_dl_resolve = call_dl_resolve;
11623+ up_write(&current->mm->mmap_sem);
11624+
11625+emulate:
11626+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11627+ regs->pc = call_dl_resolve;
11628+ regs->npc = addr+4;
11629+ return 3;
11630+ }
11631+#endif
11632+
11633+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11634+ if ((save & 0xFFC00000U) == 0x05000000U &&
11635+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11636+ nop == 0x01000000U)
11637+ {
11638+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11639+ regs->u_regs[UREG_G2] = addr + 4;
11640+ addr = (save & 0x003FFFFFU) << 10;
11641+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11642+ regs->pc = addr;
11643+ regs->npc = addr+4;
11644+ return 3;
11645+ }
11646+ }
11647+ } while (0);
11648+
11649+ do { /* PaX: unpatched PLT emulation step 2 */
11650+ unsigned int save, call, nop;
11651+
11652+ err = get_user(save, (unsigned int *)(regs->pc-4));
11653+ err |= get_user(call, (unsigned int *)regs->pc);
11654+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
11655+ if (err)
11656+ break;
11657+
11658+ if (save == 0x9DE3BFA8U &&
11659+ (call & 0xC0000000U) == 0x40000000U &&
11660+ nop == 0x01000000U)
11661+ {
11662+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11663+
11664+ regs->u_regs[UREG_RETPC] = regs->pc;
11665+ regs->pc = dl_resolve;
11666+ regs->npc = dl_resolve+4;
11667+ return 3;
11668+ }
11669+ } while (0);
11670+#endif
11671+
11672+ return 1;
11673+}
11674+
11675+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11676+{
11677+ unsigned long i;
11678+
11679+ printk(KERN_ERR "PAX: bytes at PC: ");
11680+ for (i = 0; i < 8; i++) {
11681+ unsigned int c;
11682+ if (get_user(c, (unsigned int *)pc+i))
11683+ printk(KERN_CONT "???????? ");
11684+ else
11685+ printk(KERN_CONT "%08x ", c);
11686+ }
11687+ printk("\n");
11688+}
11689+#endif
11690+
11691 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11692 int text_fault)
11693 {
11694@@ -226,6 +500,24 @@ good_area:
11695 if (!(vma->vm_flags & VM_WRITE))
11696 goto bad_area;
11697 } else {
11698+
11699+#ifdef CONFIG_PAX_PAGEEXEC
11700+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11701+ up_read(&mm->mmap_sem);
11702+ switch (pax_handle_fetch_fault(regs)) {
11703+
11704+#ifdef CONFIG_PAX_EMUPLT
11705+ case 2:
11706+ case 3:
11707+ return;
11708+#endif
11709+
11710+ }
11711+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11712+ do_group_exit(SIGKILL);
11713+ }
11714+#endif
11715+
11716 /* Allow reads even for write-only mappings */
11717 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11718 goto bad_area;
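Every emulation case in pax_handle_fetch_fault() above follows the same pattern: fetch the faulting userland instructions with get_user(), match them against known PLT sequences by mask/value, then recompute the branch target in software. For example, SPARC "sethi %hi(X), %g1" carries opcode/rd/op2 in bits 31:22 (0x03000000 for %g1) and a 22-bit immediate that lands in bits 31:10 of the register, which is exactly what the (sethi & 0x003FFFFFU) << 10 expressions recover. A standalone sketch of that one test (function name is mine, not the patch's):

#include <stdint.h>
#include <stdio.h>

/* Recognize "sethi %hi(X), %g1" and recover X, mirroring the
 * mask/value checks used in the fault handler above. */
static int is_sethi_g1(uint32_t insn, uint32_t *value)
{
	/* bits 31:22 = op(00) | rd(%g1 = 1) | op2(100) -> 0x03000000 */
	if ((insn & 0xFFC00000U) != 0x03000000U)
		return 0;
	*value = (insn & 0x003FFFFFU) << 10;	/* imm22 fills bits 31:10 */
	return 1;
}

int main(void)
{
	uint32_t v;
	if (is_sethi_g1(0x03000042U, &v))	/* sethi %hi(0x10800), %g1 */
		printf("target high bits: 0x%08x\n", v);
	return 0;
}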
11719diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11720index 4798232..f76e3aa 100644
11721--- a/arch/sparc/mm/fault_64.c
11722+++ b/arch/sparc/mm/fault_64.c
11723@@ -22,6 +22,9 @@
11724 #include <linux/kdebug.h>
11725 #include <linux/percpu.h>
11726 #include <linux/context_tracking.h>
11727+#include <linux/slab.h>
11728+#include <linux/pagemap.h>
11729+#include <linux/compiler.h>
11730
11731 #include <asm/page.h>
11732 #include <asm/pgtable.h>
11733@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11734 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11735 regs->tpc);
11736 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11737- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11738+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11739 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11740 dump_stack();
11741 unhandled_fault(regs->tpc, current, regs);
11742@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
11743 show_regs(regs);
11744 }
11745
11746+#ifdef CONFIG_PAX_PAGEEXEC
11747+#ifdef CONFIG_PAX_DLRESOLVE
11748+static void pax_emuplt_close(struct vm_area_struct *vma)
11749+{
11750+ vma->vm_mm->call_dl_resolve = 0UL;
11751+}
11752+
11753+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11754+{
11755+ unsigned int *kaddr;
11756+
11757+ vmf->page = alloc_page(GFP_HIGHUSER);
11758+ if (!vmf->page)
11759+ return VM_FAULT_OOM;
11760+
11761+ kaddr = kmap(vmf->page);
11762+ memset(kaddr, 0, PAGE_SIZE);
11763+ kaddr[0] = 0x9DE3BFA8U; /* save */
11764+ flush_dcache_page(vmf->page);
11765+ kunmap(vmf->page);
11766+ return VM_FAULT_MAJOR;
11767+}
11768+
11769+static const struct vm_operations_struct pax_vm_ops = {
11770+ .close = pax_emuplt_close,
11771+ .fault = pax_emuplt_fault
11772+};
11773+
11774+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11775+{
11776+ int ret;
11777+
11778+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11779+ vma->vm_mm = current->mm;
11780+ vma->vm_start = addr;
11781+ vma->vm_end = addr + PAGE_SIZE;
11782+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11783+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11784+ vma->vm_ops = &pax_vm_ops;
11785+
11786+ ret = insert_vm_struct(current->mm, vma);
11787+ if (ret)
11788+ return ret;
11789+
11790+ ++current->mm->total_vm;
11791+ return 0;
11792+}
11793+#endif
11794+
11795+/*
11796+ * PaX: decide what to do with offenders (regs->tpc = fault address)
11797+ *
11798+ * returns 1 when task should be killed
11799+ * 2 when patched PLT trampoline was detected
11800+ * 3 when unpatched PLT trampoline was detected
11801+ */
11802+static int pax_handle_fetch_fault(struct pt_regs *regs)
11803+{
11804+
11805+#ifdef CONFIG_PAX_EMUPLT
11806+ int err;
11807+
11808+ do { /* PaX: patched PLT emulation #1 */
11809+ unsigned int sethi1, sethi2, jmpl;
11810+
11811+ err = get_user(sethi1, (unsigned int *)regs->tpc);
11812+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
11813+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
11814+
11815+ if (err)
11816+ break;
11817+
11818+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11819+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11820+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11821+ {
11822+ unsigned long addr;
11823+
11824+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11825+ addr = regs->u_regs[UREG_G1];
11826+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11827+
11828+ if (test_thread_flag(TIF_32BIT))
11829+ addr &= 0xFFFFFFFFUL;
11830+
11831+ regs->tpc = addr;
11832+ regs->tnpc = addr+4;
11833+ return 2;
11834+ }
11835+ } while (0);
11836+
11837+ do { /* PaX: patched PLT emulation #2 */
11838+ unsigned int ba;
11839+
11840+ err = get_user(ba, (unsigned int *)regs->tpc);
11841+
11842+ if (err)
11843+ break;
11844+
11845+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11846+ unsigned long addr;
11847+
11848+ if ((ba & 0xFFC00000U) == 0x30800000U)
11849+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11850+ else
11851+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11852+
11853+ if (test_thread_flag(TIF_32BIT))
11854+ addr &= 0xFFFFFFFFUL;
11855+
11856+ regs->tpc = addr;
11857+ regs->tnpc = addr+4;
11858+ return 2;
11859+ }
11860+ } while (0);
11861+
11862+ do { /* PaX: patched PLT emulation #3 */
11863+ unsigned int sethi, bajmpl, nop;
11864+
11865+ err = get_user(sethi, (unsigned int *)regs->tpc);
11866+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
11867+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11868+
11869+ if (err)
11870+ break;
11871+
11872+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11873+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11874+ nop == 0x01000000U)
11875+ {
11876+ unsigned long addr;
11877+
11878+ addr = (sethi & 0x003FFFFFU) << 10;
11879+ regs->u_regs[UREG_G1] = addr;
11880+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11881+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11882+ else
11883+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11884+
11885+ if (test_thread_flag(TIF_32BIT))
11886+ addr &= 0xFFFFFFFFUL;
11887+
11888+ regs->tpc = addr;
11889+ regs->tnpc = addr+4;
11890+ return 2;
11891+ }
11892+ } while (0);
11893+
11894+ do { /* PaX: patched PLT emulation #4 */
11895+ unsigned int sethi, mov1, call, mov2;
11896+
11897+ err = get_user(sethi, (unsigned int *)regs->tpc);
11898+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
11899+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
11900+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
11901+
11902+ if (err)
11903+ break;
11904+
11905+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11906+ mov1 == 0x8210000FU &&
11907+ (call & 0xC0000000U) == 0x40000000U &&
11908+ mov2 == 0x9E100001U)
11909+ {
11910+ unsigned long addr;
11911+
11912+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
11913+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11914+
11915+ if (test_thread_flag(TIF_32BIT))
11916+ addr &= 0xFFFFFFFFUL;
11917+
11918+ regs->tpc = addr;
11919+ regs->tnpc = addr+4;
11920+ return 2;
11921+ }
11922+ } while (0);
11923+
11924+ do { /* PaX: patched PLT emulation #5 */
11925+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
11926+
11927+ err = get_user(sethi, (unsigned int *)regs->tpc);
11928+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11929+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11930+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
11931+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
11932+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
11933+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
11934+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
11935+
11936+ if (err)
11937+ break;
11938+
11939+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11940+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11941+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11942+ (or1 & 0xFFFFE000U) == 0x82106000U &&
11943+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11944+ sllx == 0x83287020U &&
11945+ jmpl == 0x81C04005U &&
11946+ nop == 0x01000000U)
11947+ {
11948+ unsigned long addr;
11949+
11950+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11951+ regs->u_regs[UREG_G1] <<= 32;
11952+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11953+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11954+ regs->tpc = addr;
11955+ regs->tnpc = addr+4;
11956+ return 2;
11957+ }
11958+ } while (0);
11959+
11960+ do { /* PaX: patched PLT emulation #6 */
11961+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
11962+
11963+ err = get_user(sethi, (unsigned int *)regs->tpc);
11964+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11965+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11966+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
11967+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
11968+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
11969+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
11970+
11971+ if (err)
11972+ break;
11973+
11974+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11975+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11976+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11977+ sllx == 0x83287020U &&
11978+ (or & 0xFFFFE000U) == 0x8A116000U &&
11979+ jmpl == 0x81C04005U &&
11980+ nop == 0x01000000U)
11981+ {
11982+ unsigned long addr;
11983+
11984+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11985+ regs->u_regs[UREG_G1] <<= 32;
11986+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11987+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11988+ regs->tpc = addr;
11989+ regs->tnpc = addr+4;
11990+ return 2;
11991+ }
11992+ } while (0);
11993+
11994+ do { /* PaX: unpatched PLT emulation step 1 */
11995+ unsigned int sethi, ba, nop;
11996+
11997+ err = get_user(sethi, (unsigned int *)regs->tpc);
11998+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11999+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12000+
12001+ if (err)
12002+ break;
12003+
12004+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12005+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
12006+ nop == 0x01000000U)
12007+ {
12008+ unsigned long addr;
12009+ unsigned int save, call;
12010+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
12011+
12012+ if ((ba & 0xFFC00000U) == 0x30800000U)
12013+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
12014+ else
12015+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12016+
12017+ if (test_thread_flag(TIF_32BIT))
12018+ addr &= 0xFFFFFFFFUL;
12019+
12020+ err = get_user(save, (unsigned int *)addr);
12021+ err |= get_user(call, (unsigned int *)(addr+4));
12022+ err |= get_user(nop, (unsigned int *)(addr+8));
12023+ if (err)
12024+ break;
12025+
12026+#ifdef CONFIG_PAX_DLRESOLVE
12027+ if (save == 0x9DE3BFA8U &&
12028+ (call & 0xC0000000U) == 0x40000000U &&
12029+ nop == 0x01000000U)
12030+ {
12031+ struct vm_area_struct *vma;
12032+ unsigned long call_dl_resolve;
12033+
12034+ down_read(&current->mm->mmap_sem);
12035+ call_dl_resolve = current->mm->call_dl_resolve;
12036+ up_read(&current->mm->mmap_sem);
12037+ if (likely(call_dl_resolve))
12038+ goto emulate;
12039+
12040+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
12041+
12042+ down_write(&current->mm->mmap_sem);
12043+ if (current->mm->call_dl_resolve) {
12044+ call_dl_resolve = current->mm->call_dl_resolve;
12045+ up_write(&current->mm->mmap_sem);
12046+ if (vma)
12047+ kmem_cache_free(vm_area_cachep, vma);
12048+ goto emulate;
12049+ }
12050+
12051+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
12052+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
12053+ up_write(&current->mm->mmap_sem);
12054+ if (vma)
12055+ kmem_cache_free(vm_area_cachep, vma);
12056+ return 1;
12057+ }
12058+
12059+ if (pax_insert_vma(vma, call_dl_resolve)) {
12060+ up_write(&current->mm->mmap_sem);
12061+ kmem_cache_free(vm_area_cachep, vma);
12062+ return 1;
12063+ }
12064+
12065+ current->mm->call_dl_resolve = call_dl_resolve;
12066+ up_write(&current->mm->mmap_sem);
12067+
12068+emulate:
12069+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12070+ regs->tpc = call_dl_resolve;
12071+ regs->tnpc = addr+4;
12072+ return 3;
12073+ }
12074+#endif
12075+
12076+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
12077+ if ((save & 0xFFC00000U) == 0x05000000U &&
12078+ (call & 0xFFFFE000U) == 0x85C0A000U &&
12079+ nop == 0x01000000U)
12080+ {
12081+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12082+ regs->u_regs[UREG_G2] = addr + 4;
12083+ addr = (save & 0x003FFFFFU) << 10;
12084+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12085+
12086+ if (test_thread_flag(TIF_32BIT))
12087+ addr &= 0xFFFFFFFFUL;
12088+
12089+ regs->tpc = addr;
12090+ regs->tnpc = addr+4;
12091+ return 3;
12092+ }
12093+
12094+ /* PaX: 64-bit PLT stub */
12095+ err = get_user(sethi1, (unsigned int *)addr);
12096+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12097+ err |= get_user(or1, (unsigned int *)(addr+8));
12098+ err |= get_user(or2, (unsigned int *)(addr+12));
12099+ err |= get_user(sllx, (unsigned int *)(addr+16));
12100+ err |= get_user(add, (unsigned int *)(addr+20));
12101+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12102+ err |= get_user(nop, (unsigned int *)(addr+28));
12103+ if (err)
12104+ break;
12105+
12106+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12107+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12108+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12109+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12110+ sllx == 0x89293020U &&
12111+ add == 0x8A010005U &&
12112+ jmpl == 0x89C14000U &&
12113+ nop == 0x01000000U)
12114+ {
12115+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12116+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12117+ regs->u_regs[UREG_G4] <<= 32;
12118+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12119+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12120+ regs->u_regs[UREG_G4] = addr + 24;
12121+ addr = regs->u_regs[UREG_G5];
12122+ regs->tpc = addr;
12123+ regs->tnpc = addr+4;
12124+ return 3;
12125+ }
12126+ }
12127+ } while (0);
12128+
12129+#ifdef CONFIG_PAX_DLRESOLVE
12130+ do { /* PaX: unpatched PLT emulation step 2 */
12131+ unsigned int save, call, nop;
12132+
12133+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12134+ err |= get_user(call, (unsigned int *)regs->tpc);
12135+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12136+ if (err)
12137+ break;
12138+
12139+ if (save == 0x9DE3BFA8U &&
12140+ (call & 0xC0000000U) == 0x40000000U &&
12141+ nop == 0x01000000U)
12142+ {
12143+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12144+
12145+ if (test_thread_flag(TIF_32BIT))
12146+ dl_resolve &= 0xFFFFFFFFUL;
12147+
12148+ regs->u_regs[UREG_RETPC] = regs->tpc;
12149+ regs->tpc = dl_resolve;
12150+ regs->tnpc = dl_resolve+4;
12151+ return 3;
12152+ }
12153+ } while (0);
12154+#endif
12155+
12156+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12157+ unsigned int sethi, ba, nop;
12158+
12159+ err = get_user(sethi, (unsigned int *)regs->tpc);
12160+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12161+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12162+
12163+ if (err)
12164+ break;
12165+
12166+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12167+ (ba & 0xFFF00000U) == 0x30600000U &&
12168+ nop == 0x01000000U)
12169+ {
12170+ unsigned long addr;
12171+
12172+ addr = (sethi & 0x003FFFFFU) << 10;
12173+ regs->u_regs[UREG_G1] = addr;
12174+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12175+
12176+ if (test_thread_flag(TIF_32BIT))
12177+ addr &= 0xFFFFFFFFUL;
12178+
12179+ regs->tpc = addr;
12180+ regs->tnpc = addr+4;
12181+ return 2;
12182+ }
12183+ } while (0);
12184+
12185+#endif
12186+
12187+ return 1;
12188+}
12189+
12190+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12191+{
12192+ unsigned long i;
12193+
12194+ printk(KERN_ERR "PAX: bytes at PC: ");
12195+ for (i = 0; i < 8; i++) {
12196+ unsigned int c;
12197+ if (get_user(c, (unsigned int *)pc+i))
12198+ printk(KERN_CONT "???????? ");
12199+ else
12200+ printk(KERN_CONT "%08x ", c);
12201+ }
12202+ printk("\n");
12203+}
12204+#endif
12205+
12206 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12207 {
12208 enum ctx_state prev_state = exception_enter();
12209@@ -353,6 +816,29 @@ retry:
12210 if (!vma)
12211 goto bad_area;
12212
12213+#ifdef CONFIG_PAX_PAGEEXEC
12214+ /* PaX: detect ITLB misses on non-exec pages */
12215+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12216+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12217+ {
12218+ if (address != regs->tpc)
12219+ goto good_area;
12220+
12221+ up_read(&mm->mmap_sem);
12222+ switch (pax_handle_fetch_fault(regs)) {
12223+
12224+#ifdef CONFIG_PAX_EMUPLT
12225+ case 2:
12226+ case 3:
12227+ return;
12228+#endif
12229+
12230+ }
12231+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12232+ do_group_exit(SIGKILL);
12233+ }
12234+#endif
12235+
12236 /* Pure DTLB misses do not tell us whether the fault causing
12237 * load/store/atomic was a write or not, it only says that there
12238 * was no match. So in such a case we (carefully) read the
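The repeated (((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2 expressions above are a branchless sign extension of the 22-bit (or, with the other constants, 19-bit) branch displacement, scaled by 4 from words to bytes; the leading OR also absorbs the opcode bits of the full instruction word. A small self-check of the equivalence for the 22-bit case (a sketch, not kernel code):

#include <assert.h>
#include <stdint.h>

static int64_t sext22_idiom(uint64_t insn)
{
	return (int64_t)((((insn | 0xFFFFFFFFFFC00000UL)
			   ^ 0x00200000UL) + 0x00200000UL) << 2);
}

static int64_t sext22_plain(uint64_t insn)
{
	int64_t d = insn & 0x003FFFFFUL;	/* 22-bit displacement */
	if (d & 0x00200000UL)			/* sign bit set? */
		d -= 0x00400000UL;
	return d << 2;				/* word -> byte offset */
}

int main(void)
{
	for (uint64_t d = 0; d < (1UL << 22); d++)
		assert(sext22_idiom(d) == sext22_plain(d));
	return 0;
}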
12239diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12240index d329537..2c3746a 100644
12241--- a/arch/sparc/mm/hugetlbpage.c
12242+++ b/arch/sparc/mm/hugetlbpage.c
12243@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12244 unsigned long addr,
12245 unsigned long len,
12246 unsigned long pgoff,
12247- unsigned long flags)
12248+ unsigned long flags,
12249+ unsigned long offset)
12250 {
12251+ struct mm_struct *mm = current->mm;
12252 unsigned long task_size = TASK_SIZE;
12253 struct vm_unmapped_area_info info;
12254
12255@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12256
12257 info.flags = 0;
12258 info.length = len;
12259- info.low_limit = TASK_UNMAPPED_BASE;
12260+ info.low_limit = mm->mmap_base;
12261 info.high_limit = min(task_size, VA_EXCLUDE_START);
12262 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12263 info.align_offset = 0;
12264+ info.threadstack_offset = offset;
12265 addr = vm_unmapped_area(&info);
12266
12267 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12268 VM_BUG_ON(addr != -ENOMEM);
12269 info.low_limit = VA_EXCLUDE_END;
12270+
12271+#ifdef CONFIG_PAX_RANDMMAP
12272+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12273+ info.low_limit += mm->delta_mmap;
12274+#endif
12275+
12276 info.high_limit = task_size;
12277 addr = vm_unmapped_area(&info);
12278 }
12279@@ -55,7 +64,8 @@ static unsigned long
12280 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12281 const unsigned long len,
12282 const unsigned long pgoff,
12283- const unsigned long flags)
12284+ const unsigned long flags,
12285+ const unsigned long offset)
12286 {
12287 struct mm_struct *mm = current->mm;
12288 unsigned long addr = addr0;
12289@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12290 info.high_limit = mm->mmap_base;
12291 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12292 info.align_offset = 0;
12293+ info.threadstack_offset = offset;
12294 addr = vm_unmapped_area(&info);
12295
12296 /*
12297@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12298 VM_BUG_ON(addr != -ENOMEM);
12299 info.flags = 0;
12300 info.low_limit = TASK_UNMAPPED_BASE;
12301+
12302+#ifdef CONFIG_PAX_RANDMMAP
12303+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12304+ info.low_limit += mm->delta_mmap;
12305+#endif
12306+
12307 info.high_limit = STACK_TOP32;
12308 addr = vm_unmapped_area(&info);
12309 }
12310@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12311 struct mm_struct *mm = current->mm;
12312 struct vm_area_struct *vma;
12313 unsigned long task_size = TASK_SIZE;
12314+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12315
12316 if (test_thread_flag(TIF_32BIT))
12317 task_size = STACK_TOP32;
12318@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12319 return addr;
12320 }
12321
12322+#ifdef CONFIG_PAX_RANDMMAP
12323+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12324+#endif
12325+
12326 if (addr) {
12327 addr = ALIGN(addr, HPAGE_SIZE);
12328 vma = find_vma(mm, addr);
12329- if (task_size - len >= addr &&
12330- (!vma || addr + len <= vma->vm_start))
12331+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12332 return addr;
12333 }
12334 if (mm->get_unmapped_area == arch_get_unmapped_area)
12335 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12336- pgoff, flags);
12337+ pgoff, flags, offset);
12338 else
12339 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12340- pgoff, flags);
12341+ pgoff, flags, offset);
12342 }
12343
12344 pte_t *huge_pte_alloc(struct mm_struct *mm,
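The hugetlb changes follow the pattern grsecurity applies to every get_unmapped_area variant: start the search at mm->mmap_base (which RANDMMAP randomizes) instead of the fixed TASK_UNMAPPED_BASE, thread a gr_rand_threadstack_offset() gap through vm_unmapped_area(), and when the first window fails, retry from a low limit again shifted by mm->delta_mmap under MF_PAX_RANDMMAP so the fallback does not silently drop the randomization. The two-pass shape in isolation, with toy stand-ins for the kernel pieces (everything here is hypothetical scaffolding, not kernel API):

#include <stdio.h>

#define PAGE_MASK (~0xFFFUL)

/* Toy vm_unmapped_area(): fail unless given the wide fallback window. */
static unsigned long search(unsigned long low, unsigned long high,
			    unsigned long len)
{
	if (high - low < 0x100000000UL)
		return ~0UL;			/* first window exhausted */
	return low;				/* lowest fit */
}

static unsigned long two_pass(unsigned long len, unsigned long mmap_base,
			      unsigned long delta_mmap, int randmmap)
{
	unsigned long addr = search(mmap_base, mmap_base + 0x10000000UL, len);

	if (addr & ~PAGE_MASK) {		/* failed: retry wider */
		unsigned long low = 0x100000000UL;	/* VA_EXCLUDE_END-ish */
		if (randmmap)
			low += delta_mmap;	/* keep the randomized shift */
		addr = search(low, ~0UL, len);
	}
	return addr;
}

int main(void)
{
	printf("%#lx\n", two_pass(0x1000, 0x2000000000UL, 0x00123000UL, 1));
	return 0;
}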
12345diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12346index 3ea267c..93f0659 100644
12347--- a/arch/sparc/mm/init_64.c
12348+++ b/arch/sparc/mm/init_64.c
12349@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12350 int num_kernel_image_mappings;
12351
12352 #ifdef CONFIG_DEBUG_DCFLUSH
12353-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12354+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12355 #ifdef CONFIG_SMP
12356-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12357+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12358 #endif
12359 #endif
12360
12361@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
12362 {
12363 BUG_ON(tlb_type == hypervisor);
12364 #ifdef CONFIG_DEBUG_DCFLUSH
12365- atomic_inc(&dcpage_flushes);
12366+ atomic_inc_unchecked(&dcpage_flushes);
12367 #endif
12368
12369 #ifdef DCACHE_ALIASING_POSSIBLE
12370@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
12371
12372 #ifdef CONFIG_DEBUG_DCFLUSH
12373 seq_printf(m, "DCPageFlushes\t: %d\n",
12374- atomic_read(&dcpage_flushes));
12375+ atomic_read_unchecked(&dcpage_flushes));
12376 #ifdef CONFIG_SMP
12377 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12378- atomic_read(&dcpage_flushes_xcall));
12379+ atomic_read_unchecked(&dcpage_flushes_xcall));
12380 #endif /* CONFIG_SMP */
12381 #endif /* CONFIG_DEBUG_DCFLUSH */
12382 }
12383diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
12384index 7cca418..53fc030 100644
12385--- a/arch/tile/Kconfig
12386+++ b/arch/tile/Kconfig
12387@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
12388
12389 config KEXEC
12390 bool "kexec system call"
12391+ depends on !GRKERNSEC_KMEM
12392 ---help---
12393 kexec is a system call that implements the ability to shutdown your
12394 current kernel, and to start another kernel. It is like a reboot
12395diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
12396index 7b11c5f..755a026 100644
12397--- a/arch/tile/include/asm/atomic_64.h
12398+++ b/arch/tile/include/asm/atomic_64.h
12399@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
12400
12401 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12402
12403+#define atomic64_read_unchecked(v) atomic64_read(v)
12404+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
12405+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
12406+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
12407+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
12408+#define atomic64_inc_unchecked(v) atomic64_inc(v)
12409+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
12410+#define atomic64_dec_unchecked(v) atomic64_dec(v)
12411+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
12412+
12413 /* Define this to indicate that cmpxchg is an efficient operation. */
12414 #define __HAVE_ARCH_CMPXCHG
12415
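Tile has no overflow-checked atomic implementation, so the hunk above satisfies the *_unchecked API with straight aliases to the stock operations; shared code can then call atomic64_inc_unchecked() unconditionally and still build on every architecture. The same fallback reduces to a few macros; a compilable sketch with a stand-in atomic type (names other than the aliases are mine):

#include <stdatomic.h>

typedef _Atomic long atomic64_t;
static inline void atomic64_inc(atomic64_t *v) { atomic_fetch_add(v, 1); }

/* The aliasing layer: unchecked names fall back to the stock ops. */
#define atomic64_inc_unchecked(v) atomic64_inc(v)

int main(void)
{
	atomic64_t n = 0;
	atomic64_inc_unchecked(&n);	/* works with or without REFCOUNT */
	return 0;
}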
12416diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12417index 6160761..00cac88 100644
12418--- a/arch/tile/include/asm/cache.h
12419+++ b/arch/tile/include/asm/cache.h
12420@@ -15,11 +15,12 @@
12421 #ifndef _ASM_TILE_CACHE_H
12422 #define _ASM_TILE_CACHE_H
12423
12424+#include <linux/const.h>
12425 #include <arch/chip.h>
12426
12427 /* bytes per L1 data cache line */
12428 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12429-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12430+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12431
12432 /* bytes per L2 cache line */
12433 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
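The cache.h hunks across architectures all make the same change: L1_CACHE_BYTES becomes an unsigned-long constant via _AC() from <linux/const.h>, so expressions like masking an address with ~(L1_CACHE_BYTES-1) are evaluated in unsigned long rather than int, while the macro still degrades to a bare number when the header is included from assembly. The mechanism, reduced to a sketch of the real _AC() definition:

#include <stdio.h>

/* <linux/const.h>: paste the type suffix in C, drop it in assembly
 * (where "1UL" would not assemble). */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	5
#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)

int main(void)
{
	/* sizeof confirms the constant is unsigned long, not int */
	printf("%zu %lu\n", sizeof(L1_CACHE_BYTES),
	       (unsigned long)L1_CACHE_BYTES);
	return 0;
}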
12434diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12435index b6cde32..c0cb736 100644
12436--- a/arch/tile/include/asm/uaccess.h
12437+++ b/arch/tile/include/asm/uaccess.h
12438@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12439 const void __user *from,
12440 unsigned long n)
12441 {
12442- int sz = __compiletime_object_size(to);
12443+ size_t sz = __compiletime_object_size(to);
12444
12445- if (likely(sz == -1 || sz >= n))
12446+ if (likely(sz == (size_t)-1 || sz >= n))
12447 n = _copy_from_user(to, from, n);
12448 else
12449 copy_from_user_overflow();
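The uaccess.h change turns sz into size_t because __compiletime_object_size() (a wrapper around __builtin_object_size(ptr, 0)) reports "unknown" as (size_t)-1; keeping it in a signed int truncates on 64-bit and makes the sz >= n comparison mix signedness. The hardened-copy check is easy to reproduce in userspace (checked_copy/COPY are illustrative names, not kernel API):

#include <stdio.h>
#include <string.h>

/* Refuse the copy when the compiler can prove the destination is
 * smaller than n, as the patched copy_from_user() does. */
static void checked_copy(void *to, const void *from, size_t n, size_t sz)
{
	if (sz == (size_t)-1 || sz >= n)	/* unknown size, or fits */
		memcpy(to, from, n);
	else
		fprintf(stderr, "copy overflow: %zu > %zu\n", n, sz);
}

#define COPY(to, from, n) \
	checked_copy(to, from, n, __builtin_object_size(to, 0))

int main(void)
{
	char small[8], src[16] = "0123456789abcde";
	COPY(small, src, sizeof(src));	/* destination known: caught */
	return 0;
}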
12450diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12451index 3270e00..a77236e 100644
12452--- a/arch/tile/mm/hugetlbpage.c
12453+++ b/arch/tile/mm/hugetlbpage.c
12454@@ -207,6 +207,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12455 info.high_limit = TASK_SIZE;
12456 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12457 info.align_offset = 0;
12458+ info.threadstack_offset = 0;
12459 return vm_unmapped_area(&info);
12460 }
12461
12462@@ -224,6 +225,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12463 info.high_limit = current->mm->mmap_base;
12464 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12465 info.align_offset = 0;
12466+ info.threadstack_offset = 0;
12467 addr = vm_unmapped_area(&info);
12468
12469 /*
12470diff --git a/arch/um/Makefile b/arch/um/Makefile
12471index e4b1a96..16162f8 100644
12472--- a/arch/um/Makefile
12473+++ b/arch/um/Makefile
12474@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12475 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12476 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12477
12478+ifdef CONSTIFY_PLUGIN
12479+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12480+endif
12481+
12482 #This will adjust *FLAGS accordingly to the platform.
12483 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
12484
12485diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12486index 19e1bdd..3665b77 100644
12487--- a/arch/um/include/asm/cache.h
12488+++ b/arch/um/include/asm/cache.h
12489@@ -1,6 +1,7 @@
12490 #ifndef __UM_CACHE_H
12491 #define __UM_CACHE_H
12492
12493+#include <linux/const.h>
12494
12495 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12496 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12497@@ -12,6 +13,6 @@
12498 # define L1_CACHE_SHIFT 5
12499 #endif
12500
12501-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12502+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12503
12504 #endif
12505diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12506index 2e0a6b1..a64d0f5 100644
12507--- a/arch/um/include/asm/kmap_types.h
12508+++ b/arch/um/include/asm/kmap_types.h
12509@@ -8,6 +8,6 @@
12510
12511 /* No more #include "asm/arch/kmap_types.h" ! */
12512
12513-#define KM_TYPE_NR 14
12514+#define KM_TYPE_NR 15
12515
12516 #endif
12517diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12518index 71c5d13..4c7b9f1 100644
12519--- a/arch/um/include/asm/page.h
12520+++ b/arch/um/include/asm/page.h
12521@@ -14,6 +14,9 @@
12522 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12523 #define PAGE_MASK (~(PAGE_SIZE-1))
12524
12525+#define ktla_ktva(addr) (addr)
12526+#define ktva_ktla(addr) (addr)
12527+
12528 #ifndef __ASSEMBLY__
12529
12530 struct page;
12531diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12532index 0032f92..cd151e0 100644
12533--- a/arch/um/include/asm/pgtable-3level.h
12534+++ b/arch/um/include/asm/pgtable-3level.h
12535@@ -58,6 +58,7 @@
12536 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12537 #define pud_populate(mm, pud, pmd) \
12538 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12539+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12540
12541 #ifdef CONFIG_64BIT
12542 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12543diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12544index f17bca8..48adb87 100644
12545--- a/arch/um/kernel/process.c
12546+++ b/arch/um/kernel/process.c
12547@@ -356,22 +356,6 @@ int singlestepping(void * t)
12548 return 2;
12549 }
12550
12551-/*
12552- * Only x86 and x86_64 have an arch_align_stack().
12553- * All other arches have "#define arch_align_stack(x) (x)"
12554- * in their asm/exec.h
12555- * As this is included in UML from asm-um/system-generic.h,
12556- * we can use it to behave as the subarch does.
12557- */
12558-#ifndef arch_align_stack
12559-unsigned long arch_align_stack(unsigned long sp)
12560-{
12561- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12562- sp -= get_random_int() % 8192;
12563- return sp & ~0xf;
12564-}
12565-#endif
12566-
12567 unsigned long get_wchan(struct task_struct *p)
12568 {
12569 unsigned long stack_page, sp, ip;
12570diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12571index ad8f795..2c7eec6 100644
12572--- a/arch/unicore32/include/asm/cache.h
12573+++ b/arch/unicore32/include/asm/cache.h
12574@@ -12,8 +12,10 @@
12575 #ifndef __UNICORE_CACHE_H__
12576 #define __UNICORE_CACHE_H__
12577
12578-#define L1_CACHE_SHIFT (5)
12579-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12580+#include <linux/const.h>
12581+
12582+#define L1_CACHE_SHIFT 5
12583+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12584
12585 /*
12586 * Memory returned by kmalloc() may be used for DMA, so we must make
12587diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12588index 0dc9d01..98df103 100644
12589--- a/arch/x86/Kconfig
12590+++ b/arch/x86/Kconfig
12591@@ -130,7 +130,7 @@ config X86
12592 select RTC_LIB
12593 select HAVE_DEBUG_STACKOVERFLOW
12594 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
12595- select HAVE_CC_STACKPROTECTOR
12596+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
12597 select GENERIC_CPU_AUTOPROBE
12598 select HAVE_ARCH_AUDITSYSCALL
12599 select ARCH_SUPPORTS_ATOMIC_RMW
12600@@ -263,7 +263,7 @@ config X86_HT
12601
12602 config X86_32_LAZY_GS
12603 def_bool y
12604- depends on X86_32 && !CC_STACKPROTECTOR
12605+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12606
12607 config ARCH_HWEIGHT_CFLAGS
12608 string
12609@@ -601,6 +601,7 @@ config SCHED_OMIT_FRAME_POINTER
12610
12611 menuconfig HYPERVISOR_GUEST
12612 bool "Linux guest support"
12613+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12614 ---help---
12615 Say Y here to enable options for running Linux under various hyper-
12616 visors. This option enables basic hypervisor detection and platform
12617@@ -978,6 +979,7 @@ config VM86
12618
12619 config X86_16BIT
12620 bool "Enable support for 16-bit segments" if EXPERT
12621+ depends on !GRKERNSEC
12622 default y
12623 ---help---
12624 This option is required by programs like Wine to run 16-bit
12625@@ -1151,6 +1153,7 @@ choice
12626
12627 config NOHIGHMEM
12628 bool "off"
12629+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12630 ---help---
12631 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12632 However, the address space of 32-bit x86 processors is only 4
12633@@ -1187,6 +1190,7 @@ config NOHIGHMEM
12634
12635 config HIGHMEM4G
12636 bool "4GB"
12637+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12638 ---help---
12639 Select this if you have a 32-bit processor and between 1 and 4
12640 gigabytes of physical RAM.
12641@@ -1239,7 +1243,7 @@ config PAGE_OFFSET
12642 hex
12643 default 0xB0000000 if VMSPLIT_3G_OPT
12644 default 0x80000000 if VMSPLIT_2G
12645- default 0x78000000 if VMSPLIT_2G_OPT
12646+ default 0x70000000 if VMSPLIT_2G_OPT
12647 default 0x40000000 if VMSPLIT_1G
12648 default 0xC0000000
12649 depends on X86_32
12650@@ -1680,6 +1684,7 @@ source kernel/Kconfig.hz
12651
12652 config KEXEC
12653 bool "kexec system call"
12654+ depends on !GRKERNSEC_KMEM
12655 ---help---
12656 kexec is a system call that implements the ability to shutdown your
12657 current kernel, and to start another kernel. It is like a reboot
12658@@ -1865,7 +1870,9 @@ config X86_NEED_RELOCS
12659
12660 config PHYSICAL_ALIGN
12661 hex "Alignment value to which kernel should be aligned"
12662- default "0x200000"
12663+ default "0x1000000"
12664+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12665+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12666 range 0x2000 0x1000000 if X86_32
12667 range 0x200000 0x1000000 if X86_64
12668 ---help---
12669@@ -1948,6 +1955,7 @@ config COMPAT_VDSO
12670 def_bool n
12671 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
12672 depends on X86_32 || IA32_EMULATION
12673+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12674 ---help---
12675 Certain buggy versions of glibc will crash if they are
12676 presented with a 32-bit vDSO that is not mapped at the address
12677diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12678index 6983314..54ad7e8 100644
12679--- a/arch/x86/Kconfig.cpu
12680+++ b/arch/x86/Kconfig.cpu
12681@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12682
12683 config X86_F00F_BUG
12684 def_bool y
12685- depends on M586MMX || M586TSC || M586 || M486
12686+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12687
12688 config X86_INVD_BUG
12689 def_bool y
12690@@ -327,7 +327,7 @@ config X86_INVD_BUG
12691
12692 config X86_ALIGNMENT_16
12693 def_bool y
12694- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12695+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12696
12697 config X86_INTEL_USERCOPY
12698 def_bool y
12699@@ -369,7 +369,7 @@ config X86_CMPXCHG64
12700 # generates cmov.
12701 config X86_CMOV
12702 def_bool y
12703- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12704+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12705
12706 config X86_MINIMUM_CPU_FAMILY
12707 int
12708diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12709index 61bd2ad..50b625d 100644
12710--- a/arch/x86/Kconfig.debug
12711+++ b/arch/x86/Kconfig.debug
12712@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
12713 config DEBUG_RODATA
12714 bool "Write protect kernel read-only data structures"
12715 default y
12716- depends on DEBUG_KERNEL
12717+ depends on DEBUG_KERNEL && BROKEN
12718 ---help---
12719 Mark the kernel read-only data as write-protected in the pagetables,
12720 in order to catch accidental (and incorrect) writes to such const
12721@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
12722
12723 config DEBUG_SET_MODULE_RONX
12724 bool "Set loadable kernel module data as NX and text as RO"
12725- depends on MODULES
12726+ depends on MODULES && BROKEN
12727 ---help---
12728 This option helps catch unintended modifications to loadable
12729 kernel module's text and read-only data. It also prevents execution
12730diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12731index 920e616..ac3d4df 100644
12732--- a/arch/x86/Makefile
12733+++ b/arch/x86/Makefile
12734@@ -65,9 +65,6 @@ ifeq ($(CONFIG_X86_32),y)
12735 # CPU-specific tuning. Anything which can be shared with UML should go here.
12736 include $(srctree)/arch/x86/Makefile_32.cpu
12737 KBUILD_CFLAGS += $(cflags-y)
12738-
12739- # temporary until string.h is fixed
12740- KBUILD_CFLAGS += -ffreestanding
12741 else
12742 BITS := 64
12743 UTS_MACHINE := x86_64
12744@@ -107,6 +104,9 @@ else
12745 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
12746 endif
12747
12748+# temporary until string.h is fixed
12749+KBUILD_CFLAGS += -ffreestanding
12750+
12751 # Make sure compiler does not have buggy stack-protector support.
12752 ifdef CONFIG_CC_STACKPROTECTOR
12753 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12754@@ -180,6 +180,7 @@ archheaders:
12755 $(Q)$(MAKE) $(build)=arch/x86/syscalls all
12756
12757 archprepare:
12758+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12759 ifeq ($(CONFIG_KEXEC_FILE),y)
12760 $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
12761 endif
12762@@ -263,3 +264,9 @@ define archhelp
12763 echo ' FDARGS="..." arguments for the booted kernel'
12764 echo ' FDINITRD=file initrd for the booted kernel'
12765 endef
12766+
12767+define OLD_LD
12768+
12769+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12770+*** Please upgrade your binutils to 2.18 or newer
12771+endef
12772diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12773index 3db07f3..9d81d0f 100644
12774--- a/arch/x86/boot/Makefile
12775+++ b/arch/x86/boot/Makefile
12776@@ -56,6 +56,9 @@ clean-files += cpustr.h
12777 # ---------------------------------------------------------------------------
12778
12779 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
12780+ifdef CONSTIFY_PLUGIN
12781+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12782+endif
12783 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12784 GCOV_PROFILE := n
12785
12786diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12787index 878e4b9..20537ab 100644
12788--- a/arch/x86/boot/bitops.h
12789+++ b/arch/x86/boot/bitops.h
12790@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12791 u8 v;
12792 const u32 *p = (const u32 *)addr;
12793
12794- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12795+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12796 return v;
12797 }
12798
12799@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12800
12801 static inline void set_bit(int nr, void *addr)
12802 {
12803- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12804+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12805 }
12806
12807 #endif /* BOOT_BITOPS_H */
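The boot-code changes from asm(...) to asm volatile(...) matter because an asm with outputs and no volatile is assumed side-effect-free: the compiler may CSE two identical rdmsr blocks or delete one whose result looks unused, which is wrong for instructions that read or write hardware state. A runnable illustration of the same discipline, substituting unprivileged rdtsc for rdmsr so the sketch works outside ring 0 (x86 only):

#include <stdint.h>
#include <stdio.h>

/* Without volatile the compiler may fold the two reads into one and
 * print a zero delta; with it, each call emits its own rdtsc. */
static inline uint64_t rdtsc(void)
{
	uint32_t lo, hi;
	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint64_t a = rdtsc(), b = rdtsc();
	printf("delta = %llu cycles\n", (unsigned long long)(b - a));
	return 0;
}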
12808diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12809index bd49ec6..94c7f58 100644
12810--- a/arch/x86/boot/boot.h
12811+++ b/arch/x86/boot/boot.h
12812@@ -84,7 +84,7 @@ static inline void io_delay(void)
12813 static inline u16 ds(void)
12814 {
12815 u16 seg;
12816- asm("movw %%ds,%0" : "=rm" (seg));
12817+ asm volatile("movw %%ds,%0" : "=rm" (seg));
12818 return seg;
12819 }
12820
12821diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
12822index 8bd44e8..6b111e9 100644
12823--- a/arch/x86/boot/compressed/Makefile
12824+++ b/arch/x86/boot/compressed/Makefile
12825@@ -28,6 +28,9 @@ KBUILD_CFLAGS += $(cflags-y)
12826 KBUILD_CFLAGS += -mno-mmx -mno-sse
12827 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
12828 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
12829+ifdef CONSTIFY_PLUGIN
12830+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12831+endif
12832
12833 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12834 GCOV_PROFILE := n
12835diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
12836index a53440e..c3dbf1e 100644
12837--- a/arch/x86/boot/compressed/efi_stub_32.S
12838+++ b/arch/x86/boot/compressed/efi_stub_32.S
12839@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
12840 * parameter 2, ..., param n. To make things easy, we save the return
12841 * address of efi_call_phys in a global variable.
12842 */
12843- popl %ecx
12844- movl %ecx, saved_return_addr(%edx)
12845- /* get the function pointer into ECX*/
12846- popl %ecx
12847- movl %ecx, efi_rt_function_ptr(%edx)
12848+ popl saved_return_addr(%edx)
12849+ popl efi_rt_function_ptr(%edx)
12850
12851 /*
12852 * 3. Call the physical function.
12853 */
12854- call *%ecx
12855+ call *efi_rt_function_ptr(%edx)
12856
12857 /*
12858 * 4. Balance the stack. And because EAX contain the return value,
12859@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
12860 1: popl %edx
12861 subl $1b, %edx
12862
12863- movl efi_rt_function_ptr(%edx), %ecx
12864- pushl %ecx
12865+ pushl efi_rt_function_ptr(%edx)
12866
12867 /*
12868 * 10. Push the saved return address onto the stack and return.
12869 */
12870- movl saved_return_addr(%edx), %ecx
12871- pushl %ecx
12872- ret
12873+ jmpl *saved_return_addr(%edx)
12874 ENDPROC(efi_call_phys)
12875 .previous
12876
12877diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
12878index 630384a..278e788 100644
12879--- a/arch/x86/boot/compressed/efi_thunk_64.S
12880+++ b/arch/x86/boot/compressed/efi_thunk_64.S
12881@@ -189,8 +189,8 @@ efi_gdt64:
12882 .long 0 /* Filled out by user */
12883 .word 0
12884 .quad 0x0000000000000000 /* NULL descriptor */
12885- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12886- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12887+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12888+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12889 .quad 0x0080890000000000 /* TS descriptor */
12890 .quad 0x0000000000000000 /* TS continued */
12891 efi_gdt64_end:
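The GDT edits flip the descriptor access bytes 0x9a/0x92 to 0x9b/0x93, i.e. they pre-set the "accessed" bit. The usual KERNEXEC rationale: with the GDT in read-only memory, a descriptor whose accessed bit is clear would force the CPU to write it back on first segment load, faulting on the RO page; pre-setting the bit avoids the write. Decoding the access byte makes the one-bit difference visible (a sketch):

#include <stdint.h>
#include <stdio.h>

/* Pull the access byte (bits 47:40) out of a GDT entry and show that
 * 0x..9b.. differs from 0x..9a.. only in the A (accessed) bit. */
static void decode(uint64_t desc)
{
	uint8_t access = desc >> 40;
	printf("access=%02x P=%u DPL=%u S=%u type=%x (A=%u)\n",
	       access, access >> 7, (access >> 5) & 3,
	       (access >> 4) & 1, access & 0xF, access & 1);
}

int main(void)
{
	decode(0x00af9a000000ffffULL);	/* __KERNEL_CS, A clear */
	decode(0x00af9b000000ffffULL);	/* __KERNEL_CS, A set   */
	return 0;
}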
12892diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
12893index 1d7fbbc..36ecd58 100644
12894--- a/arch/x86/boot/compressed/head_32.S
12895+++ b/arch/x86/boot/compressed/head_32.S
12896@@ -140,10 +140,10 @@ preferred_addr:
12897 addl %eax, %ebx
12898 notl %eax
12899 andl %eax, %ebx
12900- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12901+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12902 jge 1f
12903 #endif
12904- movl $LOAD_PHYSICAL_ADDR, %ebx
12905+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12906 1:
12907
12908 /* Target address to relocate to for decompression */
12909diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
12910index 6b1766c..ad465c9 100644
12911--- a/arch/x86/boot/compressed/head_64.S
12912+++ b/arch/x86/boot/compressed/head_64.S
12913@@ -94,10 +94,10 @@ ENTRY(startup_32)
12914 addl %eax, %ebx
12915 notl %eax
12916 andl %eax, %ebx
12917- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12918+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12919 jge 1f
12920 #endif
12921- movl $LOAD_PHYSICAL_ADDR, %ebx
12922+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12923 1:
12924
12925 /* Target address to relocate to for decompression */
12926@@ -322,10 +322,10 @@ preferred_addr:
12927 addq %rax, %rbp
12928 notq %rax
12929 andq %rax, %rbp
12930- cmpq $LOAD_PHYSICAL_ADDR, %rbp
12931+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
12932 jge 1f
12933 #endif
12934- movq $LOAD_PHYSICAL_ADDR, %rbp
12935+ movq $____LOAD_PHYSICAL_ADDR, %rbp
12936 1:
12937
12938 /* Target address to relocate to for decompression */
12939@@ -434,8 +434,8 @@ gdt:
12940 .long gdt
12941 .word 0
12942 .quad 0x0000000000000000 /* NULL descriptor */
12943- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12944- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12945+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12946+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12947 .quad 0x0080890000000000 /* TS descriptor */
12948 .quad 0x0000000000000000 /* TS continued */
12949 gdt_end:
12950diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
12951index a950864..c710239 100644
12952--- a/arch/x86/boot/compressed/misc.c
12953+++ b/arch/x86/boot/compressed/misc.c
12954@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
12955 * Calculate the delta between where vmlinux was linked to load
12956 * and where it was actually loaded.
12957 */
12958- delta = min_addr - LOAD_PHYSICAL_ADDR;
12959+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
12960 if (!delta) {
12961 debug_putstr("No relocation needed... ");
12962 return;
12963@@ -324,7 +324,7 @@ static void parse_elf(void *output)
12964 Elf32_Ehdr ehdr;
12965 Elf32_Phdr *phdrs, *phdr;
12966 #endif
12967- void *dest;
12968+ void *dest, *prev;
12969 int i;
12970
12971 memcpy(&ehdr, output, sizeof(ehdr));
12972@@ -351,13 +351,16 @@ static void parse_elf(void *output)
12973 case PT_LOAD:
12974 #ifdef CONFIG_RELOCATABLE
12975 dest = output;
12976- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
12977+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
12978 #else
12979 dest = (void *)(phdr->p_paddr);
12980 #endif
12981 memcpy(dest,
12982 output + phdr->p_offset,
12983 phdr->p_filesz);
12984+ if (i)
12985+ memset(prev, 0xff, dest - prev);
12986+ prev = dest + phdr->p_filesz;
12987 break;
12988 default: /* Ignore other PT_* */ break;
12989 }
12990@@ -416,7 +419,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
12991 error("Destination address too large");
12992 #endif
12993 #ifndef CONFIG_RELOCATABLE
12994- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
12995+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
12996 error("Wrong destination address");
12997 #endif
12998
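The parse_elf() change above tracks where each PT_LOAD segment ends (prev) and fills the slack up to the next segment's start with 0xff bytes, so whatever the decompressor left in those gaps does not survive into the mapped kernel image. The bookkeeping pattern in isolation, with a toy segment table standing in for the ELF program headers (all names here are illustrative):

#include <stdio.h>
#include <string.h>

struct seg { size_t dst_off, len; };

int main(void)
{
	unsigned char out[64] = {0};
	struct seg seg[] = { { 0, 8 }, { 24, 8 } };
	unsigned char *prev = out;

	for (int i = 0; i < 2; i++) {
		unsigned char *dest = out + seg[i].dst_off;
		memset(dest, 0xAA, seg[i].len);	/* stands in for memcpy */
		if (i)				/* poison the gap before us */
			memset(prev, 0xff, (size_t)(dest - prev));
		prev = dest + seg[i].len;
	}
	printf("gap byte: %02x\n", out[10]);	/* prints ff */
	return 0;
}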
12999diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
13000index 1fd7d57..0f7d096 100644
13001--- a/arch/x86/boot/cpucheck.c
13002+++ b/arch/x86/boot/cpucheck.c
13003@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13004 u32 ecx = MSR_K7_HWCR;
13005 u32 eax, edx;
13006
13007- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13008+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13009 eax &= ~(1 << 15);
13010- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13011+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13012
13013 get_cpuflags(); /* Make sure it really did something */
13014 err = check_cpuflags();
13015@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13016 u32 ecx = MSR_VIA_FCR;
13017 u32 eax, edx;
13018
13019- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13020+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13021 eax |= (1<<1)|(1<<7);
13022- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13023+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13024
13025 set_bit(X86_FEATURE_CX8, cpu.flags);
13026 err = check_cpuflags();
13027@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13028 u32 eax, edx;
13029 u32 level = 1;
13030
13031- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13032- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13033- asm("cpuid"
13034+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13035+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13036+ asm volatile("cpuid"
13037 : "+a" (level), "=d" (cpu.flags[0])
13038 : : "ecx", "ebx");
13039- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13040+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13041
13042 err = check_cpuflags();
13043 } else if (err == 0x01 &&
13044diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
13045index 16ef025..91e033b 100644
13046--- a/arch/x86/boot/header.S
13047+++ b/arch/x86/boot/header.S
13048@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
13049 # single linked list of
13050 # struct setup_data
13051
13052-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
13053+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
13054
13055 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
13056+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13057+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
13058+#else
13059 #define VO_INIT_SIZE (VO__end - VO__text)
13060+#endif
13061 #if ZO_INIT_SIZE > VO_INIT_SIZE
13062 #define INIT_SIZE ZO_INIT_SIZE
13063 #else
13064diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
13065index db75d07..8e6d0af 100644
13066--- a/arch/x86/boot/memory.c
13067+++ b/arch/x86/boot/memory.c
13068@@ -19,7 +19,7 @@
13069
13070 static int detect_memory_e820(void)
13071 {
13072- int count = 0;
13073+ unsigned int count = 0;
13074 struct biosregs ireg, oreg;
13075 struct e820entry *desc = boot_params.e820_map;
13076 static struct e820entry buf; /* static so it is zeroed */
13077diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
13078index ba3e100..6501b8f 100644
13079--- a/arch/x86/boot/video-vesa.c
13080+++ b/arch/x86/boot/video-vesa.c
13081@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
13082
13083 boot_params.screen_info.vesapm_seg = oreg.es;
13084 boot_params.screen_info.vesapm_off = oreg.di;
13085+ boot_params.screen_info.vesapm_size = oreg.cx;
13086 }
13087
13088 /*
13089diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13090index 43eda28..5ab5fdb 100644
13091--- a/arch/x86/boot/video.c
13092+++ b/arch/x86/boot/video.c
13093@@ -96,7 +96,7 @@ static void store_mode_params(void)
13094 static unsigned int get_entry(void)
13095 {
13096 char entry_buf[4];
13097- int i, len = 0;
13098+ unsigned int i, len = 0;
13099 int key;
13100 unsigned int v;
13101
13102diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13103index 9105655..41779c1 100644
13104--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13105+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13106@@ -8,6 +8,8 @@
13107 * including this sentence is retained in full.
13108 */
13109
13110+#include <asm/alternative-asm.h>
13111+
13112 .extern crypto_ft_tab
13113 .extern crypto_it_tab
13114 .extern crypto_fl_tab
13115@@ -70,6 +72,8 @@
13116 je B192; \
13117 leaq 32(r9),r9;
13118
13119+#define ret pax_force_retaddr; ret
13120+
13121 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13122 movq r1,r2; \
13123 movq r3,r4; \
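
Note the trick used in aes-x86_64-asm_64.S above: rather than editing every epilogue, the patch redefines the ret mnemonic itself, so each textual ret in the rest of the file expands to pax_force_retaddr; ret (.S files pass through cpp before the assembler sees them). The same preprocessor mechanism can be sketched in plain C, with a hypothetical trace call standing in for pax_force_retaddr; this demonstrates the technique, it is not a style to copy:

    #include <stdio.h>

    #define trace_point() printf("leaving %s\n", __func__)
    /* Every textual 'ret' after this point picks up the extra action,
     * exactly like '#define ret pax_force_retaddr; ret' in the .S file. */
    #define ret trace_point(); return

    static int answer(void)
    {
            ret 42;         /* expands to: trace_point(); return 42; */
    }

    #undef ret              /* keep the redefinition local to this demo */

    int main(void)
    {
            printf("%d\n", answer());
            return 0;
    }
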
13124diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13125index 477e9d7..c92c7d8 100644
13126--- a/arch/x86/crypto/aesni-intel_asm.S
13127+++ b/arch/x86/crypto/aesni-intel_asm.S
13128@@ -31,6 +31,7 @@
13129
13130 #include <linux/linkage.h>
13131 #include <asm/inst.h>
13132+#include <asm/alternative-asm.h>
13133
13134 #ifdef __x86_64__
13135 .data
13136@@ -205,7 +206,7 @@ enc: .octa 0x2
13137 * num_initial_blocks = b mod 4
13138 * encrypt the initial num_initial_blocks blocks and apply ghash on
13139 * the ciphertext
13140-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13141+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13142 * are clobbered
13143 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13144 */
13145@@ -214,8 +215,8 @@ enc: .octa 0x2
13146 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13147 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13148 mov arg7, %r10 # %r10 = AAD
13149- mov arg8, %r12 # %r12 = aadLen
13150- mov %r12, %r11
13151+ mov arg8, %r15 # %r15 = aadLen
13152+ mov %r15, %r11
13153 pxor %xmm\i, %xmm\i
13154 _get_AAD_loop\num_initial_blocks\operation:
13155 movd (%r10), \TMP1
13156@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13157 psrldq $4, %xmm\i
13158 pxor \TMP1, %xmm\i
13159 add $4, %r10
13160- sub $4, %r12
13161+ sub $4, %r15
13162 jne _get_AAD_loop\num_initial_blocks\operation
13163 cmp $16, %r11
13164 je _get_AAD_loop2_done\num_initial_blocks\operation
13165- mov $16, %r12
13166+ mov $16, %r15
13167 _get_AAD_loop2\num_initial_blocks\operation:
13168 psrldq $4, %xmm\i
13169- sub $4, %r12
13170- cmp %r11, %r12
13171+ sub $4, %r15
13172+ cmp %r11, %r15
13173 jne _get_AAD_loop2\num_initial_blocks\operation
13174 _get_AAD_loop2_done\num_initial_blocks\operation:
13175 movdqa SHUF_MASK(%rip), %xmm14
13176@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13177 * num_initial_blocks = b mod 4
13178 * encrypt the initial num_initial_blocks blocks and apply ghash on
13179 * the ciphertext
13180-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13181+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13182 * are clobbered
13183 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13184 */
13185@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13186 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13187 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13188 mov arg7, %r10 # %r10 = AAD
13189- mov arg8, %r12 # %r12 = aadLen
13190- mov %r12, %r11
13191+ mov arg8, %r15 # %r15 = aadLen
13192+ mov %r15, %r11
13193 pxor %xmm\i, %xmm\i
13194 _get_AAD_loop\num_initial_blocks\operation:
13195 movd (%r10), \TMP1
13196@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13197 psrldq $4, %xmm\i
13198 pxor \TMP1, %xmm\i
13199 add $4, %r10
13200- sub $4, %r12
13201+ sub $4, %r15
13202 jne _get_AAD_loop\num_initial_blocks\operation
13203 cmp $16, %r11
13204 je _get_AAD_loop2_done\num_initial_blocks\operation
13205- mov $16, %r12
13206+ mov $16, %r15
13207 _get_AAD_loop2\num_initial_blocks\operation:
13208 psrldq $4, %xmm\i
13209- sub $4, %r12
13210- cmp %r11, %r12
13211+ sub $4, %r15
13212+ cmp %r11, %r15
13213 jne _get_AAD_loop2\num_initial_blocks\operation
13214 _get_AAD_loop2_done\num_initial_blocks\operation:
13215 movdqa SHUF_MASK(%rip), %xmm14
13216@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
13217 *
13218 *****************************************************************************/
13219 ENTRY(aesni_gcm_dec)
13220- push %r12
13221+ push %r15
13222 push %r13
13223 push %r14
13224 mov %rsp, %r14
13225@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
13226 */
13227 sub $VARIABLE_OFFSET, %rsp
13228 and $~63, %rsp # align rsp to 64 bytes
13229- mov %arg6, %r12
13230- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13231+ mov %arg6, %r15
13232+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13233 movdqa SHUF_MASK(%rip), %xmm2
13234 PSHUFB_XMM %xmm2, %xmm13
13235
13236@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
13237 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13238 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13239 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13240- mov %r13, %r12
13241- and $(3<<4), %r12
13242+ mov %r13, %r15
13243+ and $(3<<4), %r15
13244 jz _initial_num_blocks_is_0_decrypt
13245- cmp $(2<<4), %r12
13246+ cmp $(2<<4), %r15
13247 jb _initial_num_blocks_is_1_decrypt
13248 je _initial_num_blocks_is_2_decrypt
13249 _initial_num_blocks_is_3_decrypt:
13250@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
13251 sub $16, %r11
13252 add %r13, %r11
13253 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13254- lea SHIFT_MASK+16(%rip), %r12
13255- sub %r13, %r12
13256+ lea SHIFT_MASK+16(%rip), %r15
13257+ sub %r13, %r15
13258 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13259 # (%r13 is the number of bytes in plaintext mod 16)
13260- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13261+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13262 	PSHUFB_XMM %xmm2, %xmm1		# right shift 16-%r13 bytes
13263
13264 movdqa %xmm1, %xmm2
13265 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13266- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13267+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13268 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13269 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13270 pand %xmm1, %xmm2
13271@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
13272 sub $1, %r13
13273 jne _less_than_8_bytes_left_decrypt
13274 _multiple_of_16_bytes_decrypt:
13275- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13276- shl $3, %r12 # convert into number of bits
13277- movd %r12d, %xmm15 # len(A) in %xmm15
13278+	mov	arg8, %r15		# %r15 = aadLen (number of bytes)
13279+ shl $3, %r15 # convert into number of bits
13280+ movd %r15d, %xmm15 # len(A) in %xmm15
13281 shl $3, %arg4 # len(C) in bits (*128)
13282 MOVQ_R64_XMM %arg4, %xmm1
13283 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13284@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
13285 mov %r14, %rsp
13286 pop %r14
13287 pop %r13
13288- pop %r12
13289+ pop %r15
13290+ pax_force_retaddr
13291 ret
13292 ENDPROC(aesni_gcm_dec)
13293
13294@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
13295 * poly = x^128 + x^127 + x^126 + x^121 + 1
13296 ***************************************************************************/
13297 ENTRY(aesni_gcm_enc)
13298- push %r12
13299+ push %r15
13300 push %r13
13301 push %r14
13302 mov %rsp, %r14
13303@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
13304 #
13305 sub $VARIABLE_OFFSET, %rsp
13306 and $~63, %rsp
13307- mov %arg6, %r12
13308- movdqu (%r12), %xmm13
13309+ mov %arg6, %r15
13310+ movdqu (%r15), %xmm13
13311 movdqa SHUF_MASK(%rip), %xmm2
13312 PSHUFB_XMM %xmm2, %xmm13
13313
13314@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
13315 movdqa %xmm13, HashKey(%rsp)
13316 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13317 and $-16, %r13
13318- mov %r13, %r12
13319+ mov %r13, %r15
13320
13321 # Encrypt first few blocks
13322
13323- and $(3<<4), %r12
13324+ and $(3<<4), %r15
13325 jz _initial_num_blocks_is_0_encrypt
13326- cmp $(2<<4), %r12
13327+ cmp $(2<<4), %r15
13328 jb _initial_num_blocks_is_1_encrypt
13329 je _initial_num_blocks_is_2_encrypt
13330 _initial_num_blocks_is_3_encrypt:
13331@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
13332 sub $16, %r11
13333 add %r13, %r11
13334 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13335- lea SHIFT_MASK+16(%rip), %r12
13336- sub %r13, %r12
13337+ lea SHIFT_MASK+16(%rip), %r15
13338+ sub %r13, %r15
13339 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13340 # (%r13 is the number of bytes in plaintext mod 16)
13341- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13342+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13343 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
13344 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13345- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13346+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13347 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13348 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13349 movdqa SHUF_MASK(%rip), %xmm10
13350@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
13351 sub $1, %r13
13352 jne _less_than_8_bytes_left_encrypt
13353 _multiple_of_16_bytes_encrypt:
13354- mov arg8, %r12 # %r12 = addLen (number of bytes)
13355- shl $3, %r12
13356- movd %r12d, %xmm15 # len(A) in %xmm15
13357+	mov	arg8, %r15		# %r15 = aadLen (number of bytes)
13358+ shl $3, %r15
13359+ movd %r15d, %xmm15 # len(A) in %xmm15
13360 shl $3, %arg4 # len(C) in bits (*128)
13361 MOVQ_R64_XMM %arg4, %xmm1
13362 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13363@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
13364 mov %r14, %rsp
13365 pop %r14
13366 pop %r13
13367- pop %r12
13368+ pop %r15
13369+ pax_force_retaddr
13370 ret
13371 ENDPROC(aesni_gcm_enc)
13372
13373@@ -1722,6 +1725,7 @@ _key_expansion_256a:
13374 pxor %xmm1, %xmm0
13375 movaps %xmm0, (TKEYP)
13376 add $0x10, TKEYP
13377+ pax_force_retaddr
13378 ret
13379 ENDPROC(_key_expansion_128)
13380 ENDPROC(_key_expansion_256a)
13381@@ -1748,6 +1752,7 @@ _key_expansion_192a:
13382 shufps $0b01001110, %xmm2, %xmm1
13383 movaps %xmm1, 0x10(TKEYP)
13384 add $0x20, TKEYP
13385+ pax_force_retaddr
13386 ret
13387 ENDPROC(_key_expansion_192a)
13388
13389@@ -1768,6 +1773,7 @@ _key_expansion_192b:
13390
13391 movaps %xmm0, (TKEYP)
13392 add $0x10, TKEYP
13393+ pax_force_retaddr
13394 ret
13395 ENDPROC(_key_expansion_192b)
13396
13397@@ -1781,6 +1787,7 @@ _key_expansion_256b:
13398 pxor %xmm1, %xmm2
13399 movaps %xmm2, (TKEYP)
13400 add $0x10, TKEYP
13401+ pax_force_retaddr
13402 ret
13403 ENDPROC(_key_expansion_256b)
13404
13405@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
13406 #ifndef __x86_64__
13407 popl KEYP
13408 #endif
13409+ pax_force_retaddr
13410 ret
13411 ENDPROC(aesni_set_key)
13412
13413@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
13414 popl KLEN
13415 popl KEYP
13416 #endif
13417+ pax_force_retaddr
13418 ret
13419 ENDPROC(aesni_enc)
13420
13421@@ -1974,6 +1983,7 @@ _aesni_enc1:
13422 AESENC KEY STATE
13423 movaps 0x70(TKEYP), KEY
13424 AESENCLAST KEY STATE
13425+ pax_force_retaddr
13426 ret
13427 ENDPROC(_aesni_enc1)
13428
13429@@ -2083,6 +2093,7 @@ _aesni_enc4:
13430 AESENCLAST KEY STATE2
13431 AESENCLAST KEY STATE3
13432 AESENCLAST KEY STATE4
13433+ pax_force_retaddr
13434 ret
13435 ENDPROC(_aesni_enc4)
13436
13437@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
13438 popl KLEN
13439 popl KEYP
13440 #endif
13441+ pax_force_retaddr
13442 ret
13443 ENDPROC(aesni_dec)
13444
13445@@ -2164,6 +2176,7 @@ _aesni_dec1:
13446 AESDEC KEY STATE
13447 movaps 0x70(TKEYP), KEY
13448 AESDECLAST KEY STATE
13449+ pax_force_retaddr
13450 ret
13451 ENDPROC(_aesni_dec1)
13452
13453@@ -2273,6 +2286,7 @@ _aesni_dec4:
13454 AESDECLAST KEY STATE2
13455 AESDECLAST KEY STATE3
13456 AESDECLAST KEY STATE4
13457+ pax_force_retaddr
13458 ret
13459 ENDPROC(_aesni_dec4)
13460
13461@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
13462 popl KEYP
13463 popl LEN
13464 #endif
13465+ pax_force_retaddr
13466 ret
13467 ENDPROC(aesni_ecb_enc)
13468
13469@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
13470 popl KEYP
13471 popl LEN
13472 #endif
13473+ pax_force_retaddr
13474 ret
13475 ENDPROC(aesni_ecb_dec)
13476
13477@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
13478 popl LEN
13479 popl IVP
13480 #endif
13481+ pax_force_retaddr
13482 ret
13483 ENDPROC(aesni_cbc_enc)
13484
13485@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
13486 popl LEN
13487 popl IVP
13488 #endif
13489+ pax_force_retaddr
13490 ret
13491 ENDPROC(aesni_cbc_dec)
13492
13493@@ -2550,6 +2568,7 @@ _aesni_inc_init:
13494 mov $1, TCTR_LOW
13495 MOVQ_R64_XMM TCTR_LOW INC
13496 MOVQ_R64_XMM CTR TCTR_LOW
13497+ pax_force_retaddr
13498 ret
13499 ENDPROC(_aesni_inc_init)
13500
13501@@ -2579,6 +2598,7 @@ _aesni_inc:
13502 .Linc_low:
13503 movaps CTR, IV
13504 PSHUFB_XMM BSWAP_MASK IV
13505+ pax_force_retaddr
13506 ret
13507 ENDPROC(_aesni_inc)
13508
13509@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
13510 .Lctr_enc_ret:
13511 movups IV, (IVP)
13512 .Lctr_enc_just_ret:
13513+ pax_force_retaddr
13514 ret
13515 ENDPROC(aesni_ctr_enc)
13516
13517@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
13518 pxor INC, STATE4
13519 movdqu STATE4, 0x70(OUTP)
13520
13521+ pax_force_retaddr
13522 ret
13523 ENDPROC(aesni_xts_crypt8)
13524
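Two patterns repeat through the aesni hunks above and the rest of this crypto directory. First, pax_force_retaddr before every ret: under KERNEXEC on amd64 this appears to set bit 63 of the saved return address, so that even a corrupted return can only land in the kernel half of the address space (the actual expansion lives in the patched asm/alternative-asm.h). Second, the wholesale %r12 to %r15 renames (elsewhere %r12 to %r14): the KERNEXEC plugin seemingly reserves %r12 for the function-pointer mask that pax_set_fptr_mask loads in the entry code later in this patch, so hand-written assembly has to stop clobbering it. A stand-alone sketch of the bit-63 idea, under those stated assumptions:

    #include <stdint.h>

    /* Assumption: pax_force_retaddr ORs bit 63 into the on-stack return
     * address; this function only models the arithmetic, the real macro
     * operates on (%rsp) in the epilogue. A user-space target (bit 63
     * clear) becomes non-canonical after the OR and faults instead of
     * running attacker-chosen code. */
    static inline uint64_t force_kernel_half(uint64_t retaddr)
    {
            return retaddr | (1ULL << 63);
    }
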
13525diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13526index 246c670..466e2d6 100644
13527--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
13528+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13529@@ -21,6 +21,7 @@
13530 */
13531
13532 #include <linux/linkage.h>
13533+#include <asm/alternative-asm.h>
13534
13535 .file "blowfish-x86_64-asm.S"
13536 .text
13537@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
13538 jnz .L__enc_xor;
13539
13540 write_block();
13541+ pax_force_retaddr
13542 ret;
13543 .L__enc_xor:
13544 xor_block();
13545+ pax_force_retaddr
13546 ret;
13547 ENDPROC(__blowfish_enc_blk)
13548
13549@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
13550
13551 movq %r11, %rbp;
13552
13553+ pax_force_retaddr
13554 ret;
13555 ENDPROC(blowfish_dec_blk)
13556
13557@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
13558
13559 popq %rbx;
13560 popq %rbp;
13561+ pax_force_retaddr
13562 ret;
13563
13564 .L__enc_xor4:
13565@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
13566
13567 popq %rbx;
13568 popq %rbp;
13569+ pax_force_retaddr
13570 ret;
13571 ENDPROC(__blowfish_enc_blk_4way)
13572
13573@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
13574 popq %rbx;
13575 popq %rbp;
13576
13577+ pax_force_retaddr
13578 ret;
13579 ENDPROC(blowfish_dec_blk_4way)
13580diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13581index ce71f92..1dce7ec 100644
13582--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13583+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13584@@ -16,6 +16,7 @@
13585 */
13586
13587 #include <linux/linkage.h>
13588+#include <asm/alternative-asm.h>
13589
13590 #define CAMELLIA_TABLE_BYTE_LEN 272
13591
13592@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13593 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
13594 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
13595 %rcx, (%r9));
13596+ pax_force_retaddr
13597 ret;
13598 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13599
13600@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13601 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
13602 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
13603 %rax, (%r9));
13604+ pax_force_retaddr
13605 ret;
13606 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13607
13608@@ -780,6 +783,7 @@ __camellia_enc_blk16:
13609 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13610 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
13611
13612+ pax_force_retaddr
13613 ret;
13614
13615 .align 8
13616@@ -865,6 +869,7 @@ __camellia_dec_blk16:
13617 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13618 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
13619
13620+ pax_force_retaddr
13621 ret;
13622
13623 .align 8
13624@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
13625 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13626 %xmm8, %rsi);
13627
13628+ pax_force_retaddr
13629 ret;
13630 ENDPROC(camellia_ecb_enc_16way)
13631
13632@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
13633 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13634 %xmm8, %rsi);
13635
13636+ pax_force_retaddr
13637 ret;
13638 ENDPROC(camellia_ecb_dec_16way)
13639
13640@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
13641 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13642 %xmm8, %rsi);
13643
13644+ pax_force_retaddr
13645 ret;
13646 ENDPROC(camellia_cbc_dec_16way)
13647
13648@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
13649 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13650 %xmm8, %rsi);
13651
13652+ pax_force_retaddr
13653 ret;
13654 ENDPROC(camellia_ctr_16way)
13655
13656@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
13657 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13658 %xmm8, %rsi);
13659
13660+ pax_force_retaddr
13661 ret;
13662 ENDPROC(camellia_xts_crypt_16way)
13663
13664diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13665index 0e0b886..5a3123c 100644
13666--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13667+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13668@@ -11,6 +11,7 @@
13669 */
13670
13671 #include <linux/linkage.h>
13672+#include <asm/alternative-asm.h>
13673
13674 #define CAMELLIA_TABLE_BYTE_LEN 272
13675
13676@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13677 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
13678 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
13679 %rcx, (%r9));
13680+ pax_force_retaddr
13681 ret;
13682 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13683
13684@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13685 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
13686 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
13687 %rax, (%r9));
13688+ pax_force_retaddr
13689 ret;
13690 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13691
13692@@ -820,6 +823,7 @@ __camellia_enc_blk32:
13693 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13694 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
13695
13696+ pax_force_retaddr
13697 ret;
13698
13699 .align 8
13700@@ -905,6 +909,7 @@ __camellia_dec_blk32:
13701 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13702 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
13703
13704+ pax_force_retaddr
13705 ret;
13706
13707 .align 8
13708@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
13709
13710 vzeroupper;
13711
13712+ pax_force_retaddr
13713 ret;
13714 ENDPROC(camellia_ecb_enc_32way)
13715
13716@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
13717
13718 vzeroupper;
13719
13720+ pax_force_retaddr
13721 ret;
13722 ENDPROC(camellia_ecb_dec_32way)
13723
13724@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
13725
13726 vzeroupper;
13727
13728+ pax_force_retaddr
13729 ret;
13730 ENDPROC(camellia_cbc_dec_32way)
13731
13732@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
13733
13734 vzeroupper;
13735
13736+ pax_force_retaddr
13737 ret;
13738 ENDPROC(camellia_ctr_32way)
13739
13740@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
13741
13742 vzeroupper;
13743
13744+ pax_force_retaddr
13745 ret;
13746 ENDPROC(camellia_xts_crypt_32way)
13747
13748diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
13749index 310319c..db3d7b5 100644
13750--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
13751+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
13752@@ -21,6 +21,7 @@
13753 */
13754
13755 #include <linux/linkage.h>
13756+#include <asm/alternative-asm.h>
13757
13758 .file "camellia-x86_64-asm_64.S"
13759 .text
13760@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
13761 enc_outunpack(mov, RT1);
13762
13763 movq RRBP, %rbp;
13764+ pax_force_retaddr
13765 ret;
13766
13767 .L__enc_xor:
13768 enc_outunpack(xor, RT1);
13769
13770 movq RRBP, %rbp;
13771+ pax_force_retaddr
13772 ret;
13773 ENDPROC(__camellia_enc_blk)
13774
13775@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
13776 dec_outunpack();
13777
13778 movq RRBP, %rbp;
13779+ pax_force_retaddr
13780 ret;
13781 ENDPROC(camellia_dec_blk)
13782
13783@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
13784
13785 movq RRBP, %rbp;
13786 popq %rbx;
13787+ pax_force_retaddr
13788 ret;
13789
13790 .L__enc2_xor:
13791@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
13792
13793 movq RRBP, %rbp;
13794 popq %rbx;
13795+ pax_force_retaddr
13796 ret;
13797 ENDPROC(__camellia_enc_blk_2way)
13798
13799@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
13800
13801 movq RRBP, %rbp;
13802 movq RXOR, %rbx;
13803+ pax_force_retaddr
13804 ret;
13805 ENDPROC(camellia_dec_blk_2way)
13806diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13807index c35fd5d..2d8c7db 100644
13808--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13809+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13810@@ -24,6 +24,7 @@
13811 */
13812
13813 #include <linux/linkage.h>
13814+#include <asm/alternative-asm.h>
13815
13816 .file "cast5-avx-x86_64-asm_64.S"
13817
13818@@ -281,6 +282,7 @@ __cast5_enc_blk16:
13819 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13820 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13821
13822+ pax_force_retaddr
13823 ret;
13824 ENDPROC(__cast5_enc_blk16)
13825
13826@@ -352,6 +354,7 @@ __cast5_dec_blk16:
13827 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13828 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13829
13830+ pax_force_retaddr
13831 ret;
13832
13833 .L__skip_dec:
13834@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
13835 vmovdqu RR4, (6*4*4)(%r11);
13836 vmovdqu RL4, (7*4*4)(%r11);
13837
13838+ pax_force_retaddr
13839 ret;
13840 ENDPROC(cast5_ecb_enc_16way)
13841
13842@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
13843 vmovdqu RR4, (6*4*4)(%r11);
13844 vmovdqu RL4, (7*4*4)(%r11);
13845
13846+ pax_force_retaddr
13847 ret;
13848 ENDPROC(cast5_ecb_dec_16way)
13849
13850@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
13851 * %rdx: src
13852 */
13853
13854- pushq %r12;
13855+ pushq %r14;
13856
13857 movq %rsi, %r11;
13858- movq %rdx, %r12;
13859+ movq %rdx, %r14;
13860
13861 vmovdqu (0*16)(%rdx), RL1;
13862 vmovdqu (1*16)(%rdx), RR1;
13863@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
13864 call __cast5_dec_blk16;
13865
13866 /* xor with src */
13867- vmovq (%r12), RX;
13868+ vmovq (%r14), RX;
13869 vpshufd $0x4f, RX, RX;
13870 vpxor RX, RR1, RR1;
13871- vpxor 0*16+8(%r12), RL1, RL1;
13872- vpxor 1*16+8(%r12), RR2, RR2;
13873- vpxor 2*16+8(%r12), RL2, RL2;
13874- vpxor 3*16+8(%r12), RR3, RR3;
13875- vpxor 4*16+8(%r12), RL3, RL3;
13876- vpxor 5*16+8(%r12), RR4, RR4;
13877- vpxor 6*16+8(%r12), RL4, RL4;
13878+ vpxor 0*16+8(%r14), RL1, RL1;
13879+ vpxor 1*16+8(%r14), RR2, RR2;
13880+ vpxor 2*16+8(%r14), RL2, RL2;
13881+ vpxor 3*16+8(%r14), RR3, RR3;
13882+ vpxor 4*16+8(%r14), RL3, RL3;
13883+ vpxor 5*16+8(%r14), RR4, RR4;
13884+ vpxor 6*16+8(%r14), RL4, RL4;
13885
13886 vmovdqu RR1, (0*16)(%r11);
13887 vmovdqu RL1, (1*16)(%r11);
13888@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
13889 vmovdqu RR4, (6*16)(%r11);
13890 vmovdqu RL4, (7*16)(%r11);
13891
13892- popq %r12;
13893+ popq %r14;
13894
13895+ pax_force_retaddr
13896 ret;
13897 ENDPROC(cast5_cbc_dec_16way)
13898
13899@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
13900 * %rcx: iv (big endian, 64bit)
13901 */
13902
13903- pushq %r12;
13904+ pushq %r14;
13905
13906 movq %rsi, %r11;
13907- movq %rdx, %r12;
13908+ movq %rdx, %r14;
13909
13910 vpcmpeqd RTMP, RTMP, RTMP;
13911 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
13912@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
13913 call __cast5_enc_blk16;
13914
13915 /* dst = src ^ iv */
13916- vpxor (0*16)(%r12), RR1, RR1;
13917- vpxor (1*16)(%r12), RL1, RL1;
13918- vpxor (2*16)(%r12), RR2, RR2;
13919- vpxor (3*16)(%r12), RL2, RL2;
13920- vpxor (4*16)(%r12), RR3, RR3;
13921- vpxor (5*16)(%r12), RL3, RL3;
13922- vpxor (6*16)(%r12), RR4, RR4;
13923- vpxor (7*16)(%r12), RL4, RL4;
13924+ vpxor (0*16)(%r14), RR1, RR1;
13925+ vpxor (1*16)(%r14), RL1, RL1;
13926+ vpxor (2*16)(%r14), RR2, RR2;
13927+ vpxor (3*16)(%r14), RL2, RL2;
13928+ vpxor (4*16)(%r14), RR3, RR3;
13929+ vpxor (5*16)(%r14), RL3, RL3;
13930+ vpxor (6*16)(%r14), RR4, RR4;
13931+ vpxor (7*16)(%r14), RL4, RL4;
13932 vmovdqu RR1, (0*16)(%r11);
13933 vmovdqu RL1, (1*16)(%r11);
13934 vmovdqu RR2, (2*16)(%r11);
13935@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
13936 vmovdqu RR4, (6*16)(%r11);
13937 vmovdqu RL4, (7*16)(%r11);
13938
13939- popq %r12;
13940+ popq %r14;
13941
13942+ pax_force_retaddr
13943 ret;
13944 ENDPROC(cast5_ctr_16way)
13945diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13946index e3531f8..e123f35 100644
13947--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13948+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13949@@ -24,6 +24,7 @@
13950 */
13951
13952 #include <linux/linkage.h>
13953+#include <asm/alternative-asm.h>
13954 #include "glue_helper-asm-avx.S"
13955
13956 .file "cast6-avx-x86_64-asm_64.S"
13957@@ -295,6 +296,7 @@ __cast6_enc_blk8:
13958 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13959 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13960
13961+ pax_force_retaddr
13962 ret;
13963 ENDPROC(__cast6_enc_blk8)
13964
13965@@ -340,6 +342,7 @@ __cast6_dec_blk8:
13966 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13967 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13968
13969+ pax_force_retaddr
13970 ret;
13971 ENDPROC(__cast6_dec_blk8)
13972
13973@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
13974
13975 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13976
13977+ pax_force_retaddr
13978 ret;
13979 ENDPROC(cast6_ecb_enc_8way)
13980
13981@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
13982
13983 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13984
13985+ pax_force_retaddr
13986 ret;
13987 ENDPROC(cast6_ecb_dec_8way)
13988
13989@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
13990 * %rdx: src
13991 */
13992
13993- pushq %r12;
13994+ pushq %r14;
13995
13996 movq %rsi, %r11;
13997- movq %rdx, %r12;
13998+ movq %rdx, %r14;
13999
14000 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14001
14002 call __cast6_dec_blk8;
14003
14004- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14005+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14006
14007- popq %r12;
14008+ popq %r14;
14009
14010+ pax_force_retaddr
14011 ret;
14012 ENDPROC(cast6_cbc_dec_8way)
14013
14014@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
14015 * %rcx: iv (little endian, 128bit)
14016 */
14017
14018- pushq %r12;
14019+ pushq %r14;
14020
14021 movq %rsi, %r11;
14022- movq %rdx, %r12;
14023+ movq %rdx, %r14;
14024
14025 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14026 RD2, RX, RKR, RKM);
14027
14028 call __cast6_enc_blk8;
14029
14030- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14031+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14032
14033- popq %r12;
14034+ popq %r14;
14035
14036+ pax_force_retaddr
14037 ret;
14038 ENDPROC(cast6_ctr_8way)
14039
14040@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
14041 /* dst <= regs xor IVs(in dst) */
14042 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14043
14044+ pax_force_retaddr
14045 ret;
14046 ENDPROC(cast6_xts_enc_8way)
14047
14048@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
14049 /* dst <= regs xor IVs(in dst) */
14050 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14051
14052+ pax_force_retaddr
14053 ret;
14054 ENDPROC(cast6_xts_dec_8way)
14055diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14056index 26d49eb..c0a8c84 100644
14057--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14058+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14059@@ -45,6 +45,7 @@
14060
14061 #include <asm/inst.h>
14062 #include <linux/linkage.h>
14063+#include <asm/alternative-asm.h>
14064
14065 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
14066
14067@@ -309,6 +310,7 @@ do_return:
14068 popq %rsi
14069 popq %rdi
14070 popq %rbx
14071+ pax_force_retaddr
14072 ret
14073
14074 ################################################################
14075diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14076index 5d1e007..098cb4f 100644
14077--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
14078+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14079@@ -18,6 +18,7 @@
14080
14081 #include <linux/linkage.h>
14082 #include <asm/inst.h>
14083+#include <asm/alternative-asm.h>
14084
14085 .data
14086
14087@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14088 psrlq $1, T2
14089 pxor T2, T1
14090 pxor T1, DATA
14091+ pax_force_retaddr
14092 ret
14093 ENDPROC(__clmul_gf128mul_ble)
14094
14095@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14096 call __clmul_gf128mul_ble
14097 PSHUFB_XMM BSWAP DATA
14098 movups DATA, (%rdi)
14099+ pax_force_retaddr
14100 ret
14101 ENDPROC(clmul_ghash_mul)
14102
14103@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14104 PSHUFB_XMM BSWAP DATA
14105 movups DATA, (%rdi)
14106 .Lupdate_just_ret:
14107+ pax_force_retaddr
14108 ret
14109 ENDPROC(clmul_ghash_update)
14110diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14111index 9279e0b..c4b3d2c 100644
14112--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14113+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14114@@ -1,4 +1,5 @@
14115 #include <linux/linkage.h>
14116+#include <asm/alternative-asm.h>
14117
14118 # enter salsa20_encrypt_bytes
14119 ENTRY(salsa20_encrypt_bytes)
14120@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14121 add %r11,%rsp
14122 mov %rdi,%rax
14123 mov %rsi,%rdx
14124+ pax_force_retaddr
14125 ret
14126 # bytesatleast65:
14127 ._bytesatleast65:
14128@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14129 add %r11,%rsp
14130 mov %rdi,%rax
14131 mov %rsi,%rdx
14132+ pax_force_retaddr
14133 ret
14134 ENDPROC(salsa20_keysetup)
14135
14136@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14137 add %r11,%rsp
14138 mov %rdi,%rax
14139 mov %rsi,%rdx
14140+ pax_force_retaddr
14141 ret
14142 ENDPROC(salsa20_ivsetup)
14143diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14144index 2f202f4..d9164d6 100644
14145--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14146+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14147@@ -24,6 +24,7 @@
14148 */
14149
14150 #include <linux/linkage.h>
14151+#include <asm/alternative-asm.h>
14152 #include "glue_helper-asm-avx.S"
14153
14154 .file "serpent-avx-x86_64-asm_64.S"
14155@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14156 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14157 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14158
14159+ pax_force_retaddr
14160 ret;
14161 ENDPROC(__serpent_enc_blk8_avx)
14162
14163@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14164 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14165 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14166
14167+ pax_force_retaddr
14168 ret;
14169 ENDPROC(__serpent_dec_blk8_avx)
14170
14171@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14172
14173 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14174
14175+ pax_force_retaddr
14176 ret;
14177 ENDPROC(serpent_ecb_enc_8way_avx)
14178
14179@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14180
14181 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14182
14183+ pax_force_retaddr
14184 ret;
14185 ENDPROC(serpent_ecb_dec_8way_avx)
14186
14187@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14188
14189 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14190
14191+ pax_force_retaddr
14192 ret;
14193 ENDPROC(serpent_cbc_dec_8way_avx)
14194
14195@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14196
14197 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14198
14199+ pax_force_retaddr
14200 ret;
14201 ENDPROC(serpent_ctr_8way_avx)
14202
14203@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14204 /* dst <= regs xor IVs(in dst) */
14205 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14206
14207+ pax_force_retaddr
14208 ret;
14209 ENDPROC(serpent_xts_enc_8way_avx)
14210
14211@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14212 /* dst <= regs xor IVs(in dst) */
14213 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14214
14215+ pax_force_retaddr
14216 ret;
14217 ENDPROC(serpent_xts_dec_8way_avx)
14218diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14219index b222085..abd483c 100644
14220--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14221+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14222@@ -15,6 +15,7 @@
14223 */
14224
14225 #include <linux/linkage.h>
14226+#include <asm/alternative-asm.h>
14227 #include "glue_helper-asm-avx2.S"
14228
14229 .file "serpent-avx2-asm_64.S"
14230@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14231 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14232 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14233
14234+ pax_force_retaddr
14235 ret;
14236 ENDPROC(__serpent_enc_blk16)
14237
14238@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14239 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14240 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14241
14242+ pax_force_retaddr
14243 ret;
14244 ENDPROC(__serpent_dec_blk16)
14245
14246@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14247
14248 vzeroupper;
14249
14250+ pax_force_retaddr
14251 ret;
14252 ENDPROC(serpent_ecb_enc_16way)
14253
14254@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14255
14256 vzeroupper;
14257
14258+ pax_force_retaddr
14259 ret;
14260 ENDPROC(serpent_ecb_dec_16way)
14261
14262@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14263
14264 vzeroupper;
14265
14266+ pax_force_retaddr
14267 ret;
14268 ENDPROC(serpent_cbc_dec_16way)
14269
14270@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14271
14272 vzeroupper;
14273
14274+ pax_force_retaddr
14275 ret;
14276 ENDPROC(serpent_ctr_16way)
14277
14278@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14279
14280 vzeroupper;
14281
14282+ pax_force_retaddr
14283 ret;
14284 ENDPROC(serpent_xts_enc_16way)
14285
14286@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14287
14288 vzeroupper;
14289
14290+ pax_force_retaddr
14291 ret;
14292 ENDPROC(serpent_xts_dec_16way)
14293diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14294index acc066c..1559cc4 100644
14295--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14296+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14297@@ -25,6 +25,7 @@
14298 */
14299
14300 #include <linux/linkage.h>
14301+#include <asm/alternative-asm.h>
14302
14303 .file "serpent-sse2-x86_64-asm_64.S"
14304 .text
14305@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14306 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14307 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14308
14309+ pax_force_retaddr
14310 ret;
14311
14312 .L__enc_xor8:
14313 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14314 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14315
14316+ pax_force_retaddr
14317 ret;
14318 ENDPROC(__serpent_enc_blk_8way)
14319
14320@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14321 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14322 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14323
14324+ pax_force_retaddr
14325 ret;
14326 ENDPROC(serpent_dec_blk_8way)
14327diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14328index a410950..9dfe7ad 100644
14329--- a/arch/x86/crypto/sha1_ssse3_asm.S
14330+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14331@@ -29,6 +29,7 @@
14332 */
14333
14334 #include <linux/linkage.h>
14335+#include <asm/alternative-asm.h>
14336
14337 #define CTX %rdi // arg1
14338 #define BUF %rsi // arg2
14339@@ -75,9 +76,9 @@
14340
14341 push %rbx
14342 push %rbp
14343- push %r12
14344+ push %r14
14345
14346- mov %rsp, %r12
14347+ mov %rsp, %r14
14348 sub $64, %rsp # allocate workspace
14349 and $~15, %rsp # align stack
14350
14351@@ -99,11 +100,12 @@
14352 xor %rax, %rax
14353 rep stosq
14354
14355- mov %r12, %rsp # deallocate workspace
14356+ mov %r14, %rsp # deallocate workspace
14357
14358- pop %r12
14359+ pop %r14
14360 pop %rbp
14361 pop %rbx
14362+ pax_force_retaddr
14363 ret
14364
14365 ENDPROC(\name)
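
The SHA-1 prologue above saves the incoming %rsp in a callee-saved register, carves out 64 bytes, and rounds the stack down to a 16-byte boundary (sub $64, %rsp; and $~15, %rsp); before returning it wipes the workspace with rep stosq so no key-derived state lingers on the stack. The patch merely moves the save register from %r12 to %r14, but the allocate-align-wipe pattern itself translates directly to C (ordinary pointer arithmetic, nothing tree-specific):

    #include <stdint.h>
    #include <string.h>

    /* Round a pointer down to a 16-byte boundary, like 'and $~15, %rsp'. */
    static void *align16_down(void *p)
    {
            return (void *)((uintptr_t)p & ~(uintptr_t)15);
    }

    void demo(void)
    {
            unsigned char raw[64 + 15];     /* over-allocate by alignment - 1 */
            unsigned char *ws = align16_down(raw + 15); /* aligned, 64 usable bytes */

            /* ... use ws as scratch ... */

            memset(ws, 0, 64);      /* mirrors the 'rep stosq' wipe on exit */
    }
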
14366diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14367index 642f156..51a513c 100644
14368--- a/arch/x86/crypto/sha256-avx-asm.S
14369+++ b/arch/x86/crypto/sha256-avx-asm.S
14370@@ -49,6 +49,7 @@
14371
14372 #ifdef CONFIG_AS_AVX
14373 #include <linux/linkage.h>
14374+#include <asm/alternative-asm.h>
14375
14376 ## assume buffers not aligned
14377 #define VMOVDQ vmovdqu
14378@@ -460,6 +461,7 @@ done_hash:
14379 popq %r13
14380 popq %rbp
14381 popq %rbx
14382+ pax_force_retaddr
14383 ret
14384 ENDPROC(sha256_transform_avx)
14385
14386diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
14387index 9e86944..3795e6a 100644
14388--- a/arch/x86/crypto/sha256-avx2-asm.S
14389+++ b/arch/x86/crypto/sha256-avx2-asm.S
14390@@ -50,6 +50,7 @@
14391
14392 #ifdef CONFIG_AS_AVX2
14393 #include <linux/linkage.h>
14394+#include <asm/alternative-asm.h>
14395
14396 ## assume buffers not aligned
14397 #define VMOVDQ vmovdqu
14398@@ -720,6 +721,7 @@ done_hash:
14399 popq %r12
14400 popq %rbp
14401 popq %rbx
14402+ pax_force_retaddr
14403 ret
14404 ENDPROC(sha256_transform_rorx)
14405
14406diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
14407index f833b74..8c62a9e 100644
14408--- a/arch/x86/crypto/sha256-ssse3-asm.S
14409+++ b/arch/x86/crypto/sha256-ssse3-asm.S
14410@@ -47,6 +47,7 @@
14411 ########################################################################
14412
14413 #include <linux/linkage.h>
14414+#include <asm/alternative-asm.h>
14415
14416 ## assume buffers not aligned
14417 #define MOVDQ movdqu
14418@@ -471,6 +472,7 @@ done_hash:
14419 popq %rbp
14420 popq %rbx
14421
14422+ pax_force_retaddr
14423 ret
14424 ENDPROC(sha256_transform_ssse3)
14425
14426diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
14427index 974dde9..a823ff9 100644
14428--- a/arch/x86/crypto/sha512-avx-asm.S
14429+++ b/arch/x86/crypto/sha512-avx-asm.S
14430@@ -49,6 +49,7 @@
14431
14432 #ifdef CONFIG_AS_AVX
14433 #include <linux/linkage.h>
14434+#include <asm/alternative-asm.h>
14435
14436 .text
14437
14438@@ -364,6 +365,7 @@ updateblock:
14439 mov frame_RSPSAVE(%rsp), %rsp
14440
14441 nowork:
14442+ pax_force_retaddr
14443 ret
14444 ENDPROC(sha512_transform_avx)
14445
14446diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
14447index 568b961..ed20c37 100644
14448--- a/arch/x86/crypto/sha512-avx2-asm.S
14449+++ b/arch/x86/crypto/sha512-avx2-asm.S
14450@@ -51,6 +51,7 @@
14451
14452 #ifdef CONFIG_AS_AVX2
14453 #include <linux/linkage.h>
14454+#include <asm/alternative-asm.h>
14455
14456 .text
14457
14458@@ -678,6 +679,7 @@ done_hash:
14459
14460 # Restore Stack Pointer
14461 mov frame_RSPSAVE(%rsp), %rsp
14462+ pax_force_retaddr
14463 ret
14464 ENDPROC(sha512_transform_rorx)
14465
14466diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
14467index fb56855..6edd768 100644
14468--- a/arch/x86/crypto/sha512-ssse3-asm.S
14469+++ b/arch/x86/crypto/sha512-ssse3-asm.S
14470@@ -48,6 +48,7 @@
14471 ########################################################################
14472
14473 #include <linux/linkage.h>
14474+#include <asm/alternative-asm.h>
14475
14476 .text
14477
14478@@ -363,6 +364,7 @@ updateblock:
14479 mov frame_RSPSAVE(%rsp), %rsp
14480
14481 nowork:
14482+ pax_force_retaddr
14483 ret
14484 ENDPROC(sha512_transform_ssse3)
14485
14486diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14487index 0505813..b067311 100644
14488--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14489+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14490@@ -24,6 +24,7 @@
14491 */
14492
14493 #include <linux/linkage.h>
14494+#include <asm/alternative-asm.h>
14495 #include "glue_helper-asm-avx.S"
14496
14497 .file "twofish-avx-x86_64-asm_64.S"
14498@@ -284,6 +285,7 @@ __twofish_enc_blk8:
14499 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
14500 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
14501
14502+ pax_force_retaddr
14503 ret;
14504 ENDPROC(__twofish_enc_blk8)
14505
14506@@ -324,6 +326,7 @@ __twofish_dec_blk8:
14507 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
14508 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
14509
14510+ pax_force_retaddr
14511 ret;
14512 ENDPROC(__twofish_dec_blk8)
14513
14514@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
14515
14516 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14517
14518+ pax_force_retaddr
14519 ret;
14520 ENDPROC(twofish_ecb_enc_8way)
14521
14522@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
14523
14524 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14525
14526+ pax_force_retaddr
14527 ret;
14528 ENDPROC(twofish_ecb_dec_8way)
14529
14530@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
14531 * %rdx: src
14532 */
14533
14534- pushq %r12;
14535+ pushq %r14;
14536
14537 movq %rsi, %r11;
14538- movq %rdx, %r12;
14539+ movq %rdx, %r14;
14540
14541 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14542
14543 call __twofish_dec_blk8;
14544
14545- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14546+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14547
14548- popq %r12;
14549+ popq %r14;
14550
14551+ pax_force_retaddr
14552 ret;
14553 ENDPROC(twofish_cbc_dec_8way)
14554
14555@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
14556 * %rcx: iv (little endian, 128bit)
14557 */
14558
14559- pushq %r12;
14560+ pushq %r14;
14561
14562 movq %rsi, %r11;
14563- movq %rdx, %r12;
14564+ movq %rdx, %r14;
14565
14566 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14567 RD2, RX0, RX1, RY0);
14568
14569 call __twofish_enc_blk8;
14570
14571- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14572+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14573
14574- popq %r12;
14575+ popq %r14;
14576
14577+ pax_force_retaddr
14578 ret;
14579 ENDPROC(twofish_ctr_8way)
14580
14581@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
14582 /* dst <= regs xor IVs(in dst) */
14583 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14584
14585+ pax_force_retaddr
14586 ret;
14587 ENDPROC(twofish_xts_enc_8way)
14588
14589@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
14590 /* dst <= regs xor IVs(in dst) */
14591 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14592
14593+ pax_force_retaddr
14594 ret;
14595 ENDPROC(twofish_xts_dec_8way)
14596diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14597index 1c3b7ce..02f578d 100644
14598--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14599+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14600@@ -21,6 +21,7 @@
14601 */
14602
14603 #include <linux/linkage.h>
14604+#include <asm/alternative-asm.h>
14605
14606 .file "twofish-x86_64-asm-3way.S"
14607 .text
14608@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
14609 popq %r13;
14610 popq %r14;
14611 popq %r15;
14612+ pax_force_retaddr
14613 ret;
14614
14615 .L__enc_xor3:
14616@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
14617 popq %r13;
14618 popq %r14;
14619 popq %r15;
14620+ pax_force_retaddr
14621 ret;
14622 ENDPROC(__twofish_enc_blk_3way)
14623
14624@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
14625 popq %r13;
14626 popq %r14;
14627 popq %r15;
14628+ pax_force_retaddr
14629 ret;
14630 ENDPROC(twofish_dec_blk_3way)
14631diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
14632index a039d21..524b8b2 100644
14633--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
14634+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
14635@@ -22,6 +22,7 @@
14636
14637 #include <linux/linkage.h>
14638 #include <asm/asm-offsets.h>
14639+#include <asm/alternative-asm.h>
14640
14641 #define a_offset 0
14642 #define b_offset 4
14643@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
14644
14645 popq R1
14646 movq $1,%rax
14647+ pax_force_retaddr
14648 ret
14649 ENDPROC(twofish_enc_blk)
14650
14651@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
14652
14653 popq R1
14654 movq $1,%rax
14655+ pax_force_retaddr
14656 ret
14657 ENDPROC(twofish_dec_blk)
14658diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
14659index ae6aad1..719d6d9 100644
14660--- a/arch/x86/ia32/ia32_aout.c
14661+++ b/arch/x86/ia32/ia32_aout.c
14662@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
14663 unsigned long dump_start, dump_size;
14664 struct user32 dump;
14665
14666+ memset(&dump, 0, sizeof(dump));
14667+
14668 fs = get_fs();
14669 set_fs(KERNEL_DS);
14670 has_dumped = 1;
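
The added memset closes a classic kernel stack infoleak: aout_core_dump() writes struct user32 dump out to a user-readable core file, and any padding bytes or never-assigned members would otherwise carry whatever stale kernel stack data happened to sit there. The general shape of the bug and the fix, with a hypothetical struct for illustration:

    #include <string.h>

    /* Hypothetical struct: on LP64, 7 padding bytes follow 'tag'. */
    struct report {
            char tag;
            long value;
    };

    void fill_report(struct report *r)
    {
            memset(r, 0, sizeof(*r));       /* clear padding and unset fields */
            r->tag = 'x';
            r->value = 42;
            /* copying *r to user space is now safe: field-by-field
             * assignment alone would leave the padding uninitialized */
    }
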
14671diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
14672index f9e181a..300544c 100644
14673--- a/arch/x86/ia32/ia32_signal.c
14674+++ b/arch/x86/ia32/ia32_signal.c
14675@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
14676 if (__get_user(set.sig[0], &frame->sc.oldmask)
14677 || (_COMPAT_NSIG_WORDS > 1
14678 && __copy_from_user((((char *) &set.sig) + 4),
14679- &frame->extramask,
14680+ frame->extramask,
14681 sizeof(frame->extramask))))
14682 goto badframe;
14683
14684@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
14685 sp -= frame_size;
14686 /* Align the stack pointer according to the i386 ABI,
14687 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
14688- sp = ((sp + 4) & -16ul) - 4;
14689+ sp = ((sp - 12) & -16ul) - 4;
14690 return (void __user *) sp;
14691 }
14692
14693@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14694 } else {
14695 /* Return stub is in 32bit vsyscall page */
14696 if (current->mm->context.vdso)
14697- restorer = current->mm->context.vdso +
14698- selected_vdso32->sym___kernel_sigreturn;
14699+ restorer = (void __force_user *)(current->mm->context.vdso +
14700+ selected_vdso32->sym___kernel_sigreturn);
14701 else
14702- restorer = &frame->retcode;
14703+ restorer = frame->retcode;
14704 }
14705
14706 put_user_try {
14707@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14708 * These are actually not used anymore, but left because some
14709 * gdb versions depend on them as a marker.
14710 */
14711- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14712+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14713 } put_user_catch(err);
14714
14715 if (err)
14716@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14717 0xb8,
14718 __NR_ia32_rt_sigreturn,
14719 0x80cd,
14720- 0,
14721+ 0
14722 };
14723
14724 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
14725@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14726
14727 if (ksig->ka.sa.sa_flags & SA_RESTORER)
14728 restorer = ksig->ka.sa.sa_restorer;
14729+ else if (current->mm->context.vdso)
14730+ /* Return stub is in 32bit vsyscall page */
14731+ restorer = (void __force_user *)(current->mm->context.vdso +
14732+ selected_vdso32->sym___kernel_rt_sigreturn);
14733 else
14734- restorer = current->mm->context.vdso +
14735- selected_vdso32->sym___kernel_rt_sigreturn;
14736+ restorer = frame->retcode;
14737 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
14738
14739 /*
14740 * Not actually used anymore, but left because some gdb
14741 * versions need it.
14742 */
14743- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14744+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14745 } put_user_catch(err);
14746
14747 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
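
Among these ia32_signal.c changes, the get_sigframe() rounding is worth unpacking. Both expressions preserve the i386 ABI invariant ((sp + 4) & 15) == 0, but sp - 12 equals sp + 4 - 16, so the new form always lands exactly 16 bytes below the old one; the old form could return sp itself when sp was already suitably aligned, while the new one stays strictly below the incoming stack pointer, presumably so the frame never overlaps live stack. A brute-force check of that identity:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            for (unsigned long sp = 4096; sp < 4096 + 64; sp++) {
                    unsigned long oldv = ((sp + 4) & -16ul) - 4;
                    unsigned long newv = ((sp - 12) & -16ul) - 4;

                    assert(newv == oldv - 16);          /* always one slot lower */
                    assert(((newv + 4) & 15) == 0);     /* ABI alignment kept */
                    assert(newv < sp);                  /* strictly below sp */
            }
            printf("identity holds\n");
            return 0;
    }
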
14748diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
14749index 82e8a1d..4e998d5 100644
14750--- a/arch/x86/ia32/ia32entry.S
14751+++ b/arch/x86/ia32/ia32entry.S
14752@@ -15,8 +15,10 @@
14753 #include <asm/irqflags.h>
14754 #include <asm/asm.h>
14755 #include <asm/smap.h>
14756+#include <asm/pgtable.h>
14757 #include <linux/linkage.h>
14758 #include <linux/err.h>
14759+#include <asm/alternative-asm.h>
14760
14761 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14762 #include <linux/elf-em.h>
14763@@ -62,12 +64,12 @@
14764 */
14765 .macro LOAD_ARGS32 offset, _r9=0
14766 .if \_r9
14767- movl \offset+16(%rsp),%r9d
14768+ movl \offset+R9(%rsp),%r9d
14769 .endif
14770- movl \offset+40(%rsp),%ecx
14771- movl \offset+48(%rsp),%edx
14772- movl \offset+56(%rsp),%esi
14773- movl \offset+64(%rsp),%edi
14774+ movl \offset+RCX(%rsp),%ecx
14775+ movl \offset+RDX(%rsp),%edx
14776+ movl \offset+RSI(%rsp),%esi
14777+ movl \offset+RDI(%rsp),%edi
14778 movl %eax,%eax /* zero extension */
14779 .endm
14780
14781@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
14782 ENDPROC(native_irq_enable_sysexit)
14783 #endif
14784
14785+ .macro pax_enter_kernel_user
14786+ pax_set_fptr_mask
14787+#ifdef CONFIG_PAX_MEMORY_UDEREF
14788+ call pax_enter_kernel_user
14789+#endif
14790+ .endm
14791+
14792+ .macro pax_exit_kernel_user
14793+#ifdef CONFIG_PAX_MEMORY_UDEREF
14794+ call pax_exit_kernel_user
14795+#endif
14796+#ifdef CONFIG_PAX_RANDKSTACK
14797+ pushq %rax
14798+ pushq %r11
14799+ call pax_randomize_kstack
14800+ popq %r11
14801+ popq %rax
14802+#endif
14803+ .endm
14804+
14805+ .macro pax_erase_kstack
14806+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14807+ call pax_erase_kstack
14808+#endif
14809+ .endm
14810+
14811 /*
14812 * 32bit SYSENTER instruction entry.
14813 *
14814@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
14815 CFI_REGISTER rsp,rbp
14816 SWAPGS_UNSAFE_STACK
14817 movq PER_CPU_VAR(kernel_stack), %rsp
14818- addq $(KERNEL_STACK_OFFSET),%rsp
14819- /*
14820- * No need to follow this irqs on/off section: the syscall
14821- * disabled irqs, here we enable it straight after entry:
14822- */
14823- ENABLE_INTERRUPTS(CLBR_NONE)
14824 movl %ebp,%ebp /* zero extension */
14825 pushq_cfi $__USER32_DS
14826 /*CFI_REL_OFFSET ss,0*/
14827@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target)
14828 CFI_REL_OFFSET rsp,0
14829 pushfq_cfi
14830 /*CFI_REL_OFFSET rflags,0*/
14831- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
14832- CFI_REGISTER rip,r10
14833+ orl $X86_EFLAGS_IF,(%rsp)
14834+ GET_THREAD_INFO(%r11)
14835+ movl TI_sysenter_return(%r11), %r11d
14836+ CFI_REGISTER rip,r11
14837 pushq_cfi $__USER32_CS
14838 /*CFI_REL_OFFSET cs,0*/
14839 movl %eax, %eax
14840- pushq_cfi %r10
14841+ pushq_cfi %r11
14842 CFI_REL_OFFSET rip,0
14843 pushq_cfi %rax
14844 cld
14845 SAVE_ARGS 0,1,0
14846+ pax_enter_kernel_user
14847+
14848+#ifdef CONFIG_PAX_RANDKSTACK
14849+ pax_erase_kstack
14850+#endif
14851+
14852+ /*
14853+ * No need to follow this irqs on/off section: the syscall
14854+ * disabled irqs, here we enable it straight after entry:
14855+ */
14856+ ENABLE_INTERRUPTS(CLBR_NONE)
14857 /* no need to do an access_ok check here because rbp has been
14858 32bit zero extended */
14859+
14860+#ifdef CONFIG_PAX_MEMORY_UDEREF
14861+ addq pax_user_shadow_base,%rbp
14862+ ASM_PAX_OPEN_USERLAND
14863+#endif
14864+
14865 ASM_STAC
14866 1: movl (%rbp),%ebp
14867 _ASM_EXTABLE(1b,ia32_badarg)
14868 ASM_CLAC
14869
14870+#ifdef CONFIG_PAX_MEMORY_UDEREF
14871+ ASM_PAX_CLOSE_USERLAND
14872+#endif
14873+
14874 /*
14875 * Sysenter doesn't filter flags, so we need to clear NT
14876 * ourselves. To save a few cycles, we can check whether
14877@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target)
14878 jnz sysenter_fix_flags
14879 sysenter_flags_fixed:
14880
14881- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14882- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14883+ GET_THREAD_INFO(%r11)
14884+ orl $TS_COMPAT,TI_status(%r11)
14885+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14886 CFI_REMEMBER_STATE
14887 jnz sysenter_tracesys
14888 cmpq $(IA32_NR_syscalls-1),%rax
14889@@ -172,15 +218,18 @@ sysenter_do_call:
14890 sysenter_dispatch:
14891 call *ia32_sys_call_table(,%rax,8)
14892 movq %rax,RAX-ARGOFFSET(%rsp)
14893+ GET_THREAD_INFO(%r11)
14894 DISABLE_INTERRUPTS(CLBR_NONE)
14895 TRACE_IRQS_OFF
14896- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14897+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14898 jnz sysexit_audit
14899 sysexit_from_sys_call:
14900- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14901+ pax_exit_kernel_user
14902+ pax_erase_kstack
14903+ andl $~TS_COMPAT,TI_status(%r11)
14904 /* clear IF, that popfq doesn't enable interrupts early */
14905- andl $~0x200,EFLAGS-R11(%rsp)
14906- movl RIP-R11(%rsp),%edx /* User %eip */
14907+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
14908+ movl RIP(%rsp),%edx /* User %eip */
14909 CFI_REGISTER rip,rdx
14910 RESTORE_ARGS 0,24,0,0,0,0
14911 xorq %r8,%r8
14912@@ -205,6 +254,9 @@ sysexit_from_sys_call:
14913 movl %ebx,%esi /* 2nd arg: 1st syscall arg */
14914 movl %eax,%edi /* 1st arg: syscall number */
14915 call __audit_syscall_entry
14916+
14917+ pax_erase_kstack
14918+
14919 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
14920 cmpq $(IA32_NR_syscalls-1),%rax
14921 ja ia32_badsys
14922@@ -216,7 +268,7 @@ sysexit_from_sys_call:
14923 .endm
14924
14925 .macro auditsys_exit exit
14926- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14927+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14928 jnz ia32_ret_from_sys_call
14929 TRACE_IRQS_ON
14930 ENABLE_INTERRUPTS(CLBR_NONE)
14931@@ -227,11 +279,12 @@ sysexit_from_sys_call:
14932 1: setbe %al /* 1 if error, 0 if not */
14933 movzbl %al,%edi /* zero-extend that into %edi */
14934 call __audit_syscall_exit
14935+ GET_THREAD_INFO(%r11)
14936 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
14937 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
14938 DISABLE_INTERRUPTS(CLBR_NONE)
14939 TRACE_IRQS_OFF
14940- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14941+ testl %edi,TI_flags(%r11)
14942 jz \exit
14943 CLEAR_RREGS -ARGOFFSET
14944 jmp int_with_check
14945@@ -253,7 +306,7 @@ sysenter_fix_flags:
14946
14947 sysenter_tracesys:
14948 #ifdef CONFIG_AUDITSYSCALL
14949- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14950+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14951 jz sysenter_auditsys
14952 #endif
14953 SAVE_REST
14954@@ -265,6 +318,9 @@ sysenter_tracesys:
14955 RESTORE_REST
14956 cmpq $(IA32_NR_syscalls-1),%rax
14957 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
14958+
14959+ pax_erase_kstack
14960+
14961 jmp sysenter_do_call
14962 CFI_ENDPROC
14963 ENDPROC(ia32_sysenter_target)
14964@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target)
14965 ENTRY(ia32_cstar_target)
14966 CFI_STARTPROC32 simple
14967 CFI_SIGNAL_FRAME
14968- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14969+ CFI_DEF_CFA rsp,0
14970 CFI_REGISTER rip,rcx
14971 /*CFI_REGISTER rflags,r11*/
14972 SWAPGS_UNSAFE_STACK
14973 movl %esp,%r8d
14974 CFI_REGISTER rsp,r8
14975 movq PER_CPU_VAR(kernel_stack),%rsp
14976+ SAVE_ARGS 8*6,0,0
14977+ pax_enter_kernel_user
14978+
14979+#ifdef CONFIG_PAX_RANDKSTACK
14980+ pax_erase_kstack
14981+#endif
14982+
14983 /*
14984 * No need to follow this irqs on/off section: the syscall
14985 * disabled irqs and here we enable it straight after entry:
14986 */
14987 ENABLE_INTERRUPTS(CLBR_NONE)
14988- SAVE_ARGS 8,0,0
14989 movl %eax,%eax /* zero extension */
14990 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14991 movq %rcx,RIP-ARGOFFSET(%rsp)
14992@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target)
14993 /* no need to do an access_ok check here because r8 has been
14994 32bit zero extended */
14995 /* hardware stack frame is complete now */
14996+
14997+#ifdef CONFIG_PAX_MEMORY_UDEREF
14998+ ASM_PAX_OPEN_USERLAND
14999+ movq pax_user_shadow_base,%r8
15000+ addq RSP-ARGOFFSET(%rsp),%r8
15001+#endif
15002+
15003 ASM_STAC
15004 1: movl (%r8),%r9d
15005 _ASM_EXTABLE(1b,ia32_badarg)
15006 ASM_CLAC
15007- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15008- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15009+
15010+#ifdef CONFIG_PAX_MEMORY_UDEREF
15011+ ASM_PAX_CLOSE_USERLAND
15012+#endif
15013+
15014+ GET_THREAD_INFO(%r11)
15015+ orl $TS_COMPAT,TI_status(%r11)
15016+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15017 CFI_REMEMBER_STATE
15018 jnz cstar_tracesys
15019 cmpq $IA32_NR_syscalls-1,%rax
15020@@ -335,13 +410,16 @@ cstar_do_call:
15021 cstar_dispatch:
15022 call *ia32_sys_call_table(,%rax,8)
15023 movq %rax,RAX-ARGOFFSET(%rsp)
15024+ GET_THREAD_INFO(%r11)
15025 DISABLE_INTERRUPTS(CLBR_NONE)
15026 TRACE_IRQS_OFF
15027- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15028+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
15029 jnz sysretl_audit
15030 sysretl_from_sys_call:
15031- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15032- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
15033+ pax_exit_kernel_user
15034+ pax_erase_kstack
15035+ andl $~TS_COMPAT,TI_status(%r11)
15036+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
15037 movl RIP-ARGOFFSET(%rsp),%ecx
15038 CFI_REGISTER rip,rcx
15039 movl EFLAGS-ARGOFFSET(%rsp),%r11d
15040@@ -368,7 +446,7 @@ sysretl_audit:
15041
15042 cstar_tracesys:
15043 #ifdef CONFIG_AUDITSYSCALL
15044- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15045+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15046 jz cstar_auditsys
15047 #endif
15048 xchgl %r9d,%ebp
15049@@ -382,11 +460,19 @@ cstar_tracesys:
15050 xchgl %ebp,%r9d
15051 cmpq $(IA32_NR_syscalls-1),%rax
15052 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
15053+
15054+ pax_erase_kstack
15055+
15056 jmp cstar_do_call
15057 END(ia32_cstar_target)
15058
15059 ia32_badarg:
15060 ASM_CLAC
15061+
15062+#ifdef CONFIG_PAX_MEMORY_UDEREF
15063+ ASM_PAX_CLOSE_USERLAND
15064+#endif
15065+
15066 movq $-EFAULT,%rax
15067 jmp ia32_sysret
15068 CFI_ENDPROC
15069@@ -423,19 +509,26 @@ ENTRY(ia32_syscall)
15070 CFI_REL_OFFSET rip,RIP-RIP
15071 PARAVIRT_ADJUST_EXCEPTION_FRAME
15072 SWAPGS
15073- /*
15074- * No need to follow this irqs on/off section: the syscall
15075- * disabled irqs and here we enable it straight after entry:
15076- */
15077- ENABLE_INTERRUPTS(CLBR_NONE)
15078 movl %eax,%eax
15079 pushq_cfi %rax
15080 cld
15081 /* note the registers are not zero extended to the sf.
15082 this could be a problem. */
15083 SAVE_ARGS 0,1,0
15084- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15085- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15086+ pax_enter_kernel_user
15087+
15088+#ifdef CONFIG_PAX_RANDKSTACK
15089+ pax_erase_kstack
15090+#endif
15091+
15092+ /*
15093+ * No need to follow this irqs on/off section: the syscall
15094+ * disabled irqs and here we enable it straight after entry:
15095+ */
15096+ ENABLE_INTERRUPTS(CLBR_NONE)
15097+ GET_THREAD_INFO(%r11)
15098+ orl $TS_COMPAT,TI_status(%r11)
15099+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15100 jnz ia32_tracesys
15101 cmpq $(IA32_NR_syscalls-1),%rax
15102 ja ia32_badsys
15103@@ -458,6 +551,9 @@ ia32_tracesys:
15104 RESTORE_REST
15105 cmpq $(IA32_NR_syscalls-1),%rax
15106 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15107+
15108+ pax_erase_kstack
15109+
15110 jmp ia32_do_call
15111 END(ia32_syscall)
15112
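The hunks above retire the stack-relative thread_info access (TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)) in favour of a pointer fetched once with GET_THREAD_INFO(%r11), apparently because PAX_RANDKSTACK shifts the kernel stack so a fixed %rsp displacement is no longer trustworthy. A user-space sketch of the classic mask-the-stack-pointer lookup that the displacement form descends from; THREAD_SIZE and the layout are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 16384UL  /* illustrative; the real value is config-dependent */

/* thread_info historically sat at the base of the kernel stack, so
 * masking any in-stack address down to THREAD_SIZE alignment found it.
 * Caching the result in a register (%r11 in the patch) avoids redoing
 * an address computation that stack randomization can invalidate. */
static void *thread_info_sketch(uintptr_t sp)
{
    return (void *)(sp & ~(THREAD_SIZE - 1));
}

int main(void)
{
    int marker;

    printf("stack %p -> base %p\n", (void *)&marker,
           thread_info_sketch((uintptr_t)&marker));
    return 0;
}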
15113diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15114index 8e0ceec..af13504 100644
15115--- a/arch/x86/ia32/sys_ia32.c
15116+++ b/arch/x86/ia32/sys_ia32.c
15117@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15118 */
15119 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15120 {
15121- typeof(ubuf->st_uid) uid = 0;
15122- typeof(ubuf->st_gid) gid = 0;
15123+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15124+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15125 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15126 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15127 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
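The cp_stat64 hunk swaps typeof(ubuf->st_uid) for the null-pointer member idiom, presumably so the member's type is named without even syntactically dereferencing the __user-tagged pointer that the hardening plugins instrument. A minimal sketch of the idiom in plain GNU C (the struct here is a hypothetical stand-in, not the kernel's stat64):

#include <stdio.h>

struct example {                /* stand-in for struct stat64 */
    unsigned short st_uid;
};

int main(void)
{
    /* typeof() is resolved entirely at compile time: the casted null
     * pointer is never dereferenced at run time, it merely names the
     * member whose type we want. */
    typeof(((struct example *)0)->st_uid) uid = 0;

    printf("uid occupies %zu bytes\n", sizeof(uid));
    return (int)uid;
}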
15128diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15129index 372231c..51b537d 100644
15130--- a/arch/x86/include/asm/alternative-asm.h
15131+++ b/arch/x86/include/asm/alternative-asm.h
15132@@ -18,6 +18,45 @@
15133 .endm
15134 #endif
15135
15136+#ifdef KERNEXEC_PLUGIN
15137+ .macro pax_force_retaddr_bts rip=0
15138+ btsq $63,\rip(%rsp)
15139+ .endm
15140+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15141+ .macro pax_force_retaddr rip=0, reload=0
15142+ btsq $63,\rip(%rsp)
15143+ .endm
15144+ .macro pax_force_fptr ptr
15145+ btsq $63,\ptr
15146+ .endm
15147+ .macro pax_set_fptr_mask
15148+ .endm
15149+#endif
15150+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15151+ .macro pax_force_retaddr rip=0, reload=0
15152+ .if \reload
15153+ pax_set_fptr_mask
15154+ .endif
15155+ orq %r12,\rip(%rsp)
15156+ .endm
15157+ .macro pax_force_fptr ptr
15158+ orq %r12,\ptr
15159+ .endm
15160+ .macro pax_set_fptr_mask
15161+ movabs $0x8000000000000000,%r12
15162+ .endm
15163+#endif
15164+#else
15165+ .macro pax_force_retaddr rip=0, reload=0
15166+ .endm
15167+ .macro pax_force_fptr ptr
15168+ .endm
15169+ .macro pax_force_retaddr_bts rip=0
15170+ .endm
15171+ .macro pax_set_fptr_mask
15172+ .endm
15173+#endif
15174+
15175 .macro altinstruction_entry orig alt feature orig_len alt_len
15176 .long \orig - .
15177 .long \alt - .
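The pax_force_retaddr/pax_force_fptr macros added above are the KERNEXEC return-address guard: btsq $63 (or an OR with the 0x8000000000000000 mask parked in %r12) forces bit 63 of a saved return address or function pointer. Kernel text already lives with bit 63 set, so legitimate values pass through unchanged, while a smuggled userland address becomes non-canonical and faults on ret. A tiny stand-alone illustration of the bit trick:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t user_ret = 0x00007fffdeadbeefULL;   /* userland-style address */
    uint64_t forced   = user_ret | (1ULL << 63); /* what "btsq $63" does  */

    /* 0x80007fffdeadbeef is non-canonical on x86-64: bits 62..47 do not
     * all match bit 63, so returning there raises a fault instead of
     * executing user-controlled memory. */
    printf("%#llx -> %#llx\n",
           (unsigned long long)user_ret, (unsigned long long)forced);
    return 0;
}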
15178diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15179index 473bdbe..b1e3377 100644
15180--- a/arch/x86/include/asm/alternative.h
15181+++ b/arch/x86/include/asm/alternative.h
15182@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15183 ".pushsection .discard,\"aw\",@progbits\n" \
15184 DISCARD_ENTRY(1) \
15185 ".popsection\n" \
15186- ".pushsection .altinstr_replacement, \"ax\"\n" \
15187+ ".pushsection .altinstr_replacement, \"a\"\n" \
15188 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15189 ".popsection"
15190
15191@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15192 DISCARD_ENTRY(1) \
15193 DISCARD_ENTRY(2) \
15194 ".popsection\n" \
15195- ".pushsection .altinstr_replacement, \"ax\"\n" \
15196+ ".pushsection .altinstr_replacement, \"a\"\n" \
15197 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15198 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15199 ".popsection"
15200diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15201index 465b309..ab7e51f 100644
15202--- a/arch/x86/include/asm/apic.h
15203+++ b/arch/x86/include/asm/apic.h
15204@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15205
15206 #ifdef CONFIG_X86_LOCAL_APIC
15207
15208-extern unsigned int apic_verbosity;
15209+extern int apic_verbosity;
15210 extern int local_apic_timer_c2_ok;
15211
15212 extern int disable_apic;
15213diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15214index 20370c6..a2eb9b0 100644
15215--- a/arch/x86/include/asm/apm.h
15216+++ b/arch/x86/include/asm/apm.h
15217@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15218 __asm__ __volatile__(APM_DO_ZERO_SEGS
15219 "pushl %%edi\n\t"
15220 "pushl %%ebp\n\t"
15221- "lcall *%%cs:apm_bios_entry\n\t"
15222+ "lcall *%%ss:apm_bios_entry\n\t"
15223 "setc %%al\n\t"
15224 "popl %%ebp\n\t"
15225 "popl %%edi\n\t"
15226@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15227 __asm__ __volatile__(APM_DO_ZERO_SEGS
15228 "pushl %%edi\n\t"
15229 "pushl %%ebp\n\t"
15230- "lcall *%%cs:apm_bios_entry\n\t"
15231+ "lcall *%%ss:apm_bios_entry\n\t"
15232 "setc %%bl\n\t"
15233 "popl %%ebp\n\t"
15234 "popl %%edi\n\t"
15235diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15236index 5e5cd12..51cdc93 100644
15237--- a/arch/x86/include/asm/atomic.h
15238+++ b/arch/x86/include/asm/atomic.h
15239@@ -28,6 +28,17 @@ static inline int atomic_read(const atomic_t *v)
15240 }
15241
15242 /**
15243+ * atomic_read_unchecked - read atomic variable
15244+ * @v: pointer of type atomic_unchecked_t
15245+ *
15246+ * Atomically reads the value of @v.
15247+ */
15248+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15249+{
15250+ return ACCESS_ONCE((v)->counter);
15251+}
15252+
15253+/**
15254 * atomic_set - set atomic variable
15255 * @v: pointer of type atomic_t
15256 * @i: required value
15257@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15258 }
15259
15260 /**
15261+ * atomic_set_unchecked - set atomic variable
15262+ * @v: pointer of type atomic_unchecked_t
15263+ * @i: required value
15264+ *
15265+ * Atomically sets the value of @v to @i.
15266+ */
15267+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15268+{
15269+ v->counter = i;
15270+}
15271+
15272+/**
15273 * atomic_add - add integer to atomic variable
15274 * @i: integer value to add
15275 * @v: pointer of type atomic_t
15276@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15277 */
15278 static inline void atomic_add(int i, atomic_t *v)
15279 {
15280- asm volatile(LOCK_PREFIX "addl %1,%0"
15281+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15282+
15283+#ifdef CONFIG_PAX_REFCOUNT
15284+ "jno 0f\n"
15285+ LOCK_PREFIX "subl %1,%0\n"
15286+ "int $4\n0:\n"
15287+ _ASM_EXTABLE(0b, 0b)
15288+#endif
15289+
15290+ : "+m" (v->counter)
15291+ : "ir" (i));
15292+}
15293+
15294+/**
15295+ * atomic_add_unchecked - add integer to atomic variable
15296+ * @i: integer value to add
15297+ * @v: pointer of type atomic_unchecked_t
15298+ *
15299+ * Atomically adds @i to @v.
15300+ */
15301+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15302+{
15303+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15304 : "+m" (v->counter)
15305 : "ir" (i));
15306 }
15307@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15308 */
15309 static inline void atomic_sub(int i, atomic_t *v)
15310 {
15311- asm volatile(LOCK_PREFIX "subl %1,%0"
15312+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15313+
15314+#ifdef CONFIG_PAX_REFCOUNT
15315+ "jno 0f\n"
15316+ LOCK_PREFIX "addl %1,%0\n"
15317+ "int $4\n0:\n"
15318+ _ASM_EXTABLE(0b, 0b)
15319+#endif
15320+
15321+ : "+m" (v->counter)
15322+ : "ir" (i));
15323+}
15324+
15325+/**
15326+ * atomic_sub_unchecked - subtract integer from atomic variable
15327+ * @i: integer value to subtract
15328+ * @v: pointer of type atomic_unchecked_t
15329+ *
15330+ * Atomically subtracts @i from @v.
15331+ */
15332+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15333+{
15334+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15335 : "+m" (v->counter)
15336 : "ir" (i));
15337 }
15338@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15339 */
15340 static inline int atomic_sub_and_test(int i, atomic_t *v)
15341 {
15342- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15343+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15344 }
15345
15346 /**
15347@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15348 */
15349 static inline void atomic_inc(atomic_t *v)
15350 {
15351- asm volatile(LOCK_PREFIX "incl %0"
15352+ asm volatile(LOCK_PREFIX "incl %0\n"
15353+
15354+#ifdef CONFIG_PAX_REFCOUNT
15355+ "jno 0f\n"
15356+ LOCK_PREFIX "decl %0\n"
15357+ "int $4\n0:\n"
15358+ _ASM_EXTABLE(0b, 0b)
15359+#endif
15360+
15361+ : "+m" (v->counter));
15362+}
15363+
15364+/**
15365+ * atomic_inc_unchecked - increment atomic variable
15366+ * @v: pointer of type atomic_unchecked_t
15367+ *
15368+ * Atomically increments @v by 1.
15369+ */
15370+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15371+{
15372+ asm volatile(LOCK_PREFIX "incl %0\n"
15373 : "+m" (v->counter));
15374 }
15375
15376@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
15377 */
15378 static inline void atomic_dec(atomic_t *v)
15379 {
15380- asm volatile(LOCK_PREFIX "decl %0"
15381+ asm volatile(LOCK_PREFIX "decl %0\n"
15382+
15383+#ifdef CONFIG_PAX_REFCOUNT
15384+ "jno 0f\n"
15385+ LOCK_PREFIX "incl %0\n"
15386+ "int $4\n0:\n"
15387+ _ASM_EXTABLE(0b, 0b)
15388+#endif
15389+
15390+ : "+m" (v->counter));
15391+}
15392+
15393+/**
15394+ * atomic_dec_unchecked - decrement atomic variable
15395+ * @v: pointer of type atomic_unchecked_t
15396+ *
15397+ * Atomically decrements @v by 1.
15398+ */
15399+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15400+{
15401+ asm volatile(LOCK_PREFIX "decl %0\n"
15402 : "+m" (v->counter));
15403 }
15404
15405@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
15406 */
15407 static inline int atomic_dec_and_test(atomic_t *v)
15408 {
15409- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15410+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15411 }
15412
15413 /**
15414@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15415 */
15416 static inline int atomic_inc_and_test(atomic_t *v)
15417 {
15418- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15419+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15420+}
15421+
15422+/**
15423+ * atomic_inc_and_test_unchecked - increment and test
15424+ * @v: pointer of type atomic_unchecked_t
15425+ *
15426+ * Atomically increments @v by 1
15427+ * and returns true if the result is zero, or false for all
15428+ * other cases.
15429+ */
15430+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15431+{
15432+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15433 }
15434
15435 /**
15436@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15437 */
15438 static inline int atomic_add_negative(int i, atomic_t *v)
15439 {
15440- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15441+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15442 }
15443
15444 /**
15445@@ -152,7 +272,19 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15446 *
15447 * Atomically adds @i to @v and returns @i + @v
15448 */
15449-static inline int atomic_add_return(int i, atomic_t *v)
15450+static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
15451+{
15452+ return i + xadd_check_overflow(&v->counter, i);
15453+}
15454+
15455+/**
15456+ * atomic_add_return_unchecked - add integer and return
15457+ * @i: integer value to add
15458+ * @v: pointer of type atomic_unchecked_t
15459+ *
15460+ * Atomically adds @i to @v and returns @i + @v
15461+ */
15462+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15463 {
15464 return i + xadd(&v->counter, i);
15465 }
15466@@ -164,15 +296,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
15467 *
15468 * Atomically subtracts @i from @v and returns @v - @i
15469 */
15470-static inline int atomic_sub_return(int i, atomic_t *v)
15471+static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
15472 {
15473 return atomic_add_return(-i, v);
15474 }
15475
15476 #define atomic_inc_return(v) (atomic_add_return(1, v))
15477+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15478+{
15479+ return atomic_add_return_unchecked(1, v);
15480+}
15481 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15482
15483-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15484+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15485+{
15486+ return cmpxchg(&v->counter, old, new);
15487+}
15488+
15489+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15490 {
15491 return cmpxchg(&v->counter, old, new);
15492 }
15493@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15494 return xchg(&v->counter, new);
15495 }
15496
15497+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15498+{
15499+ return xchg(&v->counter, new);
15500+}
15501+
15502 /**
15503 * __atomic_add_unless - add unless the number is already a given value
15504 * @v: pointer of type atomic_t
15505@@ -193,12 +339,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
15506 */
15507 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15508 {
15509- int c, old;
15510+ int c, old, new;
15511 c = atomic_read(v);
15512 for (;;) {
15513- if (unlikely(c == (u)))
15514+ if (unlikely(c == u))
15515 break;
15516- old = atomic_cmpxchg((v), c, c + (a));
15517+
15518+ asm volatile("addl %2,%0\n"
15519+
15520+#ifdef CONFIG_PAX_REFCOUNT
15521+ "jno 0f\n"
15522+ "subl %2,%0\n"
15523+ "int $4\n0:\n"
15524+ _ASM_EXTABLE(0b, 0b)
15525+#endif
15526+
15527+ : "=r" (new)
15528+ : "0" (c), "ir" (a));
15529+
15530+ old = atomic_cmpxchg(v, c, new);
15531 if (likely(old == c))
15532 break;
15533 c = old;
15534@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15535 }
15536
15537 /**
15538+ * atomic_inc_not_zero_hint - increment if not zero
15539+ * @v: pointer of type atomic_t
15540+ * @hint: probable value of the atomic before the increment
15541+ *
15542+ * This version of atomic_inc_not_zero() gives a hint of the probable
15543+ * value of the atomic. This helps the processor avoid reading the memory
15544+ * before doing the atomic read/modify/write cycle, lowering the
15545+ * number of bus transactions on some arches.
15546+ *
15547+ * Returns: 0 if increment was not done, 1 otherwise.
15548+ */
15549+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15550+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15551+{
15552+ int val, c = hint, new;
15553+
15554+ /* sanity test, should be removed by compiler if hint is a constant */
15555+ if (!hint)
15556+ return __atomic_add_unless(v, 1, 0);
15557+
15558+ do {
15559+ asm volatile("incl %0\n"
15560+
15561+#ifdef CONFIG_PAX_REFCOUNT
15562+ "jno 0f\n"
15563+ "decl %0\n"
15564+ "int $4\n0:\n"
15565+ _ASM_EXTABLE(0b, 0b)
15566+#endif
15567+
15568+ : "=r" (new)
15569+ : "0" (c));
15570+
15571+ val = atomic_cmpxchg(v, c, new);
15572+ if (val == c)
15573+ return 1;
15574+ c = val;
15575+ } while (c);
15576+
15577+ return 0;
15578+}
15579+
15580+/**
15581 * atomic_inc_short - increment of a short integer
15582 * @v: pointer to type int
15583 *
15584@@ -220,14 +422,37 @@ static inline short int atomic_inc_short(short int *v)
15585 }
15586
15587 /* These are x86-specific, used by some header files */
15588-#define atomic_clear_mask(mask, addr) \
15589- asm volatile(LOCK_PREFIX "andl %0,%1" \
15590- : : "r" (~(mask)), "m" (*(addr)) : "memory")
15591+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15592+{
15593+ asm volatile(LOCK_PREFIX "andl %1,%0"
15594+ : "+m" (v->counter)
15595+ : "r" (~(mask))
15596+ : "memory");
15597+}
15598
15599-#define atomic_set_mask(mask, addr) \
15600- asm volatile(LOCK_PREFIX "orl %0,%1" \
15601- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15602- : "memory")
15603+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15604+{
15605+ asm volatile(LOCK_PREFIX "andl %1,%0"
15606+ : "+m" (v->counter)
15607+ : "r" (~(mask))
15608+ : "memory");
15609+}
15610+
15611+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15612+{
15613+ asm volatile(LOCK_PREFIX "orl %1,%0"
15614+ : "+m" (v->counter)
15615+ : "r" (mask)
15616+ : "memory");
15617+}
15618+
15619+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15620+{
15621+ asm volatile(LOCK_PREFIX "orl %1,%0"
15622+ : "+m" (v->counter)
15623+ : "r" (mask)
15624+ : "memory");
15625+}
15626
15627 #ifdef CONFIG_X86_32
15628 # include <asm/atomic64_32.h>
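Every PAX_REFCOUNT hunk in this file follows one template: do the locked arithmetic, jno past the recovery path when the signed result did not overflow, otherwise undo the operation and execute int $4 so the overflow handler can act on the offending task. A user-space sketch of the detect-and-undo core, with a flag standing in for the int $4 trap (GNU C on x86 assumed; as in the kernel, the undo is a second atomic op, so the counter is briefly saturated):

#include <stdio.h>

typedef struct { int counter; } atomic_sketch_t;

/* Returns 1 if the add overflowed (and was rolled back), 0 if it was
 * applied. The kernel variant traps with "int $4" instead of handing
 * back a flag. */
static int atomic_add_checked(int i, atomic_sketch_t *v)
{
    int overflowed = 0;

    asm volatile("lock addl %2,%0\n\t"
                 "jno 1f\n\t"            /* no signed overflow: done */
                 "lock subl %2,%0\n\t"   /* overflow: undo the add   */
                 "movl $1,%1\n"
                 "1:"
                 : "+m" (v->counter), "+r" (overflowed)
                 : "ir" (i)
                 : "cc", "memory");
    return overflowed;
}

int main(void)
{
    atomic_sketch_t v = { 0x7fffffff };  /* INT_MAX: the next add overflows */

    if (atomic_add_checked(1, &v))
        printf("overflow caught, counter restored to %d\n", v.counter);
    return 0;
}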
15629diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15630index b154de7..bf18a5a 100644
15631--- a/arch/x86/include/asm/atomic64_32.h
15632+++ b/arch/x86/include/asm/atomic64_32.h
15633@@ -12,6 +12,14 @@ typedef struct {
15634 u64 __aligned(8) counter;
15635 } atomic64_t;
15636
15637+#ifdef CONFIG_PAX_REFCOUNT
15638+typedef struct {
15639+ u64 __aligned(8) counter;
15640+} atomic64_unchecked_t;
15641+#else
15642+typedef atomic64_t atomic64_unchecked_t;
15643+#endif
15644+
15645 #define ATOMIC64_INIT(val) { (val) }
15646
15647 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15648@@ -37,21 +45,31 @@ typedef struct {
15649 ATOMIC64_DECL_ONE(sym##_386)
15650
15651 ATOMIC64_DECL_ONE(add_386);
15652+ATOMIC64_DECL_ONE(add_unchecked_386);
15653 ATOMIC64_DECL_ONE(sub_386);
15654+ATOMIC64_DECL_ONE(sub_unchecked_386);
15655 ATOMIC64_DECL_ONE(inc_386);
15656+ATOMIC64_DECL_ONE(inc_unchecked_386);
15657 ATOMIC64_DECL_ONE(dec_386);
15658+ATOMIC64_DECL_ONE(dec_unchecked_386);
15659 #endif
15660
15661 #define alternative_atomic64(f, out, in...) \
15662 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15663
15664 ATOMIC64_DECL(read);
15665+ATOMIC64_DECL(read_unchecked);
15666 ATOMIC64_DECL(set);
15667+ATOMIC64_DECL(set_unchecked);
15668 ATOMIC64_DECL(xchg);
15669 ATOMIC64_DECL(add_return);
15670+ATOMIC64_DECL(add_return_unchecked);
15671 ATOMIC64_DECL(sub_return);
15672+ATOMIC64_DECL(sub_return_unchecked);
15673 ATOMIC64_DECL(inc_return);
15674+ATOMIC64_DECL(inc_return_unchecked);
15675 ATOMIC64_DECL(dec_return);
15676+ATOMIC64_DECL(dec_return_unchecked);
15677 ATOMIC64_DECL(dec_if_positive);
15678 ATOMIC64_DECL(inc_not_zero);
15679 ATOMIC64_DECL(add_unless);
15680@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15681 }
15682
15683 /**
15684+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15685+ * @v: pointer to type atomic64_unchecked_t
15686+ * @o: expected value
15687+ * @n: new value
15688+ *
15689+ * Atomically sets @v to @n if it was equal to @o and returns
15690+ * the old value.
15691+ */
15692+
15693+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15694+{
15695+ return cmpxchg64(&v->counter, o, n);
15696+}
15697+
15698+/**
15699 * atomic64_xchg - xchg atomic64 variable
15700 * @v: pointer to type atomic64_t
15701 * @n: value to assign
15702@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15703 }
15704
15705 /**
15706+ * atomic64_set_unchecked - set atomic64 variable
15707+ * @v: pointer to type atomic64_unchecked_t
15708+ * @n: value to assign
15709+ *
15710+ * Atomically sets the value of @v to @n.
15711+ */
15712+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15713+{
15714+ unsigned high = (unsigned)(i >> 32);
15715+ unsigned low = (unsigned)i;
15716+ alternative_atomic64(set, /* no output */,
15717+ "S" (v), "b" (low), "c" (high)
15718+ : "eax", "edx", "memory");
15719+}
15720+
15721+/**
15722 * atomic64_read - read atomic64 variable
15723 * @v: pointer to type atomic64_t
15724 *
15725@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15726 }
15727
15728 /**
15729+ * atomic64_read_unchecked - read atomic64 variable
15730+ * @v: pointer to type atomic64_unchecked_t
15731+ *
15732+ * Atomically reads the value of @v and returns it.
15733+ */
15734+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15735+{
15736+ long long r;
15737+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15738+ return r;
15739+}
15740+
15741+/**
15742 * atomic64_add_return - add and return
15743 * @i: integer value to add
15744 * @v: pointer to type atomic64_t
15745@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15746 return i;
15747 }
15748
15749+/**
15750+ * atomic64_add_return_unchecked - add and return
15751+ * @i: integer value to add
15752+ * @v: pointer to type atomic64_unchecked_t
15753+ *
15754+ * Atomically adds @i to @v and returns @i + *@v
15755+ */
15756+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15757+{
15758+ alternative_atomic64(add_return_unchecked,
15759+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15760+ ASM_NO_INPUT_CLOBBER("memory"));
15761+ return i;
15762+}
15763+
15764 /*
15765 * Other variants with different arithmetic operators:
15766 */
15767@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15768 return a;
15769 }
15770
15771+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15772+{
15773+ long long a;
15774+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
15775+ "S" (v) : "memory", "ecx");
15776+ return a;
15777+}
15778+
15779 static inline long long atomic64_dec_return(atomic64_t *v)
15780 {
15781 long long a;
15782@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15783 }
15784
15785 /**
15786+ * atomic64_add_unchecked - add integer to atomic64 variable
15787+ * @i: integer value to add
15788+ * @v: pointer to type atomic64_unchecked_t
15789+ *
15790+ * Atomically adds @i to @v.
15791+ */
15792+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15793+{
15794+ __alternative_atomic64(add_unchecked, add_return_unchecked,
15795+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15796+ ASM_NO_INPUT_CLOBBER("memory"));
15797+ return i;
15798+}
15799+
15800+/**
15801 * atomic64_sub - subtract the atomic64 variable
15802 * @i: integer value to subtract
15803 * @v: pointer to type atomic64_t
15804diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15805index f8d273e..02f39f3 100644
15806--- a/arch/x86/include/asm/atomic64_64.h
15807+++ b/arch/x86/include/asm/atomic64_64.h
15808@@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
15809 }
15810
15811 /**
15812+ * atomic64_read_unchecked - read atomic64 variable
15813+ * @v: pointer of type atomic64_unchecked_t
15814+ *
15815+ * Atomically reads the value of @v.
15816+ * Doesn't imply a read memory barrier.
15817+ */
15818+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15819+{
15820+ return ACCESS_ONCE((v)->counter);
15821+}
15822+
15823+/**
15824 * atomic64_set - set atomic64 variable
15825 * @v: pointer to type atomic64_t
15826 * @i: required value
15827@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15828 }
15829
15830 /**
15831+ * atomic64_set_unchecked - set atomic64 variable
15832+ * @v: pointer to type atomic64_unchecked_t
15833+ * @i: required value
15834+ *
15835+ * Atomically sets the value of @v to @i.
15836+ */
15837+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15838+{
15839+ v->counter = i;
15840+}
15841+
15842+/**
15843 * atomic64_add - add integer to atomic64 variable
15844 * @i: integer value to add
15845 * @v: pointer to type atomic64_t
15846@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
15847 */
15848 static inline void atomic64_add(long i, atomic64_t *v)
15849 {
15850+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
15851+
15852+#ifdef CONFIG_PAX_REFCOUNT
15853+ "jno 0f\n"
15854+ LOCK_PREFIX "subq %1,%0\n"
15855+ "int $4\n0:\n"
15856+ _ASM_EXTABLE(0b, 0b)
15857+#endif
15858+
15859+ : "=m" (v->counter)
15860+ : "er" (i), "m" (v->counter));
15861+}
15862+
15863+/**
15864+ * atomic64_add_unchecked - add integer to atomic64 variable
15865+ * @i: integer value to add
15866+ * @v: pointer to type atomic64_unchecked_t
15867+ *
15868+ * Atomically adds @i to @v.
15869+ */
15870+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
15871+{
15872 asm volatile(LOCK_PREFIX "addq %1,%0"
15873 : "=m" (v->counter)
15874 : "er" (i), "m" (v->counter));
15875@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
15876 */
15877 static inline void atomic64_sub(long i, atomic64_t *v)
15878 {
15879- asm volatile(LOCK_PREFIX "subq %1,%0"
15880+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15881+
15882+#ifdef CONFIG_PAX_REFCOUNT
15883+ "jno 0f\n"
15884+ LOCK_PREFIX "addq %1,%0\n"
15885+ "int $4\n0:\n"
15886+ _ASM_EXTABLE(0b, 0b)
15887+#endif
15888+
15889+ : "=m" (v->counter)
15890+ : "er" (i), "m" (v->counter));
15891+}
15892+
15893+/**
15894+ * atomic64_sub_unchecked - subtract the atomic64 variable
15895+ * @i: integer value to subtract
15896+ * @v: pointer to type atomic64_unchecked_t
15897+ *
15898+ * Atomically subtracts @i from @v.
15899+ */
15900+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
15901+{
15902+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15903 : "=m" (v->counter)
15904 : "er" (i), "m" (v->counter));
15905 }
15906@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
15907 */
15908 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15909 {
15910- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
15911+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
15912 }
15913
15914 /**
15915@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15916 */
15917 static inline void atomic64_inc(atomic64_t *v)
15918 {
15919+ asm volatile(LOCK_PREFIX "incq %0\n"
15920+
15921+#ifdef CONFIG_PAX_REFCOUNT
15922+ "jno 0f\n"
15923+ LOCK_PREFIX "decq %0\n"
15924+ "int $4\n0:\n"
15925+ _ASM_EXTABLE(0b, 0b)
15926+#endif
15927+
15928+ : "=m" (v->counter)
15929+ : "m" (v->counter));
15930+}
15931+
15932+/**
15933+ * atomic64_inc_unchecked - increment atomic64 variable
15934+ * @v: pointer to type atomic64_unchecked_t
15935+ *
15936+ * Atomically increments @v by 1.
15937+ */
15938+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15939+{
15940 asm volatile(LOCK_PREFIX "incq %0"
15941 : "=m" (v->counter)
15942 : "m" (v->counter));
15943@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
15944 */
15945 static inline void atomic64_dec(atomic64_t *v)
15946 {
15947- asm volatile(LOCK_PREFIX "decq %0"
15948+ asm volatile(LOCK_PREFIX "decq %0\n"
15949+
15950+#ifdef CONFIG_PAX_REFCOUNT
15951+ "jno 0f\n"
15952+ LOCK_PREFIX "incq %0\n"
15953+ "int $4\n0:\n"
15954+ _ASM_EXTABLE(0b, 0b)
15955+#endif
15956+
15957+ : "=m" (v->counter)
15958+ : "m" (v->counter));
15959+}
15960+
15961+/**
15962+ * atomic64_dec_unchecked - decrement atomic64 variable
15963+ * @v: pointer to type atomic64_unchecked_t
15964+ *
15965+ * Atomically decrements @v by 1.
15966+ */
15967+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15968+{
15969+ asm volatile(LOCK_PREFIX "decq %0\n"
15970 : "=m" (v->counter)
15971 : "m" (v->counter));
15972 }
15973@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
15974 */
15975 static inline int atomic64_dec_and_test(atomic64_t *v)
15976 {
15977- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
15978+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
15979 }
15980
15981 /**
15982@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
15983 */
15984 static inline int atomic64_inc_and_test(atomic64_t *v)
15985 {
15986- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
15987+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
15988 }
15989
15990 /**
15991@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
15992 */
15993 static inline int atomic64_add_negative(long i, atomic64_t *v)
15994 {
15995- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
15996+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
15997 }
15998
15999 /**
16000@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
16001 */
16002 static inline long atomic64_add_return(long i, atomic64_t *v)
16003 {
16004+ return i + xadd_check_overflow(&v->counter, i);
16005+}
16006+
16007+/**
16008+ * atomic64_add_return_unchecked - add and return
16009+ * @i: integer value to add
16010+ * @v: pointer to type atomic64_unchecked_t
16011+ *
16012+ * Atomically adds @i to @v and returns @i + @v
16013+ */
16014+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
16015+{
16016 return i + xadd(&v->counter, i);
16017 }
16018
16019@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
16020 }
16021
16022 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
16023+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
16024+{
16025+ return atomic64_add_return_unchecked(1, v);
16026+}
16027 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
16028
16029 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16030@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16031 return cmpxchg(&v->counter, old, new);
16032 }
16033
16034+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
16035+{
16036+ return cmpxchg(&v->counter, old, new);
16037+}
16038+
16039 static inline long atomic64_xchg(atomic64_t *v, long new)
16040 {
16041 return xchg(&v->counter, new);
16042@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
16043 */
16044 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
16045 {
16046- long c, old;
16047+ long c, old, new;
16048 c = atomic64_read(v);
16049 for (;;) {
16050- if (unlikely(c == (u)))
16051+ if (unlikely(c == u))
16052 break;
16053- old = atomic64_cmpxchg((v), c, c + (a));
16054+
16055+ asm volatile("add %2,%0\n"
16056+
16057+#ifdef CONFIG_PAX_REFCOUNT
16058+ "jno 0f\n"
16059+ "sub %2,%0\n"
16060+ "int $4\n0:\n"
16061+ _ASM_EXTABLE(0b, 0b)
16062+#endif
16063+
16064+ : "=r" (new)
16065+ : "0" (c), "ir" (a));
16066+
16067+ old = atomic64_cmpxchg(v, c, new);
16068 if (likely(old == c))
16069 break;
16070 c = old;
16071 }
16072- return c != (u);
16073+ return c != u;
16074 }
16075
16076 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
16077diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
16078index 2ab1eb3..1e8cc5d 100644
16079--- a/arch/x86/include/asm/barrier.h
16080+++ b/arch/x86/include/asm/barrier.h
16081@@ -57,7 +57,7 @@
16082 do { \
16083 compiletime_assert_atomic_type(*p); \
16084 smp_mb(); \
16085- ACCESS_ONCE(*p) = (v); \
16086+ ACCESS_ONCE_RW(*p) = (v); \
16087 } while (0)
16088
16089 #define smp_load_acquire(p) \
16090@@ -74,7 +74,7 @@ do { \
16091 do { \
16092 compiletime_assert_atomic_type(*p); \
16093 barrier(); \
16094- ACCESS_ONCE(*p) = (v); \
16095+ ACCESS_ONCE_RW(*p) = (v); \
16096 } while (0)
16097
16098 #define smp_load_acquire(p) \
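In this patch set ACCESS_ONCE is const-qualified (read-only) by the constification work, so store sites such as smp_store_release above switch to the writable ACCESS_ONCE_RW. Both reduce to the same volatile-lvalue idiom, sketched here with illustrative names:

/* Reads go through a const volatile lvalue, stores through a plain
 * volatile one; either way the compiler must emit exactly one untorn
 * access and may not cache or duplicate it. */
#define ACCESS_ONCE_SKETCH(x)    (*(const volatile typeof(x) *)&(x))
#define ACCESS_ONCE_RW_SKETCH(x) (*(volatile typeof(x) *)&(x))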
16099diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16100index cfe3b95..d01b118 100644
16101--- a/arch/x86/include/asm/bitops.h
16102+++ b/arch/x86/include/asm/bitops.h
16103@@ -50,7 +50,7 @@
16104 * a mask operation on a byte.
16105 */
16106 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16107-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16108+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16109 #define CONST_MASK(nr) (1 << ((nr) & 7))
16110
16111 /**
16112@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16113 */
16114 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16115 {
16116- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16117+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16118 }
16119
16120 /**
16121@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16122 */
16123 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16124 {
16125- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16126+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16127 }
16128
16129 /**
16130@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16131 */
16132 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16133 {
16134- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16135+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16136 }
16137
16138 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16139@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16140 *
16141 * Undefined if no bit exists, so code should check against 0 first.
16142 */
16143-static inline unsigned long __ffs(unsigned long word)
16144+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16145 {
16146 asm("rep; bsf %1,%0"
16147 : "=r" (word)
16148@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16149 *
16150 * Undefined if no zero exists, so code should check against ~0UL first.
16151 */
16152-static inline unsigned long ffz(unsigned long word)
16153+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16154 {
16155 asm("rep; bsf %1,%0"
16156 : "=r" (word)
16157@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16158 *
16159 * Undefined if no set bit exists, so code should check against 0 first.
16160 */
16161-static inline unsigned long __fls(unsigned long word)
16162+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16163 {
16164 asm("bsr %1,%0"
16165 : "=r" (word)
16166@@ -434,7 +434,7 @@ static inline int ffs(int x)
16167 * set bit if value is nonzero. The last (most significant) bit is
16168 * at position 32.
16169 */
16170-static inline int fls(int x)
16171+static inline int __intentional_overflow(-1) fls(int x)
16172 {
16173 int r;
16174
16175@@ -476,7 +476,7 @@ static inline int fls(int x)
16176 * at position 64.
16177 */
16178 #ifdef CONFIG_X86_64
16179-static __always_inline int fls64(__u64 x)
16180+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16181 {
16182 int bitpos = -1;
16183 /*
16184diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16185index 4fa687a..60f2d39 100644
16186--- a/arch/x86/include/asm/boot.h
16187+++ b/arch/x86/include/asm/boot.h
16188@@ -6,10 +6,15 @@
16189 #include <uapi/asm/boot.h>
16190
16191 /* Physical address where kernel should be loaded. */
16192-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16193+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16194 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16195 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16196
16197+#ifndef __ASSEMBLY__
16198+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16199+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16200+#endif
16201+
16202 /* Minimum kernel alignment, as a power of two */
16203 #ifdef CONFIG_X86_64
16204 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
16205diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16206index 48f99f1..d78ebf9 100644
16207--- a/arch/x86/include/asm/cache.h
16208+++ b/arch/x86/include/asm/cache.h
16209@@ -5,12 +5,13 @@
16210
16211 /* L1 cache line size */
16212 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16213-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16214+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16215
16216 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16217+#define __read_only __attribute__((__section__(".data..read_only")))
16218
16219 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16220-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16221+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16222
16223 #ifdef CONFIG_X86_VSMP
16224 #ifdef CONFIG_SMP
16225diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16226index 76659b6..72b8439 100644
16227--- a/arch/x86/include/asm/calling.h
16228+++ b/arch/x86/include/asm/calling.h
16229@@ -82,107 +82,117 @@ For 32-bit we have the following conventions - kernel is built with
16230 #define RSP 152
16231 #define SS 160
16232
16233-#define ARGOFFSET R11
16234-#define SWFRAME ORIG_RAX
16235+#define ARGOFFSET R15
16236
16237 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
16238- subq $9*8+\addskip, %rsp
16239- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16240- movq_cfi rdi, 8*8
16241- movq_cfi rsi, 7*8
16242- movq_cfi rdx, 6*8
16243+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16244+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16245+ movq_cfi rdi, RDI
16246+ movq_cfi rsi, RSI
16247+ movq_cfi rdx, RDX
16248
16249 .if \save_rcx
16250- movq_cfi rcx, 5*8
16251+ movq_cfi rcx, RCX
16252 .endif
16253
16254 .if \rax_enosys
16255- movq $-ENOSYS, 4*8(%rsp)
16256+ movq $-ENOSYS, RAX(%rsp)
16257 .else
16258- movq_cfi rax, 4*8
16259+ movq_cfi rax, RAX
16260 .endif
16261
16262 .if \save_r891011
16263- movq_cfi r8, 3*8
16264- movq_cfi r9, 2*8
16265- movq_cfi r10, 1*8
16266- movq_cfi r11, 0*8
16267+ movq_cfi r8, R8
16268+ movq_cfi r9, R9
16269+ movq_cfi r10, R10
16270+ movq_cfi r11, R11
16271 .endif
16272
16273+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16274+ movq_cfi r12, R12
16275+#endif
16276+
16277 .endm
16278
16279-#define ARG_SKIP (9*8)
16280+#define ARG_SKIP ORIG_RAX
16281
16282 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16283 rstor_r8910=1, rstor_rdx=1
16284+
16285+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16286+ movq_cfi_restore R12, r12
16287+#endif
16288+
16289 .if \rstor_r11
16290- movq_cfi_restore 0*8, r11
16291+ movq_cfi_restore R11, r11
16292 .endif
16293
16294 .if \rstor_r8910
16295- movq_cfi_restore 1*8, r10
16296- movq_cfi_restore 2*8, r9
16297- movq_cfi_restore 3*8, r8
16298+ movq_cfi_restore R10, r10
16299+ movq_cfi_restore R9, r9
16300+ movq_cfi_restore R8, r8
16301 .endif
16302
16303 .if \rstor_rax
16304- movq_cfi_restore 4*8, rax
16305+ movq_cfi_restore RAX, rax
16306 .endif
16307
16308 .if \rstor_rcx
16309- movq_cfi_restore 5*8, rcx
16310+ movq_cfi_restore RCX, rcx
16311 .endif
16312
16313 .if \rstor_rdx
16314- movq_cfi_restore 6*8, rdx
16315+ movq_cfi_restore RDX, rdx
16316 .endif
16317
16318- movq_cfi_restore 7*8, rsi
16319- movq_cfi_restore 8*8, rdi
16320+ movq_cfi_restore RSI, rsi
16321+ movq_cfi_restore RDI, rdi
16322
16323- .if ARG_SKIP+\addskip > 0
16324- addq $ARG_SKIP+\addskip, %rsp
16325- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16326+ .if ORIG_RAX+\addskip > 0
16327+ addq $ORIG_RAX+\addskip, %rsp
16328+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16329 .endif
16330 .endm
16331
16332- .macro LOAD_ARGS offset, skiprax=0
16333- movq \offset(%rsp), %r11
16334- movq \offset+8(%rsp), %r10
16335- movq \offset+16(%rsp), %r9
16336- movq \offset+24(%rsp), %r8
16337- movq \offset+40(%rsp), %rcx
16338- movq \offset+48(%rsp), %rdx
16339- movq \offset+56(%rsp), %rsi
16340- movq \offset+64(%rsp), %rdi
16341+ .macro LOAD_ARGS skiprax=0
16342+ movq R11(%rsp), %r11
16343+ movq R10(%rsp), %r10
16344+ movq R9(%rsp), %r9
16345+ movq R8(%rsp), %r8
16346+ movq RCX(%rsp), %rcx
16347+ movq RDX(%rsp), %rdx
16348+ movq RSI(%rsp), %rsi
16349+ movq RDI(%rsp), %rdi
16350 .if \skiprax
16351 .else
16352- movq \offset+72(%rsp), %rax
16353+ movq ORIG_RAX(%rsp), %rax
16354 .endif
16355 .endm
16356
16357-#define REST_SKIP (6*8)
16358-
16359 .macro SAVE_REST
16360- subq $REST_SKIP, %rsp
16361- CFI_ADJUST_CFA_OFFSET REST_SKIP
16362- movq_cfi rbx, 5*8
16363- movq_cfi rbp, 4*8
16364- movq_cfi r12, 3*8
16365- movq_cfi r13, 2*8
16366- movq_cfi r14, 1*8
16367- movq_cfi r15, 0*8
16368+ movq_cfi rbx, RBX
16369+ movq_cfi rbp, RBP
16370+
16371+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16372+ movq_cfi r12, R12
16373+#endif
16374+
16375+ movq_cfi r13, R13
16376+ movq_cfi r14, R14
16377+ movq_cfi r15, R15
16378 .endm
16379
16380 .macro RESTORE_REST
16381- movq_cfi_restore 0*8, r15
16382- movq_cfi_restore 1*8, r14
16383- movq_cfi_restore 2*8, r13
16384- movq_cfi_restore 3*8, r12
16385- movq_cfi_restore 4*8, rbp
16386- movq_cfi_restore 5*8, rbx
16387- addq $REST_SKIP, %rsp
16388- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
16389+ movq_cfi_restore R15, r15
16390+ movq_cfi_restore R14, r14
16391+ movq_cfi_restore R13, r13
16392+
16393+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16394+ movq_cfi_restore R12, r12
16395+#endif
16396+
16397+ movq_cfi_restore RBP, rbp
16398+ movq_cfi_restore RBX, rbx
16399 .endm
16400
16401 .macro SAVE_ALL
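The calling.h rewrite replaces the bare N*8 slot numbers with the pt_regs offsets defined at the top of the file (RDI, RSI, ..., ORIG_RAX) and moves ARGOFFSET from R11 to R15, so SAVE_ARGS reserves the whole frame up front and SAVE_REST/RESTORE_REST no longer move %rsp. Symbolic offsets keep the asm in lockstep with the structure layout; the same idea in C via offsetof() (the struct below is illustrative, not the kernel's pt_regs):

#include <stdio.h>
#include <stddef.h>

struct regs_sketch {            /* pt_regs-like layout, for illustration */
    unsigned long r15, r14, r13, r12, rbp, rbx;
    unsigned long r11, r10, r9, r8, rax, rcx, rdx, rsi, rdi;
    unsigned long orig_rax, rip, cs, eflags, rsp, ss;
};

int main(void)
{
    /* A layout change updates these constants automatically, whereas a
     * hard-coded "movq_cfi rdi, 8*8" would silently skew every slot. */
    printf("rdi at %zu, orig_rax at %zu\n",
           offsetof(struct regs_sketch, rdi),
           offsetof(struct regs_sketch, orig_rax));
    return 0;
}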
16402diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
16403index f50de69..2b0a458 100644
16404--- a/arch/x86/include/asm/checksum_32.h
16405+++ b/arch/x86/include/asm/checksum_32.h
16406@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
16407 int len, __wsum sum,
16408 int *src_err_ptr, int *dst_err_ptr);
16409
16410+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
16411+ int len, __wsum sum,
16412+ int *src_err_ptr, int *dst_err_ptr);
16413+
16414+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
16415+ int len, __wsum sum,
16416+ int *src_err_ptr, int *dst_err_ptr);
16417+
16418 /*
16419 * Note: when you get a NULL pointer exception here this means someone
16420 * passed in an incorrect kernel address to one of these functions.
16421@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
16422
16423 might_sleep();
16424 stac();
16425- ret = csum_partial_copy_generic((__force void *)src, dst,
16426+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16427 len, sum, err_ptr, NULL);
16428 clac();
16429
16430@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16431 might_sleep();
16432 if (access_ok(VERIFY_WRITE, dst, len)) {
16433 stac();
16434- ret = csum_partial_copy_generic(src, (__force void *)dst,
16435+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16436 len, sum, NULL, err_ptr);
16437 clac();
16438 return ret;
16439diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16440index 99c105d7..2f667ac 100644
16441--- a/arch/x86/include/asm/cmpxchg.h
16442+++ b/arch/x86/include/asm/cmpxchg.h
16443@@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
16444 __compiletime_error("Bad argument size for cmpxchg");
16445 extern void __xadd_wrong_size(void)
16446 __compiletime_error("Bad argument size for xadd");
16447+extern void __xadd_check_overflow_wrong_size(void)
16448+ __compiletime_error("Bad argument size for xadd_check_overflow");
16449 extern void __add_wrong_size(void)
16450 __compiletime_error("Bad argument size for add");
16451+extern void __add_check_overflow_wrong_size(void)
16452+ __compiletime_error("Bad argument size for add_check_overflow");
16453
16454 /*
16455 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
16456@@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
16457 __ret; \
16458 })
16459
16460+#ifdef CONFIG_PAX_REFCOUNT
16461+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16462+ ({ \
16463+ __typeof__ (*(ptr)) __ret = (arg); \
16464+ switch (sizeof(*(ptr))) { \
16465+ case __X86_CASE_L: \
16466+ asm volatile (lock #op "l %0, %1\n" \
16467+ "jno 0f\n" \
16468+ "mov %0,%1\n" \
16469+ "int $4\n0:\n" \
16470+ _ASM_EXTABLE(0b, 0b) \
16471+ : "+r" (__ret), "+m" (*(ptr)) \
16472+ : : "memory", "cc"); \
16473+ break; \
16474+ case __X86_CASE_Q: \
16475+ asm volatile (lock #op "q %q0, %1\n" \
16476+ "jno 0f\n" \
16477+ "mov %0,%1\n" \
16478+ "int $4\n0:\n" \
16479+ _ASM_EXTABLE(0b, 0b) \
16480+ : "+r" (__ret), "+m" (*(ptr)) \
16481+ : : "memory", "cc"); \
16482+ break; \
16483+ default: \
16484+ __ ## op ## _check_overflow_wrong_size(); \
16485+ } \
16486+ __ret; \
16487+ })
16488+#else
16489+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
16490+#endif
16491+
16492 /*
16493 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16494 * Since this is generally used to protect other memory information, we
16495@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
16496 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16497 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16498
16499+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16500+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16501+
16502 #define __add(ptr, inc, lock) \
16503 ({ \
16504 __typeof__ (*(ptr)) __ret = (inc); \
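xadd_check_overflow wraps plain xadd, atomically adding and handing back the old value, with the same jno/undo/int $4 guard used in atomic.h; atomic_add_return then becomes i + xadd_check_overflow(&v->counter, i). A sketch of bare xadd semantics (user-space GNU C, x86 only):

#include <stdio.h>

/* Atomically adds inc to *ptr and returns the value *ptr held before
 * the add -- the exchange half is what xadd offers over a plain add. */
static int xadd_sketch(int *ptr, int inc)
{
    asm volatile("lock xaddl %0,%1"
                 : "+r" (inc), "+m" (*ptr)
                 : : "memory", "cc");
    return inc;   /* now holds the old *ptr */
}

int main(void)
{
    int counter = 40;
    int old = xadd_sketch(&counter, 2);

    printf("old=%d new=%d\n", old, counter);  /* prints: old=40 new=42 */
    return 0;
}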
16505diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16506index 59c6c40..5e0b22c 100644
16507--- a/arch/x86/include/asm/compat.h
16508+++ b/arch/x86/include/asm/compat.h
16509@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16510 typedef u32 compat_uint_t;
16511 typedef u32 compat_ulong_t;
16512 typedef u64 __attribute__((aligned(4))) compat_u64;
16513-typedef u32 compat_uptr_t;
16514+typedef u32 __user compat_uptr_t;
16515
16516 struct compat_timespec {
16517 compat_time_t tv_sec;
16518diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16519index aede2c3..40d7a8f 100644
16520--- a/arch/x86/include/asm/cpufeature.h
16521+++ b/arch/x86/include/asm/cpufeature.h
16522@@ -212,7 +212,7 @@
16523 #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
16524 #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
16525 #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
16526-
16527+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
16528
16529 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16530 #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16531@@ -220,7 +220,7 @@
16532 #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
16533 #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
16534 #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
16535-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
16536+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
16537 #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
16538 #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16539 #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
16540@@ -388,6 +388,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
16541 #define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
16542 #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
16543 #define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
16544+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16545
16546 #if __GNUC__ >= 4
16547 extern void warn_pre_alternatives(void);
16548@@ -439,7 +440,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16549
16550 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16551 t_warn:
16552- warn_pre_alternatives();
16553+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16554+ warn_pre_alternatives();
16555 return false;
16556 #endif
16557
16558@@ -459,7 +461,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16559 ".section .discard,\"aw\",@progbits\n"
16560 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16561 ".previous\n"
16562- ".section .altinstr_replacement,\"ax\"\n"
16563+ ".section .altinstr_replacement,\"a\"\n"
16564 "3: movb $1,%0\n"
16565 "4:\n"
16566 ".previous\n"
16567@@ -496,7 +498,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16568 " .byte 2b - 1b\n" /* src len */
16569 " .byte 4f - 3f\n" /* repl len */
16570 ".previous\n"
16571- ".section .altinstr_replacement,\"ax\"\n"
16572+ ".section .altinstr_replacement,\"a\"\n"
16573 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
16574 "4:\n"
16575 ".previous\n"
16576@@ -529,7 +531,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16577 ".section .discard,\"aw\",@progbits\n"
16578 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16579 ".previous\n"
16580- ".section .altinstr_replacement,\"ax\"\n"
16581+ ".section .altinstr_replacement,\"a\"\n"
16582 "3: movb $0,%0\n"
16583 "4:\n"
16584 ".previous\n"
16585@@ -543,7 +545,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16586 ".section .discard,\"aw\",@progbits\n"
16587 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16588 ".previous\n"
16589- ".section .altinstr_replacement,\"ax\"\n"
16590+ ".section .altinstr_replacement,\"a\"\n"
16591 "5: movb $1,%0\n"
16592 "6:\n"
16593 ".previous\n"
16594diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16595index a94b82e..59ecefa 100644
16596--- a/arch/x86/include/asm/desc.h
16597+++ b/arch/x86/include/asm/desc.h
16598@@ -4,6 +4,7 @@
16599 #include <asm/desc_defs.h>
16600 #include <asm/ldt.h>
16601 #include <asm/mmu.h>
16602+#include <asm/pgtable.h>
16603
16604 #include <linux/smp.h>
16605 #include <linux/percpu.h>
16606@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16607
16608 desc->type = (info->read_exec_only ^ 1) << 1;
16609 desc->type |= info->contents << 2;
16610+ desc->type |= info->seg_not_present ^ 1;
16611
16612 desc->s = 1;
16613 desc->dpl = 0x3;
16614@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16615 }
16616
16617 extern struct desc_ptr idt_descr;
16618-extern gate_desc idt_table[];
16619-extern struct desc_ptr debug_idt_descr;
16620-extern gate_desc debug_idt_table[];
16621-
16622-struct gdt_page {
16623- struct desc_struct gdt[GDT_ENTRIES];
16624-} __attribute__((aligned(PAGE_SIZE)));
16625-
16626-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16627+extern gate_desc idt_table[IDT_ENTRIES];
16628+extern const struct desc_ptr debug_idt_descr;
16629+extern gate_desc debug_idt_table[IDT_ENTRIES];
16630
16631+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16632 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16633 {
16634- return per_cpu(gdt_page, cpu).gdt;
16635+ return cpu_gdt_table[cpu];
16636 }
16637
16638 #ifdef CONFIG_X86_64
16639@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16640 unsigned long base, unsigned dpl, unsigned flags,
16641 unsigned short seg)
16642 {
16643- gate->a = (seg << 16) | (base & 0xffff);
16644- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16645+ gate->gate.offset_low = base;
16646+ gate->gate.seg = seg;
16647+ gate->gate.reserved = 0;
16648+ gate->gate.type = type;
16649+ gate->gate.s = 0;
16650+ gate->gate.dpl = dpl;
16651+ gate->gate.p = 1;
16652+ gate->gate.offset_high = base >> 16;
16653 }
16654
16655 #endif
16656@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16657
16658 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16659 {
16660+ pax_open_kernel();
16661 memcpy(&idt[entry], gate, sizeof(*gate));
16662+ pax_close_kernel();
16663 }
16664
16665 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16666 {
16667+ pax_open_kernel();
16668 memcpy(&ldt[entry], desc, 8);
16669+ pax_close_kernel();
16670 }
16671
16672 static inline void
16673@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16674 default: size = sizeof(*gdt); break;
16675 }
16676
16677+ pax_open_kernel();
16678 memcpy(&gdt[entry], desc, size);
16679+ pax_close_kernel();
16680 }
16681
16682 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16683@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16684
16685 static inline void native_load_tr_desc(void)
16686 {
16687+ pax_open_kernel();
16688 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16689+ pax_close_kernel();
16690 }
16691
16692 static inline void native_load_gdt(const struct desc_ptr *dtr)
16693@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16694 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16695 unsigned int i;
16696
16697+ pax_open_kernel();
16698 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16699 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16700+ pax_close_kernel();
16701 }
16702
16703 /* This intentionally ignores lm, since 32-bit apps don't have that field. */
16704@@ -295,7 +308,7 @@ static inline void load_LDT(mm_context_t *pc)
16705 preempt_enable();
16706 }
16707
16708-static inline unsigned long get_desc_base(const struct desc_struct *desc)
16709+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16710 {
16711 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16712 }
16713@@ -319,7 +332,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16714 }
16715
16716 #ifdef CONFIG_X86_64
16717-static inline void set_nmi_gate(int gate, void *addr)
16718+static inline void set_nmi_gate(int gate, const void *addr)
16719 {
16720 gate_desc s;
16721
16722@@ -329,14 +342,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16723 #endif
16724
16725 #ifdef CONFIG_TRACING
16726-extern struct desc_ptr trace_idt_descr;
16727-extern gate_desc trace_idt_table[];
16728+extern const struct desc_ptr trace_idt_descr;
16729+extern gate_desc trace_idt_table[IDT_ENTRIES];
16730 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16731 {
16732 write_idt_entry(trace_idt_table, entry, gate);
16733 }
16734
16735-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16736+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16737 unsigned dpl, unsigned ist, unsigned seg)
16738 {
16739 gate_desc s;
16740@@ -356,7 +369,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16741 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16742 #endif
16743
16744-static inline void _set_gate(int gate, unsigned type, void *addr,
16745+static inline void _set_gate(int gate, unsigned type, const void *addr,
16746 unsigned dpl, unsigned ist, unsigned seg)
16747 {
16748 gate_desc s;
16749@@ -379,9 +392,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16750 #define set_intr_gate(n, addr) \
16751 do { \
16752 BUG_ON((unsigned)n > 0xFF); \
16753- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16754+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16755 __KERNEL_CS); \
16756- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16757+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16758 0, 0, __KERNEL_CS); \
16759 } while (0)
16760
16761@@ -409,19 +422,19 @@ static inline void alloc_system_vector(int vector)
16762 /*
16763 * This routine sets up an interrupt gate at directory privilege level 3.
16764 */
16765-static inline void set_system_intr_gate(unsigned int n, void *addr)
16766+static inline void set_system_intr_gate(unsigned int n, const void *addr)
16767 {
16768 BUG_ON((unsigned)n > 0xFF);
16769 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16770 }
16771
16772-static inline void set_system_trap_gate(unsigned int n, void *addr)
16773+static inline void set_system_trap_gate(unsigned int n, const void *addr)
16774 {
16775 BUG_ON((unsigned)n > 0xFF);
16776 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16777 }
16778
16779-static inline void set_trap_gate(unsigned int n, void *addr)
16780+static inline void set_trap_gate(unsigned int n, const void *addr)
16781 {
16782 BUG_ON((unsigned)n > 0xFF);
16783 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16784@@ -430,16 +443,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16785 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16786 {
16787 BUG_ON((unsigned)n > 0xFF);
16788- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16789+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16790 }
16791
16792-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16793+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16794 {
16795 BUG_ON((unsigned)n > 0xFF);
16796 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16797 }
16798
16799-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16800+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16801 {
16802 BUG_ON((unsigned)n > 0xFF);
16803 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16804@@ -511,4 +524,17 @@ static inline void load_current_idt(void)
16805 else
16806 load_idt((const struct desc_ptr *)&idt_descr);
16807 }
16808+
16809+#ifdef CONFIG_X86_32
16810+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16811+{
16812+ struct desc_struct d;
16813+
16814+ if (likely(limit))
16815+ limit = (limit - 1UL) >> PAGE_SHIFT;
16816+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
16817+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16818+}
16819+#endif
16820+
16821 #endif /* _ASM_X86_DESC_H */
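
The desc.h changes above follow one pattern: the GDT moves from a writable per-CPU gdt_page into a fixed, page-aligned cpu_gdt_table array, and every descriptor write (IDT, LDT, GDT, TR load, TLS) is bracketed by pax_open_kernel()/pax_close_kernel(), so the tables can stay read-only except inside an explicit write window; pack_gate is also rewritten against the named gate bitfield view that the desc_defs.h hunk below introduces. A minimal userspace sketch of the write-window discipline, with mprotect() standing in for the CR0.WP toggle PaX uses in the kernel:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>

static uint64_t *table;			/* plays the role of the GDT */

static void open_window(void)  { mprotect(table, 4096, PROT_READ | PROT_WRITE); }
static void close_window(void) { mprotect(table, 4096, PROT_READ); }

static void write_entry(int idx, uint64_t desc)
{
	open_window();			/* pax_open_kernel() */
	table[idx] = desc;
	close_window();			/* pax_close_kernel() */
}

int main(void)
{
	table = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	memset(table, 0, 4096);
	close_window();			/* the table is read-only from here on */
	write_entry(3, 0x00cf9b000000ffffULL);	/* hypothetical code segment */
	printf("entry 3 = %#llx\n", (unsigned long long)table[3]);
	return 0;
}
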
16822diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
16823index 278441f..b95a174 100644
16824--- a/arch/x86/include/asm/desc_defs.h
16825+++ b/arch/x86/include/asm/desc_defs.h
16826@@ -31,6 +31,12 @@ struct desc_struct {
16827 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
16828 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
16829 };
16830+ struct {
16831+ u16 offset_low;
16832+ u16 seg;
16833+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
16834+ unsigned offset_high: 16;
16835+ } gate;
16836 };
16837 } __attribute__((packed));
16838
16839diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
16840index ced283a..ffe04cc 100644
16841--- a/arch/x86/include/asm/div64.h
16842+++ b/arch/x86/include/asm/div64.h
16843@@ -39,7 +39,7 @@
16844 __mod; \
16845 })
16846
16847-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16848+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16849 {
16850 union {
16851 u64 v64;
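
__intentional_overflow(-1) is an annotation consumed by the size_overflow GCC plugin that ships with this patch: it exempts functions whose arithmetic is allowed to wrap — here the div_u64_rem() fast path — from the plugin's overflow instrumentation. A sketch of how such a plugin-only attribute stays harmless for compilers that don't load the plugin (the SIZE_OVERFLOW_PLUGIN guard and the attribute spelling are assumptions; the real definition is in the patch's compiler.h changes):

#include <stdio.h>

#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)	/* no plugin loaded: expands to nothing */
#endif

static unsigned int __intentional_overflow(-1) wrap_add(unsigned int a, unsigned int b)
{
	return a + b;			/* wraparound is deliberate here */
}

int main(void)
{
	printf("%u\n", wrap_add(0xffffffffu, 2u));	/* prints 1 */
	return 0;
}
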
16852diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
16853index ca3347a..1a5082a 100644
16854--- a/arch/x86/include/asm/elf.h
16855+++ b/arch/x86/include/asm/elf.h
16856@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
16857
16858 #include <asm/vdso.h>
16859
16860-#ifdef CONFIG_X86_64
16861-extern unsigned int vdso64_enabled;
16862-#endif
16863 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
16864 extern unsigned int vdso32_enabled;
16865 #endif
16866@@ -249,7 +246,25 @@ extern int force_personality32;
16867 the loader. We need to make sure that it is out of the way of the program
16868 that it will "exec", and that there is sufficient room for the brk. */
16869
16870+#ifdef CONFIG_PAX_SEGMEXEC
16871+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
16872+#else
16873 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
16874+#endif
16875+
16876+#ifdef CONFIG_PAX_ASLR
16877+#ifdef CONFIG_X86_32
16878+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
16879+
16880+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16881+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16882+#else
16883+#define PAX_ELF_ET_DYN_BASE 0x400000UL
16884+
16885+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16886+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16887+#endif
16888+#endif
16889
16890 /* This yields a mask that user programs can use to figure out what
16891 instruction set this CPU supports. This could be done in user space,
16892@@ -298,17 +313,13 @@ do { \
16893
16894 #define ARCH_DLINFO \
16895 do { \
16896- if (vdso64_enabled) \
16897- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16898- (unsigned long __force)current->mm->context.vdso); \
16899+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16900 } while (0)
16901
16902 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
16903 #define ARCH_DLINFO_X32 \
16904 do { \
16905- if (vdso64_enabled) \
16906- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16907- (unsigned long __force)current->mm->context.vdso); \
16908+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16909 } while (0)
16910
16911 #define AT_SYSINFO 32
16912@@ -323,10 +334,10 @@ else \
16913
16914 #endif /* !CONFIG_X86_32 */
16915
16916-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
16917+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
16918
16919 #define VDSO_ENTRY \
16920- ((unsigned long)current->mm->context.vdso + \
16921+ (current->mm->context.vdso + \
16922 selected_vdso32->sym___kernel_vsyscall)
16923
16924 struct linux_binprm;
16925@@ -338,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
16926 int uses_interp);
16927 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
16928
16929-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
16930-#define arch_randomize_brk arch_randomize_brk
16931-
16932 /*
16933 * True on X86_32 or when emulating IA32 on X86_64
16934 */
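
The PAX_DELTA_MMAP_LEN/PAX_DELTA_STACK_LEN macros above give the number of random bits PaX applies to the mmap and stack bases: 15 or 16 bits on 32-bit (SEGMEXEC halves the address space), and TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3 bits for a native 64-bit task. A quick worked check, assuming the usual 4-level values TASK_SIZE_MAX_SHIFT = 47 and PAGE_SHIFT = 12:

#include <stdio.h>

int main(void)
{
	int bits = 47 - 12 - 3;			/* PAX_DELTA_MMAP_LEN, native amd64 */
	unsigned long long span = (1ULL << bits) << 12;	/* slide range in bytes */

	printf("%d random bits -> base varies over %llu TiB\n", bits, span >> 40);
	return 0;
}
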
16935diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
16936index 77a99ac..39ff7f5 100644
16937--- a/arch/x86/include/asm/emergency-restart.h
16938+++ b/arch/x86/include/asm/emergency-restart.h
16939@@ -1,6 +1,6 @@
16940 #ifndef _ASM_X86_EMERGENCY_RESTART_H
16941 #define _ASM_X86_EMERGENCY_RESTART_H
16942
16943-extern void machine_emergency_restart(void);
16944+extern void machine_emergency_restart(void) __noreturn;
16945
16946 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
16947diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
16948index 1c7eefe..d0e4702 100644
16949--- a/arch/x86/include/asm/floppy.h
16950+++ b/arch/x86/include/asm/floppy.h
16951@@ -229,18 +229,18 @@ static struct fd_routine_l {
16952 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
16953 } fd_routine[] = {
16954 {
16955- request_dma,
16956- free_dma,
16957- get_dma_residue,
16958- dma_mem_alloc,
16959- hard_dma_setup
16960+ ._request_dma = request_dma,
16961+ ._free_dma = free_dma,
16962+ ._get_dma_residue = get_dma_residue,
16963+ ._dma_mem_alloc = dma_mem_alloc,
16964+ ._dma_setup = hard_dma_setup
16965 },
16966 {
16967- vdma_request_dma,
16968- vdma_nop,
16969- vdma_get_dma_residue,
16970- vdma_mem_alloc,
16971- vdma_dma_setup
16972+ ._request_dma = vdma_request_dma,
16973+ ._free_dma = vdma_nop,
16974+ ._get_dma_residue = vdma_get_dma_residue,
16975+ ._dma_mem_alloc = vdma_mem_alloc,
16976+ ._dma_setup = vdma_dma_setup
16977 }
16978 };
16979
16980diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
16981index f895358..800c60d 100644
16982--- a/arch/x86/include/asm/fpu-internal.h
16983+++ b/arch/x86/include/asm/fpu-internal.h
16984@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16985 #define user_insn(insn, output, input...) \
16986 ({ \
16987 int err; \
16988+ pax_open_userland(); \
16989 asm volatile(ASM_STAC "\n" \
16990- "1:" #insn "\n\t" \
16991+ "1:" \
16992+ __copyuser_seg \
16993+ #insn "\n\t" \
16994 "2: " ASM_CLAC "\n" \
16995 ".section .fixup,\"ax\"\n" \
16996 "3: movl $-1,%[err]\n" \
16997@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16998 _ASM_EXTABLE(1b, 3b) \
16999 : [err] "=r" (err), output \
17000 : "0"(0), input); \
17001+ pax_close_userland(); \
17002 err; \
17003 })
17004
17005@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
17006 "fnclex\n\t"
17007 "emms\n\t"
17008 "fildl %P[addr]" /* set F?P to defined value */
17009- : : [addr] "m" (tsk->thread.fpu.has_fpu));
17010+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
17011 }
17012
17013 return fpu_restore_checking(&tsk->thread.fpu);
17014diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
17015index b4c1f54..e290c08 100644
17016--- a/arch/x86/include/asm/futex.h
17017+++ b/arch/x86/include/asm/futex.h
17018@@ -12,6 +12,7 @@
17019 #include <asm/smap.h>
17020
17021 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
17022+ typecheck(u32 __user *, uaddr); \
17023 asm volatile("\t" ASM_STAC "\n" \
17024 "1:\t" insn "\n" \
17025 "2:\t" ASM_CLAC "\n" \
17026@@ -20,15 +21,16 @@
17027 "\tjmp\t2b\n" \
17028 "\t.previous\n" \
17029 _ASM_EXTABLE(1b, 3b) \
17030- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
17031+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
17032 : "i" (-EFAULT), "0" (oparg), "1" (0))
17033
17034 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
17035+ typecheck(u32 __user *, uaddr); \
17036 asm volatile("\t" ASM_STAC "\n" \
17037 "1:\tmovl %2, %0\n" \
17038 "\tmovl\t%0, %3\n" \
17039 "\t" insn "\n" \
17040- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
17041+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
17042 "\tjnz\t1b\n" \
17043 "3:\t" ASM_CLAC "\n" \
17044 "\t.section .fixup,\"ax\"\n" \
17045@@ -38,7 +40,7 @@
17046 _ASM_EXTABLE(1b, 4b) \
17047 _ASM_EXTABLE(2b, 4b) \
17048 : "=&a" (oldval), "=&r" (ret), \
17049- "+m" (*uaddr), "=&r" (tem) \
17050+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
17051 : "r" (oparg), "i" (-EFAULT), "1" (0))
17052
17053 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17054@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17055
17056 pagefault_disable();
17057
17058+ pax_open_userland();
17059 switch (op) {
17060 case FUTEX_OP_SET:
17061- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
17062+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
17063 break;
17064 case FUTEX_OP_ADD:
17065- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
17066+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
17067 uaddr, oparg);
17068 break;
17069 case FUTEX_OP_OR:
17070@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17071 default:
17072 ret = -ENOSYS;
17073 }
17074+ pax_close_userland();
17075
17076 pagefault_enable();
17077
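
The futex hunks bracket the user-space atomics with pax_open_userland()/pax_close_userland() and splice __copyuser_seg into each instruction: under UDEREF on i386, user memory is reachable only through a dedicated segment, so accessors need an explicit segment-override prefix and a plain kernel dereference of a user pointer faults. A compile-only 32-bit sketch of the prefix mechanism (the "gs;" spelling matches the patch's i386 definition; the segment setup itself is omitted and user_load is a hypothetical helper):

#define __copyuser_seg "gs;"		/* instruction prefix for the userland segment */

static inline unsigned int user_load(const unsigned int *uaddr)
{
	unsigned int val;

	asm volatile(__copyuser_seg "movl %1, %0"	/* assembles as: gs; movl ... */
		     : "=r" (val)
		     : "m" (*uaddr));
	return val;
}
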
17078diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
17079index 9662290..49ca5e5 100644
17080--- a/arch/x86/include/asm/hw_irq.h
17081+++ b/arch/x86/include/asm/hw_irq.h
17082@@ -160,8 +160,8 @@ static inline void unlock_vector_lock(void) {}
17083 #endif /* CONFIG_X86_LOCAL_APIC */
17084
17085 /* Statistics */
17086-extern atomic_t irq_err_count;
17087-extern atomic_t irq_mis_count;
17088+extern atomic_unchecked_t irq_err_count;
17089+extern atomic_unchecked_t irq_mis_count;
17090
17091 /* EISA */
17092 extern void eisa_set_level_irq(unsigned int irq);
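
irq_err_count and irq_mis_count become atomic_unchecked_t because they are pure statistics that may legitimately wrap: under PAX_REFCOUNT the normal atomic ops trap on signed overflow, so counters for which wrapping is harmless must opt out through the *_unchecked variants. A sketch of the unchecked side (layout mirrors atomic_t; the checked counterpart is visible in the local.h hunks further down):

typedef struct { int counter; } atomic_unchecked_t;	/* same layout as atomic_t */

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	asm volatile("lock; incl %0"	/* plain increment: no overflow check emitted */
		     : "+m" (v->counter));
}
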
17093diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17094index ccffa53..3c90c87 100644
17095--- a/arch/x86/include/asm/i8259.h
17096+++ b/arch/x86/include/asm/i8259.h
17097@@ -62,7 +62,7 @@ struct legacy_pic {
17098 void (*init)(int auto_eoi);
17099 int (*irq_pending)(unsigned int irq);
17100 void (*make_irq)(unsigned int irq);
17101-};
17102+} __do_const;
17103
17104 extern struct legacy_pic *legacy_pic;
17105 extern struct legacy_pic null_legacy_pic;
17106diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17107index 34a5b93..27e40a6 100644
17108--- a/arch/x86/include/asm/io.h
17109+++ b/arch/x86/include/asm/io.h
17110@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17111 "m" (*(volatile type __force *)addr) barrier); }
17112
17113 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17114-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17115-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17116+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17117+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17118
17119 build_mmio_read(__readb, "b", unsigned char, "=q", )
17120-build_mmio_read(__readw, "w", unsigned short, "=r", )
17121-build_mmio_read(__readl, "l", unsigned int, "=r", )
17122+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17123+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17124
17125 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17126 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17127@@ -113,7 +113,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17128 * this function
17129 */
17130
17131-static inline phys_addr_t virt_to_phys(volatile void *address)
17132+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17133 {
17134 return __pa(address);
17135 }
17136@@ -189,7 +189,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17137 return ioremap_nocache(offset, size);
17138 }
17139
17140-extern void iounmap(volatile void __iomem *addr);
17141+extern void iounmap(const volatile void __iomem *addr);
17142
17143 extern void set_iounmap_nonlazy(void);
17144
17145@@ -199,6 +199,17 @@ extern void set_iounmap_nonlazy(void);
17146
17147 #include <linux/vmalloc.h>
17148
17149+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17150+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17151+{
17152+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17153+}
17154+
17155+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17156+{
17157+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17158+}
17159+
17160 /*
17161 * Convert a virtual cached pointer to an uncached pointer
17162 */
17163diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17164index 0a8b519..80e7d5b 100644
17165--- a/arch/x86/include/asm/irqflags.h
17166+++ b/arch/x86/include/asm/irqflags.h
17167@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17168 sti; \
17169 sysexit
17170
17171+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17172+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17173+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17174+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17175+
17176 #else
17177 #define INTERRUPT_RETURN iret
17178 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17179diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17180index 4421b5d..8543006 100644
17181--- a/arch/x86/include/asm/kprobes.h
17182+++ b/arch/x86/include/asm/kprobes.h
17183@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
17184 #define RELATIVEJUMP_SIZE 5
17185 #define RELATIVECALL_OPCODE 0xe8
17186 #define RELATIVE_ADDR_SIZE 4
17187-#define MAX_STACK_SIZE 64
17188-#define MIN_STACK_SIZE(ADDR) \
17189- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17190- THREAD_SIZE - (unsigned long)(ADDR))) \
17191- ? (MAX_STACK_SIZE) \
17192- : (((unsigned long)current_thread_info()) + \
17193- THREAD_SIZE - (unsigned long)(ADDR)))
17194+#define MAX_STACK_SIZE 64UL
17195+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17196
17197 #define flush_insn_slot(p) do { } while (0)
17198
17199diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
17200index d89c6b8..e711c69 100644
17201--- a/arch/x86/include/asm/kvm_host.h
17202+++ b/arch/x86/include/asm/kvm_host.h
17203@@ -51,7 +51,7 @@
17204 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
17205
17206 #define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
17207-#define CR3_PCID_INVD (1UL << 63)
17208+#define CR3_PCID_INVD (1ULL << 63)
17209 #define CR4_RESERVED_BITS \
17210 (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
17211 | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
17212diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17213index 4ad6560..75c7bdd 100644
17214--- a/arch/x86/include/asm/local.h
17215+++ b/arch/x86/include/asm/local.h
17216@@ -10,33 +10,97 @@ typedef struct {
17217 atomic_long_t a;
17218 } local_t;
17219
17220+typedef struct {
17221+ atomic_long_unchecked_t a;
17222+} local_unchecked_t;
17223+
17224 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17225
17226 #define local_read(l) atomic_long_read(&(l)->a)
17227+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17228 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17229+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17230
17231 static inline void local_inc(local_t *l)
17232 {
17233- asm volatile(_ASM_INC "%0"
17234+ asm volatile(_ASM_INC "%0\n"
17235+
17236+#ifdef CONFIG_PAX_REFCOUNT
17237+ "jno 0f\n"
17238+ _ASM_DEC "%0\n"
17239+ "int $4\n0:\n"
17240+ _ASM_EXTABLE(0b, 0b)
17241+#endif
17242+
17243+ : "+m" (l->a.counter));
17244+}
17245+
17246+static inline void local_inc_unchecked(local_unchecked_t *l)
17247+{
17248+ asm volatile(_ASM_INC "%0\n"
17249 : "+m" (l->a.counter));
17250 }
17251
17252 static inline void local_dec(local_t *l)
17253 {
17254- asm volatile(_ASM_DEC "%0"
17255+ asm volatile(_ASM_DEC "%0\n"
17256+
17257+#ifdef CONFIG_PAX_REFCOUNT
17258+ "jno 0f\n"
17259+ _ASM_INC "%0\n"
17260+ "int $4\n0:\n"
17261+ _ASM_EXTABLE(0b, 0b)
17262+#endif
17263+
17264+ : "+m" (l->a.counter));
17265+}
17266+
17267+static inline void local_dec_unchecked(local_unchecked_t *l)
17268+{
17269+ asm volatile(_ASM_DEC "%0\n"
17270 : "+m" (l->a.counter));
17271 }
17272
17273 static inline void local_add(long i, local_t *l)
17274 {
17275- asm volatile(_ASM_ADD "%1,%0"
17276+ asm volatile(_ASM_ADD "%1,%0\n"
17277+
17278+#ifdef CONFIG_PAX_REFCOUNT
17279+ "jno 0f\n"
17280+ _ASM_SUB "%1,%0\n"
17281+ "int $4\n0:\n"
17282+ _ASM_EXTABLE(0b, 0b)
17283+#endif
17284+
17285+ : "+m" (l->a.counter)
17286+ : "ir" (i));
17287+}
17288+
17289+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17290+{
17291+ asm volatile(_ASM_ADD "%1,%0\n"
17292 : "+m" (l->a.counter)
17293 : "ir" (i));
17294 }
17295
17296 static inline void local_sub(long i, local_t *l)
17297 {
17298- asm volatile(_ASM_SUB "%1,%0"
17299+ asm volatile(_ASM_SUB "%1,%0\n"
17300+
17301+#ifdef CONFIG_PAX_REFCOUNT
17302+ "jno 0f\n"
17303+ _ASM_ADD "%1,%0\n"
17304+ "int $4\n0:\n"
17305+ _ASM_EXTABLE(0b, 0b)
17306+#endif
17307+
17308+ : "+m" (l->a.counter)
17309+ : "ir" (i));
17310+}
17311+
17312+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17313+{
17314+ asm volatile(_ASM_SUB "%1,%0\n"
17315 : "+m" (l->a.counter)
17316 : "ir" (i));
17317 }
17318@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17319 */
17320 static inline int local_sub_and_test(long i, local_t *l)
17321 {
17322- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17323+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17324 }
17325
17326 /**
17327@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17328 */
17329 static inline int local_dec_and_test(local_t *l)
17330 {
17331- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17332+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17333 }
17334
17335 /**
17336@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17337 */
17338 static inline int local_inc_and_test(local_t *l)
17339 {
17340- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17341+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17342 }
17343
17344 /**
17345@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17346 */
17347 static inline int local_add_negative(long i, local_t *l)
17348 {
17349- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17350+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17351 }
17352
17353 /**
17354@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17355 static inline long local_add_return(long i, local_t *l)
17356 {
17357 long __i = i;
17358+ asm volatile(_ASM_XADD "%0, %1\n"
17359+
17360+#ifdef CONFIG_PAX_REFCOUNT
17361+ "jno 0f\n"
17362+ _ASM_MOV "%0,%1\n"
17363+ "int $4\n0:\n"
17364+ _ASM_EXTABLE(0b, 0b)
17365+#endif
17366+
17367+ : "+r" (i), "+m" (l->a.counter)
17368+ : : "memory");
17369+ return i + __i;
17370+}
17371+
17372+/**
17373+ * local_add_return_unchecked - add and return
17374+ * @i: integer value to add
17375+ * @l: pointer to type local_unchecked_t
17376+ *
17377+ * Atomically adds @i to @l and returns @i + @l
17378+ */
17379+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
17380+{
17381+ long __i = i;
17382 asm volatile(_ASM_XADD "%0, %1;"
17383 : "+r" (i), "+m" (l->a.counter)
17384 : : "memory");
17385@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
17386
17387 #define local_cmpxchg(l, o, n) \
17388 (cmpxchg_local(&((l)->a.counter), (o), (n)))
17389+#define local_cmpxchg_unchecked(l, o, n) \
17390+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
17391 /* Always has a lock prefix */
17392 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
17393
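
The local.h hunks show the PAX_REFCOUNT pattern in full: perform the operation, "jno" past the handler if no signed overflow occurred, otherwise undo the operation and execute "int $4" (the overflow trap), with the _ASM_EXTABLE entry resuming execution after the trap. A runnable userspace demonstration of the same sequence — x86 Linux only, where the kernel delivers the int $4 overflow trap to userspace as SIGSEGV, which the sketch catches just to prove the counter was restored:

#include <stdio.h>
#include <signal.h>
#include <setjmp.h>

static sigjmp_buf env;

static void on_trap(int sig) { (void)sig; siglongjmp(env, 1); }

static void checked_inc(int *p)
{
	asm volatile("incl %0\n"
		     "jno 0f\n"		/* no signed overflow: skip the handler */
		     "decl %0\n"	/* overflow: undo the increment ...     */
		     "int $4\n"		/* ... and raise the overflow trap      */
		     "0:\n"
		     : "+m" (*p));
}

int main(void)
{
	int v = 0x7fffffff;		/* INT_MAX: the next increment overflows */

	signal(SIGSEGV, on_trap);
	if (sigsetjmp(env, 1) == 0)
		checked_inc(&v);
	else
		printf("overflow trapped, counter restored to %#x\n", v);
	return 0;
}
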
17394diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
17395new file mode 100644
17396index 0000000..2bfd3ba
17397--- /dev/null
17398+++ b/arch/x86/include/asm/mman.h
17399@@ -0,0 +1,15 @@
17400+#ifndef _X86_MMAN_H
17401+#define _X86_MMAN_H
17402+
17403+#include <uapi/asm/mman.h>
17404+
17405+#ifdef __KERNEL__
17406+#ifndef __ASSEMBLY__
17407+#ifdef CONFIG_X86_32
17408+#define arch_mmap_check i386_mmap_check
17409+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
17410+#endif
17411+#endif
17412+#endif
17413+
17414+#endif /* X86_MMAN_H */
17415diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
17416index 876e74e..e20bfb1 100644
17417--- a/arch/x86/include/asm/mmu.h
17418+++ b/arch/x86/include/asm/mmu.h
17419@@ -9,7 +9,7 @@
17420 * we put the segment information here.
17421 */
17422 typedef struct {
17423- void *ldt;
17424+ struct desc_struct *ldt;
17425 int size;
17426
17427 #ifdef CONFIG_X86_64
17428@@ -18,7 +18,19 @@ typedef struct {
17429 #endif
17430
17431 struct mutex lock;
17432- void __user *vdso;
17433+ unsigned long vdso;
17434+
17435+#ifdef CONFIG_X86_32
17436+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17437+ unsigned long user_cs_base;
17438+ unsigned long user_cs_limit;
17439+
17440+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17441+ cpumask_t cpu_user_cs_mask;
17442+#endif
17443+
17444+#endif
17445+#endif
17446 } mm_context_t;
17447
17448 #ifdef CONFIG_SMP
17449diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17450index 4b75d59..8ffacb6 100644
17451--- a/arch/x86/include/asm/mmu_context.h
17452+++ b/arch/x86/include/asm/mmu_context.h
17453@@ -27,6 +27,20 @@ void destroy_context(struct mm_struct *mm);
17454
17455 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17456 {
17457+
17458+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17459+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
17460+ unsigned int i;
17461+ pgd_t *pgd;
17462+
17463+ pax_open_kernel();
17464+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
17465+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17466+ set_pgd_batched(pgd+i, native_make_pgd(0));
17467+ pax_close_kernel();
17468+ }
17469+#endif
17470+
17471 #ifdef CONFIG_SMP
17472 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17473 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17474@@ -37,16 +51,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17475 struct task_struct *tsk)
17476 {
17477 unsigned cpu = smp_processor_id();
17478+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17479+ int tlbstate = TLBSTATE_OK;
17480+#endif
17481
17482 if (likely(prev != next)) {
17483 #ifdef CONFIG_SMP
17484+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17485+ tlbstate = this_cpu_read(cpu_tlbstate.state);
17486+#endif
17487 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17488 this_cpu_write(cpu_tlbstate.active_mm, next);
17489 #endif
17490 cpumask_set_cpu(cpu, mm_cpumask(next));
17491
17492 /* Re-load page tables */
17493+#ifdef CONFIG_PAX_PER_CPU_PGD
17494+ pax_open_kernel();
17495+
17496+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17497+ if (static_cpu_has(X86_FEATURE_PCID))
17498+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17499+ else
17500+#endif
17501+
17502+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17503+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17504+ pax_close_kernel();
17505+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17506+
17507+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17508+ if (static_cpu_has(X86_FEATURE_PCID)) {
17509+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17510+ u64 descriptor[2];
17511+ descriptor[0] = PCID_USER;
17512+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17513+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17514+ descriptor[0] = PCID_KERNEL;
17515+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17516+ }
17517+ } else {
17518+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17519+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17520+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17521+ else
17522+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17523+ }
17524+ } else
17525+#endif
17526+
17527+ load_cr3(get_cpu_pgd(cpu, kernel));
17528+#else
17529 load_cr3(next->pgd);
17530+#endif
17531 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17532
17533 /* Stop flush ipis for the previous mm */
17534@@ -64,9 +121,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17535 */
17536 if (unlikely(prev->context.ldt != next->context.ldt))
17537 load_LDT_nolock(&next->context);
17538+
17539+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17540+ if (!(__supported_pte_mask & _PAGE_NX)) {
17541+ smp_mb__before_atomic();
17542+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
17543+ smp_mb__after_atomic();
17544+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17545+ }
17546+#endif
17547+
17548+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17549+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17550+ prev->context.user_cs_limit != next->context.user_cs_limit))
17551+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17552+#ifdef CONFIG_SMP
17553+ else if (unlikely(tlbstate != TLBSTATE_OK))
17554+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17555+#endif
17556+#endif
17557+
17558 }
17559+ else {
17560+
17561+#ifdef CONFIG_PAX_PER_CPU_PGD
17562+ pax_open_kernel();
17563+
17564+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17565+ if (static_cpu_has(X86_FEATURE_PCID))
17566+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17567+ else
17568+#endif
17569+
17570+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17571+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17572+ pax_close_kernel();
17573+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17574+
17575+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17576+ if (static_cpu_has(X86_FEATURE_PCID)) {
17577+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17578+ u64 descriptor[2];
17579+ descriptor[0] = PCID_USER;
17580+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17581+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17582+ descriptor[0] = PCID_KERNEL;
17583+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17584+ }
17585+ } else {
17586+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17587+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17588+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17589+ else
17590+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17591+ }
17592+ } else
17593+#endif
17594+
17595+ load_cr3(get_cpu_pgd(cpu, kernel));
17596+#endif
17597+
17598 #ifdef CONFIG_SMP
17599- else {
17600 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17601 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17602
17603@@ -83,12 +198,29 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17604 * tlb flush IPI delivery. We must reload CR3
17605 * to make sure to use no freed page tables.
17606 */
17607+
17608+#ifndef CONFIG_PAX_PER_CPU_PGD
17609 load_cr3(next->pgd);
17610 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17611+#endif
17612+
17613 load_LDT_nolock(&next->context);
17614+
17615+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17616+ if (!(__supported_pte_mask & _PAGE_NX))
17617+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17618+#endif
17619+
17620+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17621+#ifdef CONFIG_PAX_PAGEEXEC
17622+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17623+#endif
17624+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17625+#endif
17626+
17627 }
17628+#endif
17629 }
17630-#endif
17631 }
17632
17633 #define activate_mm(prev, next) \
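
Under PAX_PER_CPU_PGD each CPU owns a kernel/user PGD pair; on every switch_mm the next task's user mappings are cloned into that pair (__clone_user_pgds/__shadow_user_pgds) and stale translations are flushed by PCID — via INVPCID single-context where the CPU supports it, otherwise via CR3 writes, with PCID_NOFLUSH used only when STRONGUDEREF keeps the user PCID coherent. Compile-only sketch of the INVPCID form the hunks emit via __ASM_INVPCID (privileged instruction; the 16-byte descriptor layout is architectural, type 1 = single-context):

static inline void invpcid_single_context(unsigned long long pcid)
{
	struct { unsigned long long pcid, addr; } desc = { pcid, 0 };

	asm volatile("invpcid %0, %1"	/* drop all TLB entries tagged desc.pcid */
		     : : "m" (desc), "r" (1ULL) : "memory");
}
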
17634diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17635index e3b7819..b257c64 100644
17636--- a/arch/x86/include/asm/module.h
17637+++ b/arch/x86/include/asm/module.h
17638@@ -5,6 +5,7 @@
17639
17640 #ifdef CONFIG_X86_64
17641 /* X86_64 does not define MODULE_PROC_FAMILY */
17642+#define MODULE_PROC_FAMILY ""
17643 #elif defined CONFIG_M486
17644 #define MODULE_PROC_FAMILY "486 "
17645 #elif defined CONFIG_M586
17646@@ -57,8 +58,20 @@
17647 #error unknown processor family
17648 #endif
17649
17650-#ifdef CONFIG_X86_32
17651-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17652+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17653+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17654+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17655+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17656+#else
17657+#define MODULE_PAX_KERNEXEC ""
17658 #endif
17659
17660+#ifdef CONFIG_PAX_MEMORY_UDEREF
17661+#define MODULE_PAX_UDEREF "UDEREF "
17662+#else
17663+#define MODULE_PAX_UDEREF ""
17664+#endif
17665+
17666+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17667+
17668 #endif /* _ASM_X86_MODULE_H */
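
The module.h change folds the active KERNEXEC and UDEREF settings into MODULE_ARCH_VERMAGIC (now defined on x86_64 too, via an empty MODULE_PROC_FAMILY), so a module built without matching PaX options fails the vermagic check at load time instead of misbehaving inside a hardened kernel. A runnable illustration of the string pasting, assuming a .config with KERNEXEC_BTS and UDEREF enabled:

#include <stdio.h>

#define MODULE_PROC_FAMILY ""			/* x86_64, per the hunk */
#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "	/* assumed configuration */
#define MODULE_PAX_UDEREF "UDEREF "
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
	puts("arch vermagic: \"" MODULE_ARCH_VERMAGIC "\"");	/* KERNEXEC_BTS UDEREF */
	return 0;
}
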
17669diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17670index 5f2fc44..106caa6 100644
17671--- a/arch/x86/include/asm/nmi.h
17672+++ b/arch/x86/include/asm/nmi.h
17673@@ -36,26 +36,35 @@ enum {
17674
17675 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
17676
17677+struct nmiaction;
17678+
17679+struct nmiwork {
17680+ const struct nmiaction *action;
17681+ u64 max_duration;
17682+ struct irq_work irq_work;
17683+};
17684+
17685 struct nmiaction {
17686 struct list_head list;
17687 nmi_handler_t handler;
17688- u64 max_duration;
17689- struct irq_work irq_work;
17690 unsigned long flags;
17691 const char *name;
17692-};
17693+ struct nmiwork *work;
17694+} __do_const;
17695
17696 #define register_nmi_handler(t, fn, fg, n, init...) \
17697 ({ \
17698- static struct nmiaction init fn##_na = { \
17699+ static struct nmiwork fn##_nw; \
17700+ static const struct nmiaction init fn##_na = { \
17701 .handler = (fn), \
17702 .name = (n), \
17703 .flags = (fg), \
17704+ .work = &fn##_nw, \
17705 }; \
17706 __register_nmi_handler((t), &fn##_na); \
17707 })
17708
17709-int __register_nmi_handler(unsigned int, struct nmiaction *);
17710+int __register_nmi_handler(unsigned int, const struct nmiaction *);
17711
17712 void unregister_nmi_handler(unsigned int, const char *);
17713
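
The nmi.h hunk is the constification idiom used throughout the patch: the mutable bookkeeping (max_duration, irq_work) moves out of struct nmiaction into a separate struct nmiwork, leaving nmiaction all pointers and flags so each instance can be declared const (and, with __do_const, forced read-only by the constify plugin). A sketch of the split, with hypothetical names:

struct demo_work { unsigned long long max_duration; };	/* runtime state */

struct demo_action {
	int (*handler)(unsigned int);
	const char *name;
	struct demo_work *work;		/* all mutable state sits behind this pointer */
};

static int demo_handler(unsigned int type) { (void)type; return 0; }

static struct demo_work demo_nw;
static const struct demo_action demo_na = {	/* the descriptor itself is read-only */
	.handler	= demo_handler,
	.name		= "demo",
	.work		= &demo_nw,
};
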
17714diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17715index 802dde3..9183e68 100644
17716--- a/arch/x86/include/asm/page.h
17717+++ b/arch/x86/include/asm/page.h
17718@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17719 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17720
17721 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17722+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17723
17724 #define __boot_va(x) __va(x)
17725 #define __boot_pa(x) __pa(x)
17726@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17727 * virt_to_page(kaddr) returns a valid pointer if and only if
17728 * virt_addr_valid(kaddr) returns true.
17729 */
17730-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17731 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
17732 extern bool __virt_addr_valid(unsigned long kaddr);
17733 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
17734
17735+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
17736+#define virt_to_page(kaddr) \
17737+ ({ \
17738+ const void *__kaddr = (const void *)(kaddr); \
17739+ BUG_ON(!virt_addr_valid(__kaddr)); \
17740+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
17741+ })
17742+#else
17743+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17744+#endif
17745+
17746 #endif /* __ASSEMBLY__ */
17747
17748 #include <asm-generic/memory_model.h>
17749diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17750index b3bebf9..13ac22e 100644
17751--- a/arch/x86/include/asm/page_64.h
17752+++ b/arch/x86/include/asm/page_64.h
17753@@ -7,9 +7,9 @@
17754
17755 /* duplicated to the one in bootmem.h */
17756 extern unsigned long max_pfn;
17757-extern unsigned long phys_base;
17758+extern const unsigned long phys_base;
17759
17760-static inline unsigned long __phys_addr_nodebug(unsigned long x)
17761+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17762 {
17763 unsigned long y = x - __START_KERNEL_map;
17764
17765@@ -20,8 +20,8 @@ static inline unsigned long __phys_addr_nodebug(unsigned long x)
17766 }
17767
17768 #ifdef CONFIG_DEBUG_VIRTUAL
17769-extern unsigned long __phys_addr(unsigned long);
17770-extern unsigned long __phys_addr_symbol(unsigned long);
17771+extern unsigned long __intentional_overflow(-1) __phys_addr(unsigned long);
17772+extern unsigned long __intentional_overflow(-1) __phys_addr_symbol(unsigned long);
17773 #else
17774 #define __phys_addr(x) __phys_addr_nodebug(x)
17775 #define __phys_addr_symbol(x) \
17776diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17777index 32444ae..1a1624b 100644
17778--- a/arch/x86/include/asm/paravirt.h
17779+++ b/arch/x86/include/asm/paravirt.h
17780@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17781 return (pmd_t) { ret };
17782 }
17783
17784-static inline pmdval_t pmd_val(pmd_t pmd)
17785+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17786 {
17787 pmdval_t ret;
17788
17789@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17790 val);
17791 }
17792
17793+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17794+{
17795+ pgdval_t val = native_pgd_val(pgd);
17796+
17797+ if (sizeof(pgdval_t) > sizeof(long))
17798+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17799+ val, (u64)val >> 32);
17800+ else
17801+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17802+ val);
17803+}
17804+
17805 static inline void pgd_clear(pgd_t *pgdp)
17806 {
17807 set_pgd(pgdp, __pgd(0));
17808@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17809 pv_mmu_ops.set_fixmap(idx, phys, flags);
17810 }
17811
17812+#ifdef CONFIG_PAX_KERNEXEC
17813+static inline unsigned long pax_open_kernel(void)
17814+{
17815+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17816+}
17817+
17818+static inline unsigned long pax_close_kernel(void)
17819+{
17820+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17821+}
17822+#else
17823+static inline unsigned long pax_open_kernel(void) { return 0; }
17824+static inline unsigned long pax_close_kernel(void) { return 0; }
17825+#endif
17826+
17827 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17828
17829 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17830@@ -906,7 +933,7 @@ extern void default_banner(void);
17831
17832 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17833 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17834-#define PARA_INDIRECT(addr) *%cs:addr
17835+#define PARA_INDIRECT(addr) *%ss:addr
17836 #endif
17837
17838 #define INTERRUPT_RETURN \
17839@@ -981,6 +1008,21 @@ extern void default_banner(void);
17840 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
17841 CLBR_NONE, \
17842 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
17843+
17844+#define GET_CR0_INTO_RDI \
17845+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17846+ mov %rax,%rdi
17847+
17848+#define SET_RDI_INTO_CR0 \
17849+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17850+
17851+#define GET_CR3_INTO_RDI \
17852+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
17853+ mov %rax,%rdi
17854+
17855+#define SET_RDI_INTO_CR3 \
17856+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
17857+
17858 #endif /* CONFIG_X86_32 */
17859
17860 #endif /* __ASSEMBLY__ */
17861diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
17862index 7549b8b..f0edfda 100644
17863--- a/arch/x86/include/asm/paravirt_types.h
17864+++ b/arch/x86/include/asm/paravirt_types.h
17865@@ -84,7 +84,7 @@ struct pv_init_ops {
17866 */
17867 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
17868 unsigned long addr, unsigned len);
17869-};
17870+} __no_const __no_randomize_layout;
17871
17872
17873 struct pv_lazy_ops {
17874@@ -92,13 +92,13 @@ struct pv_lazy_ops {
17875 void (*enter)(void);
17876 void (*leave)(void);
17877 void (*flush)(void);
17878-};
17879+} __no_randomize_layout;
17880
17881 struct pv_time_ops {
17882 unsigned long long (*sched_clock)(void);
17883 unsigned long long (*steal_clock)(int cpu);
17884 unsigned long (*get_tsc_khz)(void);
17885-};
17886+} __no_const __no_randomize_layout;
17887
17888 struct pv_cpu_ops {
17889 /* hooks for various privileged instructions */
17890@@ -192,7 +192,7 @@ struct pv_cpu_ops {
17891
17892 void (*start_context_switch)(struct task_struct *prev);
17893 void (*end_context_switch)(struct task_struct *next);
17894-};
17895+} __no_const __no_randomize_layout;
17896
17897 struct pv_irq_ops {
17898 /*
17899@@ -215,7 +215,7 @@ struct pv_irq_ops {
17900 #ifdef CONFIG_X86_64
17901 void (*adjust_exception_frame)(void);
17902 #endif
17903-};
17904+} __no_randomize_layout;
17905
17906 struct pv_apic_ops {
17907 #ifdef CONFIG_X86_LOCAL_APIC
17908@@ -223,7 +223,7 @@ struct pv_apic_ops {
17909 unsigned long start_eip,
17910 unsigned long start_esp);
17911 #endif
17912-};
17913+} __no_const __no_randomize_layout;
17914
17915 struct pv_mmu_ops {
17916 unsigned long (*read_cr2)(void);
17917@@ -313,6 +313,7 @@ struct pv_mmu_ops {
17918 struct paravirt_callee_save make_pud;
17919
17920 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
17921+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
17922 #endif /* PAGETABLE_LEVELS == 4 */
17923 #endif /* PAGETABLE_LEVELS >= 3 */
17924
17925@@ -324,7 +325,13 @@ struct pv_mmu_ops {
17926 an mfn. We can tell which is which from the index. */
17927 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
17928 phys_addr_t phys, pgprot_t flags);
17929-};
17930+
17931+#ifdef CONFIG_PAX_KERNEXEC
17932+ unsigned long (*pax_open_kernel)(void);
17933+ unsigned long (*pax_close_kernel)(void);
17934+#endif
17935+
17936+} __no_randomize_layout;
17937
17938 struct arch_spinlock;
17939 #ifdef CONFIG_SMP
17940@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
17941 struct pv_lock_ops {
17942 struct paravirt_callee_save lock_spinning;
17943 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
17944-};
17945+} __no_randomize_layout;
17946
17947 /* This contains all the paravirt structures: we get a convenient
17948 * number for each function using the offset which we use to indicate
17949- * what to patch. */
17950+ * what to patch.
17951+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
17952+ */
17953+
17954 struct paravirt_patch_template {
17955 struct pv_init_ops pv_init_ops;
17956 struct pv_time_ops pv_time_ops;
17957@@ -349,7 +359,7 @@ struct paravirt_patch_template {
17958 struct pv_apic_ops pv_apic_ops;
17959 struct pv_mmu_ops pv_mmu_ops;
17960 struct pv_lock_ops pv_lock_ops;
17961-};
17962+} __no_randomize_layout;
17963
17964 extern struct pv_info pv_info;
17965 extern struct pv_init_ops pv_init_ops;
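
All of the pv_*_ops tables above gain __no_const and/or __no_randomize_layout: the constify plugin would otherwise force these all-function-pointer structs const, which must be suppressed where the tables are rewritten during boot-time paravirt patching, and structure-layout randomization must be pinned because patch sites address members by numeric offset (the "NEAT TRICK" the comment refers to). Fallback stubs so the annotations read as plain C — these definitions are assumptions; the real ones live in the patch's compiler.h changes:

#ifndef __no_const
#define __no_const			/* consumed by the constify gcc plugin */
#endif
#ifndef __no_randomize_layout
#define __no_randomize_layout		/* consumed by the randstruct gcc plugin */
#endif
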
17966diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
17967index c4412e9..90e88c5 100644
17968--- a/arch/x86/include/asm/pgalloc.h
17969+++ b/arch/x86/include/asm/pgalloc.h
17970@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
17971 pmd_t *pmd, pte_t *pte)
17972 {
17973 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17974+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
17975+}
17976+
17977+static inline void pmd_populate_user(struct mm_struct *mm,
17978+ pmd_t *pmd, pte_t *pte)
17979+{
17980+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17981 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
17982 }
17983
17984@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
17985
17986 #ifdef CONFIG_X86_PAE
17987 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
17988+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
17989+{
17990+ pud_populate(mm, pudp, pmd);
17991+}
17992 #else /* !CONFIG_X86_PAE */
17993 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17994 {
17995 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17996 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
17997 }
17998+
17999+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
18000+{
18001+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
18002+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
18003+}
18004 #endif /* CONFIG_X86_PAE */
18005
18006 #if PAGETABLE_LEVELS > 3
18007@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
18008 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
18009 }
18010
18011+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
18012+{
18013+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
18014+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
18015+}
18016+
18017 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
18018 {
18019 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
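
pmd_populate_kernel() and the new pud/pgd_populate_kernel() helpers install _KERNPG_TABLE where the user-facing variants use _PAGE_TABLE; the two differ only in _PAGE_USER, so kernel page-table levels are no longer marked user-accessible — which the per-CPU PGD and UDEREF layouts depend on. The standard x86 values, for reference:

#define _PAGE_USER	0x004
#define _KERNPG_TABLE	0x063				/* PRESENT | RW | ACCESSED | DIRTY */
#define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)	/* == 0x067 */
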
18020diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
18021index 206a87f..1623b06 100644
18022--- a/arch/x86/include/asm/pgtable-2level.h
18023+++ b/arch/x86/include/asm/pgtable-2level.h
18024@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
18025
18026 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18027 {
18028+ pax_open_kernel();
18029 *pmdp = pmd;
18030+ pax_close_kernel();
18031 }
18032
18033 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18034diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
18035index 81bb91b..9392125 100644
18036--- a/arch/x86/include/asm/pgtable-3level.h
18037+++ b/arch/x86/include/asm/pgtable-3level.h
18038@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18039
18040 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18041 {
18042+ pax_open_kernel();
18043 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
18044+ pax_close_kernel();
18045 }
18046
18047 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18048 {
18049+ pax_open_kernel();
18050 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
18051+ pax_close_kernel();
18052 }
18053
18054 /*
18055diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
18056index e8a5454..1539359 100644
18057--- a/arch/x86/include/asm/pgtable.h
18058+++ b/arch/x86/include/asm/pgtable.h
18059@@ -47,6 +47,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18060
18061 #ifndef __PAGETABLE_PUD_FOLDED
18062 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
18063+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
18064 #define pgd_clear(pgd) native_pgd_clear(pgd)
18065 #endif
18066
18067@@ -84,12 +85,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18068
18069 #define arch_end_context_switch(prev) do {} while(0)
18070
18071+#define pax_open_kernel() native_pax_open_kernel()
18072+#define pax_close_kernel() native_pax_close_kernel()
18073 #endif /* CONFIG_PARAVIRT */
18074
18075+#define __HAVE_ARCH_PAX_OPEN_KERNEL
18076+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
18077+
18078+#ifdef CONFIG_PAX_KERNEXEC
18079+static inline unsigned long native_pax_open_kernel(void)
18080+{
18081+ unsigned long cr0;
18082+
18083+ preempt_disable();
18084+ barrier();
18085+ cr0 = read_cr0() ^ X86_CR0_WP;
18086+ BUG_ON(cr0 & X86_CR0_WP);
18087+ write_cr0(cr0);
18088+ barrier();
18089+ return cr0 ^ X86_CR0_WP;
18090+}
18091+
18092+static inline unsigned long native_pax_close_kernel(void)
18093+{
18094+ unsigned long cr0;
18095+
18096+ barrier();
18097+ cr0 = read_cr0() ^ X86_CR0_WP;
18098+ BUG_ON(!(cr0 & X86_CR0_WP));
18099+ write_cr0(cr0);
18100+ barrier();
18101+ preempt_enable_no_resched();
18102+ return cr0 ^ X86_CR0_WP;
18103+}
18104+#else
18105+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18106+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18107+#endif
18108+
18109 /*
18110 * The following only work if pte_present() is true.
18111 * Undefined behaviour if not..
18112 */
18113+static inline int pte_user(pte_t pte)
18114+{
18115+ return pte_val(pte) & _PAGE_USER;
18116+}
18117+
18118 static inline int pte_dirty(pte_t pte)
18119 {
18120 return pte_flags(pte) & _PAGE_DIRTY;
18121@@ -161,6 +203,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18122 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18123 }
18124
18125+static inline unsigned long pgd_pfn(pgd_t pgd)
18126+{
18127+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18128+}
18129+
18130 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18131
18132 static inline int pmd_large(pmd_t pte)
18133@@ -214,9 +261,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18134 return pte_clear_flags(pte, _PAGE_RW);
18135 }
18136
18137+static inline pte_t pte_mkread(pte_t pte)
18138+{
18139+ return __pte(pte_val(pte) | _PAGE_USER);
18140+}
18141+
18142 static inline pte_t pte_mkexec(pte_t pte)
18143 {
18144- return pte_clear_flags(pte, _PAGE_NX);
18145+#ifdef CONFIG_X86_PAE
18146+ if (__supported_pte_mask & _PAGE_NX)
18147+ return pte_clear_flags(pte, _PAGE_NX);
18148+ else
18149+#endif
18150+ return pte_set_flags(pte, _PAGE_USER);
18151+}
18152+
18153+static inline pte_t pte_exprotect(pte_t pte)
18154+{
18155+#ifdef CONFIG_X86_PAE
18156+ if (__supported_pte_mask & _PAGE_NX)
18157+ return pte_set_flags(pte, _PAGE_NX);
18158+ else
18159+#endif
18160+ return pte_clear_flags(pte, _PAGE_USER);
18161 }
18162
18163 static inline pte_t pte_mkdirty(pte_t pte)
18164@@ -446,6 +513,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18165 #endif
18166
18167 #ifndef __ASSEMBLY__
18168+
18169+#ifdef CONFIG_PAX_PER_CPU_PGD
18170+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18171+enum cpu_pgd_type {kernel = 0, user = 1};
18172+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18173+{
18174+ return cpu_pgd[cpu][type];
18175+}
18176+#endif
18177+
18178 #include <linux/mm_types.h>
18179 #include <linux/mmdebug.h>
18180 #include <linux/log2.h>
18181@@ -592,7 +669,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18182 * Currently stuck as a macro due to indirect forward reference to
18183 * linux/mmzone.h's __section_mem_map_addr() definition:
18184 */
18185-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18186+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18187
18188 /* Find an entry in the second-level page table.. */
18189 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18190@@ -632,7 +709,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18191 * Currently stuck as a macro due to indirect forward reference to
18192 * linux/mmzone.h's __section_mem_map_addr() definition:
18193 */
18194-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18195+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18196
18197 /* to find an entry in a page-table-directory. */
18198 static inline unsigned long pud_index(unsigned long address)
18199@@ -647,7 +724,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18200
18201 static inline int pgd_bad(pgd_t pgd)
18202 {
18203- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18204+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18205 }
18206
18207 static inline int pgd_none(pgd_t pgd)
18208@@ -670,7 +747,12 @@ static inline int pgd_none(pgd_t pgd)
18209 * pgd_offset() returns a (pgd_t *)
18210 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18211 */
18212-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18213+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18214+
18215+#ifdef CONFIG_PAX_PER_CPU_PGD
18216+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18217+#endif
18218+
18219 /*
18220 * a shortcut which implies the use of the kernel's pgd, instead
18221 * of a process's
18222@@ -681,6 +763,23 @@ static inline int pgd_none(pgd_t pgd)
18223 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18224 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18225
18226+#ifdef CONFIG_X86_32
18227+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18228+#else
18229+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18230+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18231+
18232+#ifdef CONFIG_PAX_MEMORY_UDEREF
18233+#ifdef __ASSEMBLY__
18234+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18235+#else
18236+extern unsigned long pax_user_shadow_base;
18237+extern pgdval_t clone_pgd_mask;
18238+#endif
18239+#endif
18240+
18241+#endif
18242+
18243 #ifndef __ASSEMBLY__
18244
18245 extern int direct_gbpages;
18246@@ -847,11 +946,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18247 * dst and src can be on the same page, but the range must not overlap,
18248 * and must not cross a page boundary.
18249 */
18250-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18251+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18252 {
18253- memcpy(dst, src, count * sizeof(pgd_t));
18254+ pax_open_kernel();
18255+ while (count--)
18256+ *dst++ = *src++;
18257+ pax_close_kernel();
18258 }
18259
18260+#ifdef CONFIG_PAX_PER_CPU_PGD
18261+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18262+#endif
18263+
18264+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18265+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18266+#else
18267+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18268+#endif
18269+
18270 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18271 static inline int page_level_shift(enum pg_level level)
18272 {
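
native_pax_open_kernel()/native_pax_close_kernel() above are KERNEXEC's write window: open disables preemption and clears CR0.WP (BUGging if it was already clear), close restores it, so kernel read-only data is writable only in between and only on the current CPU. A compile-only usage sketch (set_ro_variable is a hypothetical caller):

static void set_ro_variable(unsigned long *p, unsigned long v)
{
	pax_open_kernel();	/* preemption off, CR0.WP cleared */
	*p = v;			/* keep the writable window minimal */
	pax_close_kernel();	/* CR0.WP restored, preemption re-enabled */
}
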
18273diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18274index b6c0b40..3535d47 100644
18275--- a/arch/x86/include/asm/pgtable_32.h
18276+++ b/arch/x86/include/asm/pgtable_32.h
18277@@ -25,9 +25,6 @@
18278 struct mm_struct;
18279 struct vm_area_struct;
18280
18281-extern pgd_t swapper_pg_dir[1024];
18282-extern pgd_t initial_page_table[1024];
18283-
18284 static inline void pgtable_cache_init(void) { }
18285 static inline void check_pgt_cache(void) { }
18286 void paging_init(void);
18287@@ -45,6 +42,12 @@ void paging_init(void);
18288 # include <asm/pgtable-2level.h>
18289 #endif
18290
18291+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18292+extern pgd_t initial_page_table[PTRS_PER_PGD];
18293+#ifdef CONFIG_X86_PAE
18294+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18295+#endif
18296+
18297 #if defined(CONFIG_HIGHPTE)
18298 #define pte_offset_map(dir, address) \
18299 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18300@@ -59,12 +62,17 @@ void paging_init(void);
18301 /* Clear a kernel PTE and flush it from the TLB */
18302 #define kpte_clear_flush(ptep, vaddr) \
18303 do { \
18304+ pax_open_kernel(); \
18305 pte_clear(&init_mm, (vaddr), (ptep)); \
18306+ pax_close_kernel(); \
18307 __flush_tlb_one((vaddr)); \
18308 } while (0)
18309
18310 #endif /* !__ASSEMBLY__ */
18311
18312+#define HAVE_ARCH_UNMAPPED_AREA
18313+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18314+
18315 /*
18316 * kern_addr_valid() is (1) for FLATMEM and (0) for
18317 * SPARSEMEM and DISCONTIGMEM
18318diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18319index 9fb2f2b..b04b4bf 100644
18320--- a/arch/x86/include/asm/pgtable_32_types.h
18321+++ b/arch/x86/include/asm/pgtable_32_types.h
18322@@ -8,7 +8,7 @@
18323 */
18324 #ifdef CONFIG_X86_PAE
18325 # include <asm/pgtable-3level_types.h>
18326-# define PMD_SIZE (1UL << PMD_SHIFT)
18327+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18328 # define PMD_MASK (~(PMD_SIZE - 1))
18329 #else
18330 # include <asm/pgtable-2level_types.h>
18331@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18332 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18333 #endif
18334
18335+#ifdef CONFIG_PAX_KERNEXEC
18336+#ifndef __ASSEMBLY__
18337+extern unsigned char MODULES_EXEC_VADDR[];
18338+extern unsigned char MODULES_EXEC_END[];
18339+#endif
18340+#include <asm/boot.h>
18341+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18342+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18343+#else
18344+#define ktla_ktva(addr) (addr)
18345+#define ktva_ktla(addr) (addr)
18346+#endif
18347+
18348 #define MODULES_VADDR VMALLOC_START
18349 #define MODULES_END VMALLOC_END
18350 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
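
Note: the ktla_ktva()/ktva_ktla() macros added above translate between a kernel-text address and its alias under 32-bit KERNEXEC by a fixed offset of LOAD_PHYSICAL_ADDR + PAGE_OFFSET; with KERNEXEC off they collapse to the identity. A runnable round-trip check, using typical i386 values for the two constants (an assumption for illustration only):

#include <assert.h>

/* Assumed, typical i386 values (illustration only): */
#define LOAD_PHYSICAL_ADDR 0x1000000UL		/* 16 MiB CONFIG_PHYSICAL_START */
#define PAGE_OFFSET        0xc0000000UL		/* default 3G/1G split */

#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
	unsigned long ktla = 0x00100000UL;	/* some kernel text linear address */

	/* the two macros are exact inverses of each other */
	assert(ktva_ktla(ktla_ktva(ktla)) == ktla);
	return 0;
}
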
18351diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18352index 4572b2f..4430113 100644
18353--- a/arch/x86/include/asm/pgtable_64.h
18354+++ b/arch/x86/include/asm/pgtable_64.h
18355@@ -16,11 +16,16 @@
18356
18357 extern pud_t level3_kernel_pgt[512];
18358 extern pud_t level3_ident_pgt[512];
18359+extern pud_t level3_vmalloc_start_pgt[512];
18360+extern pud_t level3_vmalloc_end_pgt[512];
18361+extern pud_t level3_vmemmap_pgt[512];
18362+extern pud_t level2_vmemmap_pgt[512];
18363 extern pmd_t level2_kernel_pgt[512];
18364 extern pmd_t level2_fixmap_pgt[512];
18365-extern pmd_t level2_ident_pgt[512];
18366+extern pmd_t level2_ident_pgt[512*2];
18367 extern pte_t level1_fixmap_pgt[512];
18368-extern pgd_t init_level4_pgt[];
18369+extern pte_t level1_vsyscall_pgt[512];
18370+extern pgd_t init_level4_pgt[512];
18371
18372 #define swapper_pg_dir init_level4_pgt
18373
18374@@ -62,7 +67,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18375
18376 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18377 {
18378+ pax_open_kernel();
18379 *pmdp = pmd;
18380+ pax_close_kernel();
18381 }
18382
18383 static inline void native_pmd_clear(pmd_t *pmd)
18384@@ -98,7 +105,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
18385
18386 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18387 {
18388+ pax_open_kernel();
18389 *pudp = pud;
18390+ pax_close_kernel();
18391 }
18392
18393 static inline void native_pud_clear(pud_t *pud)
18394@@ -108,6 +117,13 @@ static inline void native_pud_clear(pud_t *pud)
18395
18396 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
18397 {
18398+ pax_open_kernel();
18399+ *pgdp = pgd;
18400+ pax_close_kernel();
18401+}
18402+
18403+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18404+{
18405 *pgdp = pgd;
18406 }
18407
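
Note: native_set_pgd() above gains a pax_open_kernel()/pax_close_kernel() pair, while the new native_set_pgd_batched() deliberately omits it so a caller updating many entries can pay the open/close cost once for the whole batch. A sketch of that split, with the write-protection toggles as stubs:

typedef struct { unsigned long pgd; } pgd_t;

static void pax_open_kernel(void)  { /* stub */ }
static void pax_close_kernel(void) { /* stub */ }

static void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pax_open_kernel();	/* one open/close pair per store */
	*pgdp = pgd;
	pax_close_kernel();
}

static void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;		/* caller has already opened the kernel */
}

static void set_many(pgd_t *dst, const pgd_t *src, int n)
{
	pax_open_kernel();	/* amortize the toggle over the whole batch */
	while (n--)
		native_set_pgd_batched(dst++, *src++);
	pax_close_kernel();
}

int main(void)
{
	pgd_t a[2] = { { 1 }, { 2 } }, b[2] = { { 0 }, { 0 } };

	set_many(b, a, 2);
	(void)native_set_pgd;	/* silence the unused-function warning */
	return b[1].pgd == 2 ? 0 : 1;
}
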
18408diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
18409index 602b602..acb53ed 100644
18410--- a/arch/x86/include/asm/pgtable_64_types.h
18411+++ b/arch/x86/include/asm/pgtable_64_types.h
18412@@ -61,11 +61,16 @@ typedef struct { pteval_t pte; } pte_t;
18413 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
18414 #define MODULES_END _AC(0xffffffffff000000, UL)
18415 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
18416+#define MODULES_EXEC_VADDR MODULES_VADDR
18417+#define MODULES_EXEC_END MODULES_END
18418 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
18419 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
18420 #define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
18421 #define EFI_VA_END (-68 * (_AC(1, UL) << 30))
18422
18423+#define ktla_ktva(addr) (addr)
18424+#define ktva_ktla(addr) (addr)
18425+
18426 #define EARLY_DYNAMIC_PAGE_TABLES 64
18427
18428 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
18429diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
18430index 25bcd4a..bf3f815 100644
18431--- a/arch/x86/include/asm/pgtable_types.h
18432+++ b/arch/x86/include/asm/pgtable_types.h
18433@@ -110,8 +110,10 @@
18434
18435 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18436 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
18437-#else
18438+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
18439 #define _PAGE_NX (_AT(pteval_t, 0))
18440+#else
18441+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
18442 #endif
18443
18444 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
18445@@ -167,6 +169,9 @@ enum page_cache_mode {
18446 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
18447 _PAGE_ACCESSED)
18448
18449+#define PAGE_READONLY_NOEXEC PAGE_READONLY
18450+#define PAGE_SHARED_NOEXEC PAGE_SHARED
18451+
18452 #define __PAGE_KERNEL_EXEC \
18453 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
18454 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
18455@@ -174,7 +179,7 @@ enum page_cache_mode {
18456 #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
18457 #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
18458 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE)
18459-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
18460+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
18461 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
18462 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
18463 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
18464@@ -220,7 +225,7 @@ enum page_cache_mode {
18465 #ifdef CONFIG_X86_64
18466 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
18467 #else
18468-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
18469+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18470 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18471 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
18472 #endif
18473@@ -259,7 +264,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
18474 {
18475 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
18476 }
18477+#endif
18478
18479+#if PAGETABLE_LEVELS == 3
18480+#include <asm-generic/pgtable-nopud.h>
18481+#endif
18482+
18483+#if PAGETABLE_LEVELS == 2
18484+#include <asm-generic/pgtable-nopmd.h>
18485+#endif
18486+
18487+#ifndef __ASSEMBLY__
18488 #if PAGETABLE_LEVELS > 3
18489 typedef struct { pudval_t pud; } pud_t;
18490
18491@@ -273,8 +288,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18492 return pud.pud;
18493 }
18494 #else
18495-#include <asm-generic/pgtable-nopud.h>
18496-
18497 static inline pudval_t native_pud_val(pud_t pud)
18498 {
18499 return native_pgd_val(pud.pgd);
18500@@ -294,8 +307,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18501 return pmd.pmd;
18502 }
18503 #else
18504-#include <asm-generic/pgtable-nopmd.h>
18505-
18506 static inline pmdval_t native_pmd_val(pmd_t pmd)
18507 {
18508 return native_pgd_val(pmd.pud.pgd);
18509@@ -402,7 +413,6 @@ typedef struct page *pgtable_t;
18510
18511 extern pteval_t __supported_pte_mask;
18512 extern void set_nx(void);
18513-extern int nx_enabled;
18514
18515 #define pgprot_writecombine pgprot_writecombine
18516 extern pgprot_t pgprot_writecombine(pgprot_t prot);
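
Note: on !PAE 32-bit there is no hardware NX bit, so the pgtable_types.h hunk maps _PAGE_NX onto the software-defined _PAGE_BIT_HIDDEN PTE bit purely for bookkeeping (on such CPUs the actual enforcement comes from PaX's segmentation-based features, not the MMU); the new #elif keeps _PAGE_NX at 0 when KMEMCHECK or soft-dirty already claims that software bit. A tiny sketch, with the bit position assumed to be 11 as in kernels of this era:

typedef unsigned int pteval_t;

/* Assumption for illustration: _PAGE_BIT_HIDDEN sat at PTE bit 11. */
#define _PAGE_BIT_HIDDEN 11
#define _PAGE_NX ((pteval_t)1 << _PAGE_BIT_HIDDEN)

static int pte_exec(pteval_t pte)
{
	return !(pte & _PAGE_NX);	/* flag tests keep working unchanged */
}

int main(void)
{
	return pte_exec(_PAGE_NX) ? 1 : 0;	/* NX set => not executable => 0 */
}
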
18517diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18518index 8f327184..368fb29 100644
18519--- a/arch/x86/include/asm/preempt.h
18520+++ b/arch/x86/include/asm/preempt.h
18521@@ -84,7 +84,7 @@ static __always_inline void __preempt_count_sub(int val)
18522 */
18523 static __always_inline bool __preempt_count_dec_and_test(void)
18524 {
18525- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18526+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18527 }
18528
18529 /*
18530diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18531index a092a0c..8e9640b 100644
18532--- a/arch/x86/include/asm/processor.h
18533+++ b/arch/x86/include/asm/processor.h
18534@@ -127,7 +127,7 @@ struct cpuinfo_x86 {
18535 /* Index into per_cpu list: */
18536 u16 cpu_index;
18537 u32 microcode;
18538-};
18539+} __randomize_layout;
18540
18541 #define X86_VENDOR_INTEL 0
18542 #define X86_VENDOR_CYRIX 1
18543@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18544 : "memory");
18545 }
18546
18547+/* invpcid (%rdx),%rax */
18548+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18549+
18550+#define INVPCID_SINGLE_ADDRESS 0UL
18551+#define INVPCID_SINGLE_CONTEXT 1UL
18552+#define INVPCID_ALL_GLOBAL 2UL
18553+#define INVPCID_ALL_NONGLOBAL 3UL
18554+
18555+#define PCID_KERNEL 0UL
18556+#define PCID_USER 1UL
18557+#define PCID_NOFLUSH (1UL << 63)
18558+
18559 static inline void load_cr3(pgd_t *pgdir)
18560 {
18561- write_cr3(__pa(pgdir));
18562+ write_cr3(__pa(pgdir) | PCID_KERNEL);
18563 }
18564
18565 #ifdef CONFIG_X86_32
18566@@ -282,7 +294,7 @@ struct tss_struct {
18567
18568 } ____cacheline_aligned;
18569
18570-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
18571+extern struct tss_struct init_tss[NR_CPUS];
18572
18573 /*
18574 * Save the original ist values for checking stack pointers during debugging
18575@@ -479,6 +491,7 @@ struct thread_struct {
18576 unsigned short ds;
18577 unsigned short fsindex;
18578 unsigned short gsindex;
18579+ unsigned short ss;
18580 #endif
18581 #ifdef CONFIG_X86_32
18582 unsigned long ip;
18583@@ -588,29 +601,8 @@ static inline void load_sp0(struct tss_struct *tss,
18584 extern unsigned long mmu_cr4_features;
18585 extern u32 *trampoline_cr4_features;
18586
18587-static inline void set_in_cr4(unsigned long mask)
18588-{
18589- unsigned long cr4;
18590-
18591- mmu_cr4_features |= mask;
18592- if (trampoline_cr4_features)
18593- *trampoline_cr4_features = mmu_cr4_features;
18594- cr4 = read_cr4();
18595- cr4 |= mask;
18596- write_cr4(cr4);
18597-}
18598-
18599-static inline void clear_in_cr4(unsigned long mask)
18600-{
18601- unsigned long cr4;
18602-
18603- mmu_cr4_features &= ~mask;
18604- if (trampoline_cr4_features)
18605- *trampoline_cr4_features = mmu_cr4_features;
18606- cr4 = read_cr4();
18607- cr4 &= ~mask;
18608- write_cr4(cr4);
18609-}
18610+extern void set_in_cr4(unsigned long mask);
18611+extern void clear_in_cr4(unsigned long mask);
18612
18613 typedef struct {
18614 unsigned long seg;
18615@@ -838,11 +830,18 @@ static inline void spin_lock_prefetch(const void *x)
18616 */
18617 #define TASK_SIZE PAGE_OFFSET
18618 #define TASK_SIZE_MAX TASK_SIZE
18619+
18620+#ifdef CONFIG_PAX_SEGMEXEC
18621+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18622+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18623+#else
18624 #define STACK_TOP TASK_SIZE
18625-#define STACK_TOP_MAX STACK_TOP
18626+#endif
18627+
18628+#define STACK_TOP_MAX TASK_SIZE
18629
18630 #define INIT_THREAD { \
18631- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18632+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18633 .vm86_info = NULL, \
18634 .sysenter_cs = __KERNEL_CS, \
18635 .io_bitmap_ptr = NULL, \
18636@@ -856,7 +855,7 @@ static inline void spin_lock_prefetch(const void *x)
18637 */
18638 #define INIT_TSS { \
18639 .x86_tss = { \
18640- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18641+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18642 .ss0 = __KERNEL_DS, \
18643 .ss1 = __KERNEL_CS, \
18644 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
18645@@ -867,11 +866,7 @@ static inline void spin_lock_prefetch(const void *x)
18646 extern unsigned long thread_saved_pc(struct task_struct *tsk);
18647
18648 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
18649-#define KSTK_TOP(info) \
18650-({ \
18651- unsigned long *__ptr = (unsigned long *)(info); \
18652- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
18653-})
18654+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
18655
18656 /*
18657 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
18658@@ -886,7 +881,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18659 #define task_pt_regs(task) \
18660 ({ \
18661 struct pt_regs *__regs__; \
18662- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
18663+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
18664 __regs__ - 1; \
18665 })
18666
18667@@ -902,13 +897,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18668 * particular problem by preventing anything from being mapped
18669 * at the maximum canonical address.
18670 */
18671-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18672+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18673
18674 /* This decides where the kernel will search for a free chunk of vm
18675 * space during mmap's.
18676 */
18677 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18678- 0xc0000000 : 0xFFFFe000)
18679+ 0xc0000000 : 0xFFFFf000)
18680
18681 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18682 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18683@@ -919,11 +914,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18684 #define STACK_TOP_MAX TASK_SIZE_MAX
18685
18686 #define INIT_THREAD { \
18687- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18688+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18689 }
18690
18691 #define INIT_TSS { \
18692- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18693+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18694 }
18695
18696 /*
18697@@ -951,6 +946,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18698 */
18699 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18700
18701+#ifdef CONFIG_PAX_SEGMEXEC
18702+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18703+#endif
18704+
18705 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18706
18707 /* Get/set a process' ability to use the timestamp counter instruction */
18708@@ -995,7 +994,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18709 return 0;
18710 }
18711
18712-extern unsigned long arch_align_stack(unsigned long sp);
18713+#define arch_align_stack(x) ((x) & ~0xfUL)
18714 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18715
18716 void default_idle(void);
18717@@ -1005,6 +1004,6 @@ bool xen_set_default_idle(void);
18718 #define xen_set_default_idle 0
18719 #endif
18720
18721-void stop_this_cpu(void *dummy);
18722+void stop_this_cpu(void *dummy) __noreturn;
18723 void df_debug(struct pt_regs *regs, long error_code);
18724 #endif /* _ASM_X86_PROCESSOR_H */
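
Note: the processor.h hunk introduces the PCID plumbing used later by tlbflush.h: load_cr3() now ORs a PCID into the page-directory physical address, and PCID_NOFLUSH (CR3 bit 63) lets a later CR3 write switch address spaces without discarding that PCID's TLB entries. A sketch of the CR3 composition, with write_cr3() stubbed:

#include <stdint.h>

#define PCID_KERNEL  0UL
#define PCID_USER    1UL
#define PCID_NOFLUSH (1UL << 63)	/* CR3 bit 63: keep this PCID's TLB entries */

/* Stand-in for the privileged "mov %0, %%cr3": */
static void write_cr3(uint64_t val) { (void)val; }

static void load_cr3(uint64_t pgd_phys)
{
	write_cr3(pgd_phys | PCID_KERNEL);	/* kernel pgd runs under PCID 0 */
}

static void enter_user_pgd_noflush(uint64_t user_pgd_phys)
{
	/* switch to the shadow user pgd under PCID 1, keeping its TLB tag */
	write_cr3(user_pgd_phys | PCID_USER | PCID_NOFLUSH);
}

int main(void)
{
	load_cr3(0x100000);
	enter_user_pgd_noflush(0x200000);
	return 0;
}
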
18725diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18726index 86fc2bb..bd5049a 100644
18727--- a/arch/x86/include/asm/ptrace.h
18728+++ b/arch/x86/include/asm/ptrace.h
18729@@ -89,28 +89,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
18730 }
18731
18732 /*
18733- * user_mode_vm(regs) determines whether a register set came from user mode.
18734+ * user_mode(regs) determines whether a register set came from user mode.
18735 * This is true if V8086 mode was enabled OR if the register set was from
18736 * protected mode with RPL-3 CS value. This tricky test checks that with
18737 * one comparison. Many places in the kernel can bypass this full check
18738- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
18739+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
18740+ * be used.
18741 */
18742-static inline int user_mode(struct pt_regs *regs)
18743+static inline int user_mode_novm(struct pt_regs *regs)
18744 {
18745 #ifdef CONFIG_X86_32
18746 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
18747 #else
18748- return !!(regs->cs & 3);
18749+ return !!(regs->cs & SEGMENT_RPL_MASK);
18750 #endif
18751 }
18752
18753-static inline int user_mode_vm(struct pt_regs *regs)
18754+static inline int user_mode(struct pt_regs *regs)
18755 {
18756 #ifdef CONFIG_X86_32
18757 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
18758 USER_RPL;
18759 #else
18760- return user_mode(regs);
18761+ return user_mode_novm(regs);
18762 #endif
18763 }
18764
18765@@ -126,15 +127,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18766 #ifdef CONFIG_X86_64
18767 static inline bool user_64bit_mode(struct pt_regs *regs)
18768 {
18769+ unsigned long cs = regs->cs & 0xffff;
18770 #ifndef CONFIG_PARAVIRT
18771 /*
18772 * On non-paravirt systems, this is the only long mode CPL 3
18773 * selector. We do not allow long mode selectors in the LDT.
18774 */
18775- return regs->cs == __USER_CS;
18776+ return cs == __USER_CS;
18777 #else
18778 /* Headers are too twisted for this to go in paravirt.h. */
18779- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18780+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18781 #endif
18782 }
18783
18784@@ -185,9 +187,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18785 * Traps from the kernel do not save sp and ss.
18786 * Use the helper function to retrieve sp.
18787 */
18788- if (offset == offsetof(struct pt_regs, sp) &&
18789- regs->cs == __KERNEL_CS)
18790- return kernel_stack_pointer(regs);
18791+ if (offset == offsetof(struct pt_regs, sp)) {
18792+ unsigned long cs = regs->cs & 0xffff;
18793+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18794+ return kernel_stack_pointer(regs);
18795+ }
18796 #endif
18797 return *(unsigned long *)((unsigned long)regs + offset);
18798 }
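
Note: the ptrace.h changes repeatedly mask regs->cs with 0xffff before comparing: the cs slot in struct pt_regs is register-sized but only its low 16 bits hold the selector, so anything above must be ignored. A minimal model of the patched user_64bit_mode() (0x33 is the x86-64 __USER_CS):

#include <stdint.h>

#define __USER_CS 0x33	/* x86-64 user code selector: GDT index 6, RPL 3 */

static int user_64bit_mode(uint64_t cs_slot)
{
	uint64_t cs = cs_slot & 0xffff;	/* only the low 16 bits are the selector */

	return cs == __USER_CS;
}

int main(void)
{
	/* garbage above bit 15 must not change the verdict */
	return user_64bit_mode(0xdead0033u) ? 0 : 1;
}
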
18799diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
18800index ae0e241..e80b10b 100644
18801--- a/arch/x86/include/asm/qrwlock.h
18802+++ b/arch/x86/include/asm/qrwlock.h
18803@@ -7,8 +7,8 @@
18804 #define queue_write_unlock queue_write_unlock
18805 static inline void queue_write_unlock(struct qrwlock *lock)
18806 {
18807- barrier();
18808- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
18809+ barrier();
18810+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
18811 }
18812 #endif
18813
18814diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18815index 9c6b890..5305f53 100644
18816--- a/arch/x86/include/asm/realmode.h
18817+++ b/arch/x86/include/asm/realmode.h
18818@@ -22,16 +22,14 @@ struct real_mode_header {
18819 #endif
18820 /* APM/BIOS reboot */
18821 u32 machine_real_restart_asm;
18822-#ifdef CONFIG_X86_64
18823 u32 machine_real_restart_seg;
18824-#endif
18825 };
18826
18827 /* This must match data at trampoline_32/64.S */
18828 struct trampoline_header {
18829 #ifdef CONFIG_X86_32
18830 u32 start;
18831- u16 gdt_pad;
18832+ u16 boot_cs;
18833 u16 gdt_limit;
18834 u32 gdt_base;
18835 #else
18836diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18837index a82c4f1..ac45053 100644
18838--- a/arch/x86/include/asm/reboot.h
18839+++ b/arch/x86/include/asm/reboot.h
18840@@ -6,13 +6,13 @@
18841 struct pt_regs;
18842
18843 struct machine_ops {
18844- void (*restart)(char *cmd);
18845- void (*halt)(void);
18846- void (*power_off)(void);
18847+ void (* __noreturn restart)(char *cmd);
18848+ void (* __noreturn halt)(void);
18849+ void (* __noreturn power_off)(void);
18850 void (*shutdown)(void);
18851 void (*crash_shutdown)(struct pt_regs *);
18852- void (*emergency_restart)(void);
18853-};
18854+ void (* __noreturn emergency_restart)(void);
18855+} __no_const;
18856
18857 extern struct machine_ops machine_ops;
18858
18859diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
18860index 8f7866a..e442f20 100644
18861--- a/arch/x86/include/asm/rmwcc.h
18862+++ b/arch/x86/include/asm/rmwcc.h
18863@@ -3,7 +3,34 @@
18864
18865 #ifdef CC_HAVE_ASM_GOTO
18866
18867-#define __GEN_RMWcc(fullop, var, cc, ...) \
18868+#ifdef CONFIG_PAX_REFCOUNT
18869+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18870+do { \
18871+ asm_volatile_goto (fullop \
18872+ ";jno 0f\n" \
18873+ fullantiop \
18874+ ";int $4\n0:\n" \
18875+ _ASM_EXTABLE(0b, 0b) \
18876+ ";j" cc " %l[cc_label]" \
18877+ : : "m" (var), ## __VA_ARGS__ \
18878+ : "memory" : cc_label); \
18879+ return 0; \
18880+cc_label: \
18881+ return 1; \
18882+} while (0)
18883+#else
18884+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18885+do { \
18886+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
18887+ : : "m" (var), ## __VA_ARGS__ \
18888+ : "memory" : cc_label); \
18889+ return 0; \
18890+cc_label: \
18891+ return 1; \
18892+} while (0)
18893+#endif
18894+
18895+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18896 do { \
18897 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
18898 : : "m" (var), ## __VA_ARGS__ \
18899@@ -13,15 +40,46 @@ cc_label: \
18900 return 1; \
18901 } while (0)
18902
18903-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18904- __GEN_RMWcc(op " " arg0, var, cc)
18905+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18906+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18907
18908-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18909- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
18910+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18911+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18912+
18913+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18914+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
18915+
18916+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18917+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
18918
18919 #else /* !CC_HAVE_ASM_GOTO */
18920
18921-#define __GEN_RMWcc(fullop, var, cc, ...) \
18922+#ifdef CONFIG_PAX_REFCOUNT
18923+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18924+do { \
18925+ char c; \
18926+ asm volatile (fullop \
18927+ ";jno 0f\n" \
18928+ fullantiop \
18929+ ";int $4\n0:\n" \
18930+ _ASM_EXTABLE(0b, 0b) \
18931+ "; set" cc " %1" \
18932+ : "+m" (var), "=qm" (c) \
18933+ : __VA_ARGS__ : "memory"); \
18934+ return c != 0; \
18935+} while (0)
18936+#else
18937+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18938+do { \
18939+ char c; \
18940+ asm volatile (fullop "; set" cc " %1" \
18941+ : "+m" (var), "=qm" (c) \
18942+ : __VA_ARGS__ : "memory"); \
18943+ return c != 0; \
18944+} while (0)
18945+#endif
18946+
18947+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18948 do { \
18949 char c; \
18950 asm volatile (fullop "; set" cc " %1" \
18951@@ -30,11 +88,17 @@ do { \
18952 return c != 0; \
18953 } while (0)
18954
18955-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18956- __GEN_RMWcc(op " " arg0, var, cc)
18957+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18958+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18959+
18960+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18961+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18962+
18963+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18964+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
18965
18966-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18967- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
18968+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18969+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
18970
18971 #endif /* CC_HAVE_ASM_GOTO */
18972
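
Note: the reworked __GEN_RMWcc() is the heart of PAX_REFCOUNT: perform the read-modify-write, and if the overflow flag is set, apply the anti-op to roll the value back and execute int $4 so the kernel's #OF handler can report the overflow (the _ASM_EXTABLE(0b, 0b) entry lets execution resume at the same label). A standalone x86/GCC sketch of that sequence on a plain int; the trap path would be fatal if actually hit in userspace, so this is illustration only:

static int checked_dec_and_test(int *v)
{
	char zero;

	asm volatile("decl %0\n\t"
		     "jno 0f\n\t"	/* no overflow: fall through */
		     "incl %0\n\t"	/* the anti-op: roll the value back */
		     "int $4\n"		/* raise #OF for the kernel to report */
		     "0:\n\t"
		     "sete %1"		/* capture decl's ZF, like the macro's set<cc> tail */
		     : "+m" (*v), "=qm" (zero)
		     :
		     : "memory", "cc");
	return zero != 0;
}

int main(void)
{
	int v = 2;

	return checked_dec_and_test(&v);	/* 2 -> 1: not zero, returns 0 */
}
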
18973diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
18974index cad82c9..2e5c5c1 100644
18975--- a/arch/x86/include/asm/rwsem.h
18976+++ b/arch/x86/include/asm/rwsem.h
18977@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
18978 {
18979 asm volatile("# beginning down_read\n\t"
18980 LOCK_PREFIX _ASM_INC "(%1)\n\t"
18981+
18982+#ifdef CONFIG_PAX_REFCOUNT
18983+ "jno 0f\n"
18984+ LOCK_PREFIX _ASM_DEC "(%1)\n"
18985+ "int $4\n0:\n"
18986+ _ASM_EXTABLE(0b, 0b)
18987+#endif
18988+
18989 /* adds 0x00000001 */
18990 " jns 1f\n"
18991 " call call_rwsem_down_read_failed\n"
18992@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
18993 "1:\n\t"
18994 " mov %1,%2\n\t"
18995 " add %3,%2\n\t"
18996+
18997+#ifdef CONFIG_PAX_REFCOUNT
18998+ "jno 0f\n"
18999+ "sub %3,%2\n"
19000+ "int $4\n0:\n"
19001+ _ASM_EXTABLE(0b, 0b)
19002+#endif
19003+
19004 " jle 2f\n\t"
19005 LOCK_PREFIX " cmpxchg %2,%0\n\t"
19006 " jnz 1b\n\t"
19007@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
19008 long tmp;
19009 asm volatile("# beginning down_write\n\t"
19010 LOCK_PREFIX " xadd %1,(%2)\n\t"
19011+
19012+#ifdef CONFIG_PAX_REFCOUNT
19013+ "jno 0f\n"
19014+ "mov %1,(%2)\n"
19015+ "int $4\n0:\n"
19016+ _ASM_EXTABLE(0b, 0b)
19017+#endif
19018+
19019 /* adds 0xffff0001, returns the old value */
19020 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
19021 /* was the active mask 0 before? */
19022@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
19023 long tmp;
19024 asm volatile("# beginning __up_read\n\t"
19025 LOCK_PREFIX " xadd %1,(%2)\n\t"
19026+
19027+#ifdef CONFIG_PAX_REFCOUNT
19028+ "jno 0f\n"
19029+ "mov %1,(%2)\n"
19030+ "int $4\n0:\n"
19031+ _ASM_EXTABLE(0b, 0b)
19032+#endif
19033+
19034 /* subtracts 1, returns the old value */
19035 " jns 1f\n\t"
19036 " call call_rwsem_wake\n" /* expects old value in %edx */
19037@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
19038 long tmp;
19039 asm volatile("# beginning __up_write\n\t"
19040 LOCK_PREFIX " xadd %1,(%2)\n\t"
19041+
19042+#ifdef CONFIG_PAX_REFCOUNT
19043+ "jno 0f\n"
19044+ "mov %1,(%2)\n"
19045+ "int $4\n0:\n"
19046+ _ASM_EXTABLE(0b, 0b)
19047+#endif
19048+
19049 /* subtracts 0xffff0001, returns the old value */
19050 " jns 1f\n\t"
19051 " call call_rwsem_wake\n" /* expects old value in %edx */
19052@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19053 {
19054 asm volatile("# beginning __downgrade_write\n\t"
19055 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
19056+
19057+#ifdef CONFIG_PAX_REFCOUNT
19058+ "jno 0f\n"
19059+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
19060+ "int $4\n0:\n"
19061+ _ASM_EXTABLE(0b, 0b)
19062+#endif
19063+
19064 /*
19065 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
19066 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
19067@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19068 */
19069 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19070 {
19071- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
19072+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
19073+
19074+#ifdef CONFIG_PAX_REFCOUNT
19075+ "jno 0f\n"
19076+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
19077+ "int $4\n0:\n"
19078+ _ASM_EXTABLE(0b, 0b)
19079+#endif
19080+
19081 : "+m" (sem->count)
19082 : "er" (delta));
19083 }
19084@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19085 */
19086 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
19087 {
19088- return delta + xadd(&sem->count, delta);
19089+ return delta + xadd_check_overflow(&sem->count, delta);
19090 }
19091
19092 #endif /* __KERNEL__ */
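
Note: the rwsem hunks apply the same overflow check to "lock xadd" sequences, but the undo differs: xadd leaves the pre-add value in the source register, so recovery is a plain store of that old value rather than a subtraction, which could itself overflow. A non-atomic C model of the semantics, for illustration only:

static long xadd_model(long *count, long delta, int *overflowed)
{
	long old = *count;
	/* wrap deliberately via unsigned arithmetic (signed overflow is UB in C) */
	long sum = (long)((unsigned long)old + (unsigned long)delta);

	if ((delta > 0 && sum < old) || (delta < 0 && sum > old)) {
		*count = old;		/* "mov %1,(%2)": restore the old value */
		*overflowed = 1;	/* the real sequence raises int $4 here */
	} else {
		*count = sum;
		*overflowed = 0;
	}
	return old;			/* xadd's return: the pre-add value */
}

int main(void)
{
	long sem = 0;
	int of;

	xadd_model(&sem, 1, &of);
	return (sem == 1 && !of) ? 0 : 1;
}
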
19093diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
19094index db257a5..b91bc77 100644
19095--- a/arch/x86/include/asm/segment.h
19096+++ b/arch/x86/include/asm/segment.h
19097@@ -73,10 +73,15 @@
19098 * 26 - ESPFIX small SS
19099 * 27 - per-cpu [ offset to per-cpu data area ]
19100 * 28 - stack_canary-20 [ for stack protector ]
19101- * 29 - unused
19102- * 30 - unused
19103+ * 29 - PCI BIOS CS
19104+ * 30 - PCI BIOS DS
19105 * 31 - TSS for double fault handler
19106 */
19107+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
19108+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
19109+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
19110+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
19111+
19112 #define GDT_ENTRY_TLS_MIN 6
19113 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
19114
19115@@ -88,6 +93,8 @@
19116
19117 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
19118
19119+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
19120+
19121 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
19122
19123 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
19124@@ -113,6 +120,12 @@
19125 #define __KERNEL_STACK_CANARY 0
19126 #endif
19127
19128+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
19129+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
19130+
19131+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19132+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19133+
19134 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19135
19136 /*
19137@@ -140,7 +153,7 @@
19138 */
19139
19140 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19141-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19142+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19143
19144
19145 #else
19146@@ -164,6 +177,8 @@
19147 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19148 #define __USER32_DS __USER_DS
19149
19150+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19151+
19152 #define GDT_ENTRY_TSS 8 /* needs two entries */
19153 #define GDT_ENTRY_LDT 10 /* needs two entries */
19154 #define GDT_ENTRY_TLS_MIN 12
19155@@ -172,6 +187,8 @@
19156 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19157 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19158
19159+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19160+
19161 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19162 #define FS_TLS 0
19163 #define GS_TLS 1
19164@@ -179,12 +196,14 @@
19165 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19166 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19167
19168-#define GDT_ENTRIES 16
19169+#define GDT_ENTRIES 17
19170
19171 #endif
19172
19173 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19174+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19175 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19176+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19177 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19178 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19179 #ifndef CONFIG_PARAVIRT
19180@@ -256,7 +275,7 @@ static inline unsigned long get_limit(unsigned long segment)
19181 {
19182 unsigned long __limit;
19183 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19184- return __limit + 1;
19185+ return __limit;
19186 }
19187
19188 #endif /* !__ASSEMBLY__ */
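
Note: the new selector macros above all follow the architectural encoding: a selector is the GDT index shifted left by 3, OR'd with the table-indicator bit and the requested privilege level, which is why each definition is just index*8 (plus 3 for user selectors). A compile-and-run check of that arithmetic:

#define SELECTOR(index, rpl) (((index) << 3) | (rpl))

#define GDT_ENTRY_KERNEXEC_KERNEL_CS 4	/* the 32-bit slot from the hunk above */
#define __KERNEXEC_KERNEL_CS SELECTOR(GDT_ENTRY_KERNEXEC_KERNEL_CS, 0)

int main(void)
{
	/* index 5 with RPL 3 is 0x2b, the x86-64 __USER_DS */
	return (SELECTOR(5, 3) == 0x2b && __KERNEXEC_KERNEL_CS == 0x20) ? 0 : 1;
}
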
19189diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19190index 8d3120f..352b440 100644
19191--- a/arch/x86/include/asm/smap.h
19192+++ b/arch/x86/include/asm/smap.h
19193@@ -25,11 +25,40 @@
19194
19195 #include <asm/alternative-asm.h>
19196
19197+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19198+#define ASM_PAX_OPEN_USERLAND \
19199+ 661: jmp 663f; \
19200+ .pushsection .altinstr_replacement, "a" ; \
19201+ 662: pushq %rax; nop; \
19202+ .popsection ; \
19203+ .pushsection .altinstructions, "a" ; \
19204+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19205+ .popsection ; \
19206+ call __pax_open_userland; \
19207+ popq %rax; \
19208+ 663:
19209+
19210+#define ASM_PAX_CLOSE_USERLAND \
19211+ 661: jmp 663f; \
19212+ .pushsection .altinstr_replacement, "a" ; \
19213+ 662: pushq %rax; nop; \
19214+ .popsection; \
19215+ .pushsection .altinstructions, "a" ; \
19216+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19217+ .popsection; \
19218+ call __pax_close_userland; \
19219+ popq %rax; \
19220+ 663:
19221+#else
19222+#define ASM_PAX_OPEN_USERLAND
19223+#define ASM_PAX_CLOSE_USERLAND
19224+#endif
19225+
19226 #ifdef CONFIG_X86_SMAP
19227
19228 #define ASM_CLAC \
19229 661: ASM_NOP3 ; \
19230- .pushsection .altinstr_replacement, "ax" ; \
19231+ .pushsection .altinstr_replacement, "a" ; \
19232 662: __ASM_CLAC ; \
19233 .popsection ; \
19234 .pushsection .altinstructions, "a" ; \
19235@@ -38,7 +67,7 @@
19236
19237 #define ASM_STAC \
19238 661: ASM_NOP3 ; \
19239- .pushsection .altinstr_replacement, "ax" ; \
19240+ .pushsection .altinstr_replacement, "a" ; \
19241 662: __ASM_STAC ; \
19242 .popsection ; \
19243 .pushsection .altinstructions, "a" ; \
19244@@ -56,6 +85,37 @@
19245
19246 #include <asm/alternative.h>
19247
19248+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19249+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19250+
19251+extern void __pax_open_userland(void);
19252+static __always_inline unsigned long pax_open_userland(void)
19253+{
19254+
19255+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19256+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19257+ :
19258+ : [open] "i" (__pax_open_userland)
19259+ : "memory", "rax");
19260+#endif
19261+
19262+ return 0;
19263+}
19264+
19265+extern void __pax_close_userland(void);
19266+static __always_inline unsigned long pax_close_userland(void)
19267+{
19268+
19269+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19270+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19271+ :
19272+ : [close] "i" (__pax_close_userland)
19273+ : "memory", "rax");
19274+#endif
19275+
19276+ return 0;
19277+}
19278+
19279 #ifdef CONFIG_X86_SMAP
19280
19281 static __always_inline void clac(void)
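
Note: pax_open_userland()/pax_close_userland() above lean on the kernel's alternatives mechanism: the call site is assembled as a 5-byte NOP and rewritten into a real call at boot only on CPUs flagged with the PaX-defined X86_FEATURE_STRONGUDEREF, so everyone else pays one NOP. A minimal model of that patch-at-boot idea, with a function pointer standing in for the in-kernel binary patching:

static void nop_impl(void)  { /* the 5-byte NOP case */ }
static void real_impl(void) { /* would reload segment/CR3 state to open userland */ }

/* In the kernel this is a patched call site, not a pointer: */
static void (*open_userland)(void) = nop_impl;

static void boot_apply_alternatives(int cpu_has_stronguderef)
{
	if (cpu_has_stronguderef)
		open_userland = real_impl;
}

int main(void)
{
	boot_apply_alternatives(1);
	open_userland();	/* now the real implementation */
	return 0;
}
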
19282diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19283index 8cd1cc3..827e09e 100644
19284--- a/arch/x86/include/asm/smp.h
19285+++ b/arch/x86/include/asm/smp.h
19286@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19287 /* cpus sharing the last level cache: */
19288 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19289 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19290-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19291+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19292
19293 static inline struct cpumask *cpu_sibling_mask(int cpu)
19294 {
19295@@ -78,7 +78,7 @@ struct smp_ops {
19296
19297 void (*send_call_func_ipi)(const struct cpumask *mask);
19298 void (*send_call_func_single_ipi)(int cpu);
19299-};
19300+} __no_const;
19301
19302 /* Globals due to paravirt */
19303 extern void set_cpu_sibling_map(int cpu);
19304@@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
19305 extern int safe_smp_processor_id(void);
19306
19307 #elif defined(CONFIG_X86_64_SMP)
19308-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19309-
19310-#define stack_smp_processor_id() \
19311-({ \
19312- struct thread_info *ti; \
19313- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19314- ti->cpu; \
19315-})
19316+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19317+#define stack_smp_processor_id() raw_smp_processor_id()
19318 #define safe_smp_processor_id() smp_processor_id()
19319
19320 #endif
19321diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
19322index 6a99859..03cb807 100644
19323--- a/arch/x86/include/asm/stackprotector.h
19324+++ b/arch/x86/include/asm/stackprotector.h
19325@@ -47,7 +47,7 @@
19326 * head_32 for boot CPU and setup_per_cpu_areas() for others.
19327 */
19328 #define GDT_STACK_CANARY_INIT \
19329- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
19330+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
19331
19332 /*
19333 * Initialize the stackprotector canary value.
19334@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
19335
19336 static inline void load_stack_canary_segment(void)
19337 {
19338-#ifdef CONFIG_X86_32
19339+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19340 asm volatile ("mov %0, %%gs" : : "r" (0));
19341 #endif
19342 }
19343diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
19344index 70bbe39..4ae2bd4 100644
19345--- a/arch/x86/include/asm/stacktrace.h
19346+++ b/arch/x86/include/asm/stacktrace.h
19347@@ -11,28 +11,20 @@
19348
19349 extern int kstack_depth_to_print;
19350
19351-struct thread_info;
19352+struct task_struct;
19353 struct stacktrace_ops;
19354
19355-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
19356- unsigned long *stack,
19357- unsigned long bp,
19358- const struct stacktrace_ops *ops,
19359- void *data,
19360- unsigned long *end,
19361- int *graph);
19362+typedef unsigned long walk_stack_t(struct task_struct *task,
19363+ void *stack_start,
19364+ unsigned long *stack,
19365+ unsigned long bp,
19366+ const struct stacktrace_ops *ops,
19367+ void *data,
19368+ unsigned long *end,
19369+ int *graph);
19370
19371-extern unsigned long
19372-print_context_stack(struct thread_info *tinfo,
19373- unsigned long *stack, unsigned long bp,
19374- const struct stacktrace_ops *ops, void *data,
19375- unsigned long *end, int *graph);
19376-
19377-extern unsigned long
19378-print_context_stack_bp(struct thread_info *tinfo,
19379- unsigned long *stack, unsigned long bp,
19380- const struct stacktrace_ops *ops, void *data,
19381- unsigned long *end, int *graph);
19382+extern walk_stack_t print_context_stack;
19383+extern walk_stack_t print_context_stack_bp;
19384
19385 /* Generic stack tracer with callbacks */
19386
19387@@ -40,7 +32,7 @@ struct stacktrace_ops {
19388 void (*address)(void *data, unsigned long address, int reliable);
19389 /* On negative return stop dumping */
19390 int (*stack)(void *data, char *name);
19391- walk_stack_t walk_stack;
19392+ walk_stack_t *walk_stack;
19393 };
19394
19395 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
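
Note: the stacktrace.h hunk switches from spelling out two long prototypes to declaring walk_stack_t as a function type (not a pointer type): the externs and the ops member then share one signature and cannot drift apart. The same style in miniature, with a hypothetical two-argument signature standing in for the real eight-argument one:

typedef unsigned long walk_stack_fn(void *task, unsigned long *stack);

extern walk_stack_fn print_context_stack;	/* a function of that type */

struct stacktrace_ops_like {
	walk_stack_fn *walk_stack;		/* a pointer to such a function */
};

static unsigned long my_walker(void *task, unsigned long *stack)
{
	(void)task; (void)stack;
	return 0;
}

int main(void)
{
	struct stacktrace_ops_like ops = { .walk_stack = my_walker };

	return (int)ops.walk_stack(0, 0);
}
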
19396diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
19397index 751bf4b..a1278b5 100644
19398--- a/arch/x86/include/asm/switch_to.h
19399+++ b/arch/x86/include/asm/switch_to.h
19400@@ -112,7 +112,7 @@ do { \
19401 "call __switch_to\n\t" \
19402 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
19403 __switch_canary \
19404- "movq %P[thread_info](%%rsi),%%r8\n\t" \
19405+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
19406 "movq %%rax,%%rdi\n\t" \
19407 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
19408 "jnz ret_from_fork\n\t" \
19409@@ -123,7 +123,7 @@ do { \
19410 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
19411 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
19412 [_tif_fork] "i" (_TIF_FORK), \
19413- [thread_info] "i" (offsetof(struct task_struct, stack)), \
19414+ [thread_info] "m" (current_tinfo), \
19415 [current_task] "m" (current_task) \
19416 __switch_canary_iparam \
19417 : "memory", "cc" __EXTRA_CLOBBER)
19418diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
19419index 547e344..6be1175 100644
19420--- a/arch/x86/include/asm/thread_info.h
19421+++ b/arch/x86/include/asm/thread_info.h
19422@@ -24,7 +24,6 @@ struct exec_domain;
19423 #include <linux/atomic.h>
19424
19425 struct thread_info {
19426- struct task_struct *task; /* main task structure */
19427 struct exec_domain *exec_domain; /* execution domain */
19428 __u32 flags; /* low level flags */
19429 __u32 status; /* thread synchronous flags */
19430@@ -33,13 +32,13 @@ struct thread_info {
19431 mm_segment_t addr_limit;
19432 struct restart_block restart_block;
19433 void __user *sysenter_return;
19434+ unsigned long lowest_stack;
19435 unsigned int sig_on_uaccess_error:1;
19436 unsigned int uaccess_err:1; /* uaccess failed */
19437 };
19438
19439-#define INIT_THREAD_INFO(tsk) \
19440+#define INIT_THREAD_INFO \
19441 { \
19442- .task = &tsk, \
19443 .exec_domain = &default_exec_domain, \
19444 .flags = 0, \
19445 .cpu = 0, \
19446@@ -50,7 +49,7 @@ struct thread_info {
19447 }, \
19448 }
19449
19450-#define init_thread_info (init_thread_union.thread_info)
19451+#define init_thread_info (init_thread_union.stack)
19452 #define init_stack (init_thread_union.stack)
19453
19454 #else /* !__ASSEMBLY__ */
19455@@ -91,6 +90,7 @@ struct thread_info {
19456 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19457 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19458 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19459+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19460
19461 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19462 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19463@@ -115,17 +115,18 @@ struct thread_info {
19464 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19465 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19466 #define _TIF_X32 (1 << TIF_X32)
19467+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19468
19469 /* work to do in syscall_trace_enter() */
19470 #define _TIF_WORK_SYSCALL_ENTRY \
19471 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19472 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19473- _TIF_NOHZ)
19474+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19475
19476 /* work to do in syscall_trace_leave() */
19477 #define _TIF_WORK_SYSCALL_EXIT \
19478 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19479- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19480+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19481
19482 /* work to do on interrupt/exception return */
19483 #define _TIF_WORK_MASK \
19484@@ -136,7 +137,7 @@ struct thread_info {
19485 /* work to do on any return to user space */
19486 #define _TIF_ALLWORK_MASK \
19487 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19488- _TIF_NOHZ)
19489+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19490
19491 /* Only used for 64 bit */
19492 #define _TIF_DO_NOTIFY_MASK \
19493@@ -151,7 +152,6 @@ struct thread_info {
19494 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
19495
19496 #define STACK_WARN (THREAD_SIZE/8)
19497-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
19498
19499 /*
19500 * macros/functions for gaining access to the thread information structure
19501@@ -162,26 +162,18 @@ struct thread_info {
19502
19503 DECLARE_PER_CPU(unsigned long, kernel_stack);
19504
19505+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19506+
19507 static inline struct thread_info *current_thread_info(void)
19508 {
19509- struct thread_info *ti;
19510- ti = (void *)(this_cpu_read_stable(kernel_stack) +
19511- KERNEL_STACK_OFFSET - THREAD_SIZE);
19512- return ti;
19513+ return this_cpu_read_stable(current_tinfo);
19514 }
19515
19516 #else /* !__ASSEMBLY__ */
19517
19518 /* how to get the thread information struct from ASM */
19519 #define GET_THREAD_INFO(reg) \
19520- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
19521- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
19522-
19523-/*
19524- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
19525- * a certain register (to be used in assembler memory operands).
19526- */
19527-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
19528+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
19529
19530 #endif
19531
19532@@ -237,5 +229,12 @@ static inline bool is_ia32_task(void)
19533 extern void arch_task_cache_init(void);
19534 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19535 extern void arch_release_task_struct(struct task_struct *tsk);
19536+
19537+#define __HAVE_THREAD_FUNCTIONS
19538+#define task_thread_info(task) (&(task)->tinfo)
19539+#define task_stack_page(task) ((task)->stack)
19540+#define setup_thread_stack(p, org) do {} while (0)
19541+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19542+
19543 #endif
19544 #endif /* _ASM_X86_THREAD_INFO_H */
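
Note: with thread_info moved out of the stack's base, current_thread_info() above becomes a read of a per-CPU pointer instead of masking the stack pointer, so a leaked stack address no longer pinpoints thread_info. A simplified model without the real per-CPU machinery:

struct thread_info_like { unsigned long flags; };

/* one slot per CPU; the kernel uses this_cpu_read_stable(current_tinfo) */
static struct thread_info_like *current_tinfo_percpu[64];

static struct thread_info_like *current_thread_info_model(int cpu)
{
	return current_tinfo_percpu[cpu];
}

int main(void)
{
	static struct thread_info_like ti;

	current_tinfo_percpu[0] = &ti;
	return current_thread_info_model(0) == &ti ? 0 : 1;
}
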
19545diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19546index 04905bf..1178cdf 100644
19547--- a/arch/x86/include/asm/tlbflush.h
19548+++ b/arch/x86/include/asm/tlbflush.h
19549@@ -17,18 +17,44 @@
19550
19551 static inline void __native_flush_tlb(void)
19552 {
19553+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19554+ u64 descriptor[2];
19555+
19556+ descriptor[0] = PCID_KERNEL;
19557+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
19558+ return;
19559+ }
19560+
19561+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19562+ if (static_cpu_has(X86_FEATURE_PCID)) {
19563+ unsigned int cpu = raw_get_cpu();
19564+
19565+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19566+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19567+ raw_put_cpu_no_resched();
19568+ return;
19569+ }
19570+#endif
19571+
19572 native_write_cr3(native_read_cr3());
19573 }
19574
19575 static inline void __native_flush_tlb_global_irq_disabled(void)
19576 {
19577- unsigned long cr4;
19578+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19579+ u64 descriptor[2];
19580
19581- cr4 = native_read_cr4();
19582- /* clear PGE */
19583- native_write_cr4(cr4 & ~X86_CR4_PGE);
19584- /* write old PGE again and flush TLBs */
19585- native_write_cr4(cr4);
19586+ descriptor[0] = PCID_KERNEL;
19587+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19588+ } else {
19589+ unsigned long cr4;
19590+
19591+ cr4 = native_read_cr4();
19592+ /* clear PGE */
19593+ native_write_cr4(cr4 & ~X86_CR4_PGE);
19594+ /* write old PGE again and flush TLBs */
19595+ native_write_cr4(cr4);
19596+ }
19597 }
19598
19599 static inline void __native_flush_tlb_global(void)
19600@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void)
19601
19602 static inline void __native_flush_tlb_single(unsigned long addr)
19603 {
19604+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19605+ u64 descriptor[2];
19606+
19607+ descriptor[0] = PCID_KERNEL;
19608+ descriptor[1] = addr;
19609+
19610+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19611+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19612+ if (addr < TASK_SIZE_MAX)
19613+ descriptor[1] += pax_user_shadow_base;
19614+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19615+ }
19616+
19617+ descriptor[0] = PCID_USER;
19618+ descriptor[1] = addr;
19619+#endif
19620+
19621+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19622+ return;
19623+ }
19624+
19625+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19626+ if (static_cpu_has(X86_FEATURE_PCID)) {
19627+ unsigned int cpu = raw_get_cpu();
19628+
19629+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19630+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19631+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19632+ raw_put_cpu_no_resched();
19633+
19634+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19635+ addr += pax_user_shadow_base;
19636+ }
19637+#endif
19638+
19639 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19640 }
19641
19642diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19643index 0d592e0..cbc90a3 100644
19644--- a/arch/x86/include/asm/uaccess.h
19645+++ b/arch/x86/include/asm/uaccess.h
19646@@ -7,6 +7,7 @@
19647 #include <linux/compiler.h>
19648 #include <linux/thread_info.h>
19649 #include <linux/string.h>
19650+#include <linux/spinlock.h>
19651 #include <asm/asm.h>
19652 #include <asm/page.h>
19653 #include <asm/smap.h>
19654@@ -29,7 +30,12 @@
19655
19656 #define get_ds() (KERNEL_DS)
19657 #define get_fs() (current_thread_info()->addr_limit)
19658+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19659+void __set_fs(mm_segment_t x);
19660+void set_fs(mm_segment_t x);
19661+#else
19662 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19663+#endif
19664
19665 #define segment_eq(a, b) ((a).seg == (b).seg)
19666
19667@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
19668 * checks that the pointer is in the user space range - after calling
19669 * this function, memory access functions may still return -EFAULT.
19670 */
19671-#define access_ok(type, addr, size) \
19672- likely(!__range_not_ok(addr, size, user_addr_max()))
19673+extern int _cond_resched(void);
19674+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
19675+#define access_ok(type, addr, size) \
19676+({ \
19677+ unsigned long __size = size; \
19678+ unsigned long __addr = (unsigned long)addr; \
19679+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19680+ if (__ret_ao && __size) { \
19681+ unsigned long __addr_ao = __addr & PAGE_MASK; \
19682+ unsigned long __end_ao = __addr + __size - 1; \
19683+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19684+ while (__addr_ao <= __end_ao) { \
19685+ char __c_ao; \
19686+ __addr_ao += PAGE_SIZE; \
19687+ if (__size > PAGE_SIZE) \
19688+ _cond_resched(); \
19689+ if (__get_user(__c_ao, (char __user *)__addr)) \
19690+ break; \
19691+ if (type != VERIFY_WRITE) { \
19692+ __addr = __addr_ao; \
19693+ continue; \
19694+ } \
19695+ if (__put_user(__c_ao, (char __user *)__addr)) \
19696+ break; \
19697+ __addr = __addr_ao; \
19698+ } \
19699+ } \
19700+ } \
19701+ __ret_ao; \
19702+})
19703
19704 /*
19705 * The exception table consists of pairs of addresses relative to the
19706@@ -134,11 +168,13 @@ extern int __get_user_8(void);
19707 extern int __get_user_bad(void);
19708
19709 /*
19710- * This is a type: either unsigned long, if the argument fits into
19711- * that type, or otherwise unsigned long long.
19712+ * This is a type: either (un)signed int, if the argument fits into
19713+ * that type, or otherwise (un)signed long long.
19714 */
19715 #define __inttype(x) \
19716-__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19717+__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0U), \
19718+ __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0ULL, 0LL),\
19719+ __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0U, 0)))
19720
19721 /**
19722 * get_user: - Get a simple variable from user space.
19723@@ -176,10 +212,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19724 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19725 __chk_user_ptr(ptr); \
19726 might_fault(); \
19727+ pax_open_userland(); \
19728 asm volatile("call __get_user_%P3" \
19729 : "=a" (__ret_gu), "=r" (__val_gu) \
19730 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19731 (x) = (__typeof__(*(ptr))) __val_gu; \
19732+ pax_close_userland(); \
19733 __ret_gu; \
19734 })
19735
19736@@ -187,13 +225,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19737 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19738 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19739
19740-
19741+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19742+#define __copyuser_seg "gs;"
19743+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19744+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19745+#else
19746+#define __copyuser_seg
19747+#define __COPYUSER_SET_ES
19748+#define __COPYUSER_RESTORE_ES
19749+#endif
19750
19751 #ifdef CONFIG_X86_32
19752 #define __put_user_asm_u64(x, addr, err, errret) \
19753 asm volatile(ASM_STAC "\n" \
19754- "1: movl %%eax,0(%2)\n" \
19755- "2: movl %%edx,4(%2)\n" \
19756+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19757+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19758 "3: " ASM_CLAC "\n" \
19759 ".section .fixup,\"ax\"\n" \
19760 "4: movl %3,%0\n" \
19761@@ -206,8 +252,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19762
19763 #define __put_user_asm_ex_u64(x, addr) \
19764 asm volatile(ASM_STAC "\n" \
19765- "1: movl %%eax,0(%1)\n" \
19766- "2: movl %%edx,4(%1)\n" \
19767+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19768+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19769 "3: " ASM_CLAC "\n" \
19770 _ASM_EXTABLE_EX(1b, 2b) \
19771 _ASM_EXTABLE_EX(2b, 3b) \
19772@@ -257,7 +303,8 @@ extern void __put_user_8(void);
19773 __typeof__(*(ptr)) __pu_val; \
19774 __chk_user_ptr(ptr); \
19775 might_fault(); \
19776- __pu_val = x; \
19777+ __pu_val = (x); \
19778+ pax_open_userland(); \
19779 switch (sizeof(*(ptr))) { \
19780 case 1: \
19781 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19782@@ -275,6 +322,7 @@ extern void __put_user_8(void);
19783 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19784 break; \
19785 } \
19786+ pax_close_userland(); \
19787 __ret_pu; \
19788 })
19789
19790@@ -355,8 +403,10 @@ do { \
19791 } while (0)
19792
19793 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19794+do { \
19795+ pax_open_userland(); \
19796 asm volatile(ASM_STAC "\n" \
19797- "1: mov"itype" %2,%"rtype"1\n" \
19798+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19799 "2: " ASM_CLAC "\n" \
19800 ".section .fixup,\"ax\"\n" \
19801 "3: mov %3,%0\n" \
19802@@ -364,8 +414,10 @@ do { \
19803 " jmp 2b\n" \
19804 ".previous\n" \
19805 _ASM_EXTABLE(1b, 3b) \
19806- : "=r" (err), ltype(x) \
19807- : "m" (__m(addr)), "i" (errret), "0" (err))
19808+ : "=r" (err), ltype (x) \
19809+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
19810+ pax_close_userland(); \
19811+} while (0)
19812
19813 #define __get_user_size_ex(x, ptr, size) \
19814 do { \
19815@@ -389,7 +441,7 @@ do { \
19816 } while (0)
19817
19818 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
19819- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
19820+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
19821 "2:\n" \
19822 _ASM_EXTABLE_EX(1b, 2b) \
19823 : ltype(x) : "m" (__m(addr)))
19824@@ -406,13 +458,24 @@ do { \
19825 int __gu_err; \
19826 unsigned long __gu_val; \
19827 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
19828- (x) = (__force __typeof__(*(ptr)))__gu_val; \
19829+ (x) = (__typeof__(*(ptr)))__gu_val; \
19830 __gu_err; \
19831 })
19832
19833 /* FIXME: this hack is definitely wrong -AK */
19834 struct __large_struct { unsigned long buf[100]; };
19835-#define __m(x) (*(struct __large_struct __user *)(x))
19836+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19837+#define ____m(x) \
19838+({ \
19839+ unsigned long ____x = (unsigned long)(x); \
19840+ if (____x < pax_user_shadow_base) \
19841+ ____x += pax_user_shadow_base; \
19842+ (typeof(x))____x; \
19843+})
19844+#else
19845+#define ____m(x) (x)
19846+#endif
19847+#define __m(x) (*(struct __large_struct __user *)____m(x))
19848
19849 /*
19850 * Tell gcc we read from memory instead of writing: this is because
19851@@ -420,8 +483,10 @@ struct __large_struct { unsigned long buf[100]; };
19852 * aliasing issues.
19853 */
19854 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19855+do { \
19856+ pax_open_userland(); \
19857 asm volatile(ASM_STAC "\n" \
19858- "1: mov"itype" %"rtype"1,%2\n" \
19859+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
19860 "2: " ASM_CLAC "\n" \
19861 ".section .fixup,\"ax\"\n" \
19862 "3: mov %3,%0\n" \
19863@@ -429,10 +494,12 @@ struct __large_struct { unsigned long buf[100]; };
19864 ".previous\n" \
19865 _ASM_EXTABLE(1b, 3b) \
19866 : "=r"(err) \
19867- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
19868+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
19869+ pax_close_userland(); \
19870+} while (0)
19871
19872 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
19873- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
19874+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
19875 "2:\n" \
19876 _ASM_EXTABLE_EX(1b, 2b) \
19877 : : ltype(x), "m" (__m(addr)))
19878@@ -442,11 +509,13 @@ struct __large_struct { unsigned long buf[100]; };
19879 */
19880 #define uaccess_try do { \
19881 current_thread_info()->uaccess_err = 0; \
19882+ pax_open_userland(); \
19883 stac(); \
19884 barrier();
19885
19886 #define uaccess_catch(err) \
19887 clac(); \
19888+ pax_close_userland(); \
19889 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
19890 } while (0)
19891
19892@@ -471,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
19893 * On error, the variable @x is set to zero.
19894 */
19895
19896+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19897+#define __get_user(x, ptr) get_user((x), (ptr))
19898+#else
19899 #define __get_user(x, ptr) \
19900 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
19901+#endif
19902
19903 /**
19904 * __put_user: - Write a simple value into user space, with less checking.
19905@@ -494,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
19906 * Returns zero on success, or -EFAULT on error.
19907 */
19908
19909+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19910+#define __put_user(x, ptr) put_user((x), (ptr))
19911+#else
19912 #define __put_user(x, ptr) \
19913 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
19914+#endif
19915
19916 #define __get_user_unaligned __get_user
19917 #define __put_user_unaligned __put_user
19918@@ -513,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
19919 #define get_user_ex(x, ptr) do { \
19920 unsigned long __gue_val; \
19921 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
19922- (x) = (__force __typeof__(*(ptr)))__gue_val; \
19923+ (x) = (__typeof__(*(ptr)))__gue_val; \
19924 } while (0)
19925
19926 #define put_user_try uaccess_try
19927@@ -531,7 +608,7 @@ extern __must_check long strlen_user(const char __user *str);
19928 extern __must_check long strnlen_user(const char __user *str, long n);
19929
19930 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
19931-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
19932+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
19933
19934 extern void __cmpxchg_wrong_size(void)
19935 __compiletime_error("Bad argument size for cmpxchg");
19936@@ -542,18 +619,19 @@ extern void __cmpxchg_wrong_size(void)
19937 __typeof__(ptr) __uval = (uval); \
19938 __typeof__(*(ptr)) __old = (old); \
19939 __typeof__(*(ptr)) __new = (new); \
19940+ pax_open_userland(); \
19941 switch (size) { \
19942 case 1: \
19943 { \
19944 asm volatile("\t" ASM_STAC "\n" \
19945- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
19946+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
19947 "2:\t" ASM_CLAC "\n" \
19948 "\t.section .fixup, \"ax\"\n" \
19949 "3:\tmov %3, %0\n" \
19950 "\tjmp 2b\n" \
19951 "\t.previous\n" \
19952 _ASM_EXTABLE(1b, 3b) \
19953- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19954+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19955 : "i" (-EFAULT), "q" (__new), "1" (__old) \
19956 : "memory" \
19957 ); \
19958@@ -562,14 +640,14 @@ extern void __cmpxchg_wrong_size(void)
19959 case 2: \
19960 { \
19961 asm volatile("\t" ASM_STAC "\n" \
19962- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
19963+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
19964 "2:\t" ASM_CLAC "\n" \
19965 "\t.section .fixup, \"ax\"\n" \
19966 "3:\tmov %3, %0\n" \
19967 "\tjmp 2b\n" \
19968 "\t.previous\n" \
19969 _ASM_EXTABLE(1b, 3b) \
19970- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19971+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19972 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19973 : "memory" \
19974 ); \
19975@@ -578,14 +656,14 @@ extern void __cmpxchg_wrong_size(void)
19976 case 4: \
19977 { \
19978 asm volatile("\t" ASM_STAC "\n" \
19979- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
19980+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
19981 "2:\t" ASM_CLAC "\n" \
19982 "\t.section .fixup, \"ax\"\n" \
19983 "3:\tmov %3, %0\n" \
19984 "\tjmp 2b\n" \
19985 "\t.previous\n" \
19986 _ASM_EXTABLE(1b, 3b) \
19987- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19988+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19989 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19990 : "memory" \
19991 ); \
19992@@ -597,14 +675,14 @@ extern void __cmpxchg_wrong_size(void)
19993 __cmpxchg_wrong_size(); \
19994 \
19995 asm volatile("\t" ASM_STAC "\n" \
19996- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
19997+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
19998 "2:\t" ASM_CLAC "\n" \
19999 "\t.section .fixup, \"ax\"\n" \
20000 "3:\tmov %3, %0\n" \
20001 "\tjmp 2b\n" \
20002 "\t.previous\n" \
20003 _ASM_EXTABLE(1b, 3b) \
20004- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20005+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20006 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20007 : "memory" \
20008 ); \
20009@@ -613,6 +691,7 @@ extern void __cmpxchg_wrong_size(void)
20010 default: \
20011 __cmpxchg_wrong_size(); \
20012 } \
20013+ pax_close_userland(); \
20014 *__uval = __old; \
20015 __ret; \
20016 })
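
The four size cases in this macro differ only in operand width: each issues a locked cmpxchg against the ____m()-remapped user pointer, with __copyuser_seg supplying the user segment override on i386 and an exception-table entry catching faults. The core instruction pattern, stripped of the override and the fixup, looks like the following sketch (assumes x86 and GCC-style extended asm):

#include <stdio.h>

/* Compare *ptr with old; if equal, store new. Returns the value that
 * was actually found, mirroring the 4-byte case of the patched macro
 * minus __copyuser_seg and the exception table. */
static int cmpxchg32(int *ptr, int old, int new)
{
	asm volatile("lock; cmpxchgl %2, %1"
		     : "+a" (old), "+m" (*ptr)  /* expected value lives in eax */
		     : "r" (new)                /* value to install on match  */
		     : "memory");
	return old;
}

int main(void)
{
	int v = 5;
	printf("%d\n", cmpxchg32(&v, 5, 9));  /* prints 5, v becomes 9 */
	printf("%d\n", cmpxchg32(&v, 5, 1));  /* prints 9, v stays 9   */
	return 0;
}

The "+a" constraint pins the expected value to eax, which cmpxchg both consumes and overwrites; that is why the patched macro ties __old to an "=a" output operand.
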
20017@@ -636,17 +715,6 @@ extern struct movsl_mask {
20018
20019 #define ARCH_HAS_NOCACHE_UACCESS 1
20020
20021-#ifdef CONFIG_X86_32
20022-# include <asm/uaccess_32.h>
20023-#else
20024-# include <asm/uaccess_64.h>
20025-#endif
20026-
20027-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
20028- unsigned n);
20029-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20030- unsigned n);
20031-
20032 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
20033 # define copy_user_diag __compiletime_error
20034 #else
20035@@ -656,7 +724,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20036 extern void copy_user_diag("copy_from_user() buffer size is too small")
20037 copy_from_user_overflow(void);
20038 extern void copy_user_diag("copy_to_user() buffer size is too small")
20039-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20040+copy_to_user_overflow(void);
20041
20042 #undef copy_user_diag
20043
20044@@ -669,7 +737,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
20045
20046 extern void
20047 __compiletime_warning("copy_to_user() buffer size is not provably correct")
20048-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20049+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
20050 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
20051
20052 #else
20053@@ -684,10 +752,16 @@ __copy_from_user_overflow(int size, unsigned long count)
20054
20055 #endif
20056
20057+#ifdef CONFIG_X86_32
20058+# include <asm/uaccess_32.h>
20059+#else
20060+# include <asm/uaccess_64.h>
20061+#endif
20062+
20063 static inline unsigned long __must_check
20064 copy_from_user(void *to, const void __user *from, unsigned long n)
20065 {
20066- int sz = __compiletime_object_size(to);
20067+ size_t sz = __compiletime_object_size(to);
20068
20069 might_fault();
20070
20071@@ -709,12 +783,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20072 * case, and do only runtime checking for non-constant sizes.
20073 */
20074
20075- if (likely(sz < 0 || sz >= n))
20076- n = _copy_from_user(to, from, n);
20077- else if(__builtin_constant_p(n))
20078- copy_from_user_overflow();
20079- else
20080- __copy_from_user_overflow(sz, n);
20081+ if (likely(sz != (size_t)-1 && sz < n)) {
20082+ if(__builtin_constant_p(n))
20083+ copy_from_user_overflow();
20084+ else
20085+ __copy_from_user_overflow(sz, n);
20086+ } else if (access_ok(VERIFY_READ, from, n))
20087+ n = __copy_from_user(to, from, n);
20088+ else if ((long)n > 0)
20089+ memset(to, 0, n);
20090
20091 return n;
20092 }
20093@@ -722,17 +799,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20094 static inline unsigned long __must_check
20095 copy_to_user(void __user *to, const void *from, unsigned long n)
20096 {
20097- int sz = __compiletime_object_size(from);
20098+ size_t sz = __compiletime_object_size(from);
20099
20100 might_fault();
20101
20102 /* See the comment in copy_from_user() above. */
20103- if (likely(sz < 0 || sz >= n))
20104- n = _copy_to_user(to, from, n);
20105- else if(__builtin_constant_p(n))
20106- copy_to_user_overflow();
20107- else
20108- __copy_to_user_overflow(sz, n);
20109+ if (likely(sz != (size_t)-1 && sz < n)) {
20110+ if(__builtin_constant_p(n))
20111+ copy_to_user_overflow();
20112+ else
20113+ __copy_to_user_overflow(sz, n);
20114+ } else if (access_ok(VERIFY_WRITE, to, n))
20115+ n = __copy_to_user(to, from, n);
20116
20117 return n;
20118 }
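
With the out-of-line _copy_from_user()/_copy_to_user() helpers removed, the hardened check order is inlined: a compile-time object size smaller than n is always treated as an error, otherwise access_ok() gates the copy, and a rejected read zeroes the destination so stale kernel memory cannot leak back to the caller. A userspace model of that control flow, where access_ok_model() and SZ_UNKNOWN are stand-ins:

#include <stdio.h>
#include <string.h>

#define SZ_UNKNOWN ((size_t)-1)  /* __compiletime_object_size() "don't know" */

static int access_ok_model(const void *p, size_t n) { return p != NULL; }

/* Returns the number of bytes NOT copied, like copy_from_user(). */
static size_t copy_from_user_model(void *to, size_t sz,
				   const void *from, size_t n)
{
	if (sz != SZ_UNKNOWN && sz < n)  /* object provably too small */
		return n;                /* kernel diagnoses and refuses here */
	if (access_ok_model(from, n))
		return memcpy(to, from, n), 0;
	if ((long)n > 0)
		memset(to, 0, n);        /* don't leak stale kernel bytes */
	return n;
}

int main(void)
{
	char buf[8], src[16] = "ABCDEFGHIJKLMNO";
	printf("ok:   %zu left\n", copy_from_user_model(buf, sizeof(buf), src, 8));
	printf("over: %zu left\n", copy_from_user_model(buf, sizeof(buf), src, 16));
	printf("bad:  %zu left\n", copy_from_user_model(buf, sizeof(buf), NULL, 8));
	return 0;
}
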
20119diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
20120index 3c03a5d..edb68ae 100644
20121--- a/arch/x86/include/asm/uaccess_32.h
20122+++ b/arch/x86/include/asm/uaccess_32.h
20123@@ -40,9 +40,14 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
20124 * anything, so this is accurate.
20125 */
20126
20127-static __always_inline unsigned long __must_check
20128+static __always_inline __size_overflow(3) unsigned long __must_check
20129 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20130 {
20131+ if ((long)n < 0)
20132+ return n;
20133+
20134+ check_object_size(from, n, true);
20135+
20136 if (__builtin_constant_p(n)) {
20137 unsigned long ret;
20138
20139@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20140 __copy_to_user(void __user *to, const void *from, unsigned long n)
20141 {
20142 might_fault();
20143+
20144 return __copy_to_user_inatomic(to, from, n);
20145 }
20146
20147-static __always_inline unsigned long
20148+static __always_inline __size_overflow(3) unsigned long
20149 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20150 {
20151+ if ((long)n < 0)
20152+ return n;
20153+
20154 /* Avoid zeroing the tail if the copy fails..
20155 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20156 * but as the zeroing behaviour is only significant when n is not
20157@@ -137,6 +146,12 @@ static __always_inline unsigned long
20158 __copy_from_user(void *to, const void __user *from, unsigned long n)
20159 {
20160 might_fault();
20161+
20162+ if ((long)n < 0)
20163+ return n;
20164+
20165+ check_object_size(to, n, false);
20166+
20167 if (__builtin_constant_p(n)) {
20168 unsigned long ret;
20169
20170@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20171 const void __user *from, unsigned long n)
20172 {
20173 might_fault();
20174+
20175+ if ((long)n < 0)
20176+ return n;
20177+
20178 if (__builtin_constant_p(n)) {
20179 unsigned long ret;
20180
20181@@ -181,7 +200,10 @@ static __always_inline unsigned long
20182 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20183 unsigned long n)
20184 {
20185- return __copy_from_user_ll_nocache_nozero(to, from, n);
20186+ if ((long)n < 0)
20187+ return n;
20188+
20189+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20190 }
20191
20192 #endif /* _ASM_X86_UACCESS_32_H */
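
Each 32-bit helper above now opens with if ((long)n < 0) return n;. Read as a signed value, any length with the top bit set, which is usually the product of underflowed size arithmetic, gets bounced before it can become a multi-gigabyte copy. A short demonstration of the failure mode the guard catches (model code, not kernel API):

#include <stdio.h>

int main(void)
{
	size_t have = 4, want = 8;
	size_t n = have - want;  /* underflows to a huge unsigned value */

	if ((long)n < 0)         /* the guard added by the patch */
		printf("rejected bogus length %zu\n", n);
	return 0;
}
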
20193diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20194index 12a26b9..c36fff5 100644
20195--- a/arch/x86/include/asm/uaccess_64.h
20196+++ b/arch/x86/include/asm/uaccess_64.h
20197@@ -10,6 +10,9 @@
20198 #include <asm/alternative.h>
20199 #include <asm/cpufeature.h>
20200 #include <asm/page.h>
20201+#include <asm/pgtable.h>
20202+
20203+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20204
20205 /*
20206 * Copy To/From Userspace
20207@@ -23,8 +26,8 @@ copy_user_generic_string(void *to, const void *from, unsigned len);
20208 __must_check unsigned long
20209 copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20210
20211-static __always_inline __must_check unsigned long
20212-copy_user_generic(void *to, const void *from, unsigned len)
20213+static __always_inline __must_check __size_overflow(3) unsigned long
20214+copy_user_generic(void *to, const void *from, unsigned long len)
20215 {
20216 unsigned ret;
20217
20218@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20219 }
20220
20221 __must_check unsigned long
20222-copy_in_user(void __user *to, const void __user *from, unsigned len);
20223+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20224
20225 static __always_inline __must_check
20226-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20227+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20228 {
20229- int ret = 0;
20230+ size_t sz = __compiletime_object_size(dst);
20231+ unsigned ret = 0;
20232+
20233+ if (size > INT_MAX)
20234+ return size;
20235+
20236+ check_object_size(dst, size, false);
20237+
20238+#ifdef CONFIG_PAX_MEMORY_UDEREF
20239+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20240+ return size;
20241+#endif
20242+
20243+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20244+ if(__builtin_constant_p(size))
20245+ copy_from_user_overflow();
20246+ else
20247+ __copy_from_user_overflow(sz, size);
20248+ return size;
20249+ }
20250
20251 if (!__builtin_constant_p(size))
20252- return copy_user_generic(dst, (__force void *)src, size);
20253+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20254 switch (size) {
20255- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20256+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20257 ret, "b", "b", "=q", 1);
20258 return ret;
20259- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20260+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20261 ret, "w", "w", "=r", 2);
20262 return ret;
20263- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20264+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20265 ret, "l", "k", "=r", 4);
20266 return ret;
20267- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20268+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20269 ret, "q", "", "=r", 8);
20270 return ret;
20271 case 10:
20272- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20273+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20274 ret, "q", "", "=r", 10);
20275 if (unlikely(ret))
20276 return ret;
20277 __get_user_asm(*(u16 *)(8 + (char *)dst),
20278- (u16 __user *)(8 + (char __user *)src),
20279+ (const u16 __user *)(8 + (const char __user *)src),
20280 ret, "w", "w", "=r", 2);
20281 return ret;
20282 case 16:
20283- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20284+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20285 ret, "q", "", "=r", 16);
20286 if (unlikely(ret))
20287 return ret;
20288 __get_user_asm(*(u64 *)(8 + (char *)dst),
20289- (u64 __user *)(8 + (char __user *)src),
20290+ (const u64 __user *)(8 + (const char __user *)src),
20291 ret, "q", "", "=r", 8);
20292 return ret;
20293 default:
20294- return copy_user_generic(dst, (__force void *)src, size);
20295+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20296 }
20297 }
20298
20299 static __always_inline __must_check
20300-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20301+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20302 {
20303 might_fault();
20304 return __copy_from_user_nocheck(dst, src, size);
20305 }
20306
20307 static __always_inline __must_check
20308-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20309+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20310 {
20311- int ret = 0;
20312+ size_t sz = __compiletime_object_size(src);
20313+ unsigned ret = 0;
20314+
20315+ if (size > INT_MAX)
20316+ return size;
20317+
20318+ check_object_size(src, size, true);
20319+
20320+#ifdef CONFIG_PAX_MEMORY_UDEREF
20321+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20322+ return size;
20323+#endif
20324+
20325+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20326+ if(__builtin_constant_p(size))
20327+ copy_to_user_overflow();
20328+ else
20329+ __copy_to_user_overflow(sz, size);
20330+ return size;
20331+ }
20332
20333 if (!__builtin_constant_p(size))
20334- return copy_user_generic((__force void *)dst, src, size);
20335+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20336 switch (size) {
20337- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
20338+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
20339 ret, "b", "b", "iq", 1);
20340 return ret;
20341- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
20342+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
20343 ret, "w", "w", "ir", 2);
20344 return ret;
20345- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
20346+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
20347 ret, "l", "k", "ir", 4);
20348 return ret;
20349- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
20350+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20351 ret, "q", "", "er", 8);
20352 return ret;
20353 case 10:
20354- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20355+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20356 ret, "q", "", "er", 10);
20357 if (unlikely(ret))
20358 return ret;
20359 asm("":::"memory");
20360- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
20361+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
20362 ret, "w", "w", "ir", 2);
20363 return ret;
20364 case 16:
20365- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20366+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20367 ret, "q", "", "er", 16);
20368 if (unlikely(ret))
20369 return ret;
20370 asm("":::"memory");
20371- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
20372+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
20373 ret, "q", "", "er", 8);
20374 return ret;
20375 default:
20376- return copy_user_generic((__force void *)dst, src, size);
20377+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20378 }
20379 }
20380
20381 static __always_inline __must_check
20382-int __copy_to_user(void __user *dst, const void *src, unsigned size)
20383+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
20384 {
20385 might_fault();
20386 return __copy_to_user_nocheck(dst, src, size);
20387 }
20388
20389 static __always_inline __must_check
20390-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20391+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20392 {
20393- int ret = 0;
20394+ unsigned ret = 0;
20395
20396 might_fault();
20397+
20398+ if (size > INT_MAX)
20399+ return size;
20400+
20401+#ifdef CONFIG_PAX_MEMORY_UDEREF
20402+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20403+ return size;
20404+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20405+ return size;
20406+#endif
20407+
20408 if (!__builtin_constant_p(size))
20409- return copy_user_generic((__force void *)dst,
20410- (__force void *)src, size);
20411+ return copy_user_generic((__force_kernel void *)____m(dst),
20412+ (__force_kernel const void *)____m(src), size);
20413 switch (size) {
20414 case 1: {
20415 u8 tmp;
20416- __get_user_asm(tmp, (u8 __user *)src,
20417+ __get_user_asm(tmp, (const u8 __user *)src,
20418 ret, "b", "b", "=q", 1);
20419 if (likely(!ret))
20420 __put_user_asm(tmp, (u8 __user *)dst,
20421@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20422 }
20423 case 2: {
20424 u16 tmp;
20425- __get_user_asm(tmp, (u16 __user *)src,
20426+ __get_user_asm(tmp, (const u16 __user *)src,
20427 ret, "w", "w", "=r", 2);
20428 if (likely(!ret))
20429 __put_user_asm(tmp, (u16 __user *)dst,
20430@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20431
20432 case 4: {
20433 u32 tmp;
20434- __get_user_asm(tmp, (u32 __user *)src,
20435+ __get_user_asm(tmp, (const u32 __user *)src,
20436 ret, "l", "k", "=r", 4);
20437 if (likely(!ret))
20438 __put_user_asm(tmp, (u32 __user *)dst,
20439@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20440 }
20441 case 8: {
20442 u64 tmp;
20443- __get_user_asm(tmp, (u64 __user *)src,
20444+ __get_user_asm(tmp, (const u64 __user *)src,
20445 ret, "q", "", "=r", 8);
20446 if (likely(!ret))
20447 __put_user_asm(tmp, (u64 __user *)dst,
20448@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20449 return ret;
20450 }
20451 default:
20452- return copy_user_generic((__force void *)dst,
20453- (__force void *)src, size);
20454+ return copy_user_generic((__force_kernel void *)____m(dst),
20455+ (__force_kernel const void *)____m(src), size);
20456 }
20457 }
20458
20459-static __must_check __always_inline int
20460-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20461+static __must_check __always_inline unsigned long
20462+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20463 {
20464 return __copy_from_user_nocheck(dst, src, size);
20465 }
20466
20467-static __must_check __always_inline int
20468-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20469+static __must_check __always_inline unsigned long
20470+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20471 {
20472 return __copy_to_user_nocheck(dst, src, size);
20473 }
20474
20475-extern long __copy_user_nocache(void *dst, const void __user *src,
20476- unsigned size, int zerorest);
20477+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20478+ unsigned long size, int zerorest);
20479
20480-static inline int
20481-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20482+static inline unsigned long
20483+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20484 {
20485 might_fault();
20486+
20487+ if (size > INT_MAX)
20488+ return size;
20489+
20490+#ifdef CONFIG_PAX_MEMORY_UDEREF
20491+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20492+ return size;
20493+#endif
20494+
20495 return __copy_user_nocache(dst, src, size, 1);
20496 }
20497
20498-static inline int
20499+static inline unsigned long
20500 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20501- unsigned size)
20502+ unsigned long size)
20503 {
20504+ if (size > INT_MAX)
20505+ return size;
20506+
20507+#ifdef CONFIG_PAX_MEMORY_UDEREF
20508+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20509+ return size;
20510+#endif
20511+
20512 return __copy_user_nocache(dst, src, size, 0);
20513 }
20514
20515 unsigned long
20516-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
20517+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
20518
20519 #endif /* _ASM_X86_UACCESS_64_H */
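
On amd64, UDEREF mirrors the user half of the address space above pax_user_shadow_base, and ____m() shifts raw user pointers into that mirror before the kernel touches them, which is why every copy_user_generic() argument above is now wrapped. A sketch of the remap itself; the base follows the 1UL << TASK_SIZE_MAX_SHIFT value set by the nopcid path later in the patch, with the shift assumed to be 47:

#include <stdio.h>

static unsigned long long pax_user_shadow_base = 1ULL << 47;  /* assumed */

/* ____m(): pointers below the shadow base are shifted into the mirror. */
static unsigned long long shadow_remap(unsigned long long x)
{
	if (x < pax_user_shadow_base)
		x += pax_user_shadow_base;
	return x;
}

int main(void)
{
	unsigned long long user = 0x7fffdeadb000ULL;  /* typical user address */
	printf("%#llx -> %#llx\n", user, shadow_remap(user));
	return 0;
}
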
20520diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20521index 5b238981..77fdd78 100644
20522--- a/arch/x86/include/asm/word-at-a-time.h
20523+++ b/arch/x86/include/asm/word-at-a-time.h
20524@@ -11,7 +11,7 @@
20525 * and shift, for example.
20526 */
20527 struct word_at_a_time {
20528- const unsigned long one_bits, high_bits;
20529+ unsigned long one_bits, high_bits;
20530 };
20531
20532 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20533diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20534index f58a9c7..dc378042a 100644
20535--- a/arch/x86/include/asm/x86_init.h
20536+++ b/arch/x86/include/asm/x86_init.h
20537@@ -129,7 +129,7 @@ struct x86_init_ops {
20538 struct x86_init_timers timers;
20539 struct x86_init_iommu iommu;
20540 struct x86_init_pci pci;
20541-};
20542+} __no_const;
20543
20544 /**
20545 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20546@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20547 void (*setup_percpu_clockev)(void);
20548 void (*early_percpu_clock_init)(void);
20549 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20550-};
20551+} __no_const;
20552
20553 struct timespec;
20554
20555@@ -168,7 +168,7 @@ struct x86_platform_ops {
20556 void (*save_sched_clock_state)(void);
20557 void (*restore_sched_clock_state)(void);
20558 void (*apic_post_init)(void);
20559-};
20560+} __no_const;
20561
20562 struct pci_dev;
20563 struct msi_msg;
20564@@ -182,7 +182,7 @@ struct x86_msi_ops {
20565 void (*teardown_msi_irqs)(struct pci_dev *dev);
20566 void (*restore_msi_irqs)(struct pci_dev *dev);
20567 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20568-};
20569+} __no_const;
20570
20571 struct IO_APIC_route_entry;
20572 struct io_apic_irq_attr;
20573@@ -203,7 +203,7 @@ struct x86_io_apic_ops {
20574 unsigned int destination, int vector,
20575 struct io_apic_irq_attr *attr);
20576 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20577-};
20578+} __no_const;
20579
20580 extern struct x86_init_ops x86_init;
20581 extern struct x86_cpuinit_ops x86_cpuinit;
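
__no_const marks the handful of ops tables that must stay writable during boot; under the constification plugin everything else of this shape becomes const, and later hunks mark the apic tables __read_only for the same reason, so that stored function pointers cannot be retargeted by an attacker. The hardened default looks like this in plain C (all names illustrative):

#include <stdio.h>

struct platform_ops {
	void (*setup)(void);
};

static void real_setup(void) { puts("platform setup"); }

/* Constified: the initializer is resolved at build time and the table
 * lands in .rodata, so .setup cannot be overwritten afterwards. */
static const struct platform_ops ops = { .setup = real_setup };

int main(void)
{
	ops.setup();
	/* ops.setup = hijack;   would be rejected at compile time */
	return 0;
}
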
20582diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20583index 5eea099..ff7ef8d 100644
20584--- a/arch/x86/include/asm/xen/page.h
20585+++ b/arch/x86/include/asm/xen/page.h
20586@@ -83,7 +83,7 @@ static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
20587 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
20588 * cases needing an extended handling.
20589 */
20590-static inline unsigned long __pfn_to_mfn(unsigned long pfn)
20591+static inline unsigned long __intentional_overflow(-1) __pfn_to_mfn(unsigned long pfn)
20592 {
20593 unsigned long mfn;
20594
20595diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20596index c9a6d68..cb57f42 100644
20597--- a/arch/x86/include/asm/xsave.h
20598+++ b/arch/x86/include/asm/xsave.h
20599@@ -223,12 +223,16 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20600 if (unlikely(err))
20601 return -EFAULT;
20602
20603+ pax_open_userland();
20604 __asm__ __volatile__(ASM_STAC "\n"
20605- "1:"XSAVE"\n"
20606+ "1:"
20607+ __copyuser_seg
20608+ XSAVE"\n"
20609 "2: " ASM_CLAC "\n"
20610 xstate_fault
20611 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20612 : "memory");
20613+ pax_close_userland();
20614 return err;
20615 }
20616
20617@@ -238,16 +242,20 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20618 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20619 {
20620 int err = 0;
20621- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20622+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20623 u32 lmask = mask;
20624 u32 hmask = mask >> 32;
20625
20626+ pax_open_userland();
20627 __asm__ __volatile__(ASM_STAC "\n"
20628- "1:"XRSTOR"\n"
20629+ "1:"
20630+ __copyuser_seg
20631+ XRSTOR"\n"
20632 "2: " ASM_CLAC "\n"
20633 xstate_fault
20634 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20635 : "memory"); /* memory required? */
20636+ pax_close_userland();
20637 return err;
20638 }
20639
20640diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20641index d993e33..8db1b18 100644
20642--- a/arch/x86/include/uapi/asm/e820.h
20643+++ b/arch/x86/include/uapi/asm/e820.h
20644@@ -58,7 +58,7 @@ struct e820map {
20645 #define ISA_START_ADDRESS 0xa0000
20646 #define ISA_END_ADDRESS 0x100000
20647
20648-#define BIOS_BEGIN 0x000a0000
20649+#define BIOS_BEGIN 0x000c0000
20650 #define BIOS_END 0x00100000
20651
20652 #define BIOS_ROM_BASE 0xffe00000
20653diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
20654index 7b0a55a..ad115bf 100644
20655--- a/arch/x86/include/uapi/asm/ptrace-abi.h
20656+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
20657@@ -49,7 +49,6 @@
20658 #define EFLAGS 144
20659 #define RSP 152
20660 #define SS 160
20661-#define ARGOFFSET R11
20662 #endif /* __ASSEMBLY__ */
20663
20664 /* top of stack page */
20665diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20666index 5d4502c..a567e09 100644
20667--- a/arch/x86/kernel/Makefile
20668+++ b/arch/x86/kernel/Makefile
20669@@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20670 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20671 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20672 obj-y += probe_roms.o
20673-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20674+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20675 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20676 obj-$(CONFIG_X86_64) += mcount_64.o
20677 obj-y += syscall_$(BITS).o vsyscall_gtod.o
20678diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20679index b5ddc96..490b4e4 100644
20680--- a/arch/x86/kernel/acpi/boot.c
20681+++ b/arch/x86/kernel/acpi/boot.c
20682@@ -1351,7 +1351,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
20683 * If your system is blacklisted here, but you find that acpi=force
20684 * works for you, please contact linux-acpi@vger.kernel.org
20685 */
20686-static struct dmi_system_id __initdata acpi_dmi_table[] = {
20687+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20688 /*
20689 * Boxes that need ACPI disabled
20690 */
20691@@ -1426,7 +1426,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20692 };
20693
20694 /* second table for DMI checks that should run after early-quirks */
20695-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20696+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20697 /*
20698 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20699 * which includes some code which overrides all temperature
20700diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20701index 3136820..e2c6577 100644
20702--- a/arch/x86/kernel/acpi/sleep.c
20703+++ b/arch/x86/kernel/acpi/sleep.c
20704@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20705 #else /* CONFIG_64BIT */
20706 #ifdef CONFIG_SMP
20707 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20708+
20709+ pax_open_kernel();
20710 early_gdt_descr.address =
20711 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20712+ pax_close_kernel();
20713+
20714 initial_gs = per_cpu_offset(smp_processor_id());
20715 #endif
20716 initial_code = (unsigned long)wakeup_long64;
20717diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20718index 665c6b7..eae4d56 100644
20719--- a/arch/x86/kernel/acpi/wakeup_32.S
20720+++ b/arch/x86/kernel/acpi/wakeup_32.S
20721@@ -29,13 +29,11 @@ wakeup_pmode_return:
20722 # and restore the stack ... but you need gdt for this to work
20723 movl saved_context_esp, %esp
20724
20725- movl %cs:saved_magic, %eax
20726- cmpl $0x12345678, %eax
20727+ cmpl $0x12345678, saved_magic
20728 jne bogus_magic
20729
20730 # jump to place where we left off
20731- movl saved_eip, %eax
20732- jmp *%eax
20733+ jmp *(saved_eip)
20734
20735 bogus_magic:
20736 jmp bogus_magic
20737diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20738index 703130f..27a155d 100644
20739--- a/arch/x86/kernel/alternative.c
20740+++ b/arch/x86/kernel/alternative.c
20741@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20742 */
20743 for (a = start; a < end; a++) {
20744 instr = (u8 *)&a->instr_offset + a->instr_offset;
20745+
20746+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20747+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20748+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20749+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20750+#endif
20751+
20752 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20753 BUG_ON(a->replacementlen > a->instrlen);
20754 BUG_ON(a->instrlen > sizeof(insnbuf));
20755@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20756 add_nops(insnbuf + a->replacementlen,
20757 a->instrlen - a->replacementlen);
20758
20759+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20760+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20761+ instr = ktva_ktla(instr);
20762+#endif
20763+
20764 text_poke_early(instr, insnbuf, a->instrlen);
20765 }
20766 }
20767@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20768 for (poff = start; poff < end; poff++) {
20769 u8 *ptr = (u8 *)poff + *poff;
20770
20771+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20772+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20773+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20774+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20775+#endif
20776+
20777 if (!*poff || ptr < text || ptr >= text_end)
20778 continue;
20779 /* turn DS segment override prefix into lock prefix */
20780- if (*ptr == 0x3e)
20781+ if (*ktla_ktva(ptr) == 0x3e)
20782 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20783 }
20784 mutex_unlock(&text_mutex);
20785@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20786 for (poff = start; poff < end; poff++) {
20787 u8 *ptr = (u8 *)poff + *poff;
20788
20789+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20790+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20791+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20792+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20793+#endif
20794+
20795 if (!*poff || ptr < text || ptr >= text_end)
20796 continue;
20797 /* turn lock prefix into DS segment override prefix */
20798- if (*ptr == 0xf0)
20799+ if (*ktla_ktva(ptr) == 0xf0)
20800 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20801 }
20802 mutex_unlock(&text_mutex);
20803@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20804
20805 BUG_ON(p->len > MAX_PATCH_LEN);
20806 /* prep the buffer with the original instructions */
20807- memcpy(insnbuf, p->instr, p->len);
20808+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
20809 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
20810 (unsigned long)p->instr, p->len);
20811
20812@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
20813 if (!uniproc_patched || num_possible_cpus() == 1)
20814 free_init_pages("SMP alternatives",
20815 (unsigned long)__smp_locks,
20816- (unsigned long)__smp_locks_end);
20817+ PAGE_ALIGN((unsigned long)__smp_locks_end));
20818 #endif
20819
20820 apply_paravirt(__parainstructions, __parainstructions_end);
20821@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
20822 * instructions. And on the local CPU you need to be protected again NMI or MCE
20823 * handlers seeing an inconsistent instruction while you patch.
20824 */
20825-void *__init_or_module text_poke_early(void *addr, const void *opcode,
20826+void *__kprobes text_poke_early(void *addr, const void *opcode,
20827 size_t len)
20828 {
20829 unsigned long flags;
20830 local_irq_save(flags);
20831- memcpy(addr, opcode, len);
20832+
20833+ pax_open_kernel();
20834+ memcpy(ktla_ktva(addr), opcode, len);
20835 sync_core();
20836+ pax_close_kernel();
20837+
20838 local_irq_restore(flags);
20839 /* Could also do a CLFLUSH here to speed up CPU recovery; but
20840 that causes hangs on some VIA CPUs. */
20841@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
20842 */
20843 void *text_poke(void *addr, const void *opcode, size_t len)
20844 {
20845- unsigned long flags;
20846- char *vaddr;
20847+ unsigned char *vaddr = ktla_ktva(addr);
20848 struct page *pages[2];
20849- int i;
20850+ size_t i;
20851
20852 if (!core_kernel_text((unsigned long)addr)) {
20853- pages[0] = vmalloc_to_page(addr);
20854- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
20855+ pages[0] = vmalloc_to_page(vaddr);
20856+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
20857 } else {
20858- pages[0] = virt_to_page(addr);
20859+ pages[0] = virt_to_page(vaddr);
20860 WARN_ON(!PageReserved(pages[0]));
20861- pages[1] = virt_to_page(addr + PAGE_SIZE);
20862+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
20863 }
20864 BUG_ON(!pages[0]);
20865- local_irq_save(flags);
20866- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
20867- if (pages[1])
20868- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
20869- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
20870- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
20871- clear_fixmap(FIX_TEXT_POKE0);
20872- if (pages[1])
20873- clear_fixmap(FIX_TEXT_POKE1);
20874- local_flush_tlb();
20875- sync_core();
20876- /* Could also do a CLFLUSH here to speed up CPU recovery; but
20877- that causes hangs on some VIA CPUs. */
20878+ text_poke_early(addr, opcode, len);
20879 for (i = 0; i < len; i++)
20880- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
20881- local_irq_restore(flags);
20882+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
20883 return addr;
20884 }
20885
20886@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
20887 if (likely(!bp_patching_in_progress))
20888 return 0;
20889
20890- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
20891+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
20892 return 0;
20893
20894 /* set up the specified breakpoint handler */
20895@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
20896 */
20897 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
20898 {
20899- unsigned char int3 = 0xcc;
20900+ const unsigned char int3 = 0xcc;
20901
20902 bp_int3_handler = handler;
20903 bp_int3_addr = (u8 *)addr + sizeof(int3);
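
With 32-bit KERNEXEC the kernel text exists at two addresses, the executable mapping and a writable alias at a fixed offset, and ktla_ktva()/ktva_ktla() convert between them. That is what lets the patched text_poke() collapse into text_poke_early() on the translated address inside a pax_open_kernel() window instead of juggling fixmap slots. A toy model of such paired aliases; the delta here is invented, the real one is fixed at link time:

#include <stdio.h>

#define ALIAS_DELTA 0x10000000UL  /* invented for the model */

static unsigned long ktla_ktva_model(unsigned long tla) { return tla + ALIAS_DELTA; }
static unsigned long ktva_ktla_model(unsigned long tva) { return tva - ALIAS_DELTA; }

int main(void)
{
	unsigned long text  = 0xc0100000UL;          /* pretend kernel text  */
	unsigned long alias = ktla_ktva_model(text); /* writable patch alias */

	printf("patch %#lx via %#lx, round trip ok: %d\n",
	       text, alias, ktva_ktla_model(alias) == text);
	return 0;
}
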
20904diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
20905index 29b5b18..3bdfc29 100644
20906--- a/arch/x86/kernel/apic/apic.c
20907+++ b/arch/x86/kernel/apic/apic.c
20908@@ -201,7 +201,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
20909 /*
20910 * Debug level, exported for io_apic.c
20911 */
20912-unsigned int apic_verbosity;
20913+int apic_verbosity;
20914
20915 int pic_mode;
20916
20917@@ -1991,7 +1991,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
20918 apic_write(APIC_ESR, 0);
20919 v = apic_read(APIC_ESR);
20920 ack_APIC_irq();
20921- atomic_inc(&irq_err_count);
20922+ atomic_inc_unchecked(&irq_err_count);
20923
20924 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
20925 smp_processor_id(), v);
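
irq_err_count here (and irq_mis_count further down) are plain statistics, so they become atomic_unchecked_t: PaX REFCOUNT makes ordinary atomic_t increments trap on signed overflow to stop reference-count wraps, and counters that may legitimately wrap have to opt out. A sketch of that split using C11 atomics, with abort() standing in for the kernel's reaction:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* REFCOUNT-style increment: wrapping past INT_MAX is treated as an attack. */
static void atomic_inc_checked(atomic_int *v)
{
	if (atomic_fetch_add(v, 1) == INT_MAX)
		abort();                 /* kernel would kill the offender */
}

/* Statistics counter: wrapping is harmless, so skip the check. */
static void atomic_inc_unchecked_model(atomic_int *v)
{
	atomic_fetch_add(v, 1);
}

int main(void)
{
	atomic_int refs = 0, errs = INT_MAX;

	atomic_inc_checked(&refs);           /* fine: 0 -> 1 */
	atomic_inc_unchecked_model(&errs);   /* fine: wraps to INT_MIN */
	printf("refs=%d errs=%d\n", atomic_load(&refs), atomic_load(&errs));
	return 0;
}
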
20926diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
20927index de918c4..32eed23 100644
20928--- a/arch/x86/kernel/apic/apic_flat_64.c
20929+++ b/arch/x86/kernel/apic/apic_flat_64.c
20930@@ -154,7 +154,7 @@ static int flat_probe(void)
20931 return 1;
20932 }
20933
20934-static struct apic apic_flat = {
20935+static struct apic apic_flat __read_only = {
20936 .name = "flat",
20937 .probe = flat_probe,
20938 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
20939@@ -260,7 +260,7 @@ static int physflat_probe(void)
20940 return 0;
20941 }
20942
20943-static struct apic apic_physflat = {
20944+static struct apic apic_physflat __read_only = {
20945
20946 .name = "physical flat",
20947 .probe = physflat_probe,
20948diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
20949index b205cdb..d8503ff 100644
20950--- a/arch/x86/kernel/apic/apic_noop.c
20951+++ b/arch/x86/kernel/apic/apic_noop.c
20952@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
20953 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
20954 }
20955
20956-struct apic apic_noop = {
20957+struct apic apic_noop __read_only = {
20958 .name = "noop",
20959 .probe = noop_probe,
20960 .acpi_madt_oem_check = NULL,
20961diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
20962index c4a8d63..fe893ac 100644
20963--- a/arch/x86/kernel/apic/bigsmp_32.c
20964+++ b/arch/x86/kernel/apic/bigsmp_32.c
20965@@ -147,7 +147,7 @@ static int probe_bigsmp(void)
20966 return dmi_bigsmp;
20967 }
20968
20969-static struct apic apic_bigsmp = {
20970+static struct apic apic_bigsmp __read_only = {
20971
20972 .name = "bigsmp",
20973 .probe = probe_bigsmp,
20974diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
20975index 3f5f604..309c0e6 100644
20976--- a/arch/x86/kernel/apic/io_apic.c
20977+++ b/arch/x86/kernel/apic/io_apic.c
20978@@ -1859,7 +1859,7 @@ int native_ioapic_set_affinity(struct irq_data *data,
20979 return ret;
20980 }
20981
20982-atomic_t irq_mis_count;
20983+atomic_unchecked_t irq_mis_count;
20984
20985 #ifdef CONFIG_GENERIC_PENDING_IRQ
20986 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
20987@@ -2000,7 +2000,7 @@ static void ack_ioapic_level(struct irq_data *data)
20988 * at the cpu.
20989 */
20990 if (!(v & (1 << (i & 0x1f)))) {
20991- atomic_inc(&irq_mis_count);
20992+ atomic_inc_unchecked(&irq_mis_count);
20993
20994 eoi_ioapic_irq(irq, cfg);
20995 }
20996diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
20997index bda4886..f9c7195 100644
20998--- a/arch/x86/kernel/apic/probe_32.c
20999+++ b/arch/x86/kernel/apic/probe_32.c
21000@@ -72,7 +72,7 @@ static int probe_default(void)
21001 return 1;
21002 }
21003
21004-static struct apic apic_default = {
21005+static struct apic apic_default __read_only = {
21006
21007 .name = "default",
21008 .probe = probe_default,
21009diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
21010index 6cedd79..023ff8e 100644
21011--- a/arch/x86/kernel/apic/vector.c
21012+++ b/arch/x86/kernel/apic/vector.c
21013@@ -21,7 +21,7 @@
21014
21015 static DEFINE_RAW_SPINLOCK(vector_lock);
21016
21017-void lock_vector_lock(void)
21018+void lock_vector_lock(void) __acquires(vector_lock)
21019 {
21020 /* Used to the online set of cpus does not change
21021 * during assign_irq_vector.
21022@@ -29,7 +29,7 @@ void lock_vector_lock(void)
21023 raw_spin_lock(&vector_lock);
21024 }
21025
21026-void unlock_vector_lock(void)
21027+void unlock_vector_lock(void) __releases(vector_lock)
21028 {
21029 raw_spin_unlock(&vector_lock);
21030 }
21031diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
21032index e658f21..b695a1a 100644
21033--- a/arch/x86/kernel/apic/x2apic_cluster.c
21034+++ b/arch/x86/kernel/apic/x2apic_cluster.c
21035@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
21036 return notifier_from_errno(err);
21037 }
21038
21039-static struct notifier_block __refdata x2apic_cpu_notifier = {
21040+static struct notifier_block x2apic_cpu_notifier = {
21041 .notifier_call = update_clusterinfo,
21042 };
21043
21044@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
21045 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
21046 }
21047
21048-static struct apic apic_x2apic_cluster = {
21049+static struct apic apic_x2apic_cluster __read_only = {
21050
21051 .name = "cluster x2apic",
21052 .probe = x2apic_cluster_probe,
21053diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
21054index 6fae733..5ca17af 100644
21055--- a/arch/x86/kernel/apic/x2apic_phys.c
21056+++ b/arch/x86/kernel/apic/x2apic_phys.c
21057@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
21058 return apic == &apic_x2apic_phys;
21059 }
21060
21061-static struct apic apic_x2apic_phys = {
21062+static struct apic apic_x2apic_phys __read_only = {
21063
21064 .name = "physical x2apic",
21065 .probe = x2apic_phys_probe,
21066diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
21067index 8e9dcfd..c61b3e4 100644
21068--- a/arch/x86/kernel/apic/x2apic_uv_x.c
21069+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
21070@@ -348,7 +348,7 @@ static int uv_probe(void)
21071 return apic == &apic_x2apic_uv_x;
21072 }
21073
21074-static struct apic __refdata apic_x2apic_uv_x = {
21075+static struct apic apic_x2apic_uv_x __read_only = {
21076
21077 .name = "UV large system",
21078 .probe = uv_probe,
21079diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
21080index 927ec92..0dc3bd4 100644
21081--- a/arch/x86/kernel/apm_32.c
21082+++ b/arch/x86/kernel/apm_32.c
21083@@ -432,7 +432,7 @@ static DEFINE_MUTEX(apm_mutex);
21084 * This is for buggy BIOS's that refer to (real mode) segment 0x40
21085 * even though they are called in protected mode.
21086 */
21087-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
21088+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
21089 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
21090
21091 static const char driver_version[] = "1.16ac"; /* no spaces */
21092@@ -610,7 +610,10 @@ static long __apm_bios_call(void *_call)
21093 BUG_ON(cpu != 0);
21094 gdt = get_cpu_gdt_table(cpu);
21095 save_desc_40 = gdt[0x40 / 8];
21096+
21097+ pax_open_kernel();
21098 gdt[0x40 / 8] = bad_bios_desc;
21099+ pax_close_kernel();
21100
21101 apm_irq_save(flags);
21102 APM_DO_SAVE_SEGS;
21103@@ -619,7 +622,11 @@ static long __apm_bios_call(void *_call)
21104 &call->esi);
21105 APM_DO_RESTORE_SEGS;
21106 apm_irq_restore(flags);
21107+
21108+ pax_open_kernel();
21109 gdt[0x40 / 8] = save_desc_40;
21110+ pax_close_kernel();
21111+
21112 put_cpu();
21113
21114 return call->eax & 0xff;
21115@@ -686,7 +693,10 @@ static long __apm_bios_call_simple(void *_call)
21116 BUG_ON(cpu != 0);
21117 gdt = get_cpu_gdt_table(cpu);
21118 save_desc_40 = gdt[0x40 / 8];
21119+
21120+ pax_open_kernel();
21121 gdt[0x40 / 8] = bad_bios_desc;
21122+ pax_close_kernel();
21123
21124 apm_irq_save(flags);
21125 APM_DO_SAVE_SEGS;
21126@@ -694,7 +704,11 @@ static long __apm_bios_call_simple(void *_call)
21127 &call->eax);
21128 APM_DO_RESTORE_SEGS;
21129 apm_irq_restore(flags);
21130+
21131+ pax_open_kernel();
21132 gdt[0x40 / 8] = save_desc_40;
21133+ pax_close_kernel();
21134+
21135 put_cpu();
21136 return error;
21137 }
21138@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
21139 * code to that CPU.
21140 */
21141 gdt = get_cpu_gdt_table(0);
21142+
21143+ pax_open_kernel();
21144 set_desc_base(&gdt[APM_CS >> 3],
21145 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21146 set_desc_base(&gdt[APM_CS_16 >> 3],
21147 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21148 set_desc_base(&gdt[APM_DS >> 3],
21149 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21150+ pax_close_kernel();
21151
21152 proc_create("apm", 0, NULL, &apm_file_ops);
21153
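
Under KERNEXEC the GDT pages are read-only, so each temporary descriptor swap in the APM BIOS call paths above has to sit inside a pax_open_kernel()/pax_close_kernel() pair. The shape of that pattern can be modeled in userspace with mprotect(); illustrative only, nothing below is kernel API:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
	char *gdt = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (gdt == MAP_FAILED)
		return 1;
	strcpy(gdt, "descriptor");
	mprotect(gdt, pagesz, PROT_READ);               /* steady state: RO   */

	mprotect(gdt, pagesz, PROT_READ | PROT_WRITE);  /* "pax_open_kernel"  */
	gdt[0] = 'D';                                   /* the guarded write  */
	mprotect(gdt, pagesz, PROT_READ);               /* "pax_close_kernel" */

	printf("%s\n", gdt);                            /* prints Descriptor  */
	return 0;
}
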
21154diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21155index 9f6b934..cf5ffb3 100644
21156--- a/arch/x86/kernel/asm-offsets.c
21157+++ b/arch/x86/kernel/asm-offsets.c
21158@@ -32,6 +32,8 @@ void common(void) {
21159 OFFSET(TI_flags, thread_info, flags);
21160 OFFSET(TI_status, thread_info, status);
21161 OFFSET(TI_addr_limit, thread_info, addr_limit);
21162+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21163+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21164
21165 BLANK();
21166 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21167@@ -52,8 +54,26 @@ void common(void) {
21168 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21169 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21170 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21171+
21172+#ifdef CONFIG_PAX_KERNEXEC
21173+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21174 #endif
21175
21176+#ifdef CONFIG_PAX_MEMORY_UDEREF
21177+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21178+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21179+#ifdef CONFIG_X86_64
21180+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21181+#endif
21182+#endif
21183+
21184+#endif
21185+
21186+ BLANK();
21187+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21188+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21189+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21190+
21191 #ifdef CONFIG_XEN
21192 BLANK();
21193 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
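
asm-offsets.c is never linked into the kernel; it is compiled only so the build can harvest constants such as the new TI_lowest_stack offset for use from entry assembly. The kernel extracts them through magic asm comments, but the mechanism reduces to emitting offsetof() results, roughly as in this model (hypothetical struct, printf in place of the asm trick):

#include <stdio.h>
#include <stddef.h>

struct thread_info_model {
	unsigned long flags;
	unsigned long addr_limit;
	unsigned long lowest_stack;
};

#define DEFINE(sym, val) printf("#define %s %zu\n", #sym, (size_t)(val))
#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

int main(void)
{
	OFFSET(TI_flags, thread_info_model, flags);
	OFFSET(TI_addr_limit, thread_info_model, addr_limit);
	OFFSET(TI_lowest_stack, thread_info_model, lowest_stack);
	return 0;
}
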
21194diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21195index fdcbb4d..036dd93 100644
21196--- a/arch/x86/kernel/asm-offsets_64.c
21197+++ b/arch/x86/kernel/asm-offsets_64.c
21198@@ -80,6 +80,7 @@ int main(void)
21199 BLANK();
21200 #undef ENTRY
21201
21202+ DEFINE(TSS_size, sizeof(struct tss_struct));
21203 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21204 BLANK();
21205
21206diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21207index 80091ae..0c5184f 100644
21208--- a/arch/x86/kernel/cpu/Makefile
21209+++ b/arch/x86/kernel/cpu/Makefile
21210@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21211 CFLAGS_REMOVE_perf_event.o = -pg
21212 endif
21213
21214-# Make sure load_percpu_segment has no stackprotector
21215-nostackp := $(call cc-option, -fno-stack-protector)
21216-CFLAGS_common.o := $(nostackp)
21217-
21218 obj-y := intel_cacheinfo.o scattered.o topology.o
21219 obj-y += common.o
21220 obj-y += rdrand.o
21221diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21222index 15c5df9..d9a604a 100644
21223--- a/arch/x86/kernel/cpu/amd.c
21224+++ b/arch/x86/kernel/cpu/amd.c
21225@@ -717,7 +717,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21226 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21227 {
21228 /* AMD errata T13 (order #21922) */
21229- if ((c->x86 == 6)) {
21230+ if (c->x86 == 6) {
21231 /* Duron Rev A0 */
21232 if (c->x86_model == 3 && c->x86_mask == 0)
21233 size = 64;
21234diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21235index c604965..1558f4a 100644
21236--- a/arch/x86/kernel/cpu/common.c
21237+++ b/arch/x86/kernel/cpu/common.c
21238@@ -90,60 +90,6 @@ static const struct cpu_dev default_cpu = {
21239
21240 static const struct cpu_dev *this_cpu = &default_cpu;
21241
21242-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21243-#ifdef CONFIG_X86_64
21244- /*
21245- * We need valid kernel segments for data and code in long mode too
21246- * IRET will check the segment types kkeil 2000/10/28
21247- * Also sysret mandates a special GDT layout
21248- *
21249- * TLS descriptors are currently at a different place compared to i386.
21250- * Hopefully nobody expects them at a fixed place (Wine?)
21251- */
21252- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21253- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21254- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21255- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21256- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21257- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21258-#else
21259- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21260- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21261- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21262- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21263- /*
21264- * Segments used for calling PnP BIOS have byte granularity.
21265- * They code segments and data segments have fixed 64k limits,
21266- * the transfer segment sizes are set at run time.
21267- */
21268- /* 32-bit code */
21269- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21270- /* 16-bit code */
21271- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21272- /* 16-bit data */
21273- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21274- /* 16-bit data */
21275- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21276- /* 16-bit data */
21277- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21278- /*
21279- * The APM segments have byte granularity and their bases
21280- * are set at run time. All have 64k limits.
21281- */
21282- /* 32-bit code */
21283- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21284- /* 16-bit code */
21285- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21286- /* data */
21287- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21288-
21289- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21290- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21291- GDT_STACK_CANARY_INIT
21292-#endif
21293-} };
21294-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21295-
21296 static int __init x86_xsave_setup(char *s)
21297 {
21298 if (strlen(s))
21299@@ -305,6 +251,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21300 }
21301 }
21302
21303+#ifdef CONFIG_X86_64
21304+static __init int setup_disable_pcid(char *arg)
21305+{
21306+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21307+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21308+
21309+#ifdef CONFIG_PAX_MEMORY_UDEREF
21310+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21311+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21312+#endif
21313+
21314+ return 1;
21315+}
21316+__setup("nopcid", setup_disable_pcid);
21317+
21318+static void setup_pcid(struct cpuinfo_x86 *c)
21319+{
21320+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21321+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
21322+
21323+#ifdef CONFIG_PAX_MEMORY_UDEREF
21324+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
21325+ pax_open_kernel();
21326+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21327+ pax_close_kernel();
21328+ printk("PAX: slow and weak UDEREF enabled\n");
21329+ } else
21330+ printk("PAX: UDEREF disabled\n");
21331+#endif
21332+
21333+ return;
21334+ }
21335+
21336+ printk("PAX: PCID detected\n");
21337+ set_in_cr4(X86_CR4_PCIDE);
21338+
21339+#ifdef CONFIG_PAX_MEMORY_UDEREF
21340+ pax_open_kernel();
21341+ clone_pgd_mask = ~(pgdval_t)0UL;
21342+ pax_close_kernel();
21343+ if (pax_user_shadow_base)
21344+ printk("PAX: weak UDEREF enabled\n");
21345+ else {
21346+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
21347+ printk("PAX: strong UDEREF enabled\n");
21348+ }
21349+#endif
21350+
21351+ if (cpu_has(c, X86_FEATURE_INVPCID))
21352+ printk("PAX: INVPCID detected\n");
21353+}
21354+#endif
21355+
21356 /*
21357 * Some CPU features depend on higher CPUID levels, which may not always
21358 * be available due to CPUID level capping or broken virtualization
21359@@ -405,7 +404,7 @@ void switch_to_new_gdt(int cpu)
21360 {
21361 struct desc_ptr gdt_descr;
21362
21363- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
21364+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
21365 gdt_descr.size = GDT_SIZE - 1;
21366 load_gdt(&gdt_descr);
21367 /* Reload the per-cpu base */
21368@@ -895,6 +894,20 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21369 setup_smep(c);
21370 setup_smap(c);
21371
21372+#ifdef CONFIG_X86_32
21373+#ifdef CONFIG_PAX_PAGEEXEC
21374+ if (!(__supported_pte_mask & _PAGE_NX))
21375+ clear_cpu_cap(c, X86_FEATURE_PSE);
21376+#endif
21377+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21378+ clear_cpu_cap(c, X86_FEATURE_SEP);
21379+#endif
21380+#endif
21381+
21382+#ifdef CONFIG_X86_64
21383+ setup_pcid(c);
21384+#endif
21385+
21386 /*
21387 * The vendor-specific functions might have changed features.
21388 * Now we do "generic changes."
21389@@ -977,7 +990,7 @@ static void syscall32_cpu_init(void)
21390 void enable_sep_cpu(void)
21391 {
21392 int cpu = get_cpu();
21393- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21394+ struct tss_struct *tss = init_tss + cpu;
21395
21396 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21397 put_cpu();
21398@@ -1115,14 +1128,16 @@ static __init int setup_disablecpuid(char *arg)
21399 }
21400 __setup("clearcpuid=", setup_disablecpuid);
21401
21402+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
21403+EXPORT_PER_CPU_SYMBOL(current_tinfo);
21404+
21405 DEFINE_PER_CPU(unsigned long, kernel_stack) =
21406- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
21407+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
21408 EXPORT_PER_CPU_SYMBOL(kernel_stack);
21409
21410 #ifdef CONFIG_X86_64
21411-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21412-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
21413- (unsigned long) debug_idt_table };
21414+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21415+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
21416
21417 DEFINE_PER_CPU_FIRST(union irq_stack_union,
21418 irq_stack_union) __aligned(PAGE_SIZE) __visible;
21419@@ -1299,7 +1314,7 @@ void cpu_init(void)
21420 */
21421 load_ucode_ap();
21422
21423- t = &per_cpu(init_tss, cpu);
21424+ t = init_tss + cpu;
21425 oist = &per_cpu(orig_ist, cpu);
21426
21427 #ifdef CONFIG_NUMA
21428@@ -1331,7 +1346,6 @@ void cpu_init(void)
21429 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21430 barrier();
21431
21432- x86_configure_nx();
21433 enable_x2apic();
21434
21435 /*
21436@@ -1383,7 +1397,7 @@ void cpu_init(void)
21437 {
21438 int cpu = smp_processor_id();
21439 struct task_struct *curr = current;
21440- struct tss_struct *t = &per_cpu(init_tss, cpu);
21441+ struct tss_struct *t = init_tss + cpu;
21442 struct thread_struct *thread = &curr->thread;
21443
21444 wait_for_master_cpu(cpu);
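
The setup_pcid() logic above keys off two CPUID feature bits before enabling CR4.PCIDE and choosing between strong and weak UDEREF. A minimal userspace sketch of the same probe, assuming GCC's <cpuid.h> helpers (PCID is CPUID.01H:ECX bit 17, INVPCID is CPUID.(EAX=07H,ECX=0):EBX bit 10):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* CPUID leaf 1: ECX bit 17 = PCID (process-context identifiers) */
        if (__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                printf("PCID:    %s\n", (ecx & (1u << 17)) ? "yes" : "no");

        /* CPUID leaf 7, subleaf 0: EBX bit 10 = INVPCID */
        if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
                printf("INVPCID: %s\n", (ebx & (1u << 10)) ? "yes" : "no");

        return 0;
}
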
21445diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21446index c703507..28535e3 100644
21447--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21448+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21449@@ -1026,6 +1026,22 @@ static struct attribute *default_attrs[] = {
21450 };
21451
21452 #ifdef CONFIG_AMD_NB
21453+static struct attribute *default_attrs_amd_nb[] = {
21454+ &type.attr,
21455+ &level.attr,
21456+ &coherency_line_size.attr,
21457+ &physical_line_partition.attr,
21458+ &ways_of_associativity.attr,
21459+ &number_of_sets.attr,
21460+ &size.attr,
21461+ &shared_cpu_map.attr,
21462+ &shared_cpu_list.attr,
21463+ NULL,
21464+ NULL,
21465+ NULL,
21466+ NULL
21467+};
21468+
21469 static struct attribute **amd_l3_attrs(void)
21470 {
21471 static struct attribute **attrs;
21472@@ -1036,18 +1052,7 @@ static struct attribute **amd_l3_attrs(void)
21473
21474 n = ARRAY_SIZE(default_attrs);
21475
21476- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21477- n += 2;
21478-
21479- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21480- n += 1;
21481-
21482- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
21483- if (attrs == NULL)
21484- return attrs = default_attrs;
21485-
21486- for (n = 0; default_attrs[n]; n++)
21487- attrs[n] = default_attrs[n];
21488+ attrs = default_attrs_amd_nb;
21489
21490 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
21491 attrs[n++] = &cache_disable_0.attr;
21492@@ -1098,6 +1103,13 @@ static struct kobj_type ktype_cache = {
21493 .default_attrs = default_attrs,
21494 };
21495
21496+#ifdef CONFIG_AMD_NB
21497+static struct kobj_type ktype_cache_amd_nb = {
21498+ .sysfs_ops = &sysfs_ops,
21499+ .default_attrs = default_attrs_amd_nb,
21500+};
21501+#endif
21502+
21503 static struct kobj_type ktype_percpu_entry = {
21504 .sysfs_ops = &sysfs_ops,
21505 };
21506@@ -1163,20 +1175,26 @@ static int cache_add_dev(struct device *dev)
21507 return retval;
21508 }
21509
21510+#ifdef CONFIG_AMD_NB
21511+ amd_l3_attrs();
21512+#endif
21513+
21514 for (i = 0; i < num_cache_leaves; i++) {
21515+ struct kobj_type *ktype;
21516+
21517 this_object = INDEX_KOBJECT_PTR(cpu, i);
21518 this_object->cpu = cpu;
21519 this_object->index = i;
21520
21521 this_leaf = CPUID4_INFO_IDX(cpu, i);
21522
21523- ktype_cache.default_attrs = default_attrs;
21524+ ktype = &ktype_cache;
21525 #ifdef CONFIG_AMD_NB
21526 if (this_leaf->base.nb)
21527- ktype_cache.default_attrs = amd_l3_attrs();
21528+ ktype = &ktype_cache_amd_nb;
21529 #endif
21530 retval = kobject_init_and_add(&(this_object->kobj),
21531- &ktype_cache,
21532+ ktype,
21533 per_cpu(ici_cache_kobject, cpu),
21534 "index%1lu", i);
21535 if (unlikely(retval)) {
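
The rework above trades a kzalloc()'d, runtime-assembled attribute array for a static default_attrs_amd_nb[] that reserves trailing NULL slots, so the optional AMD L3 attributes can be appended once at init while the kobj_type stays constant. A small sketch of that reserve-NULL-slots pattern (the table contents here are hypothetical):

#include <stdio.h>

/* Static table with trailing NULL slots reserved for optional entries,
 * mirroring default_attrs_amd_nb[]: the base entries are fixed and the
 * spare slots are filled once at init, so nothing needs kzalloc(). */
static const char *attrs[6] = {
        "type", "level", "size",
        NULL, NULL, NULL            /* spare slots; the last stays NULL */
};

static void add_optional(const char *name)
{
        int i;

        for (i = 0; i < 5; i++)     /* slot 5 is the terminator */
                if (!attrs[i]) {
                        attrs[i] = name;
                        return;
                }
}

int main(void)
{
        int i;

        add_optional("cache_disable_0");
        for (i = 0; attrs[i]; i++)
                puts(attrs[i]);
        return 0;
}
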
21536diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21537index d2c6116..62fd7aa 100644
21538--- a/arch/x86/kernel/cpu/mcheck/mce.c
21539+++ b/arch/x86/kernel/cpu/mcheck/mce.c
21540@@ -45,6 +45,7 @@
21541 #include <asm/processor.h>
21542 #include <asm/mce.h>
21543 #include <asm/msr.h>
21544+#include <asm/local.h>
21545
21546 #include "mce-internal.h"
21547
21548@@ -259,7 +260,7 @@ static void print_mce(struct mce *m)
21549 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21550 m->cs, m->ip);
21551
21552- if (m->cs == __KERNEL_CS)
21553+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21554 print_symbol("{%s}", m->ip);
21555 pr_cont("\n");
21556 }
21557@@ -292,10 +293,10 @@ static void print_mce(struct mce *m)
21558
21559 #define PANIC_TIMEOUT 5 /* 5 seconds */
21560
21561-static atomic_t mce_panicked;
21562+static atomic_unchecked_t mce_panicked;
21563
21564 static int fake_panic;
21565-static atomic_t mce_fake_panicked;
21566+static atomic_unchecked_t mce_fake_panicked;
21567
21568 /* Panic in progress. Enable interrupts and wait for final IPI */
21569 static void wait_for_panic(void)
21570@@ -319,7 +320,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21571 /*
21572 * Make sure only one CPU runs in machine check panic
21573 */
21574- if (atomic_inc_return(&mce_panicked) > 1)
21575+ if (atomic_inc_return_unchecked(&mce_panicked) > 1)
21576 wait_for_panic();
21577 barrier();
21578
21579@@ -327,7 +328,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21580 console_verbose();
21581 } else {
21582 /* Don't log too much for fake panic */
21583- if (atomic_inc_return(&mce_fake_panicked) > 1)
21584+ if (atomic_inc_return_unchecked(&mce_fake_panicked) > 1)
21585 return;
21586 }
21587 /* First print corrected ones that are still unlogged */
21588@@ -366,7 +367,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21589 if (!fake_panic) {
21590 if (panic_timeout == 0)
21591 panic_timeout = mca_cfg.panic_timeout;
21592- panic(msg);
21593+ panic("%s", msg);
21594 } else
21595 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21596 }
21597@@ -744,7 +745,7 @@ static int mce_timed_out(u64 *t)
21598 * might have been modified by someone else.
21599 */
21600 rmb();
21601- if (atomic_read(&mce_panicked))
21602+ if (atomic_read_unchecked(&mce_panicked))
21603 wait_for_panic();
21604 if (!mca_cfg.monarch_timeout)
21605 goto out;
21606@@ -1722,7 +1723,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21607 }
21608
21609 /* Call the installed machine check handler for this CPU setup. */
21610-void (*machine_check_vector)(struct pt_regs *, long error_code) =
21611+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21612 unexpected_machine_check;
21613
21614 /*
21615@@ -1745,7 +1746,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21616 return;
21617 }
21618
21619+ pax_open_kernel();
21620 machine_check_vector = do_machine_check;
21621+ pax_close_kernel();
21622
21623 __mcheck_cpu_init_generic();
21624 __mcheck_cpu_init_vendor(c);
21625@@ -1759,7 +1762,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21626 */
21627
21628 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21629-static int mce_chrdev_open_count; /* #times opened */
21630+static local_t mce_chrdev_open_count; /* #times opened */
21631 static int mce_chrdev_open_exclu; /* already open exclusive? */
21632
21633 static int mce_chrdev_open(struct inode *inode, struct file *file)
21634@@ -1767,7 +1770,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21635 spin_lock(&mce_chrdev_state_lock);
21636
21637 if (mce_chrdev_open_exclu ||
21638- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21639+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21640 spin_unlock(&mce_chrdev_state_lock);
21641
21642 return -EBUSY;
21643@@ -1775,7 +1778,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21644
21645 if (file->f_flags & O_EXCL)
21646 mce_chrdev_open_exclu = 1;
21647- mce_chrdev_open_count++;
21648+ local_inc(&mce_chrdev_open_count);
21649
21650 spin_unlock(&mce_chrdev_state_lock);
21651
21652@@ -1786,7 +1789,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21653 {
21654 spin_lock(&mce_chrdev_state_lock);
21655
21656- mce_chrdev_open_count--;
21657+ local_dec(&mce_chrdev_open_count);
21658 mce_chrdev_open_exclu = 0;
21659
21660 spin_unlock(&mce_chrdev_state_lock);
21661@@ -2461,7 +2464,7 @@ static __init void mce_init_banks(void)
21662
21663 for (i = 0; i < mca_cfg.banks; i++) {
21664 struct mce_bank *b = &mce_banks[i];
21665- struct device_attribute *a = &b->attr;
21666+ device_attribute_no_const *a = &b->attr;
21667
21668 sysfs_attr_init(&a->attr);
21669 a->attr.name = b->attrname;
21670@@ -2568,7 +2571,7 @@ struct dentry *mce_get_debugfs_dir(void)
21671 static void mce_reset(void)
21672 {
21673 cpu_missing = 0;
21674- atomic_set(&mce_fake_panicked, 0);
21675+ atomic_set_unchecked(&mce_fake_panicked, 0);
21676 atomic_set(&mce_executing, 0);
21677 atomic_set(&mce_callin, 0);
21678 atomic_set(&global_nwo, 0);
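
Two conversions above are worth calling out: mce_panicked becomes atomic_unchecked_t because it is a first-CPU-wins latch rather than a reference count (so PaX's REFCOUNT overflow checks should ignore it), and mce_chrdev_open_count becomes a local_t updated under the existing spinlock. A userspace C11 analogue of the atomic_inc_return() latch idiom, illustrative only and not the kernel API:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int panicked;     /* analogue of mce_panicked */

/* Mirrors "if (atomic_inc_return(&mce_panicked) > 1) wait_for_panic();":
 * atomic_fetch_add() returns the old value, so old + 1 is the kernel's
 * inc_return result, and only the first caller sees 1. */
static int first_to_panic(void)
{
        return atomic_fetch_add(&panicked, 1) + 1 == 1;
}

int main(void)
{
        printf("%d\n", first_to_panic());   /* 1: this CPU leads the panic */
        printf("%d\n", first_to_panic());   /* 0: would wait_for_panic() */
        return 0;
}
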
21679diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21680index a304298..49b6d06 100644
21681--- a/arch/x86/kernel/cpu/mcheck/p5.c
21682+++ b/arch/x86/kernel/cpu/mcheck/p5.c
21683@@ -10,6 +10,7 @@
21684 #include <asm/processor.h>
21685 #include <asm/mce.h>
21686 #include <asm/msr.h>
21687+#include <asm/pgtable.h>
21688
21689 /* By default disabled */
21690 int mce_p5_enabled __read_mostly;
21691@@ -48,7 +49,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21692 if (!cpu_has(c, X86_FEATURE_MCE))
21693 return;
21694
21695+ pax_open_kernel();
21696 machine_check_vector = pentium_machine_check;
21697+ pax_close_kernel();
21698 /* Make sure the vector pointer is visible before we enable MCEs: */
21699 wmb();
21700
21701diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21702index 7dc5564..1273569 100644
21703--- a/arch/x86/kernel/cpu/mcheck/winchip.c
21704+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21705@@ -9,6 +9,7 @@
21706 #include <asm/processor.h>
21707 #include <asm/mce.h>
21708 #include <asm/msr.h>
21709+#include <asm/pgtable.h>
21710
21711 /* Machine check handler for WinChip C6: */
21712 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21713@@ -22,7 +23,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21714 {
21715 u32 lo, hi;
21716
21717+ pax_open_kernel();
21718 machine_check_vector = winchip_machine_check;
21719+ pax_close_kernel();
21720 /* Make sure the vector pointer is visible before we enable MCEs: */
21721 wmb();
21722
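
mce.c, p5.c and winchip.c all assign machine_check_vector, which this patch marks __read_only; the pax_open_kernel()/pax_close_kernel() pair lifts write protection just for the one store. A userspace analogue using mprotect(2) on a dedicated page — an assumption for illustration; the real KERNEXEC implementation toggles CR0.WP rather than page permissions:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

typedef void (*mc_vector_t)(void);

static void unexpected_check(void) { puts("unexpected machine check"); }
static void do_check(void)         { puts("do_machine_check"); }

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);

        /* A normally read-only "vector": one private page we control. */
        mc_vector_t *vec = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (vec == MAP_FAILED)
                return 1;

        *vec = unexpected_check;
        mprotect(vec, pagesz, PROT_READ);               /* "__read_only" */

        mprotect(vec, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel() */
        *vec = do_check;
        mprotect(vec, pagesz, PROT_READ);               /* pax_close_kernel() */

        (*vec)();
        return 0;
}
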
21723diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
21724index 36a8361..e7058c2 100644
21725--- a/arch/x86/kernel/cpu/microcode/core.c
21726+++ b/arch/x86/kernel/cpu/microcode/core.c
21727@@ -518,7 +518,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21728 return NOTIFY_OK;
21729 }
21730
21731-static struct notifier_block __refdata mc_cpu_notifier = {
21732+static struct notifier_block mc_cpu_notifier = {
21733 .notifier_call = mc_cpu_callback,
21734 };
21735
21736diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
21737index c6826d1..8dc677e 100644
21738--- a/arch/x86/kernel/cpu/microcode/intel.c
21739+++ b/arch/x86/kernel/cpu/microcode/intel.c
21740@@ -196,6 +196,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
21741 struct microcode_header_intel mc_header;
21742 unsigned int mc_size;
21743
21744+ if (leftover < sizeof(mc_header)) {
21745+ pr_err("error! Truncated header in microcode data file\n");
21746+ break;
21747+ }
21748+
21749 if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
21750 break;
21751
21752@@ -293,13 +298,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21753
21754 static int get_ucode_user(void *to, const void *from, size_t n)
21755 {
21756- return copy_from_user(to, from, n);
21757+ return copy_from_user(to, (const void __force_user *)from, n);
21758 }
21759
21760 static enum ucode_state
21761 request_microcode_user(int cpu, const void __user *buf, size_t size)
21762 {
21763- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21764+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21765 }
21766
21767 static void microcode_fini_cpu(int cpu)
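
The added leftover < sizeof(mc_header) test rejects update files whose tail is too short to hold a header before get_ucode_data() reads one; the intel_early.c hunk below adds the same guard plus an array bound. A generic sketch of the bounds-checked record walk (the record layout is hypothetical):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct rec_header {
        uint32_t totalsize;     /* header + payload; layout hypothetical */
};

/* Walk length-prefixed records, refusing to read a header - or trust a
 * body length - that extends past the remaining buffer. Omitting the
 * first test is exactly the truncation bug the hunk above fixes. */
static int walk_records(const uint8_t *p, size_t leftover)
{
        while (leftover) {
                struct rec_header h;

                if (leftover < sizeof(h))
                        return -1;              /* truncated header */
                memcpy(&h, p, sizeof(h));

                if (h.totalsize < sizeof(h) || h.totalsize > leftover)
                        return -1;              /* truncated/bogus body */

                /* ... validate and apply the record ... */
                p += h.totalsize;
                leftover -= h.totalsize;
        }
        return 0;
}

int main(void)
{
        const uint8_t buf[3] = { 0, 0, 0 };     /* shorter than a header */

        return walk_records(buf, sizeof(buf)) == -1 ? 0 : 1;
}
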
21768diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
21769index ec9df6f..420eb93 100644
21770--- a/arch/x86/kernel/cpu/microcode/intel_early.c
21771+++ b/arch/x86/kernel/cpu/microcode/intel_early.c
21772@@ -321,7 +321,11 @@ get_matching_model_microcode(int cpu, unsigned long start,
21773 unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
21774 int i;
21775
21776- while (leftover) {
21777+ while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
21778+
21779+ if (leftover < sizeof(mc_header))
21780+ break;
21781+
21782 mc_header = (struct microcode_header_intel *)ucode_ptr;
21783
21784 mc_size = get_totalsize(mc_header);
21785diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21786index ea5f363..cb0e905 100644
21787--- a/arch/x86/kernel/cpu/mtrr/main.c
21788+++ b/arch/x86/kernel/cpu/mtrr/main.c
21789@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21790 u64 size_or_mask, size_and_mask;
21791 static bool mtrr_aps_delayed_init;
21792
21793-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21794+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21795
21796 const struct mtrr_ops *mtrr_if;
21797
21798diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21799index df5e41f..816c719 100644
21800--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21801+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21802@@ -25,7 +25,7 @@ struct mtrr_ops {
21803 int (*validate_add_page)(unsigned long base, unsigned long size,
21804 unsigned int type);
21805 int (*have_wrcomb)(void);
21806-};
21807+} __do_const;
21808
21809 extern int generic_get_free_region(unsigned long base, unsigned long size,
21810 int replace_reg);
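
__do_const is a PaX constify-plugin attribute: every struct mtrr_ops instance becomes implicitly const, and the mtrr_ops[] table above is additionally marked __read_only, so neither the table slots nor the function pointers inside them can be redirected at runtime. The plain-C shape of the idiom:

#include <stdio.h>

struct ops {
        int (*validate)(unsigned long base, unsigned long size);
        const char *name;
};

static int generic_validate(unsigned long base, unsigned long size)
{
        (void)base;
        return size != 0;
}

/* A const table of pointers to const ops: neither the table slots nor
 * the function pointers inside each ops struct can be overwritten at
 * runtime, which is what __do_const/__read_only enforce above. */
static const struct ops generic_ops = { generic_validate, "generic" };
static const struct ops *const ops_table[] = { &generic_ops };

int main(void)
{
        printf("%s: %d\n", ops_table[0]->name,
               ops_table[0]->validate(0x1000, 0x1000));
        return 0;
}
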
21811diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21812index 143e5f5..5825081 100644
21813--- a/arch/x86/kernel/cpu/perf_event.c
21814+++ b/arch/x86/kernel/cpu/perf_event.c
21815@@ -1374,7 +1374,7 @@ static void __init pmu_check_apic(void)
21816
21817 }
21818
21819-static struct attribute_group x86_pmu_format_group = {
21820+static attribute_group_no_const x86_pmu_format_group = {
21821 .name = "format",
21822 .attrs = NULL,
21823 };
21824@@ -1473,7 +1473,7 @@ static struct attribute *events_attr[] = {
21825 NULL,
21826 };
21827
21828-static struct attribute_group x86_pmu_events_group = {
21829+static attribute_group_no_const x86_pmu_events_group = {
21830 .name = "events",
21831 .attrs = events_attr,
21832 };
21833@@ -1997,7 +1997,7 @@ static unsigned long get_segment_base(unsigned int segment)
21834 if (idx > GDT_ENTRIES)
21835 return 0;
21836
21837- desc = raw_cpu_ptr(gdt_page.gdt);
21838+ desc = get_cpu_gdt_table(smp_processor_id());
21839 }
21840
21841 return get_desc_base(desc + idx);
21842@@ -2087,7 +2087,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
21843 break;
21844
21845 perf_callchain_store(entry, frame.return_address);
21846- fp = frame.next_frame;
21847+ fp = (const void __force_user *)frame.next_frame;
21848 }
21849 }
21850
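
perf_callchain_user() above walks the user stack through struct stack_frame { next_frame; return_address }; the patch only re-annotates the pointer as a userland address (__force_user). A hedged userspace sketch of the same frame-pointer walk — it assumes the saved-RBP chain convention, so build with -fno-omit-frame-pointer (or -O0), and the depth cap stops the walk before the stale frame pointer above main() could be followed:

#include <stdio.h>

struct stack_frame {
        struct stack_frame *next_frame;
        void *return_address;
};

/* Walk our own saved-RBP chain, as perf_callchain_user() does for the
 * interrupted user task. */
static void __attribute__((noinline)) show_callchain(void)
{
        struct stack_frame *fp = __builtin_frame_address(0);
        int depth;

        for (depth = 0; fp && depth < 3; depth++) {
                printf("  ret=%p\n", fp->return_address);
                if (fp->next_frame <= fp)       /* frames must move up */
                        break;
                fp = fp->next_frame;
        }
}

static void __attribute__((noinline)) leaf(void)
{
        show_callchain();
}

int main(void)
{
        leaf();
        return 0;
}
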
21851diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21852index 97242a9..cf9c30e 100644
21853--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21854+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21855@@ -402,7 +402,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
21856 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
21857 {
21858 struct attribute **attrs;
21859- struct attribute_group *attr_group;
21860+ attribute_group_no_const *attr_group;
21861 int i = 0, j;
21862
21863 while (amd_iommu_v2_event_descs[i].attr.attr.name)
21864diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
21865index 498b6d9..4126515 100644
21866--- a/arch/x86/kernel/cpu/perf_event_intel.c
21867+++ b/arch/x86/kernel/cpu/perf_event_intel.c
21868@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
21869 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
21870
21871 if (boot_cpu_has(X86_FEATURE_PDCM)) {
21872- u64 capabilities;
21873+ u64 capabilities = x86_pmu.intel_cap.capabilities;
21874
21875- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
21876- x86_pmu.intel_cap.capabilities = capabilities;
21877+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
21878+ x86_pmu.intel_cap.capabilities = capabilities;
21879 }
21880
21881 intel_ds_init();
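
A hypervisor can advertise PDCM yet fault reads of MSR_IA32_PERF_CAPABILITIES, so the patch switches to rdmsrl_safe() and keeps the pre-initialized value on failure. The same defensive probe can be done from userspace through the msr driver (assumptions: x86 Linux with the msr module loaded, run as root):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_PERF_CAPABILITIES 0x345

int main(void)
{
        uint64_t capabilities = 0;      /* default kept on failure */
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd >= 0) {
                /* pread at offset == MSR number; a short read or EIO is
                 * the case rdmsrl_safe() handles in-kernel. */
                if (pread(fd, &capabilities, sizeof(capabilities),
                          MSR_IA32_PERF_CAPABILITIES) != sizeof(capabilities))
                        perror("rdmsr");
                close(fd);
        }
        printf("PERF_CAPABILITIES: %#llx\n",
               (unsigned long long)capabilities);
        return 0;
}
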
21882diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21883index c4bb8b8..9f7384d 100644
21884--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21885+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21886@@ -465,7 +465,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
21887 NULL,
21888 };
21889
21890-static struct attribute_group rapl_pmu_events_group = {
21891+static attribute_group_no_const rapl_pmu_events_group __read_only = {
21892 .name = "events",
21893 .attrs = NULL, /* patched at runtime */
21894 };
21895diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21896index c635b8b..b78835e 100644
21897--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21898+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21899@@ -733,7 +733,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
21900 static int __init uncore_type_init(struct intel_uncore_type *type)
21901 {
21902 struct intel_uncore_pmu *pmus;
21903- struct attribute_group *attr_group;
21904+ attribute_group_no_const *attr_group;
21905 struct attribute **attrs;
21906 int i, j;
21907
21908diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21909index 6c8c1e7..515b98a 100644
21910--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21911+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21912@@ -114,7 +114,7 @@ struct intel_uncore_box {
21913 struct uncore_event_desc {
21914 struct kobj_attribute attr;
21915 const char *config;
21916-};
21917+} __do_const;
21918
21919 ssize_t uncore_event_show(struct kobject *kobj,
21920 struct kobj_attribute *attr, char *buf);
21921diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
21922index 83741a7..bd3507d 100644
21923--- a/arch/x86/kernel/cpuid.c
21924+++ b/arch/x86/kernel/cpuid.c
21925@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
21926 return notifier_from_errno(err);
21927 }
21928
21929-static struct notifier_block __refdata cpuid_class_cpu_notifier =
21930+static struct notifier_block cpuid_class_cpu_notifier =
21931 {
21932 .notifier_call = cpuid_class_cpu_callback,
21933 };
21934diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
21935index aceb2f9..c76d3e3 100644
21936--- a/arch/x86/kernel/crash.c
21937+++ b/arch/x86/kernel/crash.c
21938@@ -105,7 +105,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
21939 #ifdef CONFIG_X86_32
21940 struct pt_regs fixed_regs;
21941
21942- if (!user_mode_vm(regs)) {
21943+ if (!user_mode(regs)) {
21944 crash_fixup_ss_esp(&fixed_regs, regs);
21945 regs = &fixed_regs;
21946 }
21947diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
21948index afa64ad..dce67dd 100644
21949--- a/arch/x86/kernel/crash_dump_64.c
21950+++ b/arch/x86/kernel/crash_dump_64.c
21951@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
21952 return -ENOMEM;
21953
21954 if (userbuf) {
21955- if (copy_to_user(buf, vaddr + offset, csize)) {
21956+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
21957 iounmap(vaddr);
21958 return -EFAULT;
21959 }
21960diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
21961index f6dfd93..892ade4 100644
21962--- a/arch/x86/kernel/doublefault.c
21963+++ b/arch/x86/kernel/doublefault.c
21964@@ -12,7 +12,7 @@
21965
21966 #define DOUBLEFAULT_STACKSIZE (1024)
21967 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
21968-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
21969+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
21970
21971 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
21972
21973@@ -22,7 +22,7 @@ static void doublefault_fn(void)
21974 unsigned long gdt, tss;
21975
21976 native_store_gdt(&gdt_desc);
21977- gdt = gdt_desc.address;
21978+ gdt = (unsigned long)gdt_desc.address;
21979
21980 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
21981
21982@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
21983 /* 0x2 bit is always set */
21984 .flags = X86_EFLAGS_SF | 0x2,
21985 .sp = STACK_START,
21986- .es = __USER_DS,
21987+ .es = __KERNEL_DS,
21988 .cs = __KERNEL_CS,
21989 .ss = __KERNEL_DS,
21990- .ds = __USER_DS,
21991+ .ds = __KERNEL_DS,
21992 .fs = __KERNEL_PERCPU,
21993
21994 .__cr3 = __pa_nodebug(swapper_pg_dir),
21995diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
21996index b74ebc7..2c95874 100644
21997--- a/arch/x86/kernel/dumpstack.c
21998+++ b/arch/x86/kernel/dumpstack.c
21999@@ -2,6 +2,9 @@
22000 * Copyright (C) 1991, 1992 Linus Torvalds
22001 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
22002 */
22003+#ifdef CONFIG_GRKERNSEC_HIDESYM
22004+#define __INCLUDED_BY_HIDESYM 1
22005+#endif
22006 #include <linux/kallsyms.h>
22007 #include <linux/kprobes.h>
22008 #include <linux/uaccess.h>
22009@@ -33,23 +36,21 @@ static void printk_stack_address(unsigned long address, int reliable)
22010
22011 void printk_address(unsigned long address)
22012 {
22013- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
22014+ pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
22015 }
22016
22017 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
22018 static void
22019 print_ftrace_graph_addr(unsigned long addr, void *data,
22020 const struct stacktrace_ops *ops,
22021- struct thread_info *tinfo, int *graph)
22022+ struct task_struct *task, int *graph)
22023 {
22024- struct task_struct *task;
22025 unsigned long ret_addr;
22026 int index;
22027
22028 if (addr != (unsigned long)return_to_handler)
22029 return;
22030
22031- task = tinfo->task;
22032 index = task->curr_ret_stack;
22033
22034 if (!task->ret_stack || index < *graph)
22035@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22036 static inline void
22037 print_ftrace_graph_addr(unsigned long addr, void *data,
22038 const struct stacktrace_ops *ops,
22039- struct thread_info *tinfo, int *graph)
22040+ struct task_struct *task, int *graph)
22041 { }
22042 #endif
22043
22044@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22045 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
22046 */
22047
22048-static inline int valid_stack_ptr(struct thread_info *tinfo,
22049- void *p, unsigned int size, void *end)
22050+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
22051 {
22052- void *t = tinfo;
22053 if (end) {
22054 if (p < end && p >= (end-THREAD_SIZE))
22055 return 1;
22056@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
22057 }
22058
22059 unsigned long
22060-print_context_stack(struct thread_info *tinfo,
22061+print_context_stack(struct task_struct *task, void *stack_start,
22062 unsigned long *stack, unsigned long bp,
22063 const struct stacktrace_ops *ops, void *data,
22064 unsigned long *end, int *graph)
22065 {
22066 struct stack_frame *frame = (struct stack_frame *)bp;
22067
22068- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
22069+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
22070 unsigned long addr;
22071
22072 addr = *stack;
22073@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
22074 } else {
22075 ops->address(data, addr, 0);
22076 }
22077- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22078+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22079 }
22080 stack++;
22081 }
22082@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
22083 EXPORT_SYMBOL_GPL(print_context_stack);
22084
22085 unsigned long
22086-print_context_stack_bp(struct thread_info *tinfo,
22087+print_context_stack_bp(struct task_struct *task, void *stack_start,
22088 unsigned long *stack, unsigned long bp,
22089 const struct stacktrace_ops *ops, void *data,
22090 unsigned long *end, int *graph)
22091@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22092 struct stack_frame *frame = (struct stack_frame *)bp;
22093 unsigned long *ret_addr = &frame->return_address;
22094
22095- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
22096+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
22097 unsigned long addr = *ret_addr;
22098
22099 if (!__kernel_text_address(addr))
22100@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22101 ops->address(data, addr, 1);
22102 frame = frame->next_frame;
22103 ret_addr = &frame->return_address;
22104- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22105+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22106 }
22107
22108 return (unsigned long)frame;
22109@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
22110 static void print_trace_address(void *data, unsigned long addr, int reliable)
22111 {
22112 touch_nmi_watchdog();
22113- printk(data);
22114+ printk("%s", (char *)data);
22115 printk_stack_address(addr, reliable);
22116 }
22117
22118@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
22119 EXPORT_SYMBOL_GPL(oops_begin);
22120 NOKPROBE_SYMBOL(oops_begin);
22121
22122+extern void gr_handle_kernel_exploit(void);
22123+
22124 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22125 {
22126 if (regs && kexec_should_crash(current))
22127@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22128 panic("Fatal exception in interrupt");
22129 if (panic_on_oops)
22130 panic("Fatal exception");
22131- do_exit(signr);
22132+
22133+ gr_handle_kernel_exploit();
22134+
22135+ do_group_exit(signr);
22136 }
22137 NOKPROBE_SYMBOL(oops_end);
22138
22139@@ -275,7 +279,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
22140 print_modules();
22141 show_regs(regs);
22142 #ifdef CONFIG_X86_32
22143- if (user_mode_vm(regs)) {
22144+ if (user_mode(regs)) {
22145 sp = regs->sp;
22146 ss = regs->ss & 0xffff;
22147 } else {
22148@@ -304,7 +308,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22149 unsigned long flags = oops_begin();
22150 int sig = SIGSEGV;
22151
22152- if (!user_mode_vm(regs))
22153+ if (!user_mode(regs))
22154 report_bug(regs->ip, regs);
22155
22156 if (__die(str, regs, err))
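
The dump_trace() rework threads an explicit stack_start through the walkers because under this patch thread_info no longer sits at the stack base, so valid_stack_ptr() cannot derive the bounds from tinfo. The check itself is a plain window test; a standalone sketch with an assumed 8 KiB THREAD_SIZE:

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL      /* assumption: 8 KiB kernel stacks */

/* Mirrors the reworked valid_stack_ptr(): accept p only if the whole
 * object [p, p+size) fits inside the THREAD_SIZE window, bounded by
 * end when one is known, else by the passed-in stack base. */
static int valid_stack_ptr(const void *stack_start, const void *p,
                           unsigned int size, const void *end)
{
        uintptr_t addr = (uintptr_t)p, lo, hi;

        if (end) {
                hi = (uintptr_t)end;
                lo = hi - THREAD_SIZE;
        } else {
                lo = (uintptr_t)stack_start;
                hi = lo + THREAD_SIZE;
        }
        return addr >= lo && addr + size <= hi;
}

int main(void)
{
        static char stack[THREAD_SIZE];

        printf("%d\n", valid_stack_ptr(stack, stack + 16, 8, NULL));          /* 1 */
        printf("%d\n", valid_stack_ptr(stack, stack + THREAD_SIZE, 8, NULL)); /* 0 */
        return 0;
}
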
22157diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22158index 5abd4cd..c65733b 100644
22159--- a/arch/x86/kernel/dumpstack_32.c
22160+++ b/arch/x86/kernel/dumpstack_32.c
22161@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22162 bp = stack_frame(task, regs);
22163
22164 for (;;) {
22165- struct thread_info *context;
22166+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22167 void *end_stack;
22168
22169 end_stack = is_hardirq_stack(stack, cpu);
22170 if (!end_stack)
22171 end_stack = is_softirq_stack(stack, cpu);
22172
22173- context = task_thread_info(task);
22174- bp = ops->walk_stack(context, stack, bp, ops, data,
22175+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22176 end_stack, &graph);
22177
22178 /* Stop if not on irq stack */
22179@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22180 int i;
22181
22182 show_regs_print_info(KERN_EMERG);
22183- __show_regs(regs, !user_mode_vm(regs));
22184+ __show_regs(regs, !user_mode(regs));
22185
22186 /*
22187 * When in-kernel, we also print out the stack and code at the
22188 * time of the fault..
22189 */
22190- if (!user_mode_vm(regs)) {
22191+ if (!user_mode(regs)) {
22192 unsigned int code_prologue = code_bytes * 43 / 64;
22193 unsigned int code_len = code_bytes;
22194 unsigned char c;
22195 u8 *ip;
22196+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22197
22198 pr_emerg("Stack:\n");
22199 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22200
22201 pr_emerg("Code:");
22202
22203- ip = (u8 *)regs->ip - code_prologue;
22204+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22205 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22206 /* try starting at IP */
22207- ip = (u8 *)regs->ip;
22208+ ip = (u8 *)regs->ip + cs_base;
22209 code_len = code_len - code_prologue + 1;
22210 }
22211 for (i = 0; i < code_len; i++, ip++) {
22212@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22213 pr_cont(" Bad EIP value.");
22214 break;
22215 }
22216- if (ip == (u8 *)regs->ip)
22217+ if (ip == (u8 *)regs->ip + cs_base)
22218 pr_cont(" <%02x>", c);
22219 else
22220 pr_cont(" %02x", c);
22221@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22222 {
22223 unsigned short ud2;
22224
22225+ ip = ktla_ktva(ip);
22226 if (ip < PAGE_OFFSET)
22227 return 0;
22228 if (probe_kernel_address((unsigned short *)ip, ud2))
22229@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22230
22231 return ud2 == 0x0b0f;
22232 }
22233+
22234+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22235+void pax_check_alloca(unsigned long size)
22236+{
22237+ unsigned long sp = (unsigned long)&sp, stack_left;
22238+
22239+ /* all kernel stacks are of the same size */
22240+ stack_left = sp & (THREAD_SIZE - 1);
22241+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22242+}
22243+EXPORT_SYMBOL(pax_check_alloca);
22244+#endif
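
pax_check_alloca() above leans on every 32-bit kernel stack being THREAD_SIZE bytes long and THREAD_SIZE-aligned, so sp & (THREAD_SIZE - 1) is exactly the free space below the stack pointer. A userspace demonstration of the mask trick on an aligned allocation (assumptions: a toy downward-growing stack and a power-of-two size):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE 8192UL      /* power of two, as the trick requires */

int main(void)
{
        /* A "stack": THREAD_SIZE bytes aligned to THREAD_SIZE, so the low
         * bits of any pointer into it are its offset from the base. */
        char *stack = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
        uintptr_t sp, stack_left;

        if (!stack)
                return 1;

        sp = (uintptr_t)(stack + THREAD_SIZE - 512);    /* 512 bytes used */

        /* Same computation as pax_check_alloca(): bytes left below sp. */
        stack_left = sp & (THREAD_SIZE - 1);
        printf("stack_left = %lu\n", (unsigned long)stack_left);  /* 7680 */

        free(stack);
        return 0;
}
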
22245diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22246index ff86f19..73eabf4 100644
22247--- a/arch/x86/kernel/dumpstack_64.c
22248+++ b/arch/x86/kernel/dumpstack_64.c
22249@@ -153,12 +153,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22250 const struct stacktrace_ops *ops, void *data)
22251 {
22252 const unsigned cpu = get_cpu();
22253- struct thread_info *tinfo;
22254 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22255 unsigned long dummy;
22256 unsigned used = 0;
22257 int graph = 0;
22258 int done = 0;
22259+ void *stack_start;
22260
22261 if (!task)
22262 task = current;
22263@@ -179,7 +179,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22264 * current stack address. If the stacks consist of nested
22265 * exceptions
22266 */
22267- tinfo = task_thread_info(task);
22268 while (!done) {
22269 unsigned long *stack_end;
22270 enum stack_type stype;
22271@@ -202,7 +201,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22272 if (ops->stack(data, id) < 0)
22273 break;
22274
22275- bp = ops->walk_stack(tinfo, stack, bp, ops,
22276+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22277 data, stack_end, &graph);
22278 ops->stack(data, "<EOE>");
22279 /*
22280@@ -210,6 +209,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22281 * second-to-last pointer (index -2 to end) in the
22282 * exception stack:
22283 */
22284+ if ((u16)stack_end[-1] != __KERNEL_DS)
22285+ goto out;
22286 stack = (unsigned long *) stack_end[-2];
22287 done = 0;
22288 break;
22289@@ -218,7 +219,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22290
22291 if (ops->stack(data, "IRQ") < 0)
22292 break;
22293- bp = ops->walk_stack(tinfo, stack, bp,
22294+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22295 ops, data, stack_end, &graph);
22296 /*
22297 * We link to the next stack (which would be
22298@@ -240,7 +241,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22299 /*
22300 * This handles the process stack:
22301 */
22302- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22303+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22304+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22305+out:
22306 put_cpu();
22307 }
22308 EXPORT_SYMBOL(dump_trace);
22309@@ -344,8 +347,55 @@ int is_valid_bugaddr(unsigned long ip)
22310 {
22311 unsigned short ud2;
22312
22313- if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
22314+ if (probe_kernel_address((unsigned short *)ip, ud2))
22315 return 0;
22316
22317 return ud2 == 0x0b0f;
22318 }
22319+
22320+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22321+void pax_check_alloca(unsigned long size)
22322+{
22323+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22324+ unsigned cpu, used;
22325+ char *id;
22326+
22327+ /* check the process stack first */
22328+ stack_start = (unsigned long)task_stack_page(current);
22329+ stack_end = stack_start + THREAD_SIZE;
22330+ if (likely(stack_start <= sp && sp < stack_end)) {
22331+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22332+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22333+ return;
22334+ }
22335+
22336+ cpu = get_cpu();
22337+
22338+ /* check the irq stacks */
22339+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22340+ stack_start = stack_end - IRQ_STACK_SIZE;
22341+ if (stack_start <= sp && sp < stack_end) {
22342+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22343+ put_cpu();
22344+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22345+ return;
22346+ }
22347+
22348+ /* check the exception stacks */
22349+ used = 0;
22350+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22351+ stack_start = stack_end - EXCEPTION_STKSZ;
22352+ if (stack_end && stack_start <= sp && sp < stack_end) {
22353+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22354+ put_cpu();
22355+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22356+ return;
22357+ }
22358+
22359+ put_cpu();
22360+
22361+ /* unknown stack */
22362+ BUG();
22363+}
22364+EXPORT_SYMBOL(pax_check_alloca);
22365+#endif
22366diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22367index dd2f07a..845dc05 100644
22368--- a/arch/x86/kernel/e820.c
22369+++ b/arch/x86/kernel/e820.c
22370@@ -802,8 +802,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
22371
22372 static void early_panic(char *msg)
22373 {
22374- early_printk(msg);
22375- panic(msg);
22376+ early_printk("%s", msg);
22377+ panic("%s", msg);
22378 }
22379
22380 static int userdef __initdata;
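
early_panic() previously passed msg — which can carry text derived from the e820/memmap command line — straight through as a format string; panic("%s", msg) prints it verbatim instead. The userspace equivalent of the bug and the fix:

#include <stdio.h>

int main(void)
{
        /* Imagine this text arrived via the kernel command line. */
        const char *msg = "unusable memory map: 100%s corrupt";

        /* printf(msg) would interpret the stray %s and read a garbage
         * va_arg - the same hazard early_panic()/panic(msg) had.
         * Passing it as data, as panic("%s", msg) now does, is safe. */
        printf("%s\n", msg);
        return 0;
}
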
22381diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
22382index 01d1c18..8073693 100644
22383--- a/arch/x86/kernel/early_printk.c
22384+++ b/arch/x86/kernel/early_printk.c
22385@@ -7,6 +7,7 @@
22386 #include <linux/pci_regs.h>
22387 #include <linux/pci_ids.h>
22388 #include <linux/errno.h>
22389+#include <linux/sched.h>
22390 #include <asm/io.h>
22391 #include <asm/processor.h>
22392 #include <asm/fcntl.h>
22393diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
22394index 000d419..8f66802 100644
22395--- a/arch/x86/kernel/entry_32.S
22396+++ b/arch/x86/kernel/entry_32.S
22397@@ -177,13 +177,154 @@
22398 /*CFI_REL_OFFSET gs, PT_GS*/
22399 .endm
22400 .macro SET_KERNEL_GS reg
22401+
22402+#ifdef CONFIG_CC_STACKPROTECTOR
22403 movl $(__KERNEL_STACK_CANARY), \reg
22404+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22405+ movl $(__USER_DS), \reg
22406+#else
22407+ xorl \reg, \reg
22408+#endif
22409+
22410 movl \reg, %gs
22411 .endm
22412
22413 #endif /* CONFIG_X86_32_LAZY_GS */
22414
22415-.macro SAVE_ALL
22416+.macro pax_enter_kernel
22417+#ifdef CONFIG_PAX_KERNEXEC
22418+ call pax_enter_kernel
22419+#endif
22420+.endm
22421+
22422+.macro pax_exit_kernel
22423+#ifdef CONFIG_PAX_KERNEXEC
22424+ call pax_exit_kernel
22425+#endif
22426+.endm
22427+
22428+#ifdef CONFIG_PAX_KERNEXEC
22429+ENTRY(pax_enter_kernel)
22430+#ifdef CONFIG_PARAVIRT
22431+ pushl %eax
22432+ pushl %ecx
22433+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
22434+ mov %eax, %esi
22435+#else
22436+ mov %cr0, %esi
22437+#endif
22438+ bts $16, %esi
22439+ jnc 1f
22440+ mov %cs, %esi
22441+ cmp $__KERNEL_CS, %esi
22442+ jz 3f
22443+ ljmp $__KERNEL_CS, $3f
22444+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
22445+2:
22446+#ifdef CONFIG_PARAVIRT
22447+ mov %esi, %eax
22448+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
22449+#else
22450+ mov %esi, %cr0
22451+#endif
22452+3:
22453+#ifdef CONFIG_PARAVIRT
22454+ popl %ecx
22455+ popl %eax
22456+#endif
22457+ ret
22458+ENDPROC(pax_enter_kernel)
22459+
22460+ENTRY(pax_exit_kernel)
22461+#ifdef CONFIG_PARAVIRT
22462+ pushl %eax
22463+ pushl %ecx
22464+#endif
22465+ mov %cs, %esi
22466+ cmp $__KERNEXEC_KERNEL_CS, %esi
22467+ jnz 2f
22468+#ifdef CONFIG_PARAVIRT
22469+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
22470+ mov %eax, %esi
22471+#else
22472+ mov %cr0, %esi
22473+#endif
22474+ btr $16, %esi
22475+ ljmp $__KERNEL_CS, $1f
22476+1:
22477+#ifdef CONFIG_PARAVIRT
22478+ mov %esi, %eax
22479+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
22480+#else
22481+ mov %esi, %cr0
22482+#endif
22483+2:
22484+#ifdef CONFIG_PARAVIRT
22485+ popl %ecx
22486+ popl %eax
22487+#endif
22488+ ret
22489+ENDPROC(pax_exit_kernel)
22490+#endif
22491+
22492+ .macro pax_erase_kstack
22493+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22494+ call pax_erase_kstack
22495+#endif
22496+ .endm
22497+
22498+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22499+/*
22500+ * ebp: thread_info
22501+ */
22502+ENTRY(pax_erase_kstack)
22503+ pushl %edi
22504+ pushl %ecx
22505+ pushl %eax
22506+
22507+ mov TI_lowest_stack(%ebp), %edi
22508+ mov $-0xBEEF, %eax
22509+ std
22510+
22511+1: mov %edi, %ecx
22512+ and $THREAD_SIZE_asm - 1, %ecx
22513+ shr $2, %ecx
22514+ repne scasl
22515+ jecxz 2f
22516+
22517+ cmp $2*16, %ecx
22518+ jc 2f
22519+
22520+ mov $2*16, %ecx
22521+ repe scasl
22522+ jecxz 2f
22523+ jne 1b
22524+
22525+2: cld
22526+ or $2*4, %edi
22527+ mov %esp, %ecx
22528+ sub %edi, %ecx
22529+
22530+ cmp $THREAD_SIZE_asm, %ecx
22531+ jb 3f
22532+ ud2
22533+3:
22534+
22535+ shr $2, %ecx
22536+ rep stosl
22537+
22538+ mov TI_task_thread_sp0(%ebp), %edi
22539+ sub $128, %edi
22540+ mov %edi, TI_lowest_stack(%ebp)
22541+
22542+ popl %eax
22543+ popl %ecx
22544+ popl %edi
22545+ ret
22546+ENDPROC(pax_erase_kstack)
22547+#endif
22548+
22549+.macro __SAVE_ALL _DS
22550 cld
22551 PUSH_GS
22552 pushl_cfi %fs
22553@@ -206,7 +347,7 @@
22554 CFI_REL_OFFSET ecx, 0
22555 pushl_cfi %ebx
22556 CFI_REL_OFFSET ebx, 0
22557- movl $(__USER_DS), %edx
22558+ movl $\_DS, %edx
22559 movl %edx, %ds
22560 movl %edx, %es
22561 movl $(__KERNEL_PERCPU), %edx
22562@@ -214,6 +355,15 @@
22563 SET_KERNEL_GS %edx
22564 .endm
22565
22566+.macro SAVE_ALL
22567+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22568+ __SAVE_ALL __KERNEL_DS
22569+ pax_enter_kernel
22570+#else
22571+ __SAVE_ALL __USER_DS
22572+#endif
22573+.endm
22574+
22575 .macro RESTORE_INT_REGS
22576 popl_cfi %ebx
22577 CFI_RESTORE ebx
22578@@ -297,7 +447,7 @@ ENTRY(ret_from_fork)
22579 popfl_cfi
22580 jmp syscall_exit
22581 CFI_ENDPROC
22582-END(ret_from_fork)
22583+ENDPROC(ret_from_fork)
22584
22585 ENTRY(ret_from_kernel_thread)
22586 CFI_STARTPROC
22587@@ -340,7 +490,15 @@ ret_from_intr:
22588 andl $SEGMENT_RPL_MASK, %eax
22589 #endif
22590 cmpl $USER_RPL, %eax
22591+
22592+#ifdef CONFIG_PAX_KERNEXEC
22593+ jae resume_userspace
22594+
22595+ pax_exit_kernel
22596+ jmp resume_kernel
22597+#else
22598 jb resume_kernel # not returning to v8086 or userspace
22599+#endif
22600
22601 ENTRY(resume_userspace)
22602 LOCKDEP_SYS_EXIT
22603@@ -352,8 +510,8 @@ ENTRY(resume_userspace)
22604 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22605 # int/exception return?
22606 jne work_pending
22607- jmp restore_all
22608-END(ret_from_exception)
22609+ jmp restore_all_pax
22610+ENDPROC(ret_from_exception)
22611
22612 #ifdef CONFIG_PREEMPT
22613 ENTRY(resume_kernel)
22614@@ -365,7 +523,7 @@ need_resched:
22615 jz restore_all
22616 call preempt_schedule_irq
22617 jmp need_resched
22618-END(resume_kernel)
22619+ENDPROC(resume_kernel)
22620 #endif
22621 CFI_ENDPROC
22622
22623@@ -395,30 +553,45 @@ sysenter_past_esp:
22624 /*CFI_REL_OFFSET cs, 0*/
22625 /*
22626 * Push current_thread_info()->sysenter_return to the stack.
22627- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
22628- * pushed above; +8 corresponds to copy_thread's esp0 setting.
22629 */
22630- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
22631+ pushl_cfi $0
22632 CFI_REL_OFFSET eip, 0
22633
22634 pushl_cfi %eax
22635 SAVE_ALL
22636+ GET_THREAD_INFO(%ebp)
22637+ movl TI_sysenter_return(%ebp),%ebp
22638+ movl %ebp,PT_EIP(%esp)
22639 ENABLE_INTERRUPTS(CLBR_NONE)
22640
22641 /*
22642 * Load the potential sixth argument from user stack.
22643 * Careful about security.
22644 */
22645+ movl PT_OLDESP(%esp),%ebp
22646+
22647+#ifdef CONFIG_PAX_MEMORY_UDEREF
22648+ mov PT_OLDSS(%esp),%ds
22649+1: movl %ds:(%ebp),%ebp
22650+ push %ss
22651+ pop %ds
22652+#else
22653 cmpl $__PAGE_OFFSET-3,%ebp
22654 jae syscall_fault
22655 ASM_STAC
22656 1: movl (%ebp),%ebp
22657 ASM_CLAC
22658+#endif
22659+
22660 movl %ebp,PT_EBP(%esp)
22661 _ASM_EXTABLE(1b,syscall_fault)
22662
22663 GET_THREAD_INFO(%ebp)
22664
22665+#ifdef CONFIG_PAX_RANDKSTACK
22666+ pax_erase_kstack
22667+#endif
22668+
22669 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22670 jnz sysenter_audit
22671 sysenter_do_call:
22672@@ -434,12 +607,24 @@ sysenter_after_call:
22673 testl $_TIF_ALLWORK_MASK, %ecx
22674 jne sysexit_audit
22675 sysenter_exit:
22676+
22677+#ifdef CONFIG_PAX_RANDKSTACK
22678+ pushl_cfi %eax
22679+ movl %esp, %eax
22680+ call pax_randomize_kstack
22681+ popl_cfi %eax
22682+#endif
22683+
22684+ pax_erase_kstack
22685+
22686 /* if something modifies registers it must also disable sysexit */
22687 movl PT_EIP(%esp), %edx
22688 movl PT_OLDESP(%esp), %ecx
22689 xorl %ebp,%ebp
22690 TRACE_IRQS_ON
22691 1: mov PT_FS(%esp), %fs
22692+2: mov PT_DS(%esp), %ds
22693+3: mov PT_ES(%esp), %es
22694 PTGS_TO_GS
22695 ENABLE_INTERRUPTS_SYSEXIT
22696
22697@@ -453,6 +638,9 @@ sysenter_audit:
22698 pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
22699 pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
22700 call __audit_syscall_entry
22701+
22702+ pax_erase_kstack
22703+
22704 popl_cfi %ecx /* get that remapped edx off the stack */
22705 popl_cfi %ecx /* get that remapped esi off the stack */
22706 movl PT_EAX(%esp),%eax /* reload syscall number */
22707@@ -479,10 +667,16 @@ sysexit_audit:
22708
22709 CFI_ENDPROC
22710 .pushsection .fixup,"ax"
22711-2: movl $0,PT_FS(%esp)
22712+4: movl $0,PT_FS(%esp)
22713+ jmp 1b
22714+5: movl $0,PT_DS(%esp)
22715+ jmp 1b
22716+6: movl $0,PT_ES(%esp)
22717 jmp 1b
22718 .popsection
22719- _ASM_EXTABLE(1b,2b)
22720+ _ASM_EXTABLE(1b,4b)
22721+ _ASM_EXTABLE(2b,5b)
22722+ _ASM_EXTABLE(3b,6b)
22723 PTGS_TO_GS_EX
22724 ENDPROC(ia32_sysenter_target)
22725
22726@@ -493,6 +687,11 @@ ENTRY(system_call)
22727 pushl_cfi %eax # save orig_eax
22728 SAVE_ALL
22729 GET_THREAD_INFO(%ebp)
22730+
22731+#ifdef CONFIG_PAX_RANDKSTACK
22732+ pax_erase_kstack
22733+#endif
22734+
22735 # system call tracing in operation / emulation
22736 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22737 jnz syscall_trace_entry
22738@@ -512,6 +711,15 @@ syscall_exit:
22739 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22740 jne syscall_exit_work
22741
22742+restore_all_pax:
22743+
22744+#ifdef CONFIG_PAX_RANDKSTACK
22745+ movl %esp, %eax
22746+ call pax_randomize_kstack
22747+#endif
22748+
22749+ pax_erase_kstack
22750+
22751 restore_all:
22752 TRACE_IRQS_IRET
22753 restore_all_notrace:
22754@@ -566,14 +774,34 @@ ldt_ss:
22755 * compensating for the offset by changing to the ESPFIX segment with
22756 * a base address that matches for the difference.
22757 */
22758-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22759+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22760 mov %esp, %edx /* load kernel esp */
22761 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22762 mov %dx, %ax /* eax: new kernel esp */
22763 sub %eax, %edx /* offset (low word is 0) */
22764+#ifdef CONFIG_SMP
22765+ movl PER_CPU_VAR(cpu_number), %ebx
22766+ shll $PAGE_SHIFT_asm, %ebx
22767+ addl $cpu_gdt_table, %ebx
22768+#else
22769+ movl $cpu_gdt_table, %ebx
22770+#endif
22771 shr $16, %edx
22772- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22773- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22774+
22775+#ifdef CONFIG_PAX_KERNEXEC
22776+ mov %cr0, %esi
22777+ btr $16, %esi
22778+ mov %esi, %cr0
22779+#endif
22780+
22781+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22782+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22783+
22784+#ifdef CONFIG_PAX_KERNEXEC
22785+ bts $16, %esi
22786+ mov %esi, %cr0
22787+#endif
22788+
22789 pushl_cfi $__ESPFIX_SS
22790 pushl_cfi %eax /* new kernel esp */
22791 /* Disable interrupts, but do not irqtrace this section: we
22792@@ -603,20 +831,18 @@ work_resched:
22793 movl TI_flags(%ebp), %ecx
22794 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22795 # than syscall tracing?
22796- jz restore_all
22797+ jz restore_all_pax
22798 testb $_TIF_NEED_RESCHED, %cl
22799 jnz work_resched
22800
22801 work_notifysig: # deal with pending signals and
22802 # notify-resume requests
22803+ movl %esp, %eax
22804 #ifdef CONFIG_VM86
22805 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22806- movl %esp, %eax
22807 jne work_notifysig_v86 # returning to kernel-space or
22808 # vm86-space
22809 1:
22810-#else
22811- movl %esp, %eax
22812 #endif
22813 TRACE_IRQS_ON
22814 ENABLE_INTERRUPTS(CLBR_NONE)
22815@@ -637,7 +863,7 @@ work_notifysig_v86:
22816 movl %eax, %esp
22817 jmp 1b
22818 #endif
22819-END(work_pending)
22820+ENDPROC(work_pending)
22821
22822 # perform syscall exit tracing
22823 ALIGN
22824@@ -645,11 +871,14 @@ syscall_trace_entry:
22825 movl $-ENOSYS,PT_EAX(%esp)
22826 movl %esp, %eax
22827 call syscall_trace_enter
22828+
22829+ pax_erase_kstack
22830+
22831 /* What it returned is what we'll actually use. */
22832 cmpl $(NR_syscalls), %eax
22833 jnae syscall_call
22834 jmp syscall_exit
22835-END(syscall_trace_entry)
22836+ENDPROC(syscall_trace_entry)
22837
22838 # perform syscall exit tracing
22839 ALIGN
22840@@ -662,26 +891,30 @@ syscall_exit_work:
22841 movl %esp, %eax
22842 call syscall_trace_leave
22843 jmp resume_userspace
22844-END(syscall_exit_work)
22845+ENDPROC(syscall_exit_work)
22846 CFI_ENDPROC
22847
22848 RING0_INT_FRAME # can't unwind into user space anyway
22849 syscall_fault:
22850+#ifdef CONFIG_PAX_MEMORY_UDEREF
22851+ push %ss
22852+ pop %ds
22853+#endif
22854 ASM_CLAC
22855 GET_THREAD_INFO(%ebp)
22856 movl $-EFAULT,PT_EAX(%esp)
22857 jmp resume_userspace
22858-END(syscall_fault)
22859+ENDPROC(syscall_fault)
22860
22861 syscall_badsys:
22862 movl $-ENOSYS,%eax
22863 jmp syscall_after_call
22864-END(syscall_badsys)
22865+ENDPROC(syscall_badsys)
22866
22867 sysenter_badsys:
22868 movl $-ENOSYS,%eax
22869 jmp sysenter_after_call
22870-END(sysenter_badsys)
22871+ENDPROC(sysenter_badsys)
22872 CFI_ENDPROC
22873
22874 .macro FIXUP_ESPFIX_STACK
22875@@ -694,8 +927,15 @@ END(sysenter_badsys)
22876 */
22877 #ifdef CONFIG_X86_ESPFIX32
22878 /* fixup the stack */
22879- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
22880- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
22881+#ifdef CONFIG_SMP
22882+ movl PER_CPU_VAR(cpu_number), %ebx
22883+ shll $PAGE_SHIFT_asm, %ebx
22884+ addl $cpu_gdt_table, %ebx
22885+#else
22886+ movl $cpu_gdt_table, %ebx
22887+#endif
22888+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
22889+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
22890 shl $16, %eax
22891 addl %esp, %eax /* the adjusted stack pointer */
22892 pushl_cfi $__KERNEL_DS
22893@@ -751,7 +991,7 @@ vector=vector+1
22894 .endr
22895 2: jmp common_interrupt
22896 .endr
22897-END(irq_entries_start)
22898+ENDPROC(irq_entries_start)
22899
22900 .previous
22901 END(interrupt)
22902@@ -808,7 +1048,7 @@ ENTRY(coprocessor_error)
22903 pushl_cfi $do_coprocessor_error
22904 jmp error_code
22905 CFI_ENDPROC
22906-END(coprocessor_error)
22907+ENDPROC(coprocessor_error)
22908
22909 ENTRY(simd_coprocessor_error)
22910 RING0_INT_FRAME
22911@@ -821,7 +1061,7 @@ ENTRY(simd_coprocessor_error)
22912 .section .altinstructions,"a"
22913 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
22914 .previous
22915-.section .altinstr_replacement,"ax"
22916+.section .altinstr_replacement,"a"
22917 663: pushl $do_simd_coprocessor_error
22918 664:
22919 .previous
22920@@ -830,7 +1070,7 @@ ENTRY(simd_coprocessor_error)
22921 #endif
22922 jmp error_code
22923 CFI_ENDPROC
22924-END(simd_coprocessor_error)
22925+ENDPROC(simd_coprocessor_error)
22926
22927 ENTRY(device_not_available)
22928 RING0_INT_FRAME
22929@@ -839,18 +1079,18 @@ ENTRY(device_not_available)
22930 pushl_cfi $do_device_not_available
22931 jmp error_code
22932 CFI_ENDPROC
22933-END(device_not_available)
22934+ENDPROC(device_not_available)
22935
22936 #ifdef CONFIG_PARAVIRT
22937 ENTRY(native_iret)
22938 iret
22939 _ASM_EXTABLE(native_iret, iret_exc)
22940-END(native_iret)
22941+ENDPROC(native_iret)
22942
22943 ENTRY(native_irq_enable_sysexit)
22944 sti
22945 sysexit
22946-END(native_irq_enable_sysexit)
22947+ENDPROC(native_irq_enable_sysexit)
22948 #endif
22949
22950 ENTRY(overflow)
22951@@ -860,7 +1100,7 @@ ENTRY(overflow)
22952 pushl_cfi $do_overflow
22953 jmp error_code
22954 CFI_ENDPROC
22955-END(overflow)
22956+ENDPROC(overflow)
22957
22958 ENTRY(bounds)
22959 RING0_INT_FRAME
22960@@ -869,7 +1109,7 @@ ENTRY(bounds)
22961 pushl_cfi $do_bounds
22962 jmp error_code
22963 CFI_ENDPROC
22964-END(bounds)
22965+ENDPROC(bounds)
22966
22967 ENTRY(invalid_op)
22968 RING0_INT_FRAME
22969@@ -878,7 +1118,7 @@ ENTRY(invalid_op)
22970 pushl_cfi $do_invalid_op
22971 jmp error_code
22972 CFI_ENDPROC
22973-END(invalid_op)
22974+ENDPROC(invalid_op)
22975
22976 ENTRY(coprocessor_segment_overrun)
22977 RING0_INT_FRAME
22978@@ -887,7 +1127,7 @@ ENTRY(coprocessor_segment_overrun)
22979 pushl_cfi $do_coprocessor_segment_overrun
22980 jmp error_code
22981 CFI_ENDPROC
22982-END(coprocessor_segment_overrun)
22983+ENDPROC(coprocessor_segment_overrun)
22984
22985 ENTRY(invalid_TSS)
22986 RING0_EC_FRAME
22987@@ -895,7 +1135,7 @@ ENTRY(invalid_TSS)
22988 pushl_cfi $do_invalid_TSS
22989 jmp error_code
22990 CFI_ENDPROC
22991-END(invalid_TSS)
22992+ENDPROC(invalid_TSS)
22993
22994 ENTRY(segment_not_present)
22995 RING0_EC_FRAME
22996@@ -903,7 +1143,7 @@ ENTRY(segment_not_present)
22997 pushl_cfi $do_segment_not_present
22998 jmp error_code
22999 CFI_ENDPROC
23000-END(segment_not_present)
23001+ENDPROC(segment_not_present)
23002
23003 ENTRY(stack_segment)
23004 RING0_EC_FRAME
23005@@ -911,7 +1151,7 @@ ENTRY(stack_segment)
23006 pushl_cfi $do_stack_segment
23007 jmp error_code
23008 CFI_ENDPROC
23009-END(stack_segment)
23010+ENDPROC(stack_segment)
23011
23012 ENTRY(alignment_check)
23013 RING0_EC_FRAME
23014@@ -919,7 +1159,7 @@ ENTRY(alignment_check)
23015 pushl_cfi $do_alignment_check
23016 jmp error_code
23017 CFI_ENDPROC
23018-END(alignment_check)
23019+ENDPROC(alignment_check)
23020
23021 ENTRY(divide_error)
23022 RING0_INT_FRAME
23023@@ -928,7 +1168,7 @@ ENTRY(divide_error)
23024 pushl_cfi $do_divide_error
23025 jmp error_code
23026 CFI_ENDPROC
23027-END(divide_error)
23028+ENDPROC(divide_error)
23029
23030 #ifdef CONFIG_X86_MCE
23031 ENTRY(machine_check)
23032@@ -938,7 +1178,7 @@ ENTRY(machine_check)
23033 pushl_cfi machine_check_vector
23034 jmp error_code
23035 CFI_ENDPROC
23036-END(machine_check)
23037+ENDPROC(machine_check)
23038 #endif
23039
23040 ENTRY(spurious_interrupt_bug)
23041@@ -948,7 +1188,7 @@ ENTRY(spurious_interrupt_bug)
23042 pushl_cfi $do_spurious_interrupt_bug
23043 jmp error_code
23044 CFI_ENDPROC
23045-END(spurious_interrupt_bug)
23046+ENDPROC(spurious_interrupt_bug)
23047
23048 #ifdef CONFIG_XEN
23049 /* Xen doesn't set %esp to be precisely what the normal sysenter
23050@@ -1054,7 +1294,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
23051
23052 ENTRY(mcount)
23053 ret
23054-END(mcount)
23055+ENDPROC(mcount)
23056
23057 ENTRY(ftrace_caller)
23058 pushl %eax
23059@@ -1084,7 +1324,7 @@ ftrace_graph_call:
23060 .globl ftrace_stub
23061 ftrace_stub:
23062 ret
23063-END(ftrace_caller)
23064+ENDPROC(ftrace_caller)
23065
23066 ENTRY(ftrace_regs_caller)
23067 pushf /* push flags before compare (in cs location) */
23068@@ -1182,7 +1422,7 @@ trace:
23069 popl %ecx
23070 popl %eax
23071 jmp ftrace_stub
23072-END(mcount)
23073+ENDPROC(mcount)
23074 #endif /* CONFIG_DYNAMIC_FTRACE */
23075 #endif /* CONFIG_FUNCTION_TRACER */
23076
23077@@ -1200,7 +1440,7 @@ ENTRY(ftrace_graph_caller)
23078 popl %ecx
23079 popl %eax
23080 ret
23081-END(ftrace_graph_caller)
23082+ENDPROC(ftrace_graph_caller)
23083
23084 .globl return_to_handler
23085 return_to_handler:
23086@@ -1261,15 +1501,18 @@ error_code:
23087 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
23088 REG_TO_PTGS %ecx
23089 SET_KERNEL_GS %ecx
23090- movl $(__USER_DS), %ecx
23091+ movl $(__KERNEL_DS), %ecx
23092 movl %ecx, %ds
23093 movl %ecx, %es
23094+
23095+ pax_enter_kernel
23096+
23097 TRACE_IRQS_OFF
23098 movl %esp,%eax # pt_regs pointer
23099 call *%edi
23100 jmp ret_from_exception
23101 CFI_ENDPROC
23102-END(page_fault)
23103+ENDPROC(page_fault)
23104
23105 /*
23106 * Debug traps and NMI can happen at the one SYSENTER instruction
23107@@ -1312,7 +1555,7 @@ debug_stack_correct:
23108 call do_debug
23109 jmp ret_from_exception
23110 CFI_ENDPROC
23111-END(debug)
23112+ENDPROC(debug)
23113
23114 /*
23115 * NMI is doubly nasty. It can happen _while_ we're handling
23116@@ -1352,6 +1595,9 @@ nmi_stack_correct:
23117 xorl %edx,%edx # zero error code
23118 movl %esp,%eax # pt_regs pointer
23119 call do_nmi
23120+
23121+ pax_exit_kernel
23122+
23123 jmp restore_all_notrace
23124 CFI_ENDPROC
23125
23126@@ -1389,13 +1635,16 @@ nmi_espfix_stack:
23127 FIXUP_ESPFIX_STACK # %eax == %esp
23128 xorl %edx,%edx # zero error code
23129 call do_nmi
23130+
23131+ pax_exit_kernel
23132+
23133 RESTORE_REGS
23134 lss 12+4(%esp), %esp # back to espfix stack
23135 CFI_ADJUST_CFA_OFFSET -24
23136 jmp irq_return
23137 #endif
23138 CFI_ENDPROC
23139-END(nmi)
23140+ENDPROC(nmi)
23141
23142 ENTRY(int3)
23143 RING0_INT_FRAME
23144@@ -1408,14 +1657,14 @@ ENTRY(int3)
23145 call do_int3
23146 jmp ret_from_exception
23147 CFI_ENDPROC
23148-END(int3)
23149+ENDPROC(int3)
23150
23151 ENTRY(general_protection)
23152 RING0_EC_FRAME
23153 pushl_cfi $do_general_protection
23154 jmp error_code
23155 CFI_ENDPROC
23156-END(general_protection)
23157+ENDPROC(general_protection)
23158
23159 #ifdef CONFIG_KVM_GUEST
23160 ENTRY(async_page_fault)
23161@@ -1424,6 +1673,6 @@ ENTRY(async_page_fault)
23162 pushl_cfi $do_async_page_fault
23163 jmp error_code
23164 CFI_ENDPROC
23165-END(async_page_fault)
23166+ENDPROC(async_page_fault)
23167 #endif
23168
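
pax_erase_kstack above scans down from TI_lowest_stack for a run of -0xBEEF poison words, overwrites everything from that boundary up to the live stack pointer, and resets the low-water mark near the stack top, so data left behind by deep call chains cannot leak through later stack reads. A simplified C sketch of the idea (assumptions: a toy word array standing in for the kernel stack, and an explicitly tracked low-water mark instead of the scan-for-poison search the asm performs):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define POISON 0xFFFF4111u      /* -0xBEEF, the pax_erase_kstack pattern */
#define WORDS  1024             /* toy stack; index 0 is the deepest word */

static uint32_t stack[WORDS];
static size_t lowest = WORDS;   /* low-water mark, like TI_lowest_stack */

/* Re-poison every word between the deepest point ever dirtied and the
 * live "stack pointer". The asm version finds the boundary by scanning
 * for a run of poison words; tracking it explicitly is the same idea. */
static void erase_kstack(size_t sp_idx)
{
        size_t i;

        for (i = lowest; i < sp_idx; i++)
                stack[i] = POISON;
        lowest = sp_idx;
}

int main(void)
{
        size_t i;

        for (i = 0; i < WORDS; i++)
                stack[i] = POISON;

        /* A deep call chain dirties words 100..1023, then unwinds. */
        for (i = 100; i < WORDS; i++)
                stack[i] = (uint32_t)i;
        lowest = 100;

        erase_kstack(900);      /* live frames now only use 900..1023 */
        printf("stack[500] = %#x\n", stack[500]);   /* poison again */
        return 0;
}
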
23169diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23170index 4ee9a23..c786610 100644
23171--- a/arch/x86/kernel/entry_64.S
23172+++ b/arch/x86/kernel/entry_64.S
23173@@ -59,6 +59,8 @@
23174 #include <asm/smap.h>
23175 #include <asm/pgtable_types.h>
23176 #include <linux/err.h>
23177+#include <asm/pgtable.h>
23178+#include <asm/alternative-asm.h>
23179
23180 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23181 #include <linux/elf-em.h>
23182@@ -81,6 +83,431 @@ ENTRY(native_usergs_sysret64)
23183 ENDPROC(native_usergs_sysret64)
23184 #endif /* CONFIG_PARAVIRT */
23185
23186+ .macro ljmpq sel, off
23187+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23188+ .byte 0x48; ljmp *1234f(%rip)
23189+ .pushsection .rodata
23190+ .align 16
23191+ 1234: .quad \off; .word \sel
23192+ .popsection
23193+#else
23194+ pushq $\sel
23195+ pushq $\off
23196+ lretq
23197+#endif
23198+ .endm
23199+
23200+ .macro pax_enter_kernel
23201+ pax_set_fptr_mask
23202+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23203+ call pax_enter_kernel
23204+#endif
23205+ .endm
23206+
23207+ .macro pax_exit_kernel
23208+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23209+ call pax_exit_kernel
23210+#endif
23211+
23212+ .endm
23213+
23214+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23215+ENTRY(pax_enter_kernel)
23216+ pushq %rdi
23217+
23218+#ifdef CONFIG_PARAVIRT
23219+ PV_SAVE_REGS(CLBR_RDI)
23220+#endif
23221+
23222+#ifdef CONFIG_PAX_KERNEXEC
23223+ GET_CR0_INTO_RDI
23224+ bts $16,%rdi
23225+ jnc 3f
23226+ mov %cs,%edi
23227+ cmp $__KERNEL_CS,%edi
23228+ jnz 2f
23229+1:
23230+#endif
23231+
23232+#ifdef CONFIG_PAX_MEMORY_UDEREF
23233+ 661: jmp 111f
23234+ .pushsection .altinstr_replacement, "a"
23235+ 662: ASM_NOP2
23236+ .popsection
23237+ .pushsection .altinstructions, "a"
23238+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23239+ .popsection
23240+ GET_CR3_INTO_RDI
23241+ cmp $0,%dil
23242+ jnz 112f
23243+ mov $__KERNEL_DS,%edi
23244+ mov %edi,%ss
23245+ jmp 111f
23246+112: cmp $1,%dil
23247+ jz 113f
23248+ ud2
23249+113: sub $4097,%rdi
23250+ bts $63,%rdi
23251+ SET_RDI_INTO_CR3
23252+ mov $__UDEREF_KERNEL_DS,%edi
23253+ mov %edi,%ss
23254+111:
23255+#endif
23256+
23257+#ifdef CONFIG_PARAVIRT
23258+ PV_RESTORE_REGS(CLBR_RDI)
23259+#endif
23260+
23261+ popq %rdi
23262+ pax_force_retaddr
23263+ retq
23264+
23265+#ifdef CONFIG_PAX_KERNEXEC
23266+2: ljmpq __KERNEL_CS,1b
23267+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23268+4: SET_RDI_INTO_CR0
23269+ jmp 1b
23270+#endif
23271+ENDPROC(pax_enter_kernel)
23272+
23273+ENTRY(pax_exit_kernel)
23274+ pushq %rdi
23275+
23276+#ifdef CONFIG_PARAVIRT
23277+ PV_SAVE_REGS(CLBR_RDI)
23278+#endif
23279+
23280+#ifdef CONFIG_PAX_KERNEXEC
23281+ mov %cs,%rdi
23282+ cmp $__KERNEXEC_KERNEL_CS,%edi
23283+ jz 2f
23284+ GET_CR0_INTO_RDI
23285+ bts $16,%rdi
23286+ jnc 4f
23287+1:
23288+#endif
23289+
23290+#ifdef CONFIG_PAX_MEMORY_UDEREF
23291+ 661: jmp 111f
23292+ .pushsection .altinstr_replacement, "a"
23293+ 662: ASM_NOP2
23294+ .popsection
23295+ .pushsection .altinstructions, "a"
23296+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23297+ .popsection
23298+ mov %ss,%edi
23299+ cmp $__UDEREF_KERNEL_DS,%edi
23300+ jnz 111f
23301+ GET_CR3_INTO_RDI
23302+ cmp $0,%dil
23303+ jz 112f
23304+ ud2
23305+112: add $4097,%rdi
23306+ bts $63,%rdi
23307+ SET_RDI_INTO_CR3
23308+ mov $__KERNEL_DS,%edi
23309+ mov %edi,%ss
23310+111:
23311+#endif
23312+
23313+#ifdef CONFIG_PARAVIRT
23314+ PV_RESTORE_REGS(CLBR_RDI);
23315+#endif
23316+
23317+ popq %rdi
23318+ pax_force_retaddr
23319+ retq
23320+
23321+#ifdef CONFIG_PAX_KERNEXEC
23322+2: GET_CR0_INTO_RDI
23323+ btr $16,%rdi
23324+ jnc 4f
23325+ ljmpq __KERNEL_CS,3f
23326+3: SET_RDI_INTO_CR0
23327+ jmp 1b
23328+4: ud2
23329+ jmp 4b
23330+#endif
23331+ENDPROC(pax_exit_kernel)
23332+#endif
23333+
23334+ .macro pax_enter_kernel_user
23335+ pax_set_fptr_mask
23336+#ifdef CONFIG_PAX_MEMORY_UDEREF
23337+ call pax_enter_kernel_user
23338+#endif
23339+ .endm
23340+
23341+ .macro pax_exit_kernel_user
23342+#ifdef CONFIG_PAX_MEMORY_UDEREF
23343+ call pax_exit_kernel_user
23344+#endif
23345+#ifdef CONFIG_PAX_RANDKSTACK
23346+ pushq %rax
23347+ pushq %r11
23348+ call pax_randomize_kstack
23349+ popq %r11
23350+ popq %rax
23351+#endif
23352+ .endm
23353+
23354+#ifdef CONFIG_PAX_MEMORY_UDEREF
23355+ENTRY(pax_enter_kernel_user)
23356+ pushq %rdi
23357+ pushq %rbx
23358+
23359+#ifdef CONFIG_PARAVIRT
23360+ PV_SAVE_REGS(CLBR_RDI)
23361+#endif
23362+
23363+ 661: jmp 111f
23364+ .pushsection .altinstr_replacement, "a"
23365+ 662: ASM_NOP2
23366+ .popsection
23367+ .pushsection .altinstructions, "a"
23368+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23369+ .popsection
23370+ GET_CR3_INTO_RDI
23371+ cmp $1,%dil
23372+ jnz 4f
23373+ sub $4097,%rdi
23374+ bts $63,%rdi
23375+ SET_RDI_INTO_CR3
23376+ jmp 3f
23377+111:
23378+
23379+ GET_CR3_INTO_RDI
23380+ mov %rdi,%rbx
23381+ add $__START_KERNEL_map,%rbx
23382+ sub phys_base(%rip),%rbx
23383+
23384+#ifdef CONFIG_PARAVIRT
23385+ cmpl $0, pv_info+PARAVIRT_enabled
23386+ jz 1f
23387+ pushq %rdi
23388+ i = 0
23389+ .rept USER_PGD_PTRS
23390+ mov i*8(%rbx),%rsi
23391+ mov $0,%sil
23392+ lea i*8(%rbx),%rdi
23393+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23394+ i = i + 1
23395+ .endr
23396+ popq %rdi
23397+ jmp 2f
23398+1:
23399+#endif
23400+
23401+ i = 0
23402+ .rept USER_PGD_PTRS
23403+ movb $0,i*8(%rbx)
23404+ i = i + 1
23405+ .endr
23406+
23407+2: SET_RDI_INTO_CR3
23408+
23409+#ifdef CONFIG_PAX_KERNEXEC
23410+ GET_CR0_INTO_RDI
23411+ bts $16,%rdi
23412+ SET_RDI_INTO_CR0
23413+#endif
23414+
23415+3:
23416+
23417+#ifdef CONFIG_PARAVIRT
23418+ PV_RESTORE_REGS(CLBR_RDI)
23419+#endif
23420+
23421+ popq %rbx
23422+ popq %rdi
23423+ pax_force_retaddr
23424+ retq
23425+4: ud2
23426+ENDPROC(pax_enter_kernel_user)
23427+
23428+ENTRY(pax_exit_kernel_user)
23429+ pushq %rdi
23430+ pushq %rbx
23431+
23432+#ifdef CONFIG_PARAVIRT
23433+ PV_SAVE_REGS(CLBR_RDI)
23434+#endif
23435+
23436+ GET_CR3_INTO_RDI
23437+ 661: jmp 1f
23438+ .pushsection .altinstr_replacement, "a"
23439+ 662: ASM_NOP2
23440+ .popsection
23441+ .pushsection .altinstructions, "a"
23442+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23443+ .popsection
23444+ cmp $0,%dil
23445+ jnz 3f
23446+ add $4097,%rdi
23447+ bts $63,%rdi
23448+ SET_RDI_INTO_CR3
23449+ jmp 2f
23450+1:
23451+
23452+ mov %rdi,%rbx
23453+
23454+#ifdef CONFIG_PAX_KERNEXEC
23455+ GET_CR0_INTO_RDI
23456+ btr $16,%rdi
23457+ jnc 3f
23458+ SET_RDI_INTO_CR0
23459+#endif
23460+
23461+ add $__START_KERNEL_map,%rbx
23462+ sub phys_base(%rip),%rbx
23463+
23464+#ifdef CONFIG_PARAVIRT
23465+ cmpl $0, pv_info+PARAVIRT_enabled
23466+ jz 1f
23467+ i = 0
23468+ .rept USER_PGD_PTRS
23469+ mov i*8(%rbx),%rsi
23470+ mov $0x67,%sil
23471+ lea i*8(%rbx),%rdi
23472+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23473+ i = i + 1
23474+ .endr
23475+ jmp 2f
23476+1:
23477+#endif
23478+
23479+ i = 0
23480+ .rept USER_PGD_PTRS
23481+ movb $0x67,i*8(%rbx)
23482+ i = i + 1
23483+ .endr
23484+2:
23485+
23486+#ifdef CONFIG_PARAVIRT
23487+ PV_RESTORE_REGS(CLBR_RDI)
23488+#endif
23489+
23490+ popq %rbx
23491+ popq %rdi
23492+ pax_force_retaddr
23493+ retq
23494+3: ud2
23495+ENDPROC(pax_exit_kernel_user)
23496+#endif
23497+
23498+ .macro pax_enter_kernel_nmi
23499+ pax_set_fptr_mask
23500+
23501+#ifdef CONFIG_PAX_KERNEXEC
23502+ GET_CR0_INTO_RDI
23503+ bts $16,%rdi
23504+ jc 110f
23505+ SET_RDI_INTO_CR0
23506+ or $2,%ebx
23507+110:
23508+#endif
23509+
23510+#ifdef CONFIG_PAX_MEMORY_UDEREF
23511+ 661: jmp 111f
23512+ .pushsection .altinstr_replacement, "a"
23513+ 662: ASM_NOP2
23514+ .popsection
23515+ .pushsection .altinstructions, "a"
23516+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23517+ .popsection
23518+ GET_CR3_INTO_RDI
23519+ cmp $0,%dil
23520+ jz 111f
23521+ sub $4097,%rdi
23522+ or $4,%ebx
23523+ bts $63,%rdi
23524+ SET_RDI_INTO_CR3
23525+ mov $__UDEREF_KERNEL_DS,%edi
23526+ mov %edi,%ss
23527+111:
23528+#endif
23529+ .endm
23530+
23531+ .macro pax_exit_kernel_nmi
23532+#ifdef CONFIG_PAX_KERNEXEC
23533+ btr $1,%ebx
23534+ jnc 110f
23535+ GET_CR0_INTO_RDI
23536+ btr $16,%rdi
23537+ SET_RDI_INTO_CR0
23538+110:
23539+#endif
23540+
23541+#ifdef CONFIG_PAX_MEMORY_UDEREF
23542+ btr $2,%ebx
23543+ jnc 111f
23544+ GET_CR3_INTO_RDI
23545+ add $4097,%rdi
23546+ bts $63,%rdi
23547+ SET_RDI_INTO_CR3
23548+ mov $__KERNEL_DS,%edi
23549+ mov %edi,%ss
23550+111:
23551+#endif
23552+ .endm
23553+
23554+ .macro pax_erase_kstack
23555+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23556+ call pax_erase_kstack
23557+#endif
23558+ .endm
23559+
23560+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23561+ENTRY(pax_erase_kstack)
23562+ pushq %rdi
23563+ pushq %rcx
23564+ pushq %rax
23565+ pushq %r11
23566+
23567+ GET_THREAD_INFO(%r11)
23568+ mov TI_lowest_stack(%r11), %rdi
23569+ mov $-0xBEEF, %rax
23570+ std
23571+
23572+1: mov %edi, %ecx
23573+ and $THREAD_SIZE_asm - 1, %ecx
23574+ shr $3, %ecx
23575+ repne scasq
23576+ jecxz 2f
23577+
23578+ cmp $2*8, %ecx
23579+ jc 2f
23580+
23581+ mov $2*8, %ecx
23582+ repe scasq
23583+ jecxz 2f
23584+ jne 1b
23585+
23586+2: cld
23587+ or $2*8, %rdi
23588+ mov %esp, %ecx
23589+ sub %edi, %ecx
23590+
23591+ cmp $THREAD_SIZE_asm, %rcx
23592+ jb 3f
23593+ ud2
23594+3:
23595+
23596+ shr $3, %ecx
23597+ rep stosq
23598+
23599+ mov TI_task_thread_sp0(%r11), %rdi
23600+ sub $256, %rdi
23601+ mov %rdi, TI_lowest_stack(%r11)
23602+
23603+ popq %r11
23604+ popq %rax
23605+ popq %rcx
23606+ popq %rdi
23607+ pax_force_retaddr
23608+ ret
23609+ENDPROC(pax_erase_kstack)
23610+#endif
23611
23612 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
23613 #ifdef CONFIG_TRACE_IRQFLAGS
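A plain-C sketch of the CR3 arithmetic in the UDEREF paths above, assuming the layout the hunks imply: each CPU's kernel PGD page is immediately followed by its user PGD page, the low CR3 bits carry the PCID (0 = kernel, 1 = user, cf. the cmp $0,%dil / cmp $1,%dil checks), and bts $63 sets the no-flush hint, so +/-4097 moves one page and one PCID step at once. Addresses are illustrative.

    #include <assert.h>
    #include <stdint.h>

    #define CR3_NOFLUSH (1ULL << 63)

    static uint64_t kernel_cr3(uint64_t ucr3)
    {       /* "sub $4097,%rdi; bts $63,%rdi" in pax_enter_kernel_user */
            return (ucr3 - 4097) | CR3_NOFLUSH;
    }

    static uint64_t user_cr3(uint64_t kcr3)
    {       /* "add $4097,%rdi; bts $63,%rdi" in pax_exit_kernel_user */
            return ((kcr3 & ~CR3_NOFLUSH) + 4097) | CR3_NOFLUSH;
    }

    int main(void)
    {
            uint64_t kpgd = 0x1b54000;              /* illustrative address */
            uint64_t u    = (kpgd + 4096) | 1;      /* user PGD page, PCID 1 */

            assert((kernel_cr3(u) & ~CR3_NOFLUSH) == kpgd); /* PCID 0 */
            assert(user_cr3(kernel_cr3(u)) == (u | CR3_NOFLUSH));
            return 0;
    }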
23614@@ -117,7 +544,7 @@ ENDPROC(native_usergs_sysret64)
23615 .endm
23616
23617 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
23618- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
23619+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
23620 jnc 1f
23621 TRACE_IRQS_ON_DEBUG
23622 1:
23623@@ -155,27 +582,6 @@ ENDPROC(native_usergs_sysret64)
23624 movq \tmp,R11+\offset(%rsp)
23625 .endm
23626
23627- .macro FAKE_STACK_FRAME child_rip
23628- /* push in order ss, rsp, eflags, cs, rip */
23629- xorl %eax, %eax
23630- pushq_cfi $__KERNEL_DS /* ss */
23631- /*CFI_REL_OFFSET ss,0*/
23632- pushq_cfi %rax /* rsp */
23633- CFI_REL_OFFSET rsp,0
23634- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
23635- /*CFI_REL_OFFSET rflags,0*/
23636- pushq_cfi $__KERNEL_CS /* cs */
23637- /*CFI_REL_OFFSET cs,0*/
23638- pushq_cfi \child_rip /* rip */
23639- CFI_REL_OFFSET rip,0
23640- pushq_cfi %rax /* orig rax */
23641- .endm
23642-
23643- .macro UNFAKE_STACK_FRAME
23644- addq $8*6, %rsp
23645- CFI_ADJUST_CFA_OFFSET -(6*8)
23646- .endm
23647-
23648 /*
23649 * initial frame state for interrupts (and exceptions without error code)
23650 */
23651@@ -241,25 +647,26 @@ ENDPROC(native_usergs_sysret64)
23652 /* save partial stack frame */
23653 .macro SAVE_ARGS_IRQ
23654 cld
23655- /* start from rbp in pt_regs and jump over */
23656- movq_cfi rdi, (RDI-RBP)
23657- movq_cfi rsi, (RSI-RBP)
23658- movq_cfi rdx, (RDX-RBP)
23659- movq_cfi rcx, (RCX-RBP)
23660- movq_cfi rax, (RAX-RBP)
23661- movq_cfi r8, (R8-RBP)
23662- movq_cfi r9, (R9-RBP)
23663- movq_cfi r10, (R10-RBP)
23664- movq_cfi r11, (R11-RBP)
23665+ /* start from r15 in pt_regs and jump over */
23666+ movq_cfi rdi, RDI
23667+ movq_cfi rsi, RSI
23668+ movq_cfi rdx, RDX
23669+ movq_cfi rcx, RCX
23670+ movq_cfi rax, RAX
23671+ movq_cfi r8, R8
23672+ movq_cfi r9, R9
23673+ movq_cfi r10, R10
23674+ movq_cfi r11, R11
23675+ movq_cfi r12, R12
23676
23677 /* Save rbp so that we can unwind from get_irq_regs() */
23678- movq_cfi rbp, 0
23679+ movq_cfi rbp, RBP
23680
23681 /* Save previous stack value */
23682 movq %rsp, %rsi
23683
23684- leaq -RBP(%rsp),%rdi /* arg1 for handler */
23685- testl $3, CS-RBP(%rsi)
23686+ movq %rsp,%rdi /* arg1 for handler */
23687+ testb $3, CS(%rsi)
23688 je 1f
23689 SWAPGS
23690 /*
23691@@ -279,6 +686,18 @@ ENDPROC(native_usergs_sysret64)
23692 0x06 /* DW_OP_deref */, \
23693 0x08 /* DW_OP_const1u */, SS+8-RBP, \
23694 0x22 /* DW_OP_plus */
23695+
23696+#ifdef CONFIG_PAX_MEMORY_UDEREF
23697+ testb $3, CS(%rdi)
23698+ jnz 1f
23699+ pax_enter_kernel
23700+ jmp 2f
23701+1: pax_enter_kernel_user
23702+2:
23703+#else
23704+ pax_enter_kernel
23705+#endif
23706+
23707 /* We entered an interrupt context - irqs are off: */
23708 TRACE_IRQS_OFF
23709 .endm
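The testl-to-testb conversions and the testb $3, CS(...) checks above all test the same thing: the low two bits of the saved %cs selector hold the privilege level, so a nonzero result means the interrupt arrived from ring-3 user code and the _user variant of the PaX hook must run. testb suffices because both bits sit in the selector's low byte. A sketch:

    #include <stdbool.h>
    #include <stdio.h>

    static bool saved_cs_is_user(unsigned short cs)
    {
            return (cs & 3) != 0;   /* RPL/CPL field */
    }

    int main(void)
    {
            printf("0x10 -> %d (kernel)\n", saved_cs_is_user(0x10)); /* e.g. __KERNEL_CS */
            printf("0x33 -> %d (user)\n",   saved_cs_is_user(0x33)); /* e.g. a DPL-3 selector */
            return 0;
    }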
23710@@ -308,9 +727,52 @@ ENTRY(save_paranoid)
23711 js 1f /* negative -> in kernel */
23712 SWAPGS
23713 xorl %ebx,%ebx
23714-1: ret
23715+1:
23716+#ifdef CONFIG_PAX_MEMORY_UDEREF
23717+ testb $3, CS+8(%rsp)
23718+ jnz 1f
23719+ pax_enter_kernel
23720+ jmp 2f
23721+1: pax_enter_kernel_user
23722+2:
23723+#else
23724+ pax_enter_kernel
23725+#endif
23726+ pax_force_retaddr
23727+ ret
23728 CFI_ENDPROC
23729-END(save_paranoid)
23730+ENDPROC(save_paranoid)
23731+
23732+ENTRY(save_paranoid_nmi)
23733+ XCPT_FRAME 1 RDI+8
23734+ cld
23735+ movq_cfi rdi, RDI+8
23736+ movq_cfi rsi, RSI+8
23737+ movq_cfi rdx, RDX+8
23738+ movq_cfi rcx, RCX+8
23739+ movq_cfi rax, RAX+8
23740+ movq_cfi r8, R8+8
23741+ movq_cfi r9, R9+8
23742+ movq_cfi r10, R10+8
23743+ movq_cfi r11, R11+8
23744+ movq_cfi rbx, RBX+8
23745+ movq_cfi rbp, RBP+8
23746+ movq_cfi r12, R12+8
23747+ movq_cfi r13, R13+8
23748+ movq_cfi r14, R14+8
23749+ movq_cfi r15, R15+8
23750+ movl $1,%ebx
23751+ movl $MSR_GS_BASE,%ecx
23752+ rdmsr
23753+ testl %edx,%edx
23754+ js 1f /* negative -> in kernel */
23755+ SWAPGS
23756+ xorl %ebx,%ebx
23757+1: pax_enter_kernel_nmi
23758+ pax_force_retaddr
23759+ ret
23760+ CFI_ENDPROC
23761+ENDPROC(save_paranoid_nmi)
23762
23763 /*
23764 * A newly forked process directly context switches into this address.
23765@@ -331,7 +793,7 @@ ENTRY(ret_from_fork)
23766
23767 RESTORE_REST
23768
23769- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23770+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23771 jz 1f
23772
23773 /*
23774@@ -344,15 +806,13 @@ ENTRY(ret_from_fork)
23775 jmp int_ret_from_sys_call
23776
23777 1:
23778- subq $REST_SKIP, %rsp # leave space for volatiles
23779- CFI_ADJUST_CFA_OFFSET REST_SKIP
23780 movq %rbp, %rdi
23781 call *%rbx
23782 movl $0, RAX(%rsp)
23783 RESTORE_REST
23784 jmp int_ret_from_sys_call
23785 CFI_ENDPROC
23786-END(ret_from_fork)
23787+ENDPROC(ret_from_fork)
23788
23789 /*
23790 * System call entry. Up to 6 arguments in registers are supported.
23791@@ -389,7 +849,7 @@ END(ret_from_fork)
23792 ENTRY(system_call)
23793 CFI_STARTPROC simple
23794 CFI_SIGNAL_FRAME
23795- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
23796+ CFI_DEF_CFA rsp,0
23797 CFI_REGISTER rip,rcx
23798 /*CFI_REGISTER rflags,r11*/
23799 SWAPGS_UNSAFE_STACK
23800@@ -402,16 +862,23 @@ GLOBAL(system_call_after_swapgs)
23801
23802 movq %rsp,PER_CPU_VAR(old_rsp)
23803 movq PER_CPU_VAR(kernel_stack),%rsp
23804+ SAVE_ARGS 8*6, 0, rax_enosys=1
23805+ pax_enter_kernel_user
23806+
23807+#ifdef CONFIG_PAX_RANDKSTACK
23808+ pax_erase_kstack
23809+#endif
23810+
23811 /*
23812 * No need to follow this irqs off/on section - it's straight
23813 * and short:
23814 */
23815 ENABLE_INTERRUPTS(CLBR_NONE)
23816- SAVE_ARGS 8, 0, rax_enosys=1
23817 movq_cfi rax,(ORIG_RAX-ARGOFFSET)
23818 movq %rcx,RIP-ARGOFFSET(%rsp)
23819 CFI_REL_OFFSET rip,RIP-ARGOFFSET
23820- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23821+ GET_THREAD_INFO(%rcx)
23822+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23823 jnz tracesys
23824 system_call_fastpath:
23825 #if __SYSCALL_MASK == ~0
23826@@ -435,10 +902,13 @@ sysret_check:
23827 LOCKDEP_SYS_EXIT
23828 DISABLE_INTERRUPTS(CLBR_NONE)
23829 TRACE_IRQS_OFF
23830- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
23831+ GET_THREAD_INFO(%rcx)
23832+ movl TI_flags(%rcx),%edx
23833 andl %edi,%edx
23834 jnz sysret_careful
23835 CFI_REMEMBER_STATE
23836+ pax_exit_kernel_user
23837+ pax_erase_kstack
23838 /*
23839 * sysretq will re-enable interrupts:
23840 */
23841@@ -497,12 +967,15 @@ sysret_audit:
23842
23843 /* Do syscall tracing */
23844 tracesys:
23845- leaq -REST_SKIP(%rsp), %rdi
23846+ movq %rsp, %rdi
23847 movq $AUDIT_ARCH_X86_64, %rsi
23848 call syscall_trace_enter_phase1
23849 test %rax, %rax
23850 jnz tracesys_phase2 /* if needed, run the slow path */
23851- LOAD_ARGS 0 /* else restore clobbered regs */
23852+
23853+ pax_erase_kstack
23854+
23855+ LOAD_ARGS /* else restore clobbered regs */
23856 jmp system_call_fastpath /* and return to the fast path */
23857
23858 tracesys_phase2:
23859@@ -513,12 +986,14 @@ tracesys_phase2:
23860 movq %rax,%rdx
23861 call syscall_trace_enter_phase2
23862
23863+ pax_erase_kstack
23864+
23865 /*
23866 * Reload arg registers from stack in case ptrace changed them.
23867 * We don't reload %rax because syscall_trace_enter_phase2() returned
23868 * the value it wants us to use in the table lookup.
23869 */
23870- LOAD_ARGS ARGOFFSET, 1
23871+ LOAD_ARGS 1
23872 RESTORE_REST
23873 #if __SYSCALL_MASK == ~0
23874 cmpq $__NR_syscall_max,%rax
23875@@ -548,7 +1023,9 @@ GLOBAL(int_with_check)
23876 andl %edi,%edx
23877 jnz int_careful
23878 andl $~TS_COMPAT,TI_status(%rcx)
23879- jmp retint_swapgs
23880+ pax_exit_kernel_user
23881+ pax_erase_kstack
23882+ jmp retint_swapgs_pax
23883
23884 /* Either reschedule or signal or syscall exit tracking needed. */
23885 /* First do a reschedule test. */
23886@@ -594,7 +1071,7 @@ int_restore_rest:
23887 TRACE_IRQS_OFF
23888 jmp int_with_check
23889 CFI_ENDPROC
23890-END(system_call)
23891+ENDPROC(system_call)
23892
23893 .macro FORK_LIKE func
23894 ENTRY(stub_\func)
23895@@ -607,9 +1084,10 @@ ENTRY(stub_\func)
23896 DEFAULT_FRAME 0 8 /* offset 8: return address */
23897 call sys_\func
23898 RESTORE_TOP_OF_STACK %r11, 8
23899- ret $REST_SKIP /* pop extended registers */
23900+ pax_force_retaddr
23901+ ret
23902 CFI_ENDPROC
23903-END(stub_\func)
23904+ENDPROC(stub_\func)
23905 .endm
23906
23907 .macro FIXED_FRAME label,func
23908@@ -619,9 +1097,10 @@ ENTRY(\label)
23909 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
23910 call \func
23911 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
23912+ pax_force_retaddr
23913 ret
23914 CFI_ENDPROC
23915-END(\label)
23916+ENDPROC(\label)
23917 .endm
23918
23919 FORK_LIKE clone
23920@@ -629,19 +1108,6 @@ END(\label)
23921 FORK_LIKE vfork
23922 FIXED_FRAME stub_iopl, sys_iopl
23923
23924-ENTRY(ptregscall_common)
23925- DEFAULT_FRAME 1 8 /* offset 8: return address */
23926- RESTORE_TOP_OF_STACK %r11, 8
23927- movq_cfi_restore R15+8, r15
23928- movq_cfi_restore R14+8, r14
23929- movq_cfi_restore R13+8, r13
23930- movq_cfi_restore R12+8, r12
23931- movq_cfi_restore RBP+8, rbp
23932- movq_cfi_restore RBX+8, rbx
23933- ret $REST_SKIP /* pop extended registers */
23934- CFI_ENDPROC
23935-END(ptregscall_common)
23936-
23937 ENTRY(stub_execve)
23938 CFI_STARTPROC
23939 addq $8, %rsp
23940@@ -653,7 +1119,7 @@ ENTRY(stub_execve)
23941 RESTORE_REST
23942 jmp int_ret_from_sys_call
23943 CFI_ENDPROC
23944-END(stub_execve)
23945+ENDPROC(stub_execve)
23946
23947 ENTRY(stub_execveat)
23948 CFI_STARTPROC
23949@@ -667,7 +1133,7 @@ ENTRY(stub_execveat)
23950 RESTORE_REST
23951 jmp int_ret_from_sys_call
23952 CFI_ENDPROC
23953-END(stub_execveat)
23954+ENDPROC(stub_execveat)
23955
23956 /*
23957 * sigreturn is special because it needs to restore all registers on return.
23958@@ -684,7 +1150,7 @@ ENTRY(stub_rt_sigreturn)
23959 RESTORE_REST
23960 jmp int_ret_from_sys_call
23961 CFI_ENDPROC
23962-END(stub_rt_sigreturn)
23963+ENDPROC(stub_rt_sigreturn)
23964
23965 #ifdef CONFIG_X86_X32_ABI
23966 ENTRY(stub_x32_rt_sigreturn)
23967@@ -698,7 +1164,7 @@ ENTRY(stub_x32_rt_sigreturn)
23968 RESTORE_REST
23969 jmp int_ret_from_sys_call
23970 CFI_ENDPROC
23971-END(stub_x32_rt_sigreturn)
23972+ENDPROC(stub_x32_rt_sigreturn)
23973
23974 ENTRY(stub_x32_execve)
23975 CFI_STARTPROC
23976@@ -763,7 +1229,7 @@ vector=vector+1
23977 2: jmp common_interrupt
23978 .endr
23979 CFI_ENDPROC
23980-END(irq_entries_start)
23981+ENDPROC(irq_entries_start)
23982
23983 .previous
23984 END(interrupt)
23985@@ -780,8 +1246,8 @@ END(interrupt)
23986 /* 0(%rsp): ~(interrupt number) */
23987 .macro interrupt func
23988 /* reserve pt_regs for scratch regs and rbp */
23989- subq $ORIG_RAX-RBP, %rsp
23990- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
23991+ subq $ORIG_RAX, %rsp
23992+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
23993 SAVE_ARGS_IRQ
23994 call \func
23995 .endm
23996@@ -804,14 +1270,14 @@ ret_from_intr:
23997
23998 /* Restore saved previous stack */
23999 popq %rsi
24000- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
24001- leaq ARGOFFSET-RBP(%rsi), %rsp
24002+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
24003+ movq %rsi, %rsp
24004 CFI_DEF_CFA_REGISTER rsp
24005- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
24006+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
24007
24008 exit_intr:
24009 GET_THREAD_INFO(%rcx)
24010- testl $3,CS-ARGOFFSET(%rsp)
24011+ testb $3,CS-ARGOFFSET(%rsp)
24012 je retint_kernel
24013
24014 /* Interrupt came from user space */
24015@@ -833,12 +1299,35 @@ retint_swapgs: /* return to user-space */
24016 * The iretq could re-enable interrupts:
24017 */
24018 DISABLE_INTERRUPTS(CLBR_ANY)
24019+ pax_exit_kernel_user
24020+retint_swapgs_pax:
24021 TRACE_IRQS_IRETQ
24022 SWAPGS
24023 jmp restore_args
24024
24025 retint_restore_args: /* return to kernel space */
24026 DISABLE_INTERRUPTS(CLBR_ANY)
24027+ pax_exit_kernel
24028+
24029+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
24030+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
24031+ * namely calling EFI runtime services with a phys mapping. We're
24032+	 * starting off with NOPs and patching in the real instrumentation
24033+	 * (BTS/OR) before starting any userland process, even before bringing
24034+	 * up the APs.
24035+ */
24036+ .pushsection .altinstr_replacement, "a"
24037+ 601: pax_force_retaddr (RIP-ARGOFFSET)
24038+ 602:
24039+ .popsection
24040+ 603: .fill 602b-601b, 1, 0x90
24041+ .pushsection .altinstructions, "a"
24042+ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
24043+ .popsection
24044+#else
24045+ pax_force_retaddr (RIP-ARGOFFSET)
24046+#endif
24047+
24048 /*
24049 * The iretq could re-enable interrupts:
24050 */
24051@@ -876,15 +1365,15 @@ native_irq_return_ldt:
24052 SWAPGS
24053 movq PER_CPU_VAR(espfix_waddr),%rdi
24054 movq %rax,(0*8)(%rdi) /* RAX */
24055- movq (2*8)(%rsp),%rax /* RIP */
24056+ movq (2*8 + RIP-RIP)(%rsp),%rax /* RIP */
24057 movq %rax,(1*8)(%rdi)
24058- movq (3*8)(%rsp),%rax /* CS */
24059+ movq (2*8 + CS-RIP)(%rsp),%rax /* CS */
24060 movq %rax,(2*8)(%rdi)
24061- movq (4*8)(%rsp),%rax /* RFLAGS */
24062+ movq (2*8 + EFLAGS-RIP)(%rsp),%rax /* RFLAGS */
24063 movq %rax,(3*8)(%rdi)
24064- movq (6*8)(%rsp),%rax /* SS */
24065+ movq (2*8 + SS-RIP)(%rsp),%rax /* SS */
24066 movq %rax,(5*8)(%rdi)
24067- movq (5*8)(%rsp),%rax /* RSP */
24068+ movq (2*8 + RSP-RIP)(%rsp),%rax /* RSP */
24069 movq %rax,(4*8)(%rdi)
24070 andl $0xffff0000,%eax
24071 popq_cfi %rdi
24072@@ -938,7 +1427,7 @@ ENTRY(retint_kernel)
24073 jmp exit_intr
24074 #endif
24075 CFI_ENDPROC
24076-END(common_interrupt)
24077+ENDPROC(common_interrupt)
24078
24079 /*
24080 * APIC interrupts.
24081@@ -952,7 +1441,7 @@ ENTRY(\sym)
24082 interrupt \do_sym
24083 jmp ret_from_intr
24084 CFI_ENDPROC
24085-END(\sym)
24086+ENDPROC(\sym)
24087 .endm
24088
24089 #ifdef CONFIG_TRACING
24090@@ -1025,7 +1514,7 @@ apicinterrupt IRQ_WORK_VECTOR \
24091 /*
24092 * Exception entry points.
24093 */
24094-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
24095+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
24096
24097 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
24098 ENTRY(\sym)
24099@@ -1076,6 +1565,12 @@ ENTRY(\sym)
24100 .endif
24101
24102 .if \shift_ist != -1
24103+#ifdef CONFIG_SMP
24104+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
24105+ lea init_tss(%r13), %r13
24106+#else
24107+ lea init_tss(%rip), %r13
24108+#endif
24109 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
24110 .endif
24111
24112@@ -1092,7 +1587,7 @@ ENTRY(\sym)
24113 .endif
24114
24115 CFI_ENDPROC
24116-END(\sym)
24117+ENDPROC(\sym)
24118 .endm
24119
24120 #ifdef CONFIG_TRACING
24121@@ -1133,9 +1628,10 @@ gs_change:
24122 2: mfence /* workaround */
24123 SWAPGS
24124 popfq_cfi
24125+ pax_force_retaddr
24126 ret
24127 CFI_ENDPROC
24128-END(native_load_gs_index)
24129+ENDPROC(native_load_gs_index)
24130
24131 _ASM_EXTABLE(gs_change,bad_gs)
24132 .section .fixup,"ax"
24133@@ -1163,9 +1659,10 @@ ENTRY(do_softirq_own_stack)
24134 CFI_DEF_CFA_REGISTER rsp
24135 CFI_ADJUST_CFA_OFFSET -8
24136 decl PER_CPU_VAR(irq_count)
24137+ pax_force_retaddr
24138 ret
24139 CFI_ENDPROC
24140-END(do_softirq_own_stack)
24141+ENDPROC(do_softirq_own_stack)
24142
24143 #ifdef CONFIG_XEN
24144 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
24145@@ -1203,7 +1700,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
24146 decl PER_CPU_VAR(irq_count)
24147 jmp error_exit
24148 CFI_ENDPROC
24149-END(xen_do_hypervisor_callback)
24150+ENDPROC(xen_do_hypervisor_callback)
24151
24152 /*
24153 * Hypervisor uses this for application faults while it executes.
24154@@ -1262,7 +1759,7 @@ ENTRY(xen_failsafe_callback)
24155 SAVE_ALL
24156 jmp error_exit
24157 CFI_ENDPROC
24158-END(xen_failsafe_callback)
24159+ENDPROC(xen_failsafe_callback)
24160
24161 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24162 xen_hvm_callback_vector xen_evtchn_do_upcall
24163@@ -1309,18 +1806,33 @@ ENTRY(paranoid_exit)
24164 DEFAULT_FRAME
24165 DISABLE_INTERRUPTS(CLBR_NONE)
24166 TRACE_IRQS_OFF_DEBUG
24167- testl %ebx,%ebx /* swapgs needed? */
24168+ testl $1,%ebx /* swapgs needed? */
24169 jnz paranoid_restore
24170- testl $3,CS(%rsp)
24171+ testb $3,CS(%rsp)
24172 jnz paranoid_userspace
24173+#ifdef CONFIG_PAX_MEMORY_UDEREF
24174+ pax_exit_kernel
24175+ TRACE_IRQS_IRETQ 0
24176+ SWAPGS_UNSAFE_STACK
24177+ RESTORE_ALL 8
24178+ pax_force_retaddr_bts
24179+ jmp irq_return
24180+#endif
24181 paranoid_swapgs:
24182+#ifdef CONFIG_PAX_MEMORY_UDEREF
24183+ pax_exit_kernel_user
24184+#else
24185+ pax_exit_kernel
24186+#endif
24187 TRACE_IRQS_IRETQ 0
24188 SWAPGS_UNSAFE_STACK
24189 RESTORE_ALL 8
24190 jmp irq_return
24191 paranoid_restore:
24192+ pax_exit_kernel
24193 TRACE_IRQS_IRETQ_DEBUG 0
24194 RESTORE_ALL 8
24195+ pax_force_retaddr_bts
24196 jmp irq_return
24197 paranoid_userspace:
24198 GET_THREAD_INFO(%rcx)
24199@@ -1349,7 +1861,7 @@ paranoid_schedule:
24200 TRACE_IRQS_OFF
24201 jmp paranoid_userspace
24202 CFI_ENDPROC
24203-END(paranoid_exit)
24204+ENDPROC(paranoid_exit)
24205
24206 /*
24207 * Exception entry point. This expects an error code/orig_rax on the stack.
24208@@ -1376,12 +1888,23 @@ ENTRY(error_entry)
24209 movq %r14, R14+8(%rsp)
24210 movq %r15, R15+8(%rsp)
24211 xorl %ebx,%ebx
24212- testl $3,CS+8(%rsp)
24213+ testb $3,CS+8(%rsp)
24214 je error_kernelspace
24215 error_swapgs:
24216 SWAPGS
24217 error_sti:
24218+#ifdef CONFIG_PAX_MEMORY_UDEREF
24219+ testb $3, CS+8(%rsp)
24220+ jnz 1f
24221+ pax_enter_kernel
24222+ jmp 2f
24223+1: pax_enter_kernel_user
24224+2:
24225+#else
24226+ pax_enter_kernel
24227+#endif
24228 TRACE_IRQS_OFF
24229+ pax_force_retaddr
24230 ret
24231
24232 /*
24233@@ -1416,7 +1939,7 @@ error_bad_iret:
24234 decl %ebx /* Return to usergs */
24235 jmp error_sti
24236 CFI_ENDPROC
24237-END(error_entry)
24238+ENDPROC(error_entry)
24239
24240
24241 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24242@@ -1427,7 +1950,7 @@ ENTRY(error_exit)
24243 DISABLE_INTERRUPTS(CLBR_NONE)
24244 TRACE_IRQS_OFF
24245 GET_THREAD_INFO(%rcx)
24246- testl %eax,%eax
24247+ testl $1,%eax
24248 jne retint_kernel
24249 LOCKDEP_SYS_EXIT_IRQ
24250 movl TI_flags(%rcx),%edx
24251@@ -1436,7 +1959,7 @@ ENTRY(error_exit)
24252 jnz retint_careful
24253 jmp retint_swapgs
24254 CFI_ENDPROC
24255-END(error_exit)
24256+ENDPROC(error_exit)
24257
24258 /*
24259 * Test if a given stack is an NMI stack or not.
24260@@ -1494,9 +2017,11 @@ ENTRY(nmi)
24261 * If %cs was not the kernel segment, then the NMI triggered in user
24262 * space, which means it is definitely not nested.
24263 */
24264+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24265+ je 1f
24266 cmpl $__KERNEL_CS, 16(%rsp)
24267 jne first_nmi
24268-
24269+1:
24270 /*
24271 * Check the special variable on the stack to see if NMIs are
24272 * executing.
24273@@ -1530,8 +2055,7 @@ nested_nmi:
24274
24275 1:
24276 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
24277- leaq -1*8(%rsp), %rdx
24278- movq %rdx, %rsp
24279+ subq $8, %rsp
24280 CFI_ADJUST_CFA_OFFSET 1*8
24281 leaq -10*8(%rsp), %rdx
24282 pushq_cfi $__KERNEL_DS
24283@@ -1549,6 +2073,7 @@ nested_nmi_out:
24284 CFI_RESTORE rdx
24285
24286 /* No need to check faults here */
24287+# pax_force_retaddr_bts
24288 INTERRUPT_RETURN
24289
24290 CFI_RESTORE_STATE
24291@@ -1645,13 +2170,13 @@ end_repeat_nmi:
24292 subq $ORIG_RAX-R15, %rsp
24293 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24294 /*
24295- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24296+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24297 * as we should not be calling schedule in NMI context.
24298 * Even with normal interrupts enabled. An NMI should not be
24299 * setting NEED_RESCHED or anything that normal interrupts and
24300 * exceptions might do.
24301 */
24302- call save_paranoid
24303+ call save_paranoid_nmi
24304 DEFAULT_FRAME 0
24305
24306 /*
24307@@ -1661,9 +2186,9 @@ end_repeat_nmi:
24308 * NMI itself takes a page fault, the page fault that was preempted
24309 * will read the information from the NMI page fault and not the
24310 * origin fault. Save it off and restore it if it changes.
24311- * Use the r12 callee-saved register.
24312+ * Use the r13 callee-saved register.
24313 */
24314- movq %cr2, %r12
24315+ movq %cr2, %r13
24316
24317 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24318 movq %rsp,%rdi
24319@@ -1672,29 +2197,34 @@ end_repeat_nmi:
24320
24321 /* Did the NMI take a page fault? Restore cr2 if it did */
24322 movq %cr2, %rcx
24323- cmpq %rcx, %r12
24324+ cmpq %rcx, %r13
24325 je 1f
24326- movq %r12, %cr2
24327+ movq %r13, %cr2
24328 1:
24329
24330- testl %ebx,%ebx /* swapgs needed? */
24331+ testl $1,%ebx /* swapgs needed? */
24332 jnz nmi_restore
24333 nmi_swapgs:
24334 SWAPGS_UNSAFE_STACK
24335 nmi_restore:
24336+ pax_exit_kernel_nmi
24337 /* Pop the extra iret frame at once */
24338 RESTORE_ALL 6*8
24339+ testb $3, 8(%rsp)
24340+ jnz 1f
24341+ pax_force_retaddr_bts
24342+1:
24343
24344 /* Clear the NMI executing stack variable */
24345 movq $0, 5*8(%rsp)
24346 jmp irq_return
24347 CFI_ENDPROC
24348-END(nmi)
24349+ENDPROC(nmi)
24350
24351 ENTRY(ignore_sysret)
24352 CFI_STARTPROC
24353 mov $-ENOSYS,%eax
24354 sysret
24355 CFI_ENDPROC
24356-END(ignore_sysret)
24357+ENDPROC(ignore_sysret)
24358
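A user-space model of the scan in the STACKLEAK hunk above (pax_erase_kstack): walk down from the deepest stack pointer recorded so far until a run of 16 consecutive poison qwords is found ("mov $2*8,%ecx; repe scasq"), then re-poison everything from there up to the live stack pointer ("rep stosq" with %rax still holding the poison). This is a simplified sketch; an ordinary array stands in for the kernel stack.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define POISON  ((uint64_t)-0xBEEF)     /* "mov $-0xBEEF, %rax" */
    #define RUN     16                      /* 2*8 consecutive qwords */

    static uint64_t *find_poison_run(uint64_t *bottom, uint64_t *from)
    {
            size_t run = 0;

            for (uint64_t *p = from; p > bottom; p--) {
                    run = (p[-1] == POISON) ? run + 1 : 0;
                    if (run == RUN)
                            return p - 1;   /* lowest qword of the run */
            }
            return bottom;                  /* degenerate case: whole stack */
    }

    static void erase_kstack(uint64_t *bottom, uint64_t *lowest, uint64_t *sp)
    {
            for (uint64_t *p = find_poison_run(bottom, lowest); p < sp; p++)
                    *p = POISON;
    }

    int main(void)
    {
            uint64_t stack[512];

            for (size_t i = 0; i < 512; i++)
                    stack[i] = POISON;
            for (size_t i = 100; i < 200; i++)      /* simulate used stack */
                    stack[i] = i;

            /* deepest point seen was stack[100]; current sp at stack[300] */
            erase_kstack(stack, &stack[100], &stack[300]);
            for (size_t i = 0; i < 512; i++)
                    assert(stack[i] == POISON);
            return 0;
    }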
24359diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24360index f5d0730..5bce89c 100644
24361--- a/arch/x86/kernel/espfix_64.c
24362+++ b/arch/x86/kernel/espfix_64.c
24363@@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex);
24364 #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
24365 static void *espfix_pages[ESPFIX_MAX_PAGES];
24366
24367-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
24368- __aligned(PAGE_SIZE);
24369+static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata;
24370
24371 static unsigned int page_random, slot_random;
24372
24373@@ -122,11 +121,17 @@ static void init_espfix_random(void)
24374 void __init init_espfix_bsp(void)
24375 {
24376 pgd_t *pgd_p;
24377+ unsigned long index = pgd_index(ESPFIX_BASE_ADDR);
24378
24379 /* Install the espfix pud into the kernel page directory */
24380- pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
24381+ pgd_p = &init_level4_pgt[index];
24382 pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
24383
24384+#ifdef CONFIG_PAX_PER_CPU_PGD
24385+ clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1);
24386+ clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1);
24387+#endif
24388+
24389 /* Randomize the locations */
24390 init_espfix_random();
24391
24392@@ -194,7 +199,7 @@ void init_espfix_ap(void)
24393 set_pte(&pte_p[n*PTE_STRIDE], pte);
24394
24395 /* Job is done for this CPU and any CPU which shares this page */
24396- ACCESS_ONCE(espfix_pages[page]) = stack_page;
24397+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
24398
24399 unlock_done:
24400 mutex_unlock(&espfix_init_mutex);
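Why the PAX_PER_CPU_PGD hunk above clones the espfix slot: with per-CPU kernel/user page directories, an entry installed only in the reference directory (swapper_pg_dir) would be invisible to a CPU running on its own PGD pair. clone_pgd_range() is essentially a memcpy over pgd_t slots; a minimal sketch with uint64_t standing in for pgd_t and an illustrative slot index:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    typedef uint64_t pgd_t;
    #define PTRS_PER_PGD 512

    static void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
    {
            memcpy(dst, src, count * sizeof(pgd_t));
    }

    int main(void)
    {
            static pgd_t swapper_pg_dir[PTRS_PER_PGD];
            static pgd_t cpu_kernel_pgd[PTRS_PER_PGD], cpu_user_pgd[PTRS_PER_PGD];
            unsigned long index = 0x1f0;            /* illustrative espfix slot */

            swapper_pg_dir[index] = 0x1b55000 | 0x63; /* new espfix pud entry */
            clone_pgd_range(cpu_kernel_pgd + index, swapper_pg_dir + index, 1);
            clone_pgd_range(cpu_user_pgd + index, swapper_pg_dir + index, 1);
            assert(cpu_kernel_pgd[index] == swapper_pg_dir[index]);
            assert(cpu_user_pgd[index] == swapper_pg_dir[index]);
            return 0;
    }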
24401diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24402index 8b7b0a5..2395f29 100644
24403--- a/arch/x86/kernel/ftrace.c
24404+++ b/arch/x86/kernel/ftrace.c
24405@@ -89,7 +89,7 @@ static unsigned long text_ip_addr(unsigned long ip)
24406 * kernel identity mapping to modify code.
24407 */
24408 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24409- ip = (unsigned long)__va(__pa_symbol(ip));
24410+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24411
24412 return ip;
24413 }
24414@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24415 {
24416 unsigned char replaced[MCOUNT_INSN_SIZE];
24417
24418+ ip = ktla_ktva(ip);
24419+
24420 /*
24421 * Note: Due to modules and __init, code can
24422 * disappear and change, we need to protect against faulting
24423@@ -230,7 +232,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
24424 unsigned char old[MCOUNT_INSN_SIZE];
24425 int ret;
24426
24427- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
24428+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
24429
24430 ftrace_update_func = ip;
24431 /* Make sure the breakpoints see the ftrace_update_func update */
24432@@ -311,7 +313,7 @@ static int add_break(unsigned long ip, const char *old)
24433 unsigned char replaced[MCOUNT_INSN_SIZE];
24434 unsigned char brk = BREAKPOINT_INSTRUCTION;
24435
24436- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
24437+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
24438 return -EFAULT;
24439
24440 /* Make sure it is what we expect it to be */
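Each ftrace hunk above routes reads of instruction bytes through ktla_ktva(). Under KERNEXEC the address the kernel executes from and the address used to access the same bytes as data can differ, and a plausible minimal model of the helper pair is an affine translation; the displacement below is purely illustrative, not the real layout.

    #include <assert.h>

    #define DELTA 0x10000000UL      /* made-up displacement for the demo */

    static unsigned long ktla_ktva(unsigned long addr) { return addr + DELTA; }
    static unsigned long ktva_ktla(unsigned long addr) { return addr - DELTA; }

    int main(void)
    {
            unsigned long ip = 0xc1000000UL;        /* illustrative text address */

            assert(ktva_ktla(ktla_ktva(ip)) == ip); /* the pair must invert */
            return 0;
    }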
24441diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
24442index eda1a86..8f6df48 100644
24443--- a/arch/x86/kernel/head64.c
24444+++ b/arch/x86/kernel/head64.c
24445@@ -67,12 +67,12 @@ again:
24446 pgd = *pgd_p;
24447
24448 /*
24449- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
24450- * critical -- __PAGE_OFFSET would point us back into the dynamic
24451+ * The use of __early_va rather than __va here is critical:
24452+ * __va would point us back into the dynamic
24453 * range and we might end up looping forever...
24454 */
24455 if (pgd)
24456- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24457+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
24458 else {
24459 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24460 reset_early_page_tables();
24461@@ -82,13 +82,13 @@ again:
24462 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
24463 for (i = 0; i < PTRS_PER_PUD; i++)
24464 pud_p[i] = 0;
24465- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24466+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
24467 }
24468 pud_p += pud_index(address);
24469 pud = *pud_p;
24470
24471 if (pud)
24472- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24473+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
24474 else {
24475 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24476 reset_early_page_tables();
24477@@ -98,7 +98,7 @@ again:
24478 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24479 for (i = 0; i < PTRS_PER_PMD; i++)
24480 pmd_p[i] = 0;
24481- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24482+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24483 }
24484 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24485 pmd_p[pmd_index(address)] = pmd;
24486@@ -175,7 +175,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
24487 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
24488 early_printk("Kernel alive\n");
24489
24490- clear_page(init_level4_pgt);
24491 /* set init_level4_pgt kernel high mapping*/
24492 init_level4_pgt[511] = early_level4_pgt[511];
24493
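Both sides of the __early_va() substitution are visible in the head64.c hunks: the removed open-coded form is phys + __START_KERNEL_map - phys_base, so __early_va() and the matching __pa() direction are inverses. A sketch with illustrative values:

    #include <assert.h>
    #include <stdint.h>

    static const uint64_t START_KERNEL_map = 0xffffffff80000000ULL;
    static uint64_t phys_base = 0x1000000;  /* illustrative relocation offset */

    static uint64_t early_va(uint64_t pa)   /* physical -> early virtual */
    {
            return pa + START_KERNEL_map - phys_base;
    }

    static uint64_t early_pa(uint64_t va)   /* early virtual -> physical */
    {
            return va - START_KERNEL_map + phys_base;
    }

    int main(void)
    {
            uint64_t pud_page = 0x1b55000;  /* illustrative table address */

            assert(early_pa(early_va(pud_page)) == pud_page);
            return 0;
    }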
24494diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24495index f36bd42..0ab4474 100644
24496--- a/arch/x86/kernel/head_32.S
24497+++ b/arch/x86/kernel/head_32.S
24498@@ -26,6 +26,12 @@
24499 /* Physical address */
24500 #define pa(X) ((X) - __PAGE_OFFSET)
24501
24502+#ifdef CONFIG_PAX_KERNEXEC
24503+#define ta(X) (X)
24504+#else
24505+#define ta(X) ((X) - __PAGE_OFFSET)
24506+#endif
24507+
24508 /*
24509 * References to members of the new_cpu_data structure.
24510 */
24511@@ -55,11 +61,7 @@
24512 * and smaller than max_low_pfn, otherwise some page table entries will be wasted
24513 */
24514
24515-#if PTRS_PER_PMD > 1
24516-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24517-#else
24518-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24519-#endif
24520+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24521
24522 /* Number of possible pages in the lowmem region */
24523 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
24524@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24525 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24526
24527 /*
24528+ * Real beginning of normal "text" segment
24529+ */
24530+ENTRY(stext)
24531+ENTRY(_stext)
24532+
24533+/*
24534 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24535 * %esi points to the real-mode code as a 32-bit pointer.
24536 * CS and DS must be 4 GB flat segments, but we don't depend on
24537@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24538 * can.
24539 */
24540 __HEAD
24541+
24542+#ifdef CONFIG_PAX_KERNEXEC
24543+ jmp startup_32
24544+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24545+.fill PAGE_SIZE-5,1,0xcc
24546+#endif
24547+
24548 ENTRY(startup_32)
24549 movl pa(stack_start),%ecx
24550
24551@@ -106,6 +121,59 @@ ENTRY(startup_32)
24552 2:
24553 leal -__PAGE_OFFSET(%ecx),%esp
24554
24555+#ifdef CONFIG_SMP
24556+ movl $pa(cpu_gdt_table),%edi
24557+ movl $__per_cpu_load,%eax
24558+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24559+ rorl $16,%eax
24560+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24561+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24562+ movl $__per_cpu_end - 1,%eax
24563+ subl $__per_cpu_start,%eax
24564+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24565+#endif
24566+
24567+#ifdef CONFIG_PAX_MEMORY_UDEREF
24568+ movl $NR_CPUS,%ecx
24569+ movl $pa(cpu_gdt_table),%edi
24570+1:
24571+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24572+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24573+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24574+ addl $PAGE_SIZE_asm,%edi
24575+ loop 1b
24576+#endif
24577+
24578+#ifdef CONFIG_PAX_KERNEXEC
24579+ movl $pa(boot_gdt),%edi
24580+ movl $__LOAD_PHYSICAL_ADDR,%eax
24581+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24582+ rorl $16,%eax
24583+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24584+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24585+ rorl $16,%eax
24586+
24587+ ljmp $(__BOOT_CS),$1f
24588+1:
24589+
24590+ movl $NR_CPUS,%ecx
24591+ movl $pa(cpu_gdt_table),%edi
24592+ addl $__PAGE_OFFSET,%eax
24593+1:
24594+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24595+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24596+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24597+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24598+ rorl $16,%eax
24599+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24600+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24601+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24602+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24603+ rorl $16,%eax
24604+ addl $PAGE_SIZE_asm,%edi
24605+ loop 1b
24606+#endif
24607+
24608 /*
24609 * Clear BSS first so that there are no surprises...
24610 */
24611@@ -201,8 +269,11 @@ ENTRY(startup_32)
24612 movl %eax, pa(max_pfn_mapped)
24613
24614 /* Do early initialization of the fixmap area */
24615- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24616- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24617+#ifdef CONFIG_COMPAT_VDSO
24618+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24619+#else
24620+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24621+#endif
24622 #else /* Not PAE */
24623
24624 page_pde_offset = (__PAGE_OFFSET >> 20);
24625@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24626 movl %eax, pa(max_pfn_mapped)
24627
24628 /* Do early initialization of the fixmap area */
24629- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24630- movl %eax,pa(initial_page_table+0xffc)
24631+#ifdef CONFIG_COMPAT_VDSO
24632+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24633+#else
24634+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24635+#endif
24636 #endif
24637
24638 #ifdef CONFIG_PARAVIRT
24639@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24640 cmpl $num_subarch_entries, %eax
24641 jae bad_subarch
24642
24643- movl pa(subarch_entries)(,%eax,4), %eax
24644- subl $__PAGE_OFFSET, %eax
24645- jmp *%eax
24646+ jmp *pa(subarch_entries)(,%eax,4)
24647
24648 bad_subarch:
24649 WEAK(lguest_entry)
24650@@ -261,10 +333,10 @@ WEAK(xen_entry)
24651 __INITDATA
24652
24653 subarch_entries:
24654- .long default_entry /* normal x86/PC */
24655- .long lguest_entry /* lguest hypervisor */
24656- .long xen_entry /* Xen hypervisor */
24657- .long default_entry /* Moorestown MID */
24658+ .long ta(default_entry) /* normal x86/PC */
24659+ .long ta(lguest_entry) /* lguest hypervisor */
24660+ .long ta(xen_entry) /* Xen hypervisor */
24661+ .long ta(default_entry) /* Moorestown MID */
24662 num_subarch_entries = (. - subarch_entries) / 4
24663 .previous
24664 #else
24665@@ -354,6 +426,7 @@ default_entry:
24666 movl pa(mmu_cr4_features),%eax
24667 movl %eax,%cr4
24668
24669+#ifdef CONFIG_X86_PAE
24670 testb $X86_CR4_PAE, %al # check if PAE is enabled
24671 jz enable_paging
24672
24673@@ -382,6 +455,9 @@ default_entry:
24674 /* Make changes effective */
24675 wrmsr
24676
24677+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24678+#endif
24679+
24680 enable_paging:
24681
24682 /*
24683@@ -449,14 +525,20 @@ is486:
24684 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24685 movl %eax,%ss # after changing gdt.
24686
24687- movl $(__USER_DS),%eax # DS/ES contains default USER segment
24688+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24689 movl %eax,%ds
24690 movl %eax,%es
24691
24692 movl $(__KERNEL_PERCPU), %eax
24693 movl %eax,%fs # set this cpu's percpu
24694
24695+#ifdef CONFIG_CC_STACKPROTECTOR
24696 movl $(__KERNEL_STACK_CANARY),%eax
24697+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24698+ movl $(__USER_DS),%eax
24699+#else
24700+ xorl %eax,%eax
24701+#endif
24702 movl %eax,%gs
24703
24704 xorl %eax,%eax # Clear LDT
24705@@ -512,8 +594,11 @@ setup_once:
24706 * relocation. Manually set base address in stack canary
24707 * segment descriptor.
24708 */
24709- movl $gdt_page,%eax
24710+ movl $cpu_gdt_table,%eax
24711 movl $stack_canary,%ecx
24712+#ifdef CONFIG_SMP
24713+ addl $__per_cpu_load,%ecx
24714+#endif
24715 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24716 shrl $16, %ecx
24717 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24718@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
24719 cmpl $2,(%esp) # X86_TRAP_NMI
24720 je is_nmi # Ignore NMI
24721
24722- cmpl $2,%ss:early_recursion_flag
24723+ cmpl $1,%ss:early_recursion_flag
24724 je hlt_loop
24725 incl %ss:early_recursion_flag
24726
24727@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
24728 pushl (20+6*4)(%esp) /* trapno */
24729 pushl $fault_msg
24730 call printk
24731-#endif
24732 call dump_stack
24733+#endif
24734 hlt_loop:
24735 hlt
24736 jmp hlt_loop
24737@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
24738 /* This is the default interrupt "handler" :-) */
24739 ALIGN
24740 ignore_int:
24741- cld
24742 #ifdef CONFIG_PRINTK
24743+ cmpl $2,%ss:early_recursion_flag
24744+ je hlt_loop
24745+ incl %ss:early_recursion_flag
24746+ cld
24747 pushl %eax
24748 pushl %ecx
24749 pushl %edx
24750@@ -617,9 +705,6 @@ ignore_int:
24751 movl $(__KERNEL_DS),%eax
24752 movl %eax,%ds
24753 movl %eax,%es
24754- cmpl $2,early_recursion_flag
24755- je hlt_loop
24756- incl early_recursion_flag
24757 pushl 16(%esp)
24758 pushl 24(%esp)
24759 pushl 32(%esp)
24760@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
24761 /*
24762 * BSS section
24763 */
24764-__PAGE_ALIGNED_BSS
24765- .align PAGE_SIZE
24766 #ifdef CONFIG_X86_PAE
24767+.section .initial_pg_pmd,"a",@progbits
24768 initial_pg_pmd:
24769 .fill 1024*KPMDS,4,0
24770 #else
24771+.section .initial_page_table,"a",@progbits
24772 ENTRY(initial_page_table)
24773 .fill 1024,4,0
24774 #endif
24775+.section .initial_pg_fixmap,"a",@progbits
24776 initial_pg_fixmap:
24777 .fill 1024,4,0
24778+.section .empty_zero_page,"a",@progbits
24779 ENTRY(empty_zero_page)
24780 .fill 4096,1,0
24781+.section .swapper_pg_dir,"a",@progbits
24782 ENTRY(swapper_pg_dir)
24783+#ifdef CONFIG_X86_PAE
24784+ .fill 4,8,0
24785+#else
24786 .fill 1024,4,0
24787+#endif
24788
24789 /*
24790 * This starts the data section.
24791 */
24792 #ifdef CONFIG_X86_PAE
24793-__PAGE_ALIGNED_DATA
24794- /* Page-aligned for the benefit of paravirt? */
24795- .align PAGE_SIZE
24796+.section .initial_page_table,"a",@progbits
24797 ENTRY(initial_page_table)
24798 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24799 # if KPMDS == 3
24800@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
24801 # error "Kernel PMDs should be 1, 2 or 3"
24802 # endif
24803 .align PAGE_SIZE /* needs to be page-sized too */
24804+
24805+#ifdef CONFIG_PAX_PER_CPU_PGD
24806+ENTRY(cpu_pgd)
24807+ .rept 2*NR_CPUS
24808+ .fill 4,8,0
24809+ .endr
24810+#endif
24811+
24812 #endif
24813
24814 .data
24815 .balign 4
24816 ENTRY(stack_start)
24817- .long init_thread_union+THREAD_SIZE
24818+ .long init_thread_union+THREAD_SIZE-8
24819
24820 __INITRODATA
24821 int_msg:
24822@@ -727,7 +825,7 @@ fault_msg:
24823 * segment size, and 32-bit linear address value:
24824 */
24825
24826- .data
24827+.section .rodata,"a",@progbits
24828 .globl boot_gdt_descr
24829 .globl idt_descr
24830
24831@@ -736,7 +834,7 @@ fault_msg:
24832 .word 0 # 32 bit align gdt_desc.address
24833 boot_gdt_descr:
24834 .word __BOOT_DS+7
24835- .long boot_gdt - __PAGE_OFFSET
24836+ .long pa(boot_gdt)
24837
24838 .word 0 # 32-bit align idt_desc.address
24839 idt_descr:
24840@@ -747,7 +845,7 @@ idt_descr:
24841 .word 0 # 32 bit align gdt_desc.address
24842 ENTRY(early_gdt_descr)
24843 .word GDT_ENTRIES*8-1
24844- .long gdt_page /* Overwritten for secondary CPUs */
24845+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
24846
24847 /*
24848 * The boot_gdt must mirror the equivalent in setup.S and is
24849@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
24850 .align L1_CACHE_BYTES
24851 ENTRY(boot_gdt)
24852 .fill GDT_ENTRY_BOOT_CS,8,0
24853- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24854- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24855+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24856+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24857+
24858+ .align PAGE_SIZE_asm
24859+ENTRY(cpu_gdt_table)
24860+ .rept NR_CPUS
24861+ .quad 0x0000000000000000 /* NULL descriptor */
24862+ .quad 0x0000000000000000 /* 0x0b reserved */
24863+ .quad 0x0000000000000000 /* 0x13 reserved */
24864+ .quad 0x0000000000000000 /* 0x1b reserved */
24865+
24866+#ifdef CONFIG_PAX_KERNEXEC
24867+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
24868+#else
24869+ .quad 0x0000000000000000 /* 0x20 unused */
24870+#endif
24871+
24872+ .quad 0x0000000000000000 /* 0x28 unused */
24873+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
24874+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
24875+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
24876+ .quad 0x0000000000000000 /* 0x4b reserved */
24877+ .quad 0x0000000000000000 /* 0x53 reserved */
24878+ .quad 0x0000000000000000 /* 0x5b reserved */
24879+
24880+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
24881+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
24882+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
24883+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
24884+
24885+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
24886+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
24887+
24888+ /*
24889+ * Segments used for calling PnP BIOS have byte granularity.
24890+ * The code segments and data segments have fixed 64k limits,
24891+ * the transfer segment sizes are set at run time.
24892+ */
24893+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
24894+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
24895+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
24896+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
24897+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
24898+
24899+ /*
24900+ * The APM segments have byte granularity and their bases
24901+ * are set at run time. All have 64k limits.
24902+ */
24903+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
24904+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
24905+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
24906+
24907+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
24908+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
24909+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
24910+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
24911+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
24912+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
24913+
24914+ /* Be sure this is zeroed to avoid false validations in Xen */
24915+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
24916+ .endr
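The GDT fixups above poke a 32-bit base into an 8-byte descriptor with movw ...+2, movb ...+4 and movb ...+7 around rorl $16. That follows the standard x86 descriptor layout: base bits 0-15 at bytes 2-3, bits 16-23 at byte 4, bits 24-31 at byte 7, and limit bits 0-15 at bytes 0-1. A sketch mirroring those byte offsets; the base value is illustrative:

    #include <assert.h>
    #include <stdint.h>

    static void set_desc_base(uint8_t d[8], uint32_t base)
    {
            d[2] = base & 0xff;             /* movw %ax, ...+2 (low byte) */
            d[3] = (base >> 8) & 0xff;      /* movw %ax, ...+2 (high byte) */
            d[4] = (base >> 16) & 0xff;     /* movb %al, ...+4 after rorl $16 */
            d[7] = (base >> 24) & 0xff;     /* movb %ah, ...+7 after rorl $16 */
    }

    static void set_desc_limit(uint8_t d[8], uint16_t limit)
    {
            d[0] = limit & 0xff;            /* movw %ax, ...+0 */
            d[1] = limit >> 8;
    }

    int main(void)
    {
            uint8_t d[8] = { 0 };

            set_desc_base(d, 0xc1a2b3c4);   /* illustrative __per_cpu_load */
            set_desc_limit(d, 0xffff);
            assert(d[2] == 0xc4 && d[3] == 0xb3 && d[4] == 0xa2 && d[7] == 0xc1);
            return 0;
    }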
24917diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
24918index a468c0a..8b5a879 100644
24919--- a/arch/x86/kernel/head_64.S
24920+++ b/arch/x86/kernel/head_64.S
24921@@ -20,6 +20,8 @@
24922 #include <asm/processor-flags.h>
24923 #include <asm/percpu.h>
24924 #include <asm/nops.h>
24925+#include <asm/cpufeature.h>
24926+#include <asm/alternative-asm.h>
24927
24928 #ifdef CONFIG_PARAVIRT
24929 #include <asm/asm-offsets.h>
24930@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
24931 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
24932 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
24933 L3_START_KERNEL = pud_index(__START_KERNEL_map)
24934+L4_VMALLOC_START = pgd_index(VMALLOC_START)
24935+L3_VMALLOC_START = pud_index(VMALLOC_START)
24936+L4_VMALLOC_END = pgd_index(VMALLOC_END)
24937+L3_VMALLOC_END = pud_index(VMALLOC_END)
24938+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
24939+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
24940
24941 .text
24942 __HEAD
24943@@ -89,11 +97,24 @@ startup_64:
24944 * Fixup the physical addresses in the page table
24945 */
24946 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
24947+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
24948+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
24949+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
24950+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
24951+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
24952
24953- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
24954- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
24955+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
24956+#ifndef CONFIG_XEN
24957+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
24958+#endif
24959+
24960+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
24961+
24962+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
24963+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
24964
24965 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
24966+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
24967
24968 /*
24969 * Set up the identity mapping for the switchover. These
24970@@ -174,11 +195,12 @@ ENTRY(secondary_startup_64)
24971 * after the boot processor executes this code.
24972 */
24973
24974+ orq $-1, %rbp
24975 movq $(init_level4_pgt - __START_KERNEL_map), %rax
24976 1:
24977
24978- /* Enable PAE mode and PGE */
24979- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
24980+ /* Enable PAE mode and PSE/PGE */
24981+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
24982 movq %rcx, %cr4
24983
24984 /* Setup early boot stage 4 level pagetables. */
24985@@ -199,10 +221,19 @@ ENTRY(secondary_startup_64)
24986 movl $MSR_EFER, %ecx
24987 rdmsr
24988 btsl $_EFER_SCE, %eax /* Enable System Call */
24989- btl $20,%edi /* No Execute supported? */
24990+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
24991 jnc 1f
24992 btsl $_EFER_NX, %eax
24993+ cmpq $-1, %rbp
24994+ je 1f
24995 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
24996+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
24997+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
24998+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
24999+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
25000+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
25001+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
25002+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
25003 1: wrmsr /* Make changes effective */
25004
25005 /* Setup cr0 */
25006@@ -282,6 +313,7 @@ ENTRY(secondary_startup_64)
25007 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
25008 * address given in m16:64.
25009 */
25010+ pax_set_fptr_mask
25011 movq initial_code(%rip),%rax
25012 pushq $0 # fake return address to stop unwinder
25013 pushq $__KERNEL_CS # set correct cs
25014@@ -313,7 +345,7 @@ ENDPROC(start_cpu0)
25015 .quad INIT_PER_CPU_VAR(irq_stack_union)
25016
25017 GLOBAL(stack_start)
25018- .quad init_thread_union+THREAD_SIZE-8
25019+ .quad init_thread_union+THREAD_SIZE-16
25020 .word 0
25021 __FINITDATA
25022
25023@@ -391,7 +423,7 @@ ENTRY(early_idt_handler)
25024 call dump_stack
25025 #ifdef CONFIG_KALLSYMS
25026 leaq early_idt_ripmsg(%rip),%rdi
25027- movq 40(%rsp),%rsi # %rip again
25028+ movq 88(%rsp),%rsi # %rip again
25029 call __print_symbol
25030 #endif
25031 #endif /* EARLY_PRINTK */
25032@@ -420,6 +452,7 @@ ENDPROC(early_idt_handler)
25033 early_recursion_flag:
25034 .long 0
25035
25036+ .section .rodata,"a",@progbits
25037 #ifdef CONFIG_EARLY_PRINTK
25038 early_idt_msg:
25039 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
25040@@ -447,29 +480,52 @@ NEXT_PAGE(early_level4_pgt)
25041 NEXT_PAGE(early_dynamic_pgts)
25042 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
25043
25044- .data
25045+ .section .rodata,"a",@progbits
25046
25047-#ifndef CONFIG_XEN
25048 NEXT_PAGE(init_level4_pgt)
25049- .fill 512,8,0
25050-#else
25051-NEXT_PAGE(init_level4_pgt)
25052- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25053 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
25054 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25055+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
25056+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
25057+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
25058+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
25059+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
25060+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25061 .org init_level4_pgt + L4_START_KERNEL*8, 0
25062 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
25063 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
25064
25065+#ifdef CONFIG_PAX_PER_CPU_PGD
25066+NEXT_PAGE(cpu_pgd)
25067+ .rept 2*NR_CPUS
25068+ .fill 512,8,0
25069+ .endr
25070+#endif
25071+
25072 NEXT_PAGE(level3_ident_pgt)
25073 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25074+#ifdef CONFIG_XEN
25075 .fill 511, 8, 0
25076+#else
25077+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
25078+ .fill 510,8,0
25079+#endif
25080+
25081+NEXT_PAGE(level3_vmalloc_start_pgt)
25082+ .fill 512,8,0
25083+
25084+NEXT_PAGE(level3_vmalloc_end_pgt)
25085+ .fill 512,8,0
25086+
25087+NEXT_PAGE(level3_vmemmap_pgt)
25088+ .fill L3_VMEMMAP_START,8,0
25089+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25090+
25091 NEXT_PAGE(level2_ident_pgt)
25092- /* Since I easily can, map the first 1G.
25093+ /* Since I easily can, map the first 2G.
25094 * Don't set NX because code runs from these pages.
25095 */
25096- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
25097-#endif
25098+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
25099
25100 NEXT_PAGE(level3_kernel_pgt)
25101 .fill L3_START_KERNEL,8,0
25102@@ -477,6 +533,9 @@ NEXT_PAGE(level3_kernel_pgt)
25103 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
25104 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25105
25106+NEXT_PAGE(level2_vmemmap_pgt)
25107+ .fill 512,8,0
25108+
25109 NEXT_PAGE(level2_kernel_pgt)
25110 /*
25111 * 512 MB kernel mapping. We spend a full page on this pagetable
25112@@ -494,28 +553,64 @@ NEXT_PAGE(level2_kernel_pgt)
25113 NEXT_PAGE(level2_fixmap_pgt)
25114 .fill 506,8,0
25115 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25116- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
25117- .fill 5,8,0
25118+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
25119+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
25120+ .fill 4,8,0
25121
25122 NEXT_PAGE(level1_fixmap_pgt)
25123 .fill 512,8,0
25124
25125+NEXT_PAGE(level1_vsyscall_pgt)
25126+ .fill 512,8,0
25127+
25128 #undef PMDS
25129
25130- .data
25131+ .align PAGE_SIZE
25132+ENTRY(cpu_gdt_table)
25133+ .rept NR_CPUS
25134+ .quad 0x0000000000000000 /* NULL descriptor */
25135+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
25136+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
25137+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
25138+ .quad 0x00cffb000000ffff /* __USER32_CS */
25139+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
25140+ .quad 0x00affb000000ffff /* __USER_CS */
25141+
25142+#ifdef CONFIG_PAX_KERNEXEC
25143+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
25144+#else
25145+ .quad 0x0 /* unused */
25146+#endif
25147+
25148+ .quad 0,0 /* TSS */
25149+ .quad 0,0 /* LDT */
25150+ .quad 0,0,0 /* three TLS descriptors */
25151+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
25152+ /* asm/segment.h:GDT_ENTRIES must match this */
25153+
25154+#ifdef CONFIG_PAX_MEMORY_UDEREF
25155+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
25156+#else
25157+ .quad 0x0 /* unused */
25158+#endif
25159+
25160+ /* zero the remaining page */
25161+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25162+ .endr
25163+
25164 .align 16
25165 .globl early_gdt_descr
25166 early_gdt_descr:
25167 .word GDT_ENTRIES*8-1
25168 early_gdt_descr_base:
25169- .quad INIT_PER_CPU_VAR(gdt_page)
25170+ .quad cpu_gdt_table
25171
25172 ENTRY(phys_base)
25173 /* This must match the first entry in level2_kernel_pgt */
25174 .quad 0x0000000000000000
25175
25176 #include "../../x86/xen/xen-head.S"
25177-
25178- __PAGE_ALIGNED_BSS
25179+
25180+ .section .rodata,"a",@progbits
25181 NEXT_PAGE(empty_zero_page)
25182 .skip PAGE_SIZE
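The cpu_gdt_table added above packs each segment descriptor into a raw .quad. As a reference, here is a small userspace decoder (not part of the patch; field layout per the usual x86 descriptor format) that unpacks the base/limit/access/flags fields of the quads used in that table:

#include <stdio.h>
#include <stdint.h>

static void decode_gdt(uint64_t d)
{
	uint32_t limit  = (uint32_t)(d & 0xffff) | (uint32_t)((d >> 32) & 0xf0000);
	uint32_t base   = (uint32_t)((d >> 16) & 0xffffff) |
	                  (uint32_t)(((d >> 56) & 0xff) << 24);
	unsigned access = (unsigned)((d >> 40) & 0xff);
	unsigned flags  = (unsigned)((d >> 52) & 0xf);

	printf("base=%#x limit=%#x access=%#x DPL=%u G=%u L=%u D=%u\n",
	       base, limit, access, (access >> 5) & 3,
	       (flags >> 3) & 1, (flags >> 1) & 1, (flags >> 2) & 1);
}

int main(void)
{
	decode_gdt(0x00af9b000000ffffULL); /* __KERNEL_CS: ring-0 long-mode code */
	decode_gdt(0x00cff3000000ffffULL); /* __USER_DS: ring-3 32-bit data      */
	return 0;
}

Run on 0x00af9b000000ffff this reports a present ring-0 code segment with L=1 and a page-granular 0xfffff limit, matching the __KERNEL_CS comment in the table.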
25183diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25184index 05fd74f..c3548b1 100644
25185--- a/arch/x86/kernel/i386_ksyms_32.c
25186+++ b/arch/x86/kernel/i386_ksyms_32.c
25187@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25188 EXPORT_SYMBOL(cmpxchg8b_emu);
25189 #endif
25190
25191+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25192+
25193 /* Networking helper routines. */
25194 EXPORT_SYMBOL(csum_partial_copy_generic);
25195+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25196+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25197
25198 EXPORT_SYMBOL(__get_user_1);
25199 EXPORT_SYMBOL(__get_user_2);
25200@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25201 EXPORT_SYMBOL(___preempt_schedule_context);
25202 #endif
25203 #endif
25204+
25205+#ifdef CONFIG_PAX_KERNEXEC
25206+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25207+#endif
25208+
25209+#ifdef CONFIG_PAX_PER_CPU_PGD
25210+EXPORT_SYMBOL(cpu_pgd);
25211+#endif
25212diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25213index a9a4229..6f4d476 100644
25214--- a/arch/x86/kernel/i387.c
25215+++ b/arch/x86/kernel/i387.c
25216@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25217 static inline bool interrupted_user_mode(void)
25218 {
25219 struct pt_regs *regs = get_irq_regs();
25220- return regs && user_mode_vm(regs);
25221+ return regs && user_mode(regs);
25222 }
25223
25224 /*
25225diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25226index e7cc537..67d7372 100644
25227--- a/arch/x86/kernel/i8259.c
25228+++ b/arch/x86/kernel/i8259.c
25229@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25230 static void make_8259A_irq(unsigned int irq)
25231 {
25232 disable_irq_nosync(irq);
25233- io_apic_irqs &= ~(1<<irq);
25234+ io_apic_irqs &= ~(1UL<<irq);
25235 irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
25236 enable_irq(irq);
25237 }
25238@@ -208,7 +208,7 @@ spurious_8259A_irq:
25239 "spurious 8259A interrupt: IRQ%d.\n", irq);
25240 spurious_irq_mask |= irqmask;
25241 }
25242- atomic_inc(&irq_err_count);
25243+ atomic_inc_unchecked(&irq_err_count);
25244 /*
25245 * Theoretically we do not have to handle this IRQ,
25246 * but in Linux this does not cause problems and is
25247@@ -349,14 +349,16 @@ static void init_8259A(int auto_eoi)
25248 /* (slave's support for AEOI in flat mode is to be investigated) */
25249 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25250
25251+ pax_open_kernel();
25252 if (auto_eoi)
25253 /*
25254 * In AEOI mode we just have to mask the interrupt
25255 * when acking.
25256 */
25257- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25258+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25259 else
25260- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25261+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25262+ pax_close_kernel();
25263
25264 udelay(100); /* wait for 8259A to initialize */
25265
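The pax_open_kernel()/pax_close_kernel() pair wrapped around the irq_mask_ack assignment exists because i8259A_chip lives in read-only memory once KERNEXEC is active, so the function pointer has to be patched through a window where writes are allowed. A rough userspace analogue of the open/patch/close pattern, using mprotect() in place of the PaX helpers (illustrative only, not kernel code):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

typedef void (*handler_t)(void);
static void mask_and_ack(void) { }
static void disable_irq(void)  { }

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	/* stand-in for a read-only ops table such as i8259A_chip */
	handler_t *chip = mmap(NULL, pg, PROT_READ | PROT_WRITE,
	                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (chip == MAP_FAILED)
		return 1;
	chip[0] = mask_and_ack;
	mprotect(chip, pg, PROT_READ);               /* table is now read-only */

	mprotect(chip, pg, PROT_READ | PROT_WRITE);  /* ~ pax_open_kernel()  */
	chip[0] = disable_irq;                       /* patch irq_mask_ack   */
	mprotect(chip, pg, PROT_READ);               /* ~ pax_close_kernel() */

	printf("handler now %p\n", (void *)chip[0]);
	return 0;
}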
25266diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25267index a979b5b..1d6db75 100644
25268--- a/arch/x86/kernel/io_delay.c
25269+++ b/arch/x86/kernel/io_delay.c
25270@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25271 * Quirk table for systems that misbehave (lock up, etc.) if port
25272 * 0x80 is used:
25273 */
25274-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25275+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25276 {
25277 .callback = dmi_io_delay_0xed_port,
25278 .ident = "Compaq Presario V6000",
25279diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25280index 4ddaf66..49d5c18 100644
25281--- a/arch/x86/kernel/ioport.c
25282+++ b/arch/x86/kernel/ioport.c
25283@@ -6,6 +6,7 @@
25284 #include <linux/sched.h>
25285 #include <linux/kernel.h>
25286 #include <linux/capability.h>
25287+#include <linux/security.h>
25288 #include <linux/errno.h>
25289 #include <linux/types.h>
25290 #include <linux/ioport.h>
25291@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25292 return -EINVAL;
25293 if (turn_on && !capable(CAP_SYS_RAWIO))
25294 return -EPERM;
25295+#ifdef CONFIG_GRKERNSEC_IO
25296+ if (turn_on && grsec_disable_privio) {
25297+ gr_handle_ioperm();
25298+ return -ENODEV;
25299+ }
25300+#endif
25301
25302 /*
25303 * If it's the first ioperm() call in this thread's lifetime, set the
25304@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25305 * because the ->io_bitmap_max value must match the bitmap
25306 * contents:
25307 */
25308- tss = &per_cpu(init_tss, get_cpu());
25309+ tss = init_tss + get_cpu();
25310
25311 if (turn_on)
25312 bitmap_clear(t->io_bitmap_ptr, from, num);
25313@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25314 if (level > old) {
25315 if (!capable(CAP_SYS_RAWIO))
25316 return -EPERM;
25317+#ifdef CONFIG_GRKERNSEC_IO
25318+ if (grsec_disable_privio) {
25319+ gr_handle_iopl();
25320+ return -ENODEV;
25321+ }
25322+#endif
25323 }
25324 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25325 t->iopl = level << 12;
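With CONFIG_GRKERNSEC_IO built in and grsec_disable_privio set, the two hunks above make ioperm() and iopl() fail with -ENODEV even for CAP_SYS_RAWIO holders. A quick userspace probe (the expected errno values are an inference from the hunk, not something the patch documents):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/io.h>

int main(void)
{
	if (ioperm(0x80, 1, 1) != 0)
		printf("ioperm: %s\n", strerror(errno)); /* ENODEV on grsec, EPERM unprivileged */
	else
		printf("port 0x80 access granted\n");
	return 0;
}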
25326diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25327index 705ef8d..8672c9d 100644
25328--- a/arch/x86/kernel/irq.c
25329+++ b/arch/x86/kernel/irq.c
25330@@ -22,7 +22,7 @@
25331 #define CREATE_TRACE_POINTS
25332 #include <asm/trace/irq_vectors.h>
25333
25334-atomic_t irq_err_count;
25335+atomic_unchecked_t irq_err_count;
25336
25337 /* Function pointer for generic interrupt vector handling */
25338 void (*x86_platform_ipi_callback)(void) = NULL;
25339@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25340 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25341 seq_puts(p, " Hypervisor callback interrupts\n");
25342 #endif
25343- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25344+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25345 #if defined(CONFIG_X86_IO_APIC)
25346- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25347+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25348 #endif
25349 return 0;
25350 }
25351@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25352
25353 u64 arch_irq_stat(void)
25354 {
25355- u64 sum = atomic_read(&irq_err_count);
25356+ u64 sum = atomic_read_unchecked(&irq_err_count);
25357 return sum;
25358 }
25359
25360diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25361index 63ce838..2ea3e06 100644
25362--- a/arch/x86/kernel/irq_32.c
25363+++ b/arch/x86/kernel/irq_32.c
25364@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25365
25366 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25367
25368+extern void gr_handle_kernel_exploit(void);
25369+
25370 int sysctl_panic_on_stackoverflow __read_mostly;
25371
25372 /* Debugging check for stack overflow: is there less than 1KB free? */
25373@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25374 __asm__ __volatile__("andl %%esp,%0" :
25375 "=r" (sp) : "0" (THREAD_SIZE - 1));
25376
25377- return sp < (sizeof(struct thread_info) + STACK_WARN);
25378+ return sp < STACK_WARN;
25379 }
25380
25381 static void print_stack_overflow(void)
25382 {
25383 printk(KERN_WARNING "low stack detected by irq handler\n");
25384 dump_stack();
25385+ gr_handle_kernel_exploit();
25386 if (sysctl_panic_on_stackoverflow)
25387 panic("low stack detected by irq handler - check messages\n");
25388 }
25389@@ -84,10 +87,9 @@ static inline void *current_stack(void)
25390 static inline int
25391 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25392 {
25393- struct irq_stack *curstk, *irqstk;
25394+ struct irq_stack *irqstk;
25395 u32 *isp, *prev_esp, arg1, arg2;
25396
25397- curstk = (struct irq_stack *) current_stack();
25398 irqstk = __this_cpu_read(hardirq_stack);
25399
25400 /*
25401@@ -96,15 +98,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25402 * handler) we can't do that and just have to keep using the
25403 * current stack (which is the irq stack already after all)
25404 */
25405- if (unlikely(curstk == irqstk))
25406+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
25407 return 0;
25408
25409- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
25410+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
25411
25412 /* Save the next esp at the bottom of the stack */
25413 prev_esp = (u32 *)irqstk;
25414 *prev_esp = current_stack_pointer;
25415
25416+#ifdef CONFIG_PAX_MEMORY_UDEREF
25417+ __set_fs(MAKE_MM_SEG(0));
25418+#endif
25419+
25420 if (unlikely(overflow))
25421 call_on_stack(print_stack_overflow, isp);
25422
25423@@ -115,6 +121,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25424 : "0" (irq), "1" (desc), "2" (isp),
25425 "D" (desc->handle_irq)
25426 : "memory", "cc", "ecx");
25427+
25428+#ifdef CONFIG_PAX_MEMORY_UDEREF
25429+ __set_fs(current_thread_info()->addr_limit);
25430+#endif
25431+
25432 return 1;
25433 }
25434
25435@@ -123,32 +134,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25436 */
25437 void irq_ctx_init(int cpu)
25438 {
25439- struct irq_stack *irqstk;
25440-
25441 if (per_cpu(hardirq_stack, cpu))
25442 return;
25443
25444- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25445- THREADINFO_GFP,
25446- THREAD_SIZE_ORDER));
25447- per_cpu(hardirq_stack, cpu) = irqstk;
25448-
25449- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25450- THREADINFO_GFP,
25451- THREAD_SIZE_ORDER));
25452- per_cpu(softirq_stack, cpu) = irqstk;
25453-
25454- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
25455- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
25456+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25457+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25458 }
25459
25460 void do_softirq_own_stack(void)
25461 {
25462- struct thread_info *curstk;
25463 struct irq_stack *irqstk;
25464 u32 *isp, *prev_esp;
25465
25466- curstk = current_stack();
25467 irqstk = __this_cpu_read(softirq_stack);
25468
25469 /* build the stack frame on the softirq stack */
25470@@ -158,7 +155,16 @@ void do_softirq_own_stack(void)
25471 prev_esp = (u32 *)irqstk;
25472 *prev_esp = current_stack_pointer;
25473
25474+#ifdef CONFIG_PAX_MEMORY_UDEREF
25475+ __set_fs(MAKE_MM_SEG(0));
25476+#endif
25477+
25478 call_on_stack(__do_softirq, isp);
25479+
25480+#ifdef CONFIG_PAX_MEMORY_UDEREF
25481+ __set_fs(current_thread_info()->addr_limit);
25482+#endif
25483+
25484 }
25485
25486 bool handle_irq(unsigned irq, struct pt_regs *regs)
25487@@ -172,7 +178,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
25488 if (unlikely(!desc))
25489 return false;
25490
25491- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25492+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25493 if (unlikely(overflow))
25494 print_stack_overflow();
25495 desc->handle_irq(irq, desc);
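The patched check_stack_overflow() drops the sizeof(struct thread_info) slack and compares the in-stack offset of %esp against STACK_WARN directly. The arithmetic, modelled in plain C (the THREAD_SIZE/STACK_WARN values below assume i386 with 8K stacks):

#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192UL              /* i386 with 8K stacks (assumption) */
#define STACK_WARN  (THREAD_SIZE / 8)

static int check_stack_overflow(uintptr_t sp)
{
	/* mirrors: __asm__("andl %%esp,%0" : "=r"(sp) : "0"(THREAD_SIZE - 1)) */
	sp &= THREAD_SIZE - 1;
	return sp < STACK_WARN;         /* patched form: no thread_info slack */
}

int main(void)
{
	printf("%d\n", check_stack_overflow(0xc1234010)); /* offset 0x10: warn (1)  */
	printf("%d\n", check_stack_overflow(0xc1235f00)); /* offset 0x1f00: ok (0) */
	return 0;
}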
25496diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25497index e4b503d..824fce8 100644
25498--- a/arch/x86/kernel/irq_64.c
25499+++ b/arch/x86/kernel/irq_64.c
25500@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
25501 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
25502 EXPORT_PER_CPU_SYMBOL(irq_regs);
25503
25504+extern void gr_handle_kernel_exploit(void);
25505+
25506 int sysctl_panic_on_stackoverflow;
25507
25508 /*
25509@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25510 u64 estack_top, estack_bottom;
25511 u64 curbase = (u64)task_stack_page(current);
25512
25513- if (user_mode_vm(regs))
25514+ if (user_mode(regs))
25515 return;
25516
25517 if (regs->sp >= curbase + sizeof(struct thread_info) +
25518@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25519 irq_stack_top, irq_stack_bottom,
25520 estack_top, estack_bottom);
25521
25522+ gr_handle_kernel_exploit();
25523+
25524 if (sysctl_panic_on_stackoverflow)
25525 panic("low stack detected by irq handler - check messages\n");
25526 #endif
25527diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25528index 26d5a55..a01160a 100644
25529--- a/arch/x86/kernel/jump_label.c
25530+++ b/arch/x86/kernel/jump_label.c
25531@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25532 * Jump label is enabled for the first time.
25533 * So we expect a default_nop...
25534 */
25535- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25536+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25537 != 0))
25538 bug_at((void *)entry->code, __LINE__);
25539 } else {
25540@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25541 * ...otherwise expect an ideal_nop. Otherwise
25542 * something went horribly wrong.
25543 */
25544- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25545+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25546 != 0))
25547 bug_at((void *)entry->code, __LINE__);
25548 }
25549@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25550 * are converting the default nop to the ideal nop.
25551 */
25552 if (init) {
25553- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25554+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25555 bug_at((void *)entry->code, __LINE__);
25556 } else {
25557 code.jump = 0xe9;
25558 code.offset = entry->target -
25559 (entry->code + JUMP_LABEL_NOP_SIZE);
25560- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25561+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25562 bug_at((void *)entry->code, __LINE__);
25563 }
25564 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
25565diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25566index 7ec1d5f..5a7d130 100644
25567--- a/arch/x86/kernel/kgdb.c
25568+++ b/arch/x86/kernel/kgdb.c
25569@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
25570 #ifdef CONFIG_X86_32
25571 switch (regno) {
25572 case GDB_SS:
25573- if (!user_mode_vm(regs))
25574+ if (!user_mode(regs))
25575 *(unsigned long *)mem = __KERNEL_DS;
25576 break;
25577 case GDB_SP:
25578- if (!user_mode_vm(regs))
25579+ if (!user_mode(regs))
25580 *(unsigned long *)mem = kernel_stack_pointer(regs);
25581 break;
25582 case GDB_GS:
25583@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
25584 bp->attr.bp_addr = breakinfo[breakno].addr;
25585 bp->attr.bp_len = breakinfo[breakno].len;
25586 bp->attr.bp_type = breakinfo[breakno].type;
25587- info->address = breakinfo[breakno].addr;
25588+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25589+ info->address = ktla_ktva(breakinfo[breakno].addr);
25590+ else
25591+ info->address = breakinfo[breakno].addr;
25592 info->len = breakinfo[breakno].len;
25593 info->type = breakinfo[breakno].type;
25594 val = arch_install_hw_breakpoint(bp);
25595@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25596 case 'k':
25597 /* clear the trace bit */
25598 linux_regs->flags &= ~X86_EFLAGS_TF;
25599- atomic_set(&kgdb_cpu_doing_single_step, -1);
25600+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25601
25602 /* set the trace bit if we're stepping */
25603 if (remcomInBuffer[0] == 's') {
25604 linux_regs->flags |= X86_EFLAGS_TF;
25605- atomic_set(&kgdb_cpu_doing_single_step,
25606+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25607 raw_smp_processor_id());
25608 }
25609
25610@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25611
25612 switch (cmd) {
25613 case DIE_DEBUG:
25614- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25615+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25616 if (user_mode(regs))
25617 return single_step_cont(regs, args);
25618 break;
25619@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25620 #endif /* CONFIG_DEBUG_RODATA */
25621
25622 bpt->type = BP_BREAKPOINT;
25623- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25624+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25625 BREAK_INSTR_SIZE);
25626 if (err)
25627 return err;
25628- err = probe_kernel_write((char *)bpt->bpt_addr,
25629+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25630 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25631 #ifdef CONFIG_DEBUG_RODATA
25632 if (!err)
25633@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25634 return -EBUSY;
25635 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25636 BREAK_INSTR_SIZE);
25637- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25638+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25639 if (err)
25640 return err;
25641 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25642@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25643 if (mutex_is_locked(&text_mutex))
25644 goto knl_write;
25645 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25646- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25647+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25648 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25649 goto knl_write;
25650 return err;
25651 knl_write:
25652 #endif /* CONFIG_DEBUG_RODATA */
25653- return probe_kernel_write((char *)bpt->bpt_addr,
25654+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25655 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25656 }
25657
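kgdb now writes breakpoints through ktla_ktva() and reads opcodes back the same way: under KERNEXEC the kernel text's writable linear alias and its executable virtual address differ by a constant, and the two macros convert between them. A toy model of the round-trip invariant (the offset below is illustrative only, not the real __KERNEL_TEXT_OFFSET):

#include <stdio.h>
#include <stdint.h>

#define TEXT_OFFSET 0x10000000UL            /* made-up stand-in offset */
#define ktla_ktva(a) ((a) + TEXT_OFFSET)    /* linear alias -> exec VA */
#define ktva_ktla(a) ((a) - TEXT_OFFSET)    /* exec VA -> linear alias */

int main(void)
{
	uintptr_t bpt_addr = 0xc1000000UL;      /* hypothetical breakpoint */
	uintptr_t via = ktla_ktva(bpt_addr);

	printf("write via %#lx, round-trip %#lx\n",
	       (unsigned long)via, (unsigned long)ktva_ktla(via));
	return 0;
}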
25658diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25659index 98f654d..ac04352 100644
25660--- a/arch/x86/kernel/kprobes/core.c
25661+++ b/arch/x86/kernel/kprobes/core.c
25662@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
25663 s32 raddr;
25664 } __packed *insn;
25665
25666- insn = (struct __arch_relative_insn *)from;
25667+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
25668+
25669+ pax_open_kernel();
25670 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25671 insn->op = op;
25672+ pax_close_kernel();
25673 }
25674
25675 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25676@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
25677 kprobe_opcode_t opcode;
25678 kprobe_opcode_t *orig_opcodes = opcodes;
25679
25680- if (search_exception_tables((unsigned long)opcodes))
25681+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25682 return 0; /* Page fault may occur on this address. */
25683
25684 retry:
25685@@ -242,9 +245,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25686 * for the first byte, we can recover the original instruction
25687 * from it and kp->opcode.
25688 */
25689- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25690+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25691 buf[0] = kp->opcode;
25692- return (unsigned long)buf;
25693+ return ktva_ktla((unsigned long)buf);
25694 }
25695
25696 /*
25697@@ -338,7 +341,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25698 /* Another subsystem puts a breakpoint, failed to recover */
25699 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25700 return 0;
25701+ pax_open_kernel();
25702 memcpy(dest, insn.kaddr, insn.length);
25703+ pax_close_kernel();
25704
25705 #ifdef CONFIG_X86_64
25706 if (insn_rip_relative(&insn)) {
25707@@ -365,7 +370,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25708 return 0;
25709 }
25710 disp = (u8 *) dest + insn_offset_displacement(&insn);
25711+ pax_open_kernel();
25712 *(s32 *) disp = (s32) newdisp;
25713+ pax_close_kernel();
25714 }
25715 #endif
25716 return insn.length;
25717@@ -507,7 +514,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25718 * nor set current_kprobe, because it doesn't use single
25719 * stepping.
25720 */
25721- regs->ip = (unsigned long)p->ainsn.insn;
25722+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25723 preempt_enable_no_resched();
25724 return;
25725 }
25726@@ -524,9 +531,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25727 regs->flags &= ~X86_EFLAGS_IF;
25728 /* single step inline if the instruction is an int3 */
25729 if (p->opcode == BREAKPOINT_INSTRUCTION)
25730- regs->ip = (unsigned long)p->addr;
25731+ regs->ip = ktla_ktva((unsigned long)p->addr);
25732 else
25733- regs->ip = (unsigned long)p->ainsn.insn;
25734+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25735 }
25736 NOKPROBE_SYMBOL(setup_singlestep);
25737
25738@@ -576,7 +583,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25739 struct kprobe *p;
25740 struct kprobe_ctlblk *kcb;
25741
25742- if (user_mode_vm(regs))
25743+ if (user_mode(regs))
25744 return 0;
25745
25746 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
25747@@ -611,7 +618,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25748 setup_singlestep(p, regs, kcb, 0);
25749 return 1;
25750 }
25751- } else if (*addr != BREAKPOINT_INSTRUCTION) {
25752+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25753 /*
25754 * The breakpoint instruction was removed right
25755 * after we hit it. Another cpu has removed
25756@@ -658,6 +665,9 @@ static void __used kretprobe_trampoline_holder(void)
25757 " movq %rax, 152(%rsp)\n"
25758 RESTORE_REGS_STRING
25759 " popfq\n"
25760+#ifdef KERNEXEC_PLUGIN
25761+ " btsq $63,(%rsp)\n"
25762+#endif
25763 #else
25764 " pushf\n"
25765 SAVE_REGS_STRING
25766@@ -798,7 +808,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
25767 struct kprobe_ctlblk *kcb)
25768 {
25769 unsigned long *tos = stack_addr(regs);
25770- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25771+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25772 unsigned long orig_ip = (unsigned long)p->addr;
25773 kprobe_opcode_t *insn = p->ainsn.insn;
25774
25775@@ -981,7 +991,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
25776 struct die_args *args = data;
25777 int ret = NOTIFY_DONE;
25778
25779- if (args->regs && user_mode_vm(args->regs))
25780+ if (args->regs && user_mode(args->regs))
25781 return ret;
25782
25783 if (val == DIE_GPF) {
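__synthesize_relative_insn(), patched above to write through ktla_ktva() inside a pax_open_kernel() window, emits a one-byte opcode followed by rel32 = to - (from + 5). A minimal encoding check in userspace C:

#include <stdio.h>
#include <stdint.h>

struct __attribute__((packed)) rel_insn {
	uint8_t op;
	int32_t raddr;
};

int main(void)
{
	uint8_t buf[8];
	uintptr_t from = (uintptr_t)buf, to = from + 0x1000;
	struct rel_insn *insn = (struct rel_insn *)buf;

	insn->raddr = (int32_t)((long)to - ((long)from + 5));
	insn->op = 0xe9;                            /* jmp rel32 opcode */
	printf("e9 rel32=%#x\n", (unsigned)insn->raddr); /* 0xffb = 0x1000 - 5 */
	return 0;
}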
25784diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25785index 7c523bb..01b051b 100644
25786--- a/arch/x86/kernel/kprobes/opt.c
25787+++ b/arch/x86/kernel/kprobes/opt.c
25788@@ -79,6 +79,7 @@ found:
25789 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25790 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25791 {
25792+ pax_open_kernel();
25793 #ifdef CONFIG_X86_64
25794 *addr++ = 0x48;
25795 *addr++ = 0xbf;
25796@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25797 *addr++ = 0xb8;
25798 #endif
25799 *(unsigned long *)addr = val;
25800+ pax_close_kernel();
25801 }
25802
25803 asm (
25804@@ -339,7 +341,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25805 * Verify if the address gap is in 2GB range, because this uses
25806 * a relative jump.
25807 */
25808- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25809+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25810 if (abs(rel) > 0x7fffffff) {
25811 __arch_remove_optimized_kprobe(op, 0);
25812 return -ERANGE;
25813@@ -356,16 +358,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25814 op->optinsn.size = ret;
25815
25816 /* Copy arch-dep-instance from template */
25817- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25818+ pax_open_kernel();
25819+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25820+ pax_close_kernel();
25821
25822 /* Set probe information */
25823 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25824
25825 /* Set probe function call */
25826- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25827+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25828
25829 /* Set returning jmp instruction at the tail of out-of-line buffer */
25830- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25831+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25832 (u8 *)op->kp.addr + op->optinsn.size);
25833
25834 flush_icache_range((unsigned long) buf,
25835@@ -390,7 +394,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
25836 WARN_ON(kprobe_disabled(&op->kp));
25837
25838 /* Backup instructions which will be replaced by jump address */
25839- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25840+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25841 RELATIVE_ADDR_SIZE);
25842
25843 insn_buf[0] = RELATIVEJUMP_OPCODE;
25844@@ -438,7 +442,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25845 /* This kprobe is really able to run optimized path. */
25846 op = container_of(p, struct optimized_kprobe, kp);
25847 /* Detour through copied instructions */
25848- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25849+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25850 if (!reenter)
25851 reset_current_kprobe();
25852 preempt_enable_no_resched();
25853diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
25854index c2bedae..25e7ab60 100644
25855--- a/arch/x86/kernel/ksysfs.c
25856+++ b/arch/x86/kernel/ksysfs.c
25857@@ -184,7 +184,7 @@ out:
25858
25859 static struct kobj_attribute type_attr = __ATTR_RO(type);
25860
25861-static struct bin_attribute data_attr = {
25862+static bin_attribute_no_const data_attr __read_only = {
25863 .attr = {
25864 .name = "data",
25865 .mode = S_IRUGO,
25866diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25867index c37886d..d851d32 100644
25868--- a/arch/x86/kernel/ldt.c
25869+++ b/arch/x86/kernel/ldt.c
25870@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25871 if (reload) {
25872 #ifdef CONFIG_SMP
25873 preempt_disable();
25874- load_LDT(pc);
25875+ load_LDT_nolock(pc);
25876 if (!cpumask_equal(mm_cpumask(current->mm),
25877 cpumask_of(smp_processor_id())))
25878 smp_call_function(flush_ldt, current->mm, 1);
25879 preempt_enable();
25880 #else
25881- load_LDT(pc);
25882+ load_LDT_nolock(pc);
25883 #endif
25884 }
25885 if (oldsize) {
25886@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
25887 return err;
25888
25889 for (i = 0; i < old->size; i++)
25890- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
25891+ write_ldt_entry(new->ldt, i, old->ldt + i);
25892 return 0;
25893 }
25894
25895@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
25896 retval = copy_ldt(&mm->context, &old_mm->context);
25897 mutex_unlock(&old_mm->context.lock);
25898 }
25899+
25900+ if (tsk == current) {
25901+ mm->context.vdso = 0;
25902+
25903+#ifdef CONFIG_X86_32
25904+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25905+ mm->context.user_cs_base = 0UL;
25906+ mm->context.user_cs_limit = ~0UL;
25907+
25908+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
25909+ cpus_clear(mm->context.cpu_user_cs_mask);
25910+#endif
25911+
25912+#endif
25913+#endif
25914+
25915+ }
25916+
25917 return retval;
25918 }
25919
25920@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
25921 }
25922 }
25923
25924+#ifdef CONFIG_PAX_SEGMEXEC
25925+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
25926+ error = -EINVAL;
25927+ goto out_unlock;
25928+ }
25929+#endif
25930+
25931 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
25932 error = -EINVAL;
25933 goto out_unlock;
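Under PAX_SEGMEXEC the new write_ldt() check rejects any LDT descriptor marked as code. A userspace probe via modify_ldt(2); on a SEGMEXEC-enabled task this should come back -EINVAL per the hunk, while elsewhere it normally succeeds:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>

int main(void)
{
	struct user_desc d;

	memset(&d, 0, sizeof(d));
	d.entry_number   = 0;
	d.limit          = 0xfffff;
	d.seg_32bit      = 1;     /* also required by the X86_16BIT check above */
	d.limit_in_pages = 1;
	d.useable        = 1;
	d.contents       = MODIFY_LDT_CONTENTS_CODE; /* code: rejected on SEGMEXEC */

	if (syscall(SYS_modify_ldt, 1, &d, sizeof(d)) < 0)
		printf("modify_ldt: %s\n", strerror(errno));
	else
		printf("LDT code segment installed\n");
	return 0;
}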
25934diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
25935index 469b23d..5449cfe 100644
25936--- a/arch/x86/kernel/machine_kexec_32.c
25937+++ b/arch/x86/kernel/machine_kexec_32.c
25938@@ -26,7 +26,7 @@
25939 #include <asm/cacheflush.h>
25940 #include <asm/debugreg.h>
25941
25942-static void set_idt(void *newidt, __u16 limit)
25943+static void set_idt(struct desc_struct *newidt, __u16 limit)
25944 {
25945 struct desc_ptr curidt;
25946
25947@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
25948 }
25949
25950
25951-static void set_gdt(void *newgdt, __u16 limit)
25952+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
25953 {
25954 struct desc_ptr curgdt;
25955
25956@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
25957 }
25958
25959 control_page = page_address(image->control_code_page);
25960- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
25961+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
25962
25963 relocate_kernel_ptr = control_page;
25964 page_list[PA_CONTROL_PAGE] = __pa(control_page);
25965diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
25966index 94ea120..4154cea 100644
25967--- a/arch/x86/kernel/mcount_64.S
25968+++ b/arch/x86/kernel/mcount_64.S
25969@@ -7,7 +7,7 @@
25970 #include <linux/linkage.h>
25971 #include <asm/ptrace.h>
25972 #include <asm/ftrace.h>
25973-
25974+#include <asm/alternative-asm.h>
25975
25976 .code64
25977 .section .entry.text, "ax"
25978@@ -148,8 +148,9 @@
25979 #ifdef CONFIG_DYNAMIC_FTRACE
25980
25981 ENTRY(function_hook)
25982+ pax_force_retaddr
25983 retq
25984-END(function_hook)
25985+ENDPROC(function_hook)
25986
25987 ENTRY(ftrace_caller)
25988 /* save_mcount_regs fills in first two parameters */
25989@@ -181,8 +182,9 @@ GLOBAL(ftrace_graph_call)
25990 #endif
25991
25992 GLOBAL(ftrace_stub)
25993+ pax_force_retaddr
25994 retq
25995-END(ftrace_caller)
25996+ENDPROC(ftrace_caller)
25997
25998 ENTRY(ftrace_regs_caller)
25999 /* Save the current flags before any operations that can change them */
26000@@ -253,7 +255,7 @@ GLOBAL(ftrace_regs_caller_end)
26001
26002 jmp ftrace_return
26003
26004-END(ftrace_regs_caller)
26005+ENDPROC(ftrace_regs_caller)
26006
26007
26008 #else /* ! CONFIG_DYNAMIC_FTRACE */
26009@@ -272,18 +274,20 @@ fgraph_trace:
26010 #endif
26011
26012 GLOBAL(ftrace_stub)
26013+ pax_force_retaddr
26014 retq
26015
26016 trace:
26017 /* save_mcount_regs fills in first two parameters */
26018 save_mcount_regs
26019
26020+ pax_force_fptr ftrace_trace_function
26021 call *ftrace_trace_function
26022
26023 restore_mcount_regs
26024
26025 jmp fgraph_trace
26026-END(function_hook)
26027+ENDPROC(function_hook)
26028 #endif /* CONFIG_DYNAMIC_FTRACE */
26029 #endif /* CONFIG_FUNCTION_TRACER */
26030
26031@@ -305,8 +309,9 @@ ENTRY(ftrace_graph_caller)
26032
26033 restore_mcount_regs
26034
26035+ pax_force_retaddr
26036 retq
26037-END(ftrace_graph_caller)
26038+ENDPROC(ftrace_graph_caller)
26039
26040 GLOBAL(return_to_handler)
26041 subq $24, %rsp
26042@@ -322,5 +327,7 @@ GLOBAL(return_to_handler)
26043 movq 8(%rsp), %rdx
26044 movq (%rsp), %rax
26045 addq $24, %rsp
26046+ pax_force_fptr %rdi
26047 jmp *%rdi
26048+ENDPROC(return_to_handler)
26049 #endif
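The added pax_force_retaddr / "btsq $63,(%rsp)" pins the top bit of the saved return address, so under the KERNEXEC GCC plugin's convention a corrupted return can only resolve to the kernel half of the address space. The bit operation on its own:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ret = 0x00007f00deadbeefULL;  /* forged userland return target */

	ret |= 1ULL << 63;                     /* btsq $63,(%rsp) equivalent */
	printf("%#llx\n", (unsigned long long)ret); /* forced into the kernel half */
	return 0;
}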
26050diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
26051index e69f988..72902b7 100644
26052--- a/arch/x86/kernel/module.c
26053+++ b/arch/x86/kernel/module.c
26054@@ -81,17 +81,62 @@ static unsigned long int get_module_load_offset(void)
26055 }
26056 #endif
26057
26058-void *module_alloc(unsigned long size)
26059+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
26060 {
26061- if (PAGE_ALIGN(size) > MODULES_LEN)
26062+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
26063 return NULL;
26064 return __vmalloc_node_range(size, 1,
26065 MODULES_VADDR + get_module_load_offset(),
26066- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
26067- PAGE_KERNEL_EXEC, NUMA_NO_NODE,
26068+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
26069+ prot, NUMA_NO_NODE,
26070 __builtin_return_address(0));
26071 }
26072
26073+void *module_alloc(unsigned long size)
26074+{
26075+
26076+#ifdef CONFIG_PAX_KERNEXEC
26077+ return __module_alloc(size, PAGE_KERNEL);
26078+#else
26079+ return __module_alloc(size, PAGE_KERNEL_EXEC);
26080+#endif
26081+
26082+}
26083+
26084+#ifdef CONFIG_PAX_KERNEXEC
26085+#ifdef CONFIG_X86_32
26086+void *module_alloc_exec(unsigned long size)
26087+{
26088+ struct vm_struct *area;
26089+
26090+ if (size == 0)
26091+ return NULL;
26092+
26093+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
26094+ return area ? area->addr : NULL;
26095+}
26096+EXPORT_SYMBOL(module_alloc_exec);
26097+
26098+void module_memfree_exec(void *module_region)
26099+{
26100+ vunmap(module_region);
26101+}
26102+EXPORT_SYMBOL(module_memfree_exec);
26103+#else
26104+void module_memfree_exec(void *module_region)
26105+{
26106+ module_memfree(module_region);
26107+}
26108+EXPORT_SYMBOL(module_memfree_exec);
26109+
26110+void *module_alloc_exec(unsigned long size)
26111+{
26112+ return __module_alloc(size, PAGE_KERNEL_RX);
26113+}
26114+EXPORT_SYMBOL(module_alloc_exec);
26115+#endif
26116+#endif
26117+
26118 #ifdef CONFIG_X86_32
26119 int apply_relocate(Elf32_Shdr *sechdrs,
26120 const char *strtab,
26121@@ -102,14 +147,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26122 unsigned int i;
26123 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26124 Elf32_Sym *sym;
26125- uint32_t *location;
26126+ uint32_t *plocation, location;
26127
26128 DEBUGP("Applying relocate section %u to %u\n",
26129 relsec, sechdrs[relsec].sh_info);
26130 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26131 /* This is where to make the change */
26132- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26133- + rel[i].r_offset;
26134+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26135+ location = (uint32_t)plocation;
26136+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26137+ plocation = ktla_ktva((void *)plocation);
26138 /* This is the symbol it is referring to. Note that all
26139 undefined symbols have been resolved. */
26140 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26141@@ -118,11 +165,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26142 switch (ELF32_R_TYPE(rel[i].r_info)) {
26143 case R_386_32:
26144 /* We add the value into the location given */
26145- *location += sym->st_value;
26146+ pax_open_kernel();
26147+ *plocation += sym->st_value;
26148+ pax_close_kernel();
26149 break;
26150 case R_386_PC32:
26151 /* Add the value, subtract its position */
26152- *location += sym->st_value - (uint32_t)location;
26153+ pax_open_kernel();
26154+ *plocation += sym->st_value - location;
26155+ pax_close_kernel();
26156 break;
26157 default:
26158 pr_err("%s: Unknown relocation: %u\n",
26159@@ -167,21 +218,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26160 case R_X86_64_NONE:
26161 break;
26162 case R_X86_64_64:
26163+ pax_open_kernel();
26164 *(u64 *)loc = val;
26165+ pax_close_kernel();
26166 break;
26167 case R_X86_64_32:
26168+ pax_open_kernel();
26169 *(u32 *)loc = val;
26170+ pax_close_kernel();
26171 if (val != *(u32 *)loc)
26172 goto overflow;
26173 break;
26174 case R_X86_64_32S:
26175+ pax_open_kernel();
26176 *(s32 *)loc = val;
26177+ pax_close_kernel();
26178 if ((s64)val != *(s32 *)loc)
26179 goto overflow;
26180 break;
26181 case R_X86_64_PC32:
26182 val -= (u64)loc;
26183+ pax_open_kernel();
26184 *(u32 *)loc = val;
26185+ pax_close_kernel();
26186+
26187 #if 0
26188 if ((s64)val != *(s32 *)loc)
26189 goto overflow;
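The reworked R_386_PC32 case applies the usual S + A - P relocation, but writes through plocation (the ktla_ktva()-translated pointer) while the subtracted location stays the untranslated address, so the stored displacement is computed against where the code will actually run. The arithmetic on its own, with made-up addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t P = 0xd0800010;   /* address of the 32-bit field ("location") */
	uint32_t S = 0xd0900000;   /* resolved symbol value (sym->st_value)    */
	uint32_t A = 0xfffffffc;   /* addend already in the field: -4          */

	uint32_t field = A + (S - P);  /* *plocation += sym->st_value - location */
	printf("field=%#x, target-next_insn=%#x\n", field, S - (P + 4));
	return 0;
}

Both printed values are 0xfffec, confirming the stored field ends up as target minus the address of the following instruction.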
26190diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26191index 113e707..0a690e1 100644
26192--- a/arch/x86/kernel/msr.c
26193+++ b/arch/x86/kernel/msr.c
26194@@ -39,6 +39,7 @@
26195 #include <linux/notifier.h>
26196 #include <linux/uaccess.h>
26197 #include <linux/gfp.h>
26198+#include <linux/grsecurity.h>
26199
26200 #include <asm/processor.h>
26201 #include <asm/msr.h>
26202@@ -105,6 +106,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26203 int err = 0;
26204 ssize_t bytes = 0;
26205
26206+#ifdef CONFIG_GRKERNSEC_KMEM
26207+ gr_handle_msr_write();
26208+ return -EPERM;
26209+#endif
26210+
26211 if (count % 8)
26212 return -EINVAL; /* Invalid chunk size */
26213
26214@@ -152,6 +158,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26215 err = -EBADF;
26216 break;
26217 }
26218+#ifdef CONFIG_GRKERNSEC_KMEM
26219+ gr_handle_msr_write();
26220+ return -EPERM;
26221+#endif
26222 if (copy_from_user(&regs, uregs, sizeof regs)) {
26223 err = -EFAULT;
26224 break;
26225@@ -235,7 +245,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26226 return notifier_from_errno(err);
26227 }
26228
26229-static struct notifier_block __refdata msr_class_cpu_notifier = {
26230+static struct notifier_block msr_class_cpu_notifier = {
26231 .notifier_call = msr_class_cpu_callback,
26232 };
26233
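With CONFIG_GRKERNSEC_KMEM, msr_write() and the ioctl write path above now log via gr_handle_msr_write() and return -EPERM before touching any register. A userspace probe through the msr driver's chardev (the device path and offset-selects-the-MSR convention follow the stock msr driver; don't run this as an actual write on a box you care about):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdint.h>

int main(void)
{
	uint64_t val = 0;
	int fd = open("/dev/cpu/0/msr", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* offset selects the MSR; count must be a multiple of 8 */
	if (pwrite(fd, &val, sizeof(val), 0x1a0 /* IA32_MISC_ENABLE */) < 0)
		printf("msr write: %s\n", strerror(errno)); /* EPERM per the hunk */
	close(fd);
	return 0;
}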
26234diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26235index c3e985d..110a36a 100644
26236--- a/arch/x86/kernel/nmi.c
26237+++ b/arch/x86/kernel/nmi.c
26238@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26239
26240 static void nmi_max_handler(struct irq_work *w)
26241 {
26242- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26243+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26244 int remainder_ns, decimal_msecs;
26245- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26246+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26247
26248 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26249 decimal_msecs = remainder_ns / 1000;
26250
26251 printk_ratelimited(KERN_INFO
26252 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26253- a->handler, whole_msecs, decimal_msecs);
26254+ n->action->handler, whole_msecs, decimal_msecs);
26255 }
26256
26257 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26258@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26259 delta = sched_clock() - delta;
26260 trace_nmi_handler(a->handler, (int)delta, thishandled);
26261
26262- if (delta < nmi_longest_ns || delta < a->max_duration)
26263+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26264 continue;
26265
26266- a->max_duration = delta;
26267- irq_work_queue(&a->irq_work);
26268+ a->work->max_duration = delta;
26269+ irq_work_queue(&a->work->irq_work);
26270 }
26271
26272 rcu_read_unlock();
26273@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26274 }
26275 NOKPROBE_SYMBOL(nmi_handle);
26276
26277-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26278+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26279 {
26280 struct nmi_desc *desc = nmi_to_desc(type);
26281 unsigned long flags;
26282@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26283 if (!action->handler)
26284 return -EINVAL;
26285
26286- init_irq_work(&action->irq_work, nmi_max_handler);
26287+ action->work->action = action;
26288+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26289
26290 spin_lock_irqsave(&desc->lock, flags);
26291
26292@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26293 * event confuses some handlers (kdump uses this flag)
26294 */
26295 if (action->flags & NMI_FLAG_FIRST)
26296- list_add_rcu(&action->list, &desc->head);
26297+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26298 else
26299- list_add_tail_rcu(&action->list, &desc->head);
26300+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26301
26302 spin_unlock_irqrestore(&desc->lock, flags);
26303 return 0;
26304@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26305 if (!strcmp(n->name, name)) {
26306 WARN(in_nmi(),
26307 "Trying to free NMI (%s) from NMI context!\n", n->name);
26308- list_del_rcu(&n->list);
26309+ pax_list_del_rcu((struct list_head *)&n->list);
26310 break;
26311 }
26312 }
26313@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26314 dotraplinkage notrace void
26315 do_nmi(struct pt_regs *regs, long error_code)
26316 {
26317+
26318+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26319+ if (!user_mode(regs)) {
26320+ unsigned long cs = regs->cs & 0xFFFF;
26321+ unsigned long ip = ktva_ktla(regs->ip);
26322+
26323+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26324+ regs->ip = ip;
26325+ }
26326+#endif
26327+
26328 nmi_nesting_preprocess(regs);
26329
26330 nmi_enter();
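The nmi.c hunks split struct nmiaction's mutable timing fields (irq_work, max_duration) into a separate struct nmiwork, so the registration record itself can be passed and stored as const. A reduced, self-contained model of that const-outer/mutable-inner layout (stand-in types, not the kernel's):

#include <stdio.h>

struct nmiwork {
	unsigned long long max_duration;   /* mutable: updated from nmi_handle() */
};

struct nmiaction {
	const char     *name;
	struct nmiwork *work;              /* const outer record, mutable inner */
};

static struct nmiwork test_work;
static const struct nmiaction test_action = { "nmi_selftest", &test_work };

int main(void)
{
	test_action.work->max_duration = 42;   /* fine: only *work is written */
	printf("%s: %llu\n", test_action.name, test_action.work->max_duration);
	return 0;
}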
26331diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26332index 6d9582e..f746287 100644
26333--- a/arch/x86/kernel/nmi_selftest.c
26334+++ b/arch/x86/kernel/nmi_selftest.c
26335@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26336 {
26337 /* trap all the unknown NMIs we may generate */
26338 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26339- __initdata);
26340+ __initconst);
26341 }
26342
26343 static void __init cleanup_nmi_testsuite(void)
26344@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26345 unsigned long timeout;
26346
26347 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26348- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26349+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26350 nmi_fail = FAILURE;
26351 return;
26352 }
26353diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26354index bbb6c73..24a58ef 100644
26355--- a/arch/x86/kernel/paravirt-spinlocks.c
26356+++ b/arch/x86/kernel/paravirt-spinlocks.c
26357@@ -8,7 +8,7 @@
26358
26359 #include <asm/paravirt.h>
26360
26361-struct pv_lock_ops pv_lock_ops = {
26362+struct pv_lock_ops pv_lock_ops __read_only = {
26363 #ifdef CONFIG_SMP
26364 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26365 .unlock_kick = paravirt_nop,
26366diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26367index 548d25f..f8fb99c 100644
26368--- a/arch/x86/kernel/paravirt.c
26369+++ b/arch/x86/kernel/paravirt.c
26370@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26371 {
26372 return x;
26373 }
26374+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26375+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26376+#endif
26377
26378 void __init default_banner(void)
26379 {
26380@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26381
26382 if (opfunc == NULL)
26383 /* If there's no function, patch it with a ud2a (BUG) */
26384- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26385- else if (opfunc == _paravirt_nop)
26386+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26387+ else if (opfunc == (void *)_paravirt_nop)
26388 /* If the operation is a nop, then nop the callsite */
26389 ret = paravirt_patch_nop();
26390
26391 /* identity functions just return their single argument */
26392- else if (opfunc == _paravirt_ident_32)
26393+ else if (opfunc == (void *)_paravirt_ident_32)
26394 ret = paravirt_patch_ident_32(insnbuf, len);
26395- else if (opfunc == _paravirt_ident_64)
26396+ else if (opfunc == (void *)_paravirt_ident_64)
26397 ret = paravirt_patch_ident_64(insnbuf, len);
26398+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26399+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26400+ ret = paravirt_patch_ident_64(insnbuf, len);
26401+#endif
26402
26403 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26404 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26405@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26406 if (insn_len > len || start == NULL)
26407 insn_len = len;
26408 else
26409- memcpy(insnbuf, start, insn_len);
26410+ memcpy(insnbuf, ktla_ktva(start), insn_len);
26411
26412 return insn_len;
26413 }
26414@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26415 return this_cpu_read(paravirt_lazy_mode);
26416 }
26417
26418-struct pv_info pv_info = {
26419+struct pv_info pv_info __read_only = {
26420 .name = "bare hardware",
26421 .paravirt_enabled = 0,
26422 .kernel_rpl = 0,
26423@@ -311,16 +318,16 @@ struct pv_info pv_info = {
26424 #endif
26425 };
26426
26427-struct pv_init_ops pv_init_ops = {
26428+struct pv_init_ops pv_init_ops __read_only = {
26429 .patch = native_patch,
26430 };
26431
26432-struct pv_time_ops pv_time_ops = {
26433+struct pv_time_ops pv_time_ops __read_only = {
26434 .sched_clock = native_sched_clock,
26435 .steal_clock = native_steal_clock,
26436 };
26437
26438-__visible struct pv_irq_ops pv_irq_ops = {
26439+__visible struct pv_irq_ops pv_irq_ops __read_only = {
26440 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
26441 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
26442 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
26443@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
26444 #endif
26445 };
26446
26447-__visible struct pv_cpu_ops pv_cpu_ops = {
26448+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
26449 .cpuid = native_cpuid,
26450 .get_debugreg = native_get_debugreg,
26451 .set_debugreg = native_set_debugreg,
26452@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
26453 NOKPROBE_SYMBOL(native_set_debugreg);
26454 NOKPROBE_SYMBOL(native_load_idt);
26455
26456-struct pv_apic_ops pv_apic_ops = {
26457+struct pv_apic_ops pv_apic_ops __read_only = {
26458 #ifdef CONFIG_X86_LOCAL_APIC
26459 .startup_ipi_hook = paravirt_nop,
26460 #endif
26461 };
26462
26463-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
26464+#ifdef CONFIG_X86_32
26465+#ifdef CONFIG_X86_PAE
26466+/* 64-bit pagetable entries */
26467+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
26468+#else
26469 /* 32-bit pagetable entries */
26470 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
26471+#endif
26472 #else
26473 /* 64-bit pagetable entries */
26474 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
26475 #endif
26476
26477-struct pv_mmu_ops pv_mmu_ops = {
26478+struct pv_mmu_ops pv_mmu_ops __read_only = {
26479
26480 .read_cr2 = native_read_cr2,
26481 .write_cr2 = native_write_cr2,
26482@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
26483 .make_pud = PTE_IDENT,
26484
26485 .set_pgd = native_set_pgd,
26486+ .set_pgd_batched = native_set_pgd_batched,
26487 #endif
26488 #endif /* PAGETABLE_LEVELS >= 3 */
26489
26490@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
26491 },
26492
26493 .set_fixmap = native_set_fixmap,
26494+
26495+#ifdef CONFIG_PAX_KERNEXEC
26496+ .pax_open_kernel = native_pax_open_kernel,
26497+ .pax_close_kernel = native_pax_close_kernel,
26498+#endif
26499+
26500 };
26501
26502 EXPORT_SYMBOL_GPL(pv_time_ops);
26503diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
26504index a1da673..b6f5831 100644
26505--- a/arch/x86/kernel/paravirt_patch_64.c
26506+++ b/arch/x86/kernel/paravirt_patch_64.c
26507@@ -9,7 +9,11 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
26508 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
26509 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
26510 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
26511+
26512+#ifndef CONFIG_PAX_MEMORY_UDEREF
26513 DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
26514+#endif
26515+
26516 DEF_NATIVE(pv_cpu_ops, clts, "clts");
26517 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
26518
26519@@ -57,7 +61,11 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
26520 PATCH_SITE(pv_mmu_ops, read_cr3);
26521 PATCH_SITE(pv_mmu_ops, write_cr3);
26522 PATCH_SITE(pv_cpu_ops, clts);
26523+
26524+#ifndef CONFIG_PAX_MEMORY_UDEREF
26525 PATCH_SITE(pv_mmu_ops, flush_tlb_single);
26526+#endif
26527+
26528 PATCH_SITE(pv_cpu_ops, wbinvd);
26529
26530 patch_site:
26531diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
26532index 0497f71..7186c0d 100644
26533--- a/arch/x86/kernel/pci-calgary_64.c
26534+++ b/arch/x86/kernel/pci-calgary_64.c
26535@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
26536 tce_space = be64_to_cpu(readq(target));
26537 tce_space = tce_space & TAR_SW_BITS;
26538
26539- tce_space = tce_space & (~specified_table_size);
26540+ tce_space = tce_space & (~(unsigned long)specified_table_size);
26541 info->tce_space = (u64 *)__va(tce_space);
26542 }
26543 }
26544diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
26545index 35ccf75..7a15747 100644
26546--- a/arch/x86/kernel/pci-iommu_table.c
26547+++ b/arch/x86/kernel/pci-iommu_table.c
26548@@ -2,7 +2,7 @@
26549 #include <asm/iommu_table.h>
26550 #include <linux/string.h>
26551 #include <linux/kallsyms.h>
26552-
26553+#include <linux/sched.h>
26554
26555 #define DEBUG 1
26556
26557diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
26558index 77dd0ad..9ec4723 100644
26559--- a/arch/x86/kernel/pci-swiotlb.c
26560+++ b/arch/x86/kernel/pci-swiotlb.c
26561@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
26562 struct dma_attrs *attrs)
26563 {
26564 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
26565- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
26566+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
26567 else
26568 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
26569 }
26570diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
26571index e127dda..94e384d 100644
26572--- a/arch/x86/kernel/process.c
26573+++ b/arch/x86/kernel/process.c
26574@@ -36,7 +36,8 @@
26575 * section. Since TSS's are completely CPU-local, we want them
26576 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
26577 */
26578-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
26579+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
26580+EXPORT_SYMBOL(init_tss);
26581
26582 #ifdef CONFIG_X86_64
26583 static DEFINE_PER_CPU(unsigned char, is_idle);
26584@@ -94,7 +95,7 @@ void arch_task_cache_init(void)
26585 task_xstate_cachep =
26586 kmem_cache_create("task_xstate", xstate_size,
26587 __alignof__(union thread_xstate),
26588- SLAB_PANIC | SLAB_NOTRACK, NULL);
26589+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
26590 setup_xstate_comp();
26591 }
26592
26593@@ -108,7 +109,7 @@ void exit_thread(void)
26594 unsigned long *bp = t->io_bitmap_ptr;
26595
26596 if (bp) {
26597- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
26598+ struct tss_struct *tss = init_tss + get_cpu();
26599
26600 t->io_bitmap_ptr = NULL;
26601 clear_thread_flag(TIF_IO_BITMAP);
26602@@ -128,6 +129,9 @@ void flush_thread(void)
26603 {
26604 struct task_struct *tsk = current;
26605
26606+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
26607+ loadsegment(gs, 0);
26608+#endif
26609 flush_ptrace_hw_breakpoint(tsk);
26610 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
26611 drop_init_fpu(tsk);
26612@@ -274,7 +278,7 @@ static void __exit_idle(void)
26613 void exit_idle(void)
26614 {
26615 /* idle loop has pid 0 */
26616- if (current->pid)
26617+ if (task_pid_nr(current))
26618 return;
26619 __exit_idle();
26620 }
26621@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
26622 return ret;
26623 }
26624 #endif
26625-void stop_this_cpu(void *dummy)
26626+__noreturn void stop_this_cpu(void *dummy)
26627 {
26628 local_irq_disable();
26629 /*
26630@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
26631 }
26632 early_param("idle", idle_setup);
26633
26634-unsigned long arch_align_stack(unsigned long sp)
26635+#ifdef CONFIG_PAX_RANDKSTACK
26636+void pax_randomize_kstack(struct pt_regs *regs)
26637 {
26638- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26639- sp -= get_random_int() % 8192;
26640- return sp & ~0xf;
26641-}
26642+ struct thread_struct *thread = &current->thread;
26643+ unsigned long time;
26644
26645-unsigned long arch_randomize_brk(struct mm_struct *mm)
26646-{
26647- unsigned long range_end = mm->brk + 0x02000000;
26648- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26649-}
26650+ if (!randomize_va_space)
26651+ return;
26652+
26653+ if (v8086_mode(regs))
26654+ return;
26655
26656+ rdtscl(time);
26657+
26658+ /* P4 seems to return a 0 LSB, ignore it */
26659+#ifdef CONFIG_MPENTIUM4
26660+ time &= 0x3EUL;
26661+ time <<= 2;
26662+#elif defined(CONFIG_X86_64)
26663+ time &= 0xFUL;
26664+ time <<= 4;
26665+#else
26666+ time &= 0x1FUL;
26667+ time <<= 3;
26668+#endif
26669+
26670+ thread->sp0 ^= time;
26671+ load_sp0(init_tss + smp_processor_id(), thread);
26672+
26673+#ifdef CONFIG_X86_64
26674+ this_cpu_write(kernel_stack, thread->sp0);
26675+#endif
26676+}
26677+#endif
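pax_randomize_kstack() XORs a handful of TSC bits into thread->sp0 on each return to userland; the three mask/shift variants differ only in which bits survive and what stack alignment is preserved. The masks worked through in plain C (the sample TSC value is made up):

#include <stdio.h>

int main(void)
{
	unsigned long t = 0xdeadbeefUL;        /* stand-in rdtscl() sample */

	unsigned long p4  = (t & 0x3EUL) << 2; /* MPENTIUM4: skip the 0 LSB */
	unsigned long x64 = (t & 0x0FUL) << 4; /* x86_64: 16-byte aligned   */
	unsigned long x32 = (t & 0x1FUL) << 3; /* i386:   8-byte aligned    */

	printf("p4=%#lx x64=%#lx x32=%#lx (ranges 0-0xf8, 0-0xf0, 0-0xf8)\n",
	       p4, x64, x32);
	return 0;
}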
26678diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
26679index 8f3ebfe..cbc731b 100644
26680--- a/arch/x86/kernel/process_32.c
26681+++ b/arch/x86/kernel/process_32.c
26682@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
26683 unsigned long thread_saved_pc(struct task_struct *tsk)
26684 {
26685 return ((unsigned long *)tsk->thread.sp)[3];
26686+//XXX return tsk->thread.eip;
26687 }
26688
26689 void __show_regs(struct pt_regs *regs, int all)
26690@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
26691 unsigned long sp;
26692 unsigned short ss, gs;
26693
26694- if (user_mode_vm(regs)) {
26695+ if (user_mode(regs)) {
26696 sp = regs->sp;
26697 ss = regs->ss & 0xffff;
26698- gs = get_user_gs(regs);
26699 } else {
26700 sp = kernel_stack_pointer(regs);
26701 savesegment(ss, ss);
26702- savesegment(gs, gs);
26703 }
26704+ gs = get_user_gs(regs);
26705
26706 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
26707 (u16)regs->cs, regs->ip, regs->flags,
26708- smp_processor_id());
26709+ raw_smp_processor_id());
26710 print_symbol("EIP is at %s\n", regs->ip);
26711
26712 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26713@@ -132,21 +132,22 @@ void release_thread(struct task_struct *dead_task)
26714 int copy_thread(unsigned long clone_flags, unsigned long sp,
26715 unsigned long arg, struct task_struct *p)
26716 {
26717- struct pt_regs *childregs = task_pt_regs(p);
26718+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
26719 struct task_struct *tsk;
26720 int err;
26721
26722 p->thread.sp = (unsigned long) childregs;
26723 p->thread.sp0 = (unsigned long) (childregs+1);
26724+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26725 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26726
26727 if (unlikely(p->flags & PF_KTHREAD)) {
26728 /* kernel thread */
26729 memset(childregs, 0, sizeof(struct pt_regs));
26730 p->thread.ip = (unsigned long) ret_from_kernel_thread;
26731- task_user_gs(p) = __KERNEL_STACK_CANARY;
26732- childregs->ds = __USER_DS;
26733- childregs->es = __USER_DS;
26734+ savesegment(gs, childregs->gs);
26735+ childregs->ds = __KERNEL_DS;
26736+ childregs->es = __KERNEL_DS;
26737 childregs->fs = __KERNEL_PERCPU;
26738 childregs->bx = sp; /* function */
26739 childregs->bp = arg;
26740@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26741 struct thread_struct *prev = &prev_p->thread,
26742 *next = &next_p->thread;
26743 int cpu = smp_processor_id();
26744- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26745+ struct tss_struct *tss = init_tss + cpu;
26746 fpu_switch_t fpu;
26747
26748 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
26749@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26750 */
26751 lazy_save_gs(prev->gs);
26752
26753+#ifdef CONFIG_PAX_MEMORY_UDEREF
26754+ __set_fs(task_thread_info(next_p)->addr_limit);
26755+#endif
26756+
26757 /*
26758 * Load the per-thread Thread-Local Storage descriptor.
26759 */
26760@@ -310,9 +315,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26761 */
26762 arch_end_context_switch(next_p);
26763
26764- this_cpu_write(kernel_stack,
26765- (unsigned long)task_stack_page(next_p) +
26766- THREAD_SIZE - KERNEL_STACK_OFFSET);
26767+ this_cpu_write(current_task, next_p);
26768+ this_cpu_write(current_tinfo, &next_p->tinfo);
26769+ this_cpu_write(kernel_stack, next->sp0);
26770
26771 /*
26772 * Restore %gs if needed (which is common)
26773@@ -322,8 +327,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26774
26775 switch_fpu_finish(next_p, fpu);
26776
26777- this_cpu_write(current_task, next_p);
26778-
26779 return prev_p;
26780 }
26781
26782@@ -353,4 +356,3 @@ unsigned long get_wchan(struct task_struct *p)
26783 } while (count++ < 16);
26784 return 0;
26785 }
26786-
26787diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
26788index 5a2c029..ec8611d 100644
26789--- a/arch/x86/kernel/process_64.c
26790+++ b/arch/x86/kernel/process_64.c
26791@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26792 struct pt_regs *childregs;
26793 struct task_struct *me = current;
26794
26795- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
26796+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
26797 childregs = task_pt_regs(p);
26798 p->thread.sp = (unsigned long) childregs;
26799 p->thread.usersp = me->thread.usersp;
26800+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26801 set_tsk_thread_flag(p, TIF_FORK);
26802 p->thread.io_bitmap_ptr = NULL;
26803
26804@@ -171,6 +172,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26805 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
26806 savesegment(es, p->thread.es);
26807 savesegment(ds, p->thread.ds);
26808+ savesegment(ss, p->thread.ss);
26809+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
26810 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26811
26812 if (unlikely(p->flags & PF_KTHREAD)) {
26813@@ -277,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26814 struct thread_struct *prev = &prev_p->thread;
26815 struct thread_struct *next = &next_p->thread;
26816 int cpu = smp_processor_id();
26817- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26818+ struct tss_struct *tss = init_tss + cpu;
26819 unsigned fsindex, gsindex;
26820 fpu_switch_t fpu;
26821
26822@@ -331,6 +334,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26823 if (unlikely(next->ds | prev->ds))
26824 loadsegment(ds, next->ds);
26825
26826+ savesegment(ss, prev->ss);
26827+ if (unlikely(next->ss != prev->ss))
26828+ loadsegment(ss, next->ss);
26829+
26830 /*
26831 * Switch FS and GS.
26832 *
26833@@ -404,6 +411,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26834 prev->usersp = this_cpu_read(old_rsp);
26835 this_cpu_write(old_rsp, next->usersp);
26836 this_cpu_write(current_task, next_p);
26837+ this_cpu_write(current_tinfo, &next_p->tinfo);
26838
26839 /*
26840 * If it were not for PREEMPT_ACTIVE we could guarantee that the
26841@@ -413,9 +421,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26842 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
26843 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
26844
26845- this_cpu_write(kernel_stack,
26846- (unsigned long)task_stack_page(next_p) +
26847- THREAD_SIZE - KERNEL_STACK_OFFSET);
26848+ this_cpu_write(kernel_stack, next->sp0);
26849
26850 /*
26851 * Now maybe reload the debug registers and handle I/O bitmaps
26852@@ -485,12 +491,11 @@ unsigned long get_wchan(struct task_struct *p)
26853 if (!p || p == current || p->state == TASK_RUNNING)
26854 return 0;
26855 stack = (unsigned long)task_stack_page(p);
26856- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
26857+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
26858 return 0;
26859 fp = *(u64 *)(p->thread.sp);
26860 do {
26861- if (fp < (unsigned long)stack ||
26862- fp >= (unsigned long)stack+THREAD_SIZE)
26863+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
26864 return 0;
26865 ip = *(u64 *)(fp+8);
26866 if (!in_sched_functions(ip))
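
The get_wchan() changes above fold the new 16-byte stack pad into the bounds checks, so both the saved stack pointer and every frame pointer must stay within [stack, stack + THREAD_SIZE - 16 - sizeof(u64)] before being dereferenced. A self-contained sketch of that bounded frame-pointer walk over a synthetic stack, assuming the usual { saved bp, return ip } frame layout; the sentinel ip value 0xdead stands in for the !in_sched_functions() test:

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 16384UL

/* Walk fake frames laid out as { prev_bp, return_ip } pairs, refusing
 * any pointer outside [stack, stack + THREAD_SIZE - 16 - 8]. */
static unsigned long walk(unsigned long stack, unsigned long sp)
{
	unsigned long fp, ip;
	int count = 0;

	if (sp < stack || sp > stack + THREAD_SIZE - 16 - sizeof(uint64_t))
		return 0;
	fp = *(uint64_t *)sp;
	do {
		if (fp < stack || fp > stack + THREAD_SIZE - 16 - sizeof(uint64_t))
			return 0;
		ip = *(uint64_t *)(fp + 8);	/* return-address slot */
		if (ip == 0xdead)		/* stand-in for !in_sched_functions(ip) */
			return ip;
		fp = *(uint64_t *)fp;		/* follow the saved frame pointer */
	} while (count++ < 16);
	return 0;
}

int main(void)
{
	static uint64_t stk[THREAD_SIZE / 8];
	unsigned long base = (unsigned long)stk;

	/* one chained frame ending in the "interesting" ip 0xdead */
	stk[10] = base + 40 * 8;		/* sp slot -> first fp */
	stk[40] = 0;  stk[41] = 0xdead;		/* frame: prev_bp, ret ip */
	printf("wchan = %#lx\n", walk(base, base + 10 * 8));
	return 0;
}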
26867diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
26868index e510618..5165ac0 100644
26869--- a/arch/x86/kernel/ptrace.c
26870+++ b/arch/x86/kernel/ptrace.c
26871@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
26872 unsigned long sp = (unsigned long)&regs->sp;
26873 u32 *prev_esp;
26874
26875- if (context == (sp & ~(THREAD_SIZE - 1)))
26876+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
26877 return sp;
26878
26879- prev_esp = (u32 *)(context);
26880+ prev_esp = *(u32 **)(context);
26881 if (prev_esp)
26882 return (unsigned long)prev_esp;
26883
26884@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
26885 if (child->thread.gs != value)
26886 return do_arch_prctl(child, ARCH_SET_GS, value);
26887 return 0;
26888+
26889+ case offsetof(struct user_regs_struct,ip):
26890+ /*
26891+ * Protect against any attempt to set ip to an
26892+ * impossible address. There are dragons lurking if the
26893+ * address is noncanonical. (This explicitly allows
26894+ * setting ip to TASK_SIZE_MAX, because user code can do
26895+ * that all by itself by running off the end of its
26896+ * address space.)
26897+ */
26898+ if (value > TASK_SIZE_MAX)
26899+ return -EIO;
26900+ break;
26901+
26902 #endif
26903 }
26904
26905@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
26906 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
26907 {
26908 int i;
26909- int dr7 = 0;
26910+ unsigned long dr7 = 0;
26911 struct arch_hw_breakpoint *info;
26912
26913 for (i = 0; i < HBP_NUM; i++) {
26914@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
26915 unsigned long addr, unsigned long data)
26916 {
26917 int ret;
26918- unsigned long __user *datap = (unsigned long __user *)data;
26919+ unsigned long __user *datap = (__force unsigned long __user *)data;
26920
26921 switch (request) {
26922 /* read the word at location addr in the USER area. */
26923@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
26924 if ((int) addr < 0)
26925 return -EIO;
26926 ret = do_get_thread_area(child, addr,
26927- (struct user_desc __user *)data);
26928+ (__force struct user_desc __user *) data);
26929 break;
26930
26931 case PTRACE_SET_THREAD_AREA:
26932 if ((int) addr < 0)
26933 return -EIO;
26934 ret = do_set_thread_area(child, addr,
26935- (struct user_desc __user *)data, 0);
26936+ (__force struct user_desc __user *) data, 0);
26937 break;
26938 #endif
26939
26940@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
26941
26942 #ifdef CONFIG_X86_64
26943
26944-static struct user_regset x86_64_regsets[] __read_mostly = {
26945+static user_regset_no_const x86_64_regsets[] __read_only = {
26946 [REGSET_GENERAL] = {
26947 .core_note_type = NT_PRSTATUS,
26948 .n = sizeof(struct user_regs_struct) / sizeof(long),
26949@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
26950 #endif /* CONFIG_X86_64 */
26951
26952 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
26953-static struct user_regset x86_32_regsets[] __read_mostly = {
26954+static user_regset_no_const x86_32_regsets[] __read_only = {
26955 [REGSET_GENERAL] = {
26956 .core_note_type = NT_PRSTATUS,
26957 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
26958@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
26959 */
26960 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
26961
26962-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26963+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26964 {
26965 #ifdef CONFIG_X86_64
26966 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
26967@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
26968 memset(info, 0, sizeof(*info));
26969 info->si_signo = SIGTRAP;
26970 info->si_code = si_code;
26971- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
26972+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
26973 }
26974
26975 void user_single_step_siginfo(struct task_struct *tsk,
26976@@ -1455,6 +1469,10 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
26977 }
26978 }
26979
26980+#ifdef CONFIG_GRKERNSEC_SETXID
26981+extern void gr_delayed_cred_worker(void);
26982+#endif
26983+
26984 /*
26985 * We can return 0 to resume the syscall or anything else to go to phase
26986 * 2. If we resume the syscall, we need to put something appropriate in
26987@@ -1562,6 +1580,11 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
26988
26989 BUG_ON(regs != task_pt_regs(current));
26990
26991+#ifdef CONFIG_GRKERNSEC_SETXID
26992+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26993+ gr_delayed_cred_worker();
26994+#endif
26995+
26996 /*
26997 * If we stepped into a sysenter/syscall insn, it trapped in
26998 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
26999@@ -1620,6 +1643,11 @@ void syscall_trace_leave(struct pt_regs *regs)
27000 */
27001 user_exit();
27002
27003+#ifdef CONFIG_GRKERNSEC_SETXID
27004+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
27005+ gr_delayed_cred_worker();
27006+#endif
27007+
27008 audit_syscall_exit(regs);
27009
27010 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
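
Two of the ptrace.c fixes are easy to exercise in isolation: ptrace_get_dr7() now accumulates into an unsigned long so high DR7 control bits are not truncated, and putreg() rejects any attempt to point ip past TASK_SIZE_MAX, since a noncanonical ip would otherwise fault confusingly on return to user mode. A hedged sketch of the ip rule; the TASK_SIZE_MAX constant below is illustrative only, as the real value depends on the paging mode:

#include <errno.h>
#include <stdio.h>

/* Illustrative value; the kernel's constant depends on the arch. */
#define TASK_SIZE_MAX 0x00007ffffffff000UL

/* Mirror of the putreg() rule: allow anything up to and including
 * TASK_SIZE_MAX, refuse everything beyond it with -EIO. */
static int check_ip(unsigned long value)
{
	if (value > TASK_SIZE_MAX)
		return -EIO;
	return 0;
}

int main(void)
{
	printf("%d\n", check_ip(TASK_SIZE_MAX));	/* 0: allowed */
	printf("%d\n", check_ip(0xffff800000000000UL));	/* -EIO: noncanonical */
	return 0;
}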
27011diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
27012index 2f355d2..e75ed0a 100644
27013--- a/arch/x86/kernel/pvclock.c
27014+++ b/arch/x86/kernel/pvclock.c
27015@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
27016 reset_hung_task_detector();
27017 }
27018
27019-static atomic64_t last_value = ATOMIC64_INIT(0);
27020+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
27021
27022 void pvclock_resume(void)
27023 {
27024- atomic64_set(&last_value, 0);
27025+ atomic64_set_unchecked(&last_value, 0);
27026 }
27027
27028 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
27029@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
27030 * updating at the same time, and one of them could be slightly behind,
27031 * making the assumption that last_value always goes forward fail to hold.
27032 */
27033- last = atomic64_read(&last_value);
27034+ last = atomic64_read_unchecked(&last_value);
27035 do {
27036 if (ret < last)
27037 return last;
27038- last = atomic64_cmpxchg(&last_value, last, ret);
27039+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
27040 } while (unlikely(last != ret));
27041
27042 return ret;
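
The pvclock hunk only swaps the atomics for their PaX _unchecked variants; the underlying algorithm is the classic lock-free "never go backwards" clamp: read the published maximum, return it if the local sample is older, otherwise try to publish the sample and retry on a race. A user-space rendition with C11 atomics, with nothing grsecurity-specific assumed:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t last_value = 0;

/* Return a value that never moves backwards even if concurrent
 * callers sample slightly skewed clocks. */
static uint64_t monotonic_read(uint64_t ret)
{
	uint64_t last = atomic_load(&last_value);

	do {
		if (ret < last)
			return last;	/* someone already saw a later time */
		/* try to publish ret; on failure, last is refreshed */
	} while (!atomic_compare_exchange_weak(&last_value, &last, ret));

	return ret;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)monotonic_read(100));	/* 100 */
	printf("%llu\n", (unsigned long long)monotonic_read(90));	/* 100 */
	return 0;
}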
27043diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
27044index 86db4bc..531675b 100644
27045--- a/arch/x86/kernel/reboot.c
27046+++ b/arch/x86/kernel/reboot.c
27047@@ -70,6 +70,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
27048
27049 void __noreturn machine_real_restart(unsigned int type)
27050 {
27051+
27052+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
27053+ struct desc_struct *gdt;
27054+#endif
27055+
27056 local_irq_disable();
27057
27058 /*
27059@@ -97,7 +102,29 @@ void __noreturn machine_real_restart(unsigned int type)
27060
27061 /* Jump to the identity-mapped low memory code */
27062 #ifdef CONFIG_X86_32
27063- asm volatile("jmpl *%0" : :
27064+
27065+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
27066+ gdt = get_cpu_gdt_table(smp_processor_id());
27067+ pax_open_kernel();
27068+#ifdef CONFIG_PAX_MEMORY_UDEREF
27069+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
27070+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
27071+ loadsegment(ds, __KERNEL_DS);
27072+ loadsegment(es, __KERNEL_DS);
27073+ loadsegment(ss, __KERNEL_DS);
27074+#endif
27075+#ifdef CONFIG_PAX_KERNEXEC
27076+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
27077+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
27078+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
27079+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
27080+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
27081+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
27082+#endif
27083+ pax_close_kernel();
27084+#endif
27085+
27086+ asm volatile("ljmpl *%0" : :
27087 "rm" (real_mode_header->machine_real_restart_asm),
27088 "a" (type));
27089 #else
27090@@ -511,7 +538,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
27091 * This means that this function can never return; it can misbehave
27092 * by not rebooting properly and hanging.
27093 */
27094-static void native_machine_emergency_restart(void)
27095+static void __noreturn native_machine_emergency_restart(void)
27096 {
27097 int i;
27098 int attempt = 0;
27099@@ -631,13 +658,13 @@ void native_machine_shutdown(void)
27100 #endif
27101 }
27102
27103-static void __machine_emergency_restart(int emergency)
27104+static void __noreturn __machine_emergency_restart(int emergency)
27105 {
27106 reboot_emergency = emergency;
27107 machine_ops.emergency_restart();
27108 }
27109
27110-static void native_machine_restart(char *__unused)
27111+static void __noreturn native_machine_restart(char *__unused)
27112 {
27113 pr_notice("machine restart\n");
27114
27115@@ -646,7 +673,7 @@ static void native_machine_restart(char *__unused)
27116 __machine_emergency_restart(0);
27117 }
27118
27119-static void native_machine_halt(void)
27120+static void __noreturn native_machine_halt(void)
27121 {
27122 /* Stop other cpus and apics */
27123 machine_shutdown();
27124@@ -656,7 +683,7 @@ static void native_machine_halt(void)
27125 stop_this_cpu(NULL);
27126 }
27127
27128-static void native_machine_power_off(void)
27129+static void __noreturn native_machine_power_off(void)
27130 {
27131 if (pm_power_off) {
27132 if (!reboot_force)
27133@@ -665,9 +692,10 @@ static void native_machine_power_off(void)
27134 }
27135 /* A fallback in case there is no PM info available */
27136 tboot_shutdown(TB_SHUTDOWN_HALT);
27137+ unreachable();
27138 }
27139
27140-struct machine_ops machine_ops = {
27141+struct machine_ops machine_ops __read_only = {
27142 .power_off = native_machine_power_off,
27143 .shutdown = native_machine_shutdown,
27144 .emergency_restart = native_machine_emergency_restart,
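
The reboot.c changes annotate every terminal path with __noreturn (plus an explicit unreachable() after the tboot fallback) and move machine_ops into read-only memory, so a kernel write primitive can no longer splice in a rogue power-off handler. A compact user-space sketch of the same annotation-plus-const-table pattern; all names here are stand-ins:

#include <stdio.h>
#include <stdlib.h>

/* A halt path that is promised never to return; the attribute lets the
 * compiler prune dead code and diagnose paths that could fall through. */
static void fake_machine_halt(void) __attribute__((noreturn));

static void fake_machine_halt(void)
{
	puts("halting (stand-in for stop_this_cpu())");
	exit(0);
	__builtin_unreachable();	/* mirrors the added unreachable() */
}

/* const table of operations: the function pointers live in .rodata,
 * much like machine_ops __read_only in the hunk above. */
static const struct {
	void (*halt)(void);
} fake_machine_ops = { .halt = fake_machine_halt };

int main(void)
{
	fake_machine_ops.halt();
	return 0;	/* not reached; keeps the compiler quiet */
}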
27145diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27146index c8e41e9..64049ef 100644
27147--- a/arch/x86/kernel/reboot_fixups_32.c
27148+++ b/arch/x86/kernel/reboot_fixups_32.c
27149@@ -57,7 +57,7 @@ struct device_fixup {
27150 unsigned int vendor;
27151 unsigned int device;
27152 void (*reboot_fixup)(struct pci_dev *);
27153-};
27154+} __do_const;
27155
27156 /*
27157 * PCI ids solely used for fixups_table go here
27158diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27159index 3fd2c69..a444264 100644
27160--- a/arch/x86/kernel/relocate_kernel_64.S
27161+++ b/arch/x86/kernel/relocate_kernel_64.S
27162@@ -96,8 +96,7 @@ relocate_kernel:
27163
27164 /* jump to identity mapped page */
27165 addq $(identity_mapped - relocate_kernel), %r8
27166- pushq %r8
27167- ret
27168+ jmp *%r8
27169
27170 identity_mapped:
27171 /* set return address to 0 if not preserving context */
27172diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27173index ab4734e..c4ca0eb 100644
27174--- a/arch/x86/kernel/setup.c
27175+++ b/arch/x86/kernel/setup.c
27176@@ -110,6 +110,7 @@
27177 #include <asm/mce.h>
27178 #include <asm/alternative.h>
27179 #include <asm/prom.h>
27180+#include <asm/boot.h>
27181
27182 /*
27183 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27184@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
27185 #endif
27186
27187
27188-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27189-__visible unsigned long mmu_cr4_features;
27190+#ifdef CONFIG_X86_64
27191+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27192+#elif defined(CONFIG_X86_PAE)
27193+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27194 #else
27195-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27196+__visible unsigned long mmu_cr4_features __read_only;
27197 #endif
27198
27199+void set_in_cr4(unsigned long mask)
27200+{
27201+ unsigned long cr4 = read_cr4();
27202+
27203+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
27204+ return;
27205+
27206+ pax_open_kernel();
27207+ mmu_cr4_features |= mask;
27208+ pax_close_kernel();
27209+
27210+ if (trampoline_cr4_features)
27211+ *trampoline_cr4_features = mmu_cr4_features;
27212+ cr4 |= mask;
27213+ write_cr4(cr4);
27214+}
27215+EXPORT_SYMBOL(set_in_cr4);
27216+
27217+void clear_in_cr4(unsigned long mask)
27218+{
27219+ unsigned long cr4 = read_cr4();
27220+
27221+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
27222+ return;
27223+
27224+ pax_open_kernel();
27225+ mmu_cr4_features &= ~mask;
27226+ pax_close_kernel();
27227+
27228+ if (trampoline_cr4_features)
27229+ *trampoline_cr4_features = mmu_cr4_features;
27230+ cr4 &= ~mask;
27231+ write_cr4(cr4);
27232+}
27233+EXPORT_SYMBOL(clear_in_cr4);
27234+
27235 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27236 int bootloader_type, bootloader_version;
27237
27238@@ -772,7 +811,7 @@ static void __init trim_bios_range(void)
27239 * area (640->1Mb) as ram even though it is not.
27240 * take them out.
27241 */
27242- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27243+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27244
27245 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27246 }
27247@@ -780,7 +819,7 @@ static void __init trim_bios_range(void)
27248 /* called before trim_bios_range() to spare extra sanitize */
27249 static void __init e820_add_kernel_range(void)
27250 {
27251- u64 start = __pa_symbol(_text);
27252+ u64 start = __pa_symbol(ktla_ktva(_text));
27253 u64 size = __pa_symbol(_end) - start;
27254
27255 /*
27256@@ -856,8 +895,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27257
27258 void __init setup_arch(char **cmdline_p)
27259 {
27260+#ifdef CONFIG_X86_32
27261+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27262+#else
27263 memblock_reserve(__pa_symbol(_text),
27264 (unsigned long)__bss_stop - (unsigned long)_text);
27265+#endif
27266
27267 early_reserve_initrd();
27268
27269@@ -955,16 +998,16 @@ void __init setup_arch(char **cmdline_p)
27270
27271 if (!boot_params.hdr.root_flags)
27272 root_mountflags &= ~MS_RDONLY;
27273- init_mm.start_code = (unsigned long) _text;
27274- init_mm.end_code = (unsigned long) _etext;
27275+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27276+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27277 init_mm.end_data = (unsigned long) _edata;
27278 init_mm.brk = _brk_end;
27279
27280 mpx_mm_init(&init_mm);
27281
27282- code_resource.start = __pa_symbol(_text);
27283- code_resource.end = __pa_symbol(_etext)-1;
27284- data_resource.start = __pa_symbol(_etext);
27285+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27286+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27287+ data_resource.start = __pa_symbol(_sdata);
27288 data_resource.end = __pa_symbol(_edata)-1;
27289 bss_resource.start = __pa_symbol(__bss_start);
27290 bss_resource.end = __pa_symbol(__bss_stop)-1;
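
set_in_cr4()/clear_in_cr4() above keep mmu_cr4_features as a read-only shadow of CR4, open it briefly with pax_open_kernel() for the update, and skip the register write entirely when neither the register nor the shadow would change. The skip-if-unchanged shadow pattern, demoed in user space with a fake register; pax_open_kernel() and the trampoline mirror are omitted as PaX-specific:

#include <stdio.h>

static unsigned long fake_cr4;		/* stands in for the real CR4 */
static unsigned long cr4_shadow;	/* stands in for mmu_cr4_features */
static int writes;			/* count "expensive" register writes */

static void write_fake_cr4(unsigned long v) { fake_cr4 = v; writes++; }

static void set_in_cr4(unsigned long mask)
{
	unsigned long cr4 = fake_cr4;

	/* nothing to do if register and shadow both already agree */
	if ((cr4 & mask) == mask && cr4 == cr4_shadow)
		return;

	cr4_shadow |= mask;	/* the kernel opens the shadow for write here */
	write_fake_cr4(cr4 | mask);
}

int main(void)
{
	set_in_cr4(0x20);	/* first call writes */
	set_in_cr4(0x20);	/* second call is skipped */
	printf("cr4=%#lx shadow=%#lx writes=%d\n", fake_cr4, cr4_shadow, writes);
	return 0;
}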
27291diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27292index e4fcb87..9c06c55 100644
27293--- a/arch/x86/kernel/setup_percpu.c
27294+++ b/arch/x86/kernel/setup_percpu.c
27295@@ -21,19 +21,17 @@
27296 #include <asm/cpu.h>
27297 #include <asm/stackprotector.h>
27298
27299-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27300+#ifdef CONFIG_SMP
27301+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27302 EXPORT_PER_CPU_SYMBOL(cpu_number);
27303+#endif
27304
27305-#ifdef CONFIG_X86_64
27306 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27307-#else
27308-#define BOOT_PERCPU_OFFSET 0
27309-#endif
27310
27311 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27312 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27313
27314-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27315+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27316 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27317 };
27318 EXPORT_SYMBOL(__per_cpu_offset);
27319@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27320 {
27321 #ifdef CONFIG_NEED_MULTIPLE_NODES
27322 pg_data_t *last = NULL;
27323- unsigned int cpu;
27324+ int cpu;
27325
27326 for_each_possible_cpu(cpu) {
27327 int node = early_cpu_to_node(cpu);
27328@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27329 {
27330 #ifdef CONFIG_X86_32
27331 struct desc_struct gdt;
27332+ unsigned long base = per_cpu_offset(cpu);
27333
27334- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27335- 0x2 | DESCTYPE_S, 0x8);
27336- gdt.s = 1;
27337+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27338+ 0x83 | DESCTYPE_S, 0xC);
27339 write_gdt_entry(get_cpu_gdt_table(cpu),
27340 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27341 #endif
27342@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27343 /* alrighty, percpu areas up and running */
27344 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27345 for_each_possible_cpu(cpu) {
27346+#ifdef CONFIG_CC_STACKPROTECTOR
27347+#ifdef CONFIG_X86_32
27348+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27349+#endif
27350+#endif
27351 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27352 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27353 per_cpu(cpu_number, cpu) = cpu;
27354@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27355 */
27356 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27357 #endif
27358+#ifdef CONFIG_CC_STACKPROTECTOR
27359+#ifdef CONFIG_X86_32
27360+ if (!cpu)
27361+ per_cpu(stack_canary.canary, cpu) = canary;
27362+#endif
27363+#endif
27364 /*
27365 * Up to this point, the boot CPU has been using .init.data
27366 * area. Reload any changed state for the boot CPU.
27367diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27368index ed37a76..39f936e 100644
27369--- a/arch/x86/kernel/signal.c
27370+++ b/arch/x86/kernel/signal.c
27371@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27372 * Align the stack pointer according to the i386 ABI,
27373 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27374 */
27375- sp = ((sp + 4) & -16ul) - 4;
27376+ sp = ((sp - 12) & -16ul) - 4;
27377 #else /* !CONFIG_X86_32 */
27378 sp = round_down(sp, 16) - 8;
27379 #endif
27380@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27381 }
27382
27383 if (current->mm->context.vdso)
27384- restorer = current->mm->context.vdso +
27385- selected_vdso32->sym___kernel_sigreturn;
27386+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27387 else
27388- restorer = &frame->retcode;
27389+ restorer = (void __user *)&frame->retcode;
27390 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27391 restorer = ksig->ka.sa.sa_restorer;
27392
27393@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27394 * reasons and because gdb uses it as a signature to notice
27395 * signal handler stack frames.
27396 */
27397- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27398+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27399
27400 if (err)
27401 return -EFAULT;
27402@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27403 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27404
27405 /* Set up to return from userspace. */
27406- restorer = current->mm->context.vdso +
27407- selected_vdso32->sym___kernel_rt_sigreturn;
27408+ if (current->mm->context.vdso)
27409+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
27410+ else
27411+ restorer = (void __user *)&frame->retcode;
27412 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27413 restorer = ksig->ka.sa.sa_restorer;
27414 put_user_ex(restorer, &frame->pretcode);
27415@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27416 * reasons and because gdb uses it as a signature to notice
27417 * signal handler stack frames.
27418 */
27419- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27420+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27421 } put_user_catch(err);
27422
27423 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
27424@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27425 {
27426 int usig = signr_convert(ksig->sig);
27427 sigset_t *set = sigmask_to_save();
27428- compat_sigset_t *cset = (compat_sigset_t *) set;
27429+ sigset_t sigcopy;
27430+ compat_sigset_t *cset;
27431+
27432+ sigcopy = *set;
27433+
27434+ cset = (compat_sigset_t *) &sigcopy;
27435
27436 /* Set up the stack frame */
27437 if (is_ia32_frame()) {
27438@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27439 } else if (is_x32_frame()) {
27440 return x32_setup_rt_frame(ksig, cset, regs);
27441 } else {
27442- return __setup_rt_frame(ksig->sig, ksig, set, regs);
27443+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
27444 }
27445 }
27446
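
The align_sigframe() change swaps ((sp + 4) & -16ul) - 4 for ((sp - 12) & -16ul) - 4: both satisfy the i386 ABI rule that (sp + 4) is 16-byte aligned on handler entry, but the new form always rounds down far enough to leave at least 16 bytes of clearance below the interrupted stack pointer instead of possibly landing right at it. A quick check of that arithmetic:

#include <assert.h>
#include <stdio.h>

/* New kernel form: rounds down, still leaves (sp + 4) % 16 == 0. */
static unsigned long align_sigframe(unsigned long sp)
{
	return ((sp - 12) & -16UL) - 4;
}

int main(void)
{
	for (unsigned long sp = 0x1000; sp < 0x1020; sp++) {
		unsigned long a = align_sigframe(sp);
		assert((a + 4) % 16 == 0);	/* i386 ABI entry alignment */
		assert(a <= sp - 16);		/* at least 16 bytes of clearance */
	}
	puts("alignment holds");
	return 0;
}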
27447diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
27448index be8e1bd..a3d93fa 100644
27449--- a/arch/x86/kernel/smp.c
27450+++ b/arch/x86/kernel/smp.c
27451@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
27452
27453 __setup("nonmi_ipi", nonmi_ipi_setup);
27454
27455-struct smp_ops smp_ops = {
27456+struct smp_ops smp_ops __read_only = {
27457 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
27458 .smp_prepare_cpus = native_smp_prepare_cpus,
27459 .smp_cpus_done = native_smp_cpus_done,
27460diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
27461index 6d7022c..4feb6be 100644
27462--- a/arch/x86/kernel/smpboot.c
27463+++ b/arch/x86/kernel/smpboot.c
27464@@ -194,14 +194,17 @@ static void notrace start_secondary(void *unused)
27465
27466 enable_start_cpu0 = 0;
27467
27468-#ifdef CONFIG_X86_32
27469+ /* otherwise gcc will move up smp_processor_id before the cpu_init */
27470+ barrier();
27471+
27472 /* switch away from the initial page table */
27473+#ifdef CONFIG_PAX_PER_CPU_PGD
27474+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
27475+#else
27476 load_cr3(swapper_pg_dir);
27477+#endif
27478 __flush_tlb_all();
27479-#endif
27480
27481- /* otherwise gcc will move up smp_processor_id before the cpu_init */
27482- barrier();
27483 /*
27484 * Check TSC synchronization with the BP:
27485 */
27486@@ -765,8 +768,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27487 alternatives_enable_smp();
27488
27489 idle->thread.sp = (unsigned long) (((struct pt_regs *)
27490- (THREAD_SIZE + task_stack_page(idle))) - 1);
27491+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
27492 per_cpu(current_task, cpu) = idle;
27493+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27494
27495 #ifdef CONFIG_X86_32
27496 /* Stack for startup_32 can be just as for start_secondary onwards */
27497@@ -775,10 +779,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27498 clear_tsk_thread_flag(idle, TIF_FORK);
27499 initial_gs = per_cpu_offset(cpu);
27500 #endif
27501- per_cpu(kernel_stack, cpu) =
27502- (unsigned long)task_stack_page(idle) -
27503- KERNEL_STACK_OFFSET + THREAD_SIZE;
27504+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27505+ pax_open_kernel();
27506 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
27507+ pax_close_kernel();
27508 initial_code = (unsigned long)start_secondary;
27509 stack_start = idle->thread.sp;
27510
27511@@ -918,6 +922,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
27512 /* the FPU context is blank, nobody can own it */
27513 __cpu_disable_lazy_restore(cpu);
27514
27515+#ifdef CONFIG_PAX_PER_CPU_PGD
27516+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
27517+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27518+ KERNEL_PGD_PTRS);
27519+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
27520+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27521+ KERNEL_PGD_PTRS);
27522+#endif
27523+
27524 err = do_boot_cpu(apicid, cpu, tidle);
27525 if (err) {
27526 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
27527diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
27528index 9b4d51d..5d28b58 100644
27529--- a/arch/x86/kernel/step.c
27530+++ b/arch/x86/kernel/step.c
27531@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27532 struct desc_struct *desc;
27533 unsigned long base;
27534
27535- seg &= ~7UL;
27536+ seg >>= 3;
27537
27538 mutex_lock(&child->mm->context.lock);
27539- if (unlikely((seg >> 3) >= child->mm->context.size))
27540+ if (unlikely(seg >= child->mm->context.size))
27541 addr = -1L; /* bogus selector, access would fault */
27542 else {
27543 desc = child->mm->context.ldt + seg;
27544@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27545 addr += base;
27546 }
27547 mutex_unlock(&child->mm->context.lock);
27548- }
27549+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
27550+ addr = ktla_ktva(addr);
27551
27552 return addr;
27553 }
27554@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
27555 unsigned char opcode[15];
27556 unsigned long addr = convert_ip_to_linear(child, regs);
27557
27558+ if (addr == -EINVAL)
27559+ return 0;
27560+
27561 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
27562 for (i = 0; i < copied; i++) {
27563 switch (opcode[i]) {
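
In step.c the selector handling shifts immediately (seg >>= 3) instead of masking the RPL/TI bits (seg &= ~7UL) and shifting later, so the variable holds the LDT descriptor index throughout and the bounds check compares like with like; the added else-branch also translates kernel code segments through ktla_ktva() under KERNEXEC. That the two index computations agree is easy to verify exhaustively (a selector is laid out as index:13 | TI:1 | RPL:2):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	for (unsigned long sel = 0; sel < 0x10000; sel++) {
		unsigned long old_way = (sel & ~7UL) >> 3;	/* mask, then shift */
		unsigned long new_way = sel >> 3;		/* shift immediately */
		assert(old_way == new_way);			/* same LDT index */
	}
	puts("selector index computations agree");
	return 0;
}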
27564diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
27565new file mode 100644
27566index 0000000..5877189
27567--- /dev/null
27568+++ b/arch/x86/kernel/sys_i386_32.c
27569@@ -0,0 +1,189 @@
27570+/*
27571+ * This file contains various random system calls that
27572+ * have a non-standard calling sequence on the Linux/i386
27573+ * platform.
27574+ */
27575+
27576+#include <linux/errno.h>
27577+#include <linux/sched.h>
27578+#include <linux/mm.h>
27579+#include <linux/fs.h>
27580+#include <linux/smp.h>
27581+#include <linux/sem.h>
27582+#include <linux/msg.h>
27583+#include <linux/shm.h>
27584+#include <linux/stat.h>
27585+#include <linux/syscalls.h>
27586+#include <linux/mman.h>
27587+#include <linux/file.h>
27588+#include <linux/utsname.h>
27589+#include <linux/ipc.h>
27590+#include <linux/elf.h>
27591+
27592+#include <linux/uaccess.h>
27593+#include <linux/unistd.h>
27594+
27595+#include <asm/syscalls.h>
27596+
27597+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
27598+{
27599+ unsigned long pax_task_size = TASK_SIZE;
27600+
27601+#ifdef CONFIG_PAX_SEGMEXEC
27602+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
27603+ pax_task_size = SEGMEXEC_TASK_SIZE;
27604+#endif
27605+
27606+ if (flags & MAP_FIXED)
27607+ if (len > pax_task_size || addr > pax_task_size - len)
27608+ return -EINVAL;
27609+
27610+ return 0;
27611+}
27612+
27613+/*
27614+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
27615+ */
27616+static unsigned long get_align_mask(void)
27617+{
27618+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
27619+ return 0;
27620+
27621+ if (!(current->flags & PF_RANDOMIZE))
27622+ return 0;
27623+
27624+ return va_align.mask;
27625+}
27626+
27627+unsigned long
27628+arch_get_unmapped_area(struct file *filp, unsigned long addr,
27629+ unsigned long len, unsigned long pgoff, unsigned long flags)
27630+{
27631+ struct mm_struct *mm = current->mm;
27632+ struct vm_area_struct *vma;
27633+ unsigned long pax_task_size = TASK_SIZE;
27634+ struct vm_unmapped_area_info info;
27635+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27636+
27637+#ifdef CONFIG_PAX_SEGMEXEC
27638+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27639+ pax_task_size = SEGMEXEC_TASK_SIZE;
27640+#endif
27641+
27642+ pax_task_size -= PAGE_SIZE;
27643+
27644+ if (len > pax_task_size)
27645+ return -ENOMEM;
27646+
27647+ if (flags & MAP_FIXED)
27648+ return addr;
27649+
27650+#ifdef CONFIG_PAX_RANDMMAP
27651+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27652+#endif
27653+
27654+ if (addr) {
27655+ addr = PAGE_ALIGN(addr);
27656+ if (pax_task_size - len >= addr) {
27657+ vma = find_vma(mm, addr);
27658+ if (check_heap_stack_gap(vma, addr, len, offset))
27659+ return addr;
27660+ }
27661+ }
27662+
27663+ info.flags = 0;
27664+ info.length = len;
27665+ info.align_mask = filp ? get_align_mask() : 0;
27666+ info.align_offset = pgoff << PAGE_SHIFT;
27667+ info.threadstack_offset = offset;
27668+
27669+#ifdef CONFIG_PAX_PAGEEXEC
27670+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27671+ info.low_limit = 0x00110000UL;
27672+ info.high_limit = mm->start_code;
27673+
27674+#ifdef CONFIG_PAX_RANDMMAP
27675+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27676+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27677+#endif
27678+
27679+ if (info.low_limit < info.high_limit) {
27680+ addr = vm_unmapped_area(&info);
27681+ if (!IS_ERR_VALUE(addr))
27682+ return addr;
27683+ }
27684+ } else
27685+#endif
27686+
27687+ info.low_limit = mm->mmap_base;
27688+ info.high_limit = pax_task_size;
27689+
27690+ return vm_unmapped_area(&info);
27691+}
27692+
27693+unsigned long
27694+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27695+ const unsigned long len, const unsigned long pgoff,
27696+ const unsigned long flags)
27697+{
27698+ struct vm_area_struct *vma;
27699+ struct mm_struct *mm = current->mm;
27700+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
27701+ struct vm_unmapped_area_info info;
27702+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27703+
27704+#ifdef CONFIG_PAX_SEGMEXEC
27705+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27706+ pax_task_size = SEGMEXEC_TASK_SIZE;
27707+#endif
27708+
27709+ pax_task_size -= PAGE_SIZE;
27710+
27711+ /* requested length too big for entire address space */
27712+ if (len > pax_task_size)
27713+ return -ENOMEM;
27714+
27715+ if (flags & MAP_FIXED)
27716+ return addr;
27717+
27718+#ifdef CONFIG_PAX_PAGEEXEC
27719+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
27720+ goto bottomup;
27721+#endif
27722+
27723+#ifdef CONFIG_PAX_RANDMMAP
27724+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27725+#endif
27726+
27727+ /* requesting a specific address */
27728+ if (addr) {
27729+ addr = PAGE_ALIGN(addr);
27730+ if (pax_task_size - len >= addr) {
27731+ vma = find_vma(mm, addr);
27732+ if (check_heap_stack_gap(vma, addr, len, offset))
27733+ return addr;
27734+ }
27735+ }
27736+
27737+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
27738+ info.length = len;
27739+ info.low_limit = PAGE_SIZE;
27740+ info.high_limit = mm->mmap_base;
27741+ info.align_mask = filp ? get_align_mask() : 0;
27742+ info.align_offset = pgoff << PAGE_SHIFT;
27743+ info.threadstack_offset = offset;
27744+
27745+ addr = vm_unmapped_area(&info);
27746+ if (!(addr & ~PAGE_MASK))
27747+ return addr;
27748+ VM_BUG_ON(addr != -ENOMEM);
27749+
27750+bottomup:
27751+ /*
27752+ * A failed mmap() very likely causes application failure,
27753+ * so fall back to the bottom-up function here. This scenario
27754+ * can happen with large stack limits and large mmap()
27755+ * allocations.
27756+ */
27757+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
27758+}
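
The new sys_i386_32.c rebuilds arch_get_unmapped_area() around vm_unmapped_area(): honor MAP_FIXED, try the page-aligned hint if it leaves a safe gap, and otherwise search the [mmap_base, task size) window. The grsecurity-specific pieces (check_heap_stack_gap(), gr_rand_threadstack_offset(), the PaX flag handling) are omitted in the sketch below, which shows only the generic bottom-up gap search over a sorted mapping list:

#include <stdio.h>

struct vma { unsigned long start, end; };	/* sorted, non-overlapping */

/* Bottom-up search in the spirit of vm_unmapped_area() with
 * info.flags == 0: first gap of at least len in [low, high). */
static unsigned long find_gap(const struct vma *v, int n,
			      unsigned long low, unsigned long high,
			      unsigned long len)
{
	unsigned long addr = low;

	for (int i = 0; i < n; i++) {
		if (v[i].end <= addr)
			continue;		/* mapping is below the cursor */
		if (addr + len <= v[i].start)
			return addr;		/* gap before this mapping */
		addr = v[i].end;		/* skip past it */
	}
	return addr + len <= high ? addr : 0;	/* tail gap, or failure */
}

int main(void)
{
	struct vma maps[] = { { 0x10000, 0x20000 }, { 0x24000, 0x30000 } };

	/* expect 0x20000: the 0x4000 hole between the two mappings */
	printf("%#lx\n", find_gap(maps, 2, 0x10000, 0x40000, 0x4000));
	return 0;
}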
27759diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
27760index 30277e2..5664a29 100644
27761--- a/arch/x86/kernel/sys_x86_64.c
27762+++ b/arch/x86/kernel/sys_x86_64.c
27763@@ -81,8 +81,8 @@ out:
27764 return error;
27765 }
27766
27767-static void find_start_end(unsigned long flags, unsigned long *begin,
27768- unsigned long *end)
27769+static void find_start_end(struct mm_struct *mm, unsigned long flags,
27770+ unsigned long *begin, unsigned long *end)
27771 {
27772 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
27773 unsigned long new_begin;
27774@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
27775 *begin = new_begin;
27776 }
27777 } else {
27778- *begin = current->mm->mmap_legacy_base;
27779+ *begin = mm->mmap_legacy_base;
27780 *end = TASK_SIZE;
27781 }
27782 }
27783@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27784 struct vm_area_struct *vma;
27785 struct vm_unmapped_area_info info;
27786 unsigned long begin, end;
27787+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27788
27789 if (flags & MAP_FIXED)
27790 return addr;
27791
27792- find_start_end(flags, &begin, &end);
27793+ find_start_end(mm, flags, &begin, &end);
27794
27795 if (len > end)
27796 return -ENOMEM;
27797
27798+#ifdef CONFIG_PAX_RANDMMAP
27799+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27800+#endif
27801+
27802 if (addr) {
27803 addr = PAGE_ALIGN(addr);
27804 vma = find_vma(mm, addr);
27805- if (end - len >= addr &&
27806- (!vma || addr + len <= vma->vm_start))
27807+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27808 return addr;
27809 }
27810
27811@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27812 info.high_limit = end;
27813 info.align_mask = filp ? get_align_mask() : 0;
27814 info.align_offset = pgoff << PAGE_SHIFT;
27815+ info.threadstack_offset = offset;
27816 return vm_unmapped_area(&info);
27817 }
27818
27819@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27820 struct mm_struct *mm = current->mm;
27821 unsigned long addr = addr0;
27822 struct vm_unmapped_area_info info;
27823+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27824
27825 /* requested length too big for entire address space */
27826 if (len > TASK_SIZE)
27827@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27828 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
27829 goto bottomup;
27830
27831+#ifdef CONFIG_PAX_RANDMMAP
27832+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27833+#endif
27834+
27835 /* requesting a specific address */
27836 if (addr) {
27837 addr = PAGE_ALIGN(addr);
27838 vma = find_vma(mm, addr);
27839- if (TASK_SIZE - len >= addr &&
27840- (!vma || addr + len <= vma->vm_start))
27841+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27842 return addr;
27843 }
27844
27845@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27846 info.high_limit = mm->mmap_base;
27847 info.align_mask = filp ? get_align_mask() : 0;
27848 info.align_offset = pgoff << PAGE_SHIFT;
27849+ info.threadstack_offset = offset;
27850 addr = vm_unmapped_area(&info);
27851 if (!(addr & ~PAGE_MASK))
27852 return addr;
27853diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
27854index 91a4496..bb87552 100644
27855--- a/arch/x86/kernel/tboot.c
27856+++ b/arch/x86/kernel/tboot.c
27857@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
27858
27859 void tboot_shutdown(u32 shutdown_type)
27860 {
27861- void (*shutdown)(void);
27862+ void (* __noreturn shutdown)(void);
27863
27864 if (!tboot_enabled())
27865 return;
27866@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
27867
27868 switch_to_tboot_pt();
27869
27870- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
27871+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
27872 shutdown();
27873
27874 /* should not reach here */
27875@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
27876 return -ENODEV;
27877 }
27878
27879-static atomic_t ap_wfs_count;
27880+static atomic_unchecked_t ap_wfs_count;
27881
27882 static int tboot_wait_for_aps(int num_aps)
27883 {
27884@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
27885 {
27886 switch (action) {
27887 case CPU_DYING:
27888- atomic_inc(&ap_wfs_count);
27889+ atomic_inc_unchecked(&ap_wfs_count);
27890 if (num_online_cpus() == 1)
27891- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
27892+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
27893 return NOTIFY_BAD;
27894 break;
27895 }
27896@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
27897
27898 tboot_create_trampoline();
27899
27900- atomic_set(&ap_wfs_count, 0);
27901+ atomic_set_unchecked(&ap_wfs_count, 0);
27902 register_hotcpu_notifier(&tboot_cpu_notifier);
27903
27904 #ifdef CONFIG_DEBUG_FS
27905diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
27906index 25adc0e..1df4349 100644
27907--- a/arch/x86/kernel/time.c
27908+++ b/arch/x86/kernel/time.c
27909@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
27910 {
27911 unsigned long pc = instruction_pointer(regs);
27912
27913- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
27914+ if (!user_mode(regs) && in_lock_functions(pc)) {
27915 #ifdef CONFIG_FRAME_POINTER
27916- return *(unsigned long *)(regs->bp + sizeof(long));
27917+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
27918 #else
27919 unsigned long *sp =
27920 (unsigned long *)kernel_stack_pointer(regs);
27921@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
27922 * or above a saved flags. EFLAGS has bits 22-31 zero;
27923 * kernel addresses don't.
27924 */
27925+
27926+#ifdef CONFIG_PAX_KERNEXEC
27927+ return ktla_ktva(sp[0]);
27928+#else
27929 if (sp[0] >> 22)
27930 return sp[0];
27931 if (sp[1] >> 22)
27932 return sp[1];
27933 #endif
27934+
27935+#endif
27936 }
27937 return pc;
27938 }
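
The profile_pc() hunk keeps the old 32-bit heuristic on non-KERNEXEC kernels: a word popped off the stack is a saved EFLAGS if bits 22-31 are zero, while 32-bit kernel text addresses sit at or above 0xc0000000 (with the default split) and therefore always have some of those bits set, so word >> 22 discriminates the two. A toy version of that test:

#include <stdio.h>

/* 32-bit heuristic from profile_pc(): EFLAGS has bits 22-31 clear,
 * kernel text addresses (>= 0xc0000000 with the default split) don't. */
static int looks_like_kernel_address(unsigned int word)
{
	return (word >> 22) != 0;
}

int main(void)
{
	printf("%d\n", looks_like_kernel_address(0x00000246));	/* EFLAGS: 0 */
	printf("%d\n", looks_like_kernel_address(0xc01234ab));	/* text:   1 */
	return 0;
}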
27939diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
27940index 7fc5e84..c6e445a 100644
27941--- a/arch/x86/kernel/tls.c
27942+++ b/arch/x86/kernel/tls.c
27943@@ -139,6 +139,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
27944 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
27945 return -EINVAL;
27946
27947+#ifdef CONFIG_PAX_SEGMEXEC
27948+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
27949+ return -EINVAL;
27950+#endif
27951+
27952 set_tls_desc(p, idx, &info, 1);
27953
27954 return 0;
27955@@ -256,7 +261,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
27956
27957 if (kbuf)
27958 info = kbuf;
27959- else if (__copy_from_user(infobuf, ubuf, count))
27960+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
27961 return -EFAULT;
27962 else
27963 info = infobuf;
27964diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
27965index 1c113db..287b42e 100644
27966--- a/arch/x86/kernel/tracepoint.c
27967+++ b/arch/x86/kernel/tracepoint.c
27968@@ -9,11 +9,11 @@
27969 #include <linux/atomic.h>
27970
27971 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
27972-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27973+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27974 (unsigned long) trace_idt_table };
27975
27976 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27977-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
27978+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
27979
27980 static int trace_irq_vector_refcount;
27981 static DEFINE_MUTEX(irq_vector_mutex);
27982diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
27983index 89f4e64..aa4149d 100644
27984--- a/arch/x86/kernel/traps.c
27985+++ b/arch/x86/kernel/traps.c
27986@@ -68,7 +68,7 @@
27987 #include <asm/proto.h>
27988
27989 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27990-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
27991+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
27992 #else
27993 #include <asm/processor-flags.h>
27994 #include <asm/setup.h>
27995@@ -77,7 +77,7 @@ asmlinkage int system_call(void);
27996 #endif
27997
27998 /* Must be page-aligned because the real IDT is used in a fixmap. */
27999-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
28000+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
28001
28002 DECLARE_BITMAP(used_vectors, NR_VECTORS);
28003 EXPORT_SYMBOL_GPL(used_vectors);
28004@@ -109,11 +109,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
28005 }
28006
28007 static nokprobe_inline int
28008-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28009+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
28010 struct pt_regs *regs, long error_code)
28011 {
28012 #ifdef CONFIG_X86_32
28013- if (regs->flags & X86_VM_MASK) {
28014+ if (v8086_mode(regs)) {
28015 /*
28016 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
28017 * On nmi (interrupt 2), do_trap should not be called.
28018@@ -126,12 +126,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28019 return -1;
28020 }
28021 #endif
28022- if (!user_mode(regs)) {
28023+ if (!user_mode_novm(regs)) {
28024 if (!fixup_exception(regs)) {
28025 tsk->thread.error_code = error_code;
28026 tsk->thread.trap_nr = trapnr;
28027+
28028+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28029+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
28030+ str = "PAX: suspicious stack segment fault";
28031+#endif
28032+
28033 die(str, regs, error_code);
28034 }
28035+
28036+#ifdef CONFIG_PAX_REFCOUNT
28037+ if (trapnr == X86_TRAP_OF)
28038+ pax_report_refcount_overflow(regs);
28039+#endif
28040+
28041 return 0;
28042 }
28043
28044@@ -170,7 +182,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
28045 }
28046
28047 static void
28048-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28049+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
28050 long error_code, siginfo_t *info)
28051 {
28052 struct task_struct *tsk = current;
28053@@ -194,7 +206,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28054 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
28055 printk_ratelimit()) {
28056 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
28057- tsk->comm, tsk->pid, str,
28058+ tsk->comm, task_pid_nr(tsk), str,
28059 regs->ip, regs->sp, error_code);
28060 print_vma_addr(" in ", regs->ip);
28061 pr_cont("\n");
28062@@ -274,6 +286,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
28063 tsk->thread.error_code = error_code;
28064 tsk->thread.trap_nr = X86_TRAP_DF;
28065
28066+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
28067+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
28068+ die("grsec: kernel stack overflow detected", regs, error_code);
28069+#endif
28070+
28071 #ifdef CONFIG_DOUBLEFAULT
28072 df_debug(regs, error_code);
28073 #endif
28074@@ -300,7 +317,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
28075 goto exit;
28076 conditional_sti(regs);
28077
28078- if (!user_mode_vm(regs))
28079+ if (!user_mode(regs))
28080 die("bounds", regs, error_code);
28081
28082 if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
28083@@ -379,7 +396,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
28084 conditional_sti(regs);
28085
28086 #ifdef CONFIG_X86_32
28087- if (regs->flags & X86_VM_MASK) {
28088+ if (v8086_mode(regs)) {
28089 local_irq_enable();
28090 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
28091 goto exit;
28092@@ -387,18 +404,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
28093 #endif
28094
28095 tsk = current;
28096- if (!user_mode(regs)) {
28097+ if (!user_mode_novm(regs)) {
28098 if (fixup_exception(regs))
28099 goto exit;
28100
28101 tsk->thread.error_code = error_code;
28102 tsk->thread.trap_nr = X86_TRAP_GP;
28103 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
28104- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
28105+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
28106+
28107+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28108+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
28109+ die("PAX: suspicious general protection fault", regs, error_code);
28110+ else
28111+#endif
28112+
28113 die("general protection fault", regs, error_code);
28114+ }
28115 goto exit;
28116 }
28117
28118+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28119+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28120+ struct mm_struct *mm = tsk->mm;
28121+ unsigned long limit;
28122+
28123+ down_write(&mm->mmap_sem);
28124+ limit = mm->context.user_cs_limit;
28125+ if (limit < TASK_SIZE) {
28126+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28127+ up_write(&mm->mmap_sem);
28128+ return;
28129+ }
28130+ up_write(&mm->mmap_sem);
28131+ }
28132+#endif
28133+
28134 tsk->thread.error_code = error_code;
28135 tsk->thread.trap_nr = X86_TRAP_GP;
28136
28137@@ -510,13 +551,16 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
28138 container_of(task_pt_regs(current),
28139 struct bad_iret_stack, regs);
28140
28141+ if ((current->thread.sp0 ^ (unsigned long)s) < THREAD_SIZE)
28142+ new_stack = s;
28143+
28144 /* Copy the IRET target to the new stack. */
28145 memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
28146
28147 /* Copy the remainder of the stack from the current stack. */
28148 memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
28149
28150- BUG_ON(!user_mode_vm(&new_stack->regs));
28151+ BUG_ON(!user_mode(&new_stack->regs));
28152 return new_stack;
28153 }
28154 NOKPROBE_SYMBOL(fixup_bad_iret);
28155@@ -566,7 +610,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28156 * then it's very likely the result of an icebp/int01 trap.
28157 * User wants a sigtrap for that.
28158 */
28159- if (!dr6 && user_mode_vm(regs))
28160+ if (!dr6 && user_mode(regs))
28161 user_icebp = 1;
28162
28163 /* Catch kmemcheck conditions first of all! */
28164@@ -602,7 +646,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28165 /* It's safe to allow irq's after DR6 has been saved */
28166 preempt_conditional_sti(regs);
28167
28168- if (regs->flags & X86_VM_MASK) {
28169+ if (v8086_mode(regs)) {
28170 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28171 X86_TRAP_DB);
28172 preempt_conditional_cli(regs);
28173@@ -617,7 +661,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28174 * We already checked v86 mode above, so we can check for kernel mode
28175 * by just checking the CPL of CS.
28176 */
28177- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28178+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28179 tsk->thread.debugreg6 &= ~DR_STEP;
28180 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28181 regs->flags &= ~X86_EFLAGS_TF;
28182@@ -650,7 +694,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28183 return;
28184 conditional_sti(regs);
28185
28186- if (!user_mode_vm(regs))
28187+ if (!user_mode(regs))
28188 {
28189 if (!fixup_exception(regs)) {
28190 task->thread.error_code = error_code;
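
The double-fault hook added to traps.c treats a #DF whose saved sp has run off the bottom of the thread stack (to within one page below tsk->stack, the region's lowest address) as a kernel stack overflow and dies loudly rather than letting the corruption propagate. The unsigned distance test on its own, with invented addresses:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* stack_base is the lowest address of the thread stack; the stack
 * grows down toward it, so a small unsigned difference means sp has
 * just crossed the bottom. */
static int stack_overflowed(unsigned long stack_base, unsigned long sp)
{
	return stack_base - sp <= PAGE_SIZE;
}

int main(void)
{
	unsigned long bottom = 0xffff880010000000UL;

	printf("%d\n", stack_overflowed(bottom, bottom + 0x2000));	/* healthy: 0 */
	printf("%d\n", stack_overflowed(bottom, bottom - 0x100));	/* overflow: 1 */
	return 0;
}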
28191diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28192index 5054497..139f8f8 100644
28193--- a/arch/x86/kernel/tsc.c
28194+++ b/arch/x86/kernel/tsc.c
28195@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28196 */
28197 smp_wmb();
28198
28199- ACCESS_ONCE(c2n->head) = data;
28200+ ACCESS_ONCE_RW(c2n->head) = data;
28201 }
28202
28203 /*
28204diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28205index 8b96a94..792b410 100644
28206--- a/arch/x86/kernel/uprobes.c
28207+++ b/arch/x86/kernel/uprobes.c
28208@@ -845,7 +845,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28209 int ret = NOTIFY_DONE;
28210
28211 /* We are only interested in userspace traps */
28212- if (regs && !user_mode_vm(regs))
28213+ if (regs && !user_mode(regs))
28214 return NOTIFY_DONE;
28215
28216 switch (val) {
28217@@ -919,7 +919,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28218
28219 if (nleft != rasize) {
28220 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28221- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28222+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28223
28224 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28225 }
28226diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28227index b9242ba..50c5edd 100644
28228--- a/arch/x86/kernel/verify_cpu.S
28229+++ b/arch/x86/kernel/verify_cpu.S
28230@@ -20,6 +20,7 @@
28231 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28232 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28233 * arch/x86/kernel/head_32.S: processor startup
28234+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28235 *
28236 * verify_cpu, returns the status of longmode and SSE in register %eax.
28237 * 0: Success 1: Failure
28238diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28239index e8edcf5..27f9344 100644
28240--- a/arch/x86/kernel/vm86_32.c
28241+++ b/arch/x86/kernel/vm86_32.c
28242@@ -44,6 +44,7 @@
28243 #include <linux/ptrace.h>
28244 #include <linux/audit.h>
28245 #include <linux/stddef.h>
28246+#include <linux/grsecurity.h>
28247
28248 #include <asm/uaccess.h>
28249 #include <asm/io.h>
28250@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28251 do_exit(SIGSEGV);
28252 }
28253
28254- tss = &per_cpu(init_tss, get_cpu());
28255+ tss = init_tss + get_cpu();
28256 current->thread.sp0 = current->thread.saved_sp0;
28257 current->thread.sysenter_cs = __KERNEL_CS;
28258 load_sp0(tss, &current->thread);
28259@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28260
28261 if (tsk->thread.saved_sp0)
28262 return -EPERM;
28263+
28264+#ifdef CONFIG_GRKERNSEC_VM86
28265+ if (!capable(CAP_SYS_RAWIO)) {
28266+ gr_handle_vm86();
28267+ return -EPERM;
28268+ }
28269+#endif
28270+
28271 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28272 offsetof(struct kernel_vm86_struct, vm86plus) -
28273 sizeof(info.regs));
28274@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28275 int tmp;
28276 struct vm86plus_struct __user *v86;
28277
28278+#ifdef CONFIG_GRKERNSEC_VM86
28279+ if (!capable(CAP_SYS_RAWIO)) {
28280+ gr_handle_vm86();
28281+ return -EPERM;
28282+ }
28283+#endif
28284+
28285 tsk = current;
28286 switch (cmd) {
28287 case VM86_REQUEST_IRQ:
28288@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28289 tsk->thread.saved_fs = info->regs32->fs;
28290 tsk->thread.saved_gs = get_user_gs(info->regs32);
28291
28292- tss = &per_cpu(init_tss, get_cpu());
28293+ tss = init_tss + get_cpu();
28294 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28295 if (cpu_has_sep)
28296 tsk->thread.sysenter_cs = 0;
28297@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28298 goto cannot_handle;
28299 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28300 goto cannot_handle;
28301- intr_ptr = (unsigned long __user *) (i << 2);
28302+ intr_ptr = (__force unsigned long __user *) (i << 2);
28303 if (get_user(segoffs, intr_ptr))
28304 goto cannot_handle;
28305 if ((segoffs >> 16) == BIOSSEG)
28306diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28307index 00bf300..129df8e 100644
28308--- a/arch/x86/kernel/vmlinux.lds.S
28309+++ b/arch/x86/kernel/vmlinux.lds.S
28310@@ -26,6 +26,13 @@
28311 #include <asm/page_types.h>
28312 #include <asm/cache.h>
28313 #include <asm/boot.h>
28314+#include <asm/segment.h>
28315+
28316+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28317+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28318+#else
28319+#define __KERNEL_TEXT_OFFSET 0
28320+#endif
28321
28322 #undef i386 /* in case the preprocessor is a 32bit one */
28323
28324@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28325
28326 PHDRS {
28327 text PT_LOAD FLAGS(5); /* R_E */
28328+#ifdef CONFIG_X86_32
28329+ module PT_LOAD FLAGS(5); /* R_E */
28330+#endif
28331+#ifdef CONFIG_XEN
28332+ rodata PT_LOAD FLAGS(5); /* R_E */
28333+#else
28334+ rodata PT_LOAD FLAGS(4); /* R__ */
28335+#endif
28336 data PT_LOAD FLAGS(6); /* RW_ */
28337-#ifdef CONFIG_X86_64
28338+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28339 #ifdef CONFIG_SMP
28340 percpu PT_LOAD FLAGS(6); /* RW_ */
28341 #endif
28342+ text.init PT_LOAD FLAGS(5); /* R_E */
28343+ text.exit PT_LOAD FLAGS(5); /* R_E */
28344 init PT_LOAD FLAGS(7); /* RWE */
28345-#endif
28346 note PT_NOTE FLAGS(0); /* ___ */
28347 }
28348
28349 SECTIONS
28350 {
28351 #ifdef CONFIG_X86_32
28352- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28353- phys_startup_32 = startup_32 - LOAD_OFFSET;
28354+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28355 #else
28356- . = __START_KERNEL;
28357- phys_startup_64 = startup_64 - LOAD_OFFSET;
28358+ . = __START_KERNEL;
28359 #endif
28360
28361 /* Text and read-only data */
28362- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28363- _text = .;
28364+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28365 /* bootstrapping code */
28366+#ifdef CONFIG_X86_32
28367+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28368+#else
28369+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28370+#endif
28371+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28372+ _text = .;
28373 HEAD_TEXT
28374 . = ALIGN(8);
28375 _stext = .;
28376@@ -104,13 +124,47 @@ SECTIONS
28377 IRQENTRY_TEXT
28378 *(.fixup)
28379 *(.gnu.warning)
28380- /* End of text section */
28381- _etext = .;
28382 } :text = 0x9090
28383
28384- NOTES :text :note
28385+ . += __KERNEL_TEXT_OFFSET;
28386
28387- EXCEPTION_TABLE(16) :text = 0x9090
28388+#ifdef CONFIG_X86_32
28389+ . = ALIGN(PAGE_SIZE);
28390+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28391+
28392+#ifdef CONFIG_PAX_KERNEXEC
28393+ MODULES_EXEC_VADDR = .;
28394+ BYTE(0)
28395+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28396+ . = ALIGN(HPAGE_SIZE) - 1;
28397+ MODULES_EXEC_END = .;
28398+#endif
28399+
28400+ } :module
28401+#endif
28402+
28403+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28404+ /* End of text section */
28405+ BYTE(0)
28406+ _etext = . - __KERNEL_TEXT_OFFSET;
28407+ }
28408+
28409+#ifdef CONFIG_X86_32
28410+ . = ALIGN(PAGE_SIZE);
28411+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28412+ . = ALIGN(PAGE_SIZE);
28413+ *(.empty_zero_page)
28414+ *(.initial_pg_fixmap)
28415+ *(.initial_pg_pmd)
28416+ *(.initial_page_table)
28417+ *(.swapper_pg_dir)
28418+ } :rodata
28419+#endif
28420+
28421+ . = ALIGN(PAGE_SIZE);
28422+ NOTES :rodata :note
28423+
28424+ EXCEPTION_TABLE(16) :rodata
28425
28426 #if defined(CONFIG_DEBUG_RODATA)
28427 /* .text should occupy whole number of pages */
28428@@ -122,16 +176,20 @@ SECTIONS
28429
28430 /* Data */
28431 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28432+
28433+#ifdef CONFIG_PAX_KERNEXEC
28434+ . = ALIGN(HPAGE_SIZE);
28435+#else
28436+ . = ALIGN(PAGE_SIZE);
28437+#endif
28438+
28439 /* Start of data section */
28440 _sdata = .;
28441
28442 /* init_task */
28443 INIT_TASK_DATA(THREAD_SIZE)
28444
28445-#ifdef CONFIG_X86_32
28446- /* 32 bit has nosave before _edata */
28447 NOSAVE_DATA
28448-#endif
28449
28450 PAGE_ALIGNED_DATA(PAGE_SIZE)
28451
28452@@ -174,12 +232,19 @@ SECTIONS
28453 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
28454
28455 /* Init code and data - will be freed after init */
28456- . = ALIGN(PAGE_SIZE);
28457 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
28458+ BYTE(0)
28459+
28460+#ifdef CONFIG_PAX_KERNEXEC
28461+ . = ALIGN(HPAGE_SIZE);
28462+#else
28463+ . = ALIGN(PAGE_SIZE);
28464+#endif
28465+
28466 __init_begin = .; /* paired with __init_end */
28467- }
28468+ } :init.begin
28469
28470-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
28471+#ifdef CONFIG_SMP
28472 /*
28473 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
28474 * output PHDR, so the next output section - .init.text - should
28475@@ -190,12 +255,27 @@ SECTIONS
28476 "per-CPU data too large - increase CONFIG_PHYSICAL_START")
28477 #endif
28478
28479- INIT_TEXT_SECTION(PAGE_SIZE)
28480-#ifdef CONFIG_X86_64
28481- :init
28482-#endif
28483+ . = ALIGN(PAGE_SIZE);
28484+ init_begin = .;
28485+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
28486+ VMLINUX_SYMBOL(_sinittext) = .;
28487+ INIT_TEXT
28488+ . = ALIGN(PAGE_SIZE);
28489+ } :text.init
28490
28491- INIT_DATA_SECTION(16)
28492+ /*
28493+	 * .exit.text is discarded at runtime, not link time, to deal with
28494+ * references from .altinstructions and .eh_frame
28495+ */
28496+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28497+ EXIT_TEXT
28498+ VMLINUX_SYMBOL(_einittext) = .;
28499+ . = ALIGN(16);
28500+ } :text.exit
28501+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
28502+
28503+ . = ALIGN(PAGE_SIZE);
28504+ INIT_DATA_SECTION(16) :init
28505
28506 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
28507 __x86_cpu_dev_start = .;
28508@@ -266,19 +346,12 @@ SECTIONS
28509 }
28510
28511 . = ALIGN(8);
28512- /*
28513-	 * .exit.text is discarded at runtime, not link time, to deal with
28514- * references from .altinstructions and .eh_frame
28515- */
28516- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
28517- EXIT_TEXT
28518- }
28519
28520 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
28521 EXIT_DATA
28522 }
28523
28524-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
28525+#ifndef CONFIG_SMP
28526 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
28527 #endif
28528
28529@@ -297,16 +370,10 @@ SECTIONS
28530 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
28531 __smp_locks = .;
28532 *(.smp_locks)
28533- . = ALIGN(PAGE_SIZE);
28534 __smp_locks_end = .;
28535+ . = ALIGN(PAGE_SIZE);
28536 }
28537
28538-#ifdef CONFIG_X86_64
28539- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
28540- NOSAVE_DATA
28541- }
28542-#endif
28543-
28544 /* BSS */
28545 . = ALIGN(PAGE_SIZE);
28546 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
28547@@ -322,6 +389,7 @@ SECTIONS
28548 __brk_base = .;
28549 . += 64 * 1024; /* 64k alignment slop space */
28550 *(.brk_reservation) /* areas brk users have reserved */
28551+ . = ALIGN(HPAGE_SIZE);
28552 __brk_limit = .;
28553 }
28554
28555@@ -348,13 +416,12 @@ SECTIONS
28556 * for the boot processor.
28557 */
28558 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
28559-INIT_PER_CPU(gdt_page);
28560 INIT_PER_CPU(irq_stack_union);
28561
28562 /*
28563 * Build-time check on the image size:
28564 */
28565-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
28566+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
28567 "kernel image bigger than KERNEL_IMAGE_SIZE");
28568
28569 #ifdef CONFIG_SMP
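
On i386 with CONFIG_PAX_KERNEXEC, the linker-script hunk above shifts .text by __KERNEL_TEXT_OFFSET, so kernel text ends up with two addresses: the one symbols are linked at and the one the bytes are actually mapped at. The patch converts between the two views with helpers conventionally named ktla_ktva()/ktva_ktla() (ktla_ktva() appears later in the insn.c hunk). A toy version of that translation, with a made-up offset for illustration -- the real one is fixed at link time by the script above:

    #include <stdint.h>
    #include <stdio.h>

    /* Made-up offset for illustration; the real value derives from
     * LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR in the linker script. */
    #define KERNEL_TEXT_OFFSET 0x01000000UL

    /* linked ("text linear") address -> mapped ("text virtual") address */
    #define ktla_ktva(addr) ((uintptr_t)(addr) + KERNEL_TEXT_OFFSET)
    /* and the inverse */
    #define ktva_ktla(addr) ((uintptr_t)(addr) - KERNEL_TEXT_OFFSET)

    int main(void)
    {
            uintptr_t linked = 0xc1000000UL;       /* where a symbol is linked */
            uintptr_t mapped = ktla_ktva(linked);  /* where its bytes live     */

            printf("linked %#lx  mapped %#lx  back %#lx\n",
                   (unsigned long)linked, (unsigned long)mapped,
                   (unsigned long)ktva_ktla(mapped));
            return 0;
    }
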
28570diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
28571index 2dcc6ff..082dc7a 100644
28572--- a/arch/x86/kernel/vsyscall_64.c
28573+++ b/arch/x86/kernel/vsyscall_64.c
28574@@ -38,15 +38,13 @@
28575 #define CREATE_TRACE_POINTS
28576 #include "vsyscall_trace.h"
28577
28578-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
28579+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
28580
28581 static int __init vsyscall_setup(char *str)
28582 {
28583 if (str) {
28584 if (!strcmp("emulate", str))
28585 vsyscall_mode = EMULATE;
28586- else if (!strcmp("native", str))
28587- vsyscall_mode = NATIVE;
28588 else if (!strcmp("none", str))
28589 vsyscall_mode = NONE;
28590 else
28591@@ -264,8 +262,7 @@ do_ret:
28592 return true;
28593
28594 sigsegv:
28595- force_sig(SIGSEGV, current);
28596- return true;
28597+ do_group_exit(SIGKILL);
28598 }
28599
28600 /*
28601@@ -283,8 +280,8 @@ static struct vm_operations_struct gate_vma_ops = {
28602 static struct vm_area_struct gate_vma = {
28603 .vm_start = VSYSCALL_ADDR,
28604 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
28605- .vm_page_prot = PAGE_READONLY_EXEC,
28606- .vm_flags = VM_READ | VM_EXEC,
28607+ .vm_page_prot = PAGE_READONLY,
28608+ .vm_flags = VM_READ,
28609 .vm_ops = &gate_vma_ops,
28610 };
28611
28612@@ -325,10 +322,7 @@ void __init map_vsyscall(void)
28613 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
28614
28615 if (vsyscall_mode != NONE)
28616- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
28617- vsyscall_mode == NATIVE
28618- ? PAGE_KERNEL_VSYSCALL
28619- : PAGE_KERNEL_VVAR);
28620+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
28621
28622 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
28623 (unsigned long)VSYSCALL_ADDR);
28624diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
28625index 04068192..4d75aa6 100644
28626--- a/arch/x86/kernel/x8664_ksyms_64.c
28627+++ b/arch/x86/kernel/x8664_ksyms_64.c
28628@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
28629 EXPORT_SYMBOL(copy_user_generic_unrolled);
28630 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
28631 EXPORT_SYMBOL(__copy_user_nocache);
28632-EXPORT_SYMBOL(_copy_from_user);
28633-EXPORT_SYMBOL(_copy_to_user);
28634
28635 EXPORT_SYMBOL(copy_page);
28636 EXPORT_SYMBOL(clear_page);
28637@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
28638 EXPORT_SYMBOL(___preempt_schedule_context);
28639 #endif
28640 #endif
28641+
28642+#ifdef CONFIG_PAX_PER_CPU_PGD
28643+EXPORT_SYMBOL(cpu_pgd);
28644+#endif
28645diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
28646index 234b072..b7ab191 100644
28647--- a/arch/x86/kernel/x86_init.c
28648+++ b/arch/x86/kernel/x86_init.c
28649@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
28650 static void default_nmi_init(void) { };
28651 static int default_i8042_detect(void) { return 1; };
28652
28653-struct x86_platform_ops x86_platform = {
28654+struct x86_platform_ops x86_platform __read_only = {
28655 .calibrate_tsc = native_calibrate_tsc,
28656 .get_wallclock = mach_get_cmos_time,
28657 .set_wallclock = mach_set_rtc_mmss,
28658@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
28659 EXPORT_SYMBOL_GPL(x86_platform);
28660
28661 #if defined(CONFIG_PCI_MSI)
28662-struct x86_msi_ops x86_msi = {
28663+struct x86_msi_ops x86_msi __read_only = {
28664 .setup_msi_irqs = native_setup_msi_irqs,
28665 .compose_msi_msg = native_compose_msi_msg,
28666 .teardown_msi_irq = native_teardown_msi_irq,
28667@@ -140,7 +140,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
28668 }
28669 #endif
28670
28671-struct x86_io_apic_ops x86_io_apic_ops = {
28672+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
28673 .init = native_io_apic_init_mappings,
28674 .read = native_io_apic_read,
28675 .write = native_io_apic_write,
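
x86_platform, x86_msi and x86_io_apic_ops are moved into __read_only memory, so the few remaining legitimate writes (visible later in the svm.c and vmx.c hunks) must be bracketed by pax_open_kernel()/pax_close_kernel() and performed through a *(void **)& cast. A userspace approximation of that bracket using mprotect(); the kernel toggles CR0.WP or the PTE instead, so this only models the shape:

    #include <stdio.h>
    #include <sys/mman.h>

    struct ops {
            void (*hook)(void);
            char pad[4096 - sizeof(void (*)(void))];  /* own the whole page */
    };

    static void native_hook(void) { puts("native hook"); }

    /* Page-aligned stand-in for an ops table placed in a read-only section. */
    static struct ops my_ops __attribute__((aligned(4096))) = { .hook = native_hook };

    static void set_prot(int prot)
    {
            /* pax_open_kernel()/pax_close_kernel() flip CR0.WP (or the PTE)
             * around the write; mprotect() is the nearest userspace analogue. */
            (void)mprotect(&my_ops, sizeof(my_ops), prot);
    }

    int main(void)
    {
            set_prot(PROT_READ | PROT_WRITE);   /* "pax_open_kernel()"     */
            *(void **)&my_ops.hook = NULL;      /* the patch's write idiom */
            set_prot(PROT_READ);                /* "pax_close_kernel()"    */
            return my_ops.hook == NULL ? 0 : 1;
    }
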
28676diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
28677index 8be1e17..07dd990 100644
28678--- a/arch/x86/kernel/xsave.c
28679+++ b/arch/x86/kernel/xsave.c
28680@@ -167,18 +167,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28681
28682 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
28683 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
28684- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28685+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28686
28687 if (!use_xsave())
28688 return err;
28689
28690- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
28691+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
28692
28693 /*
28694 * Read the xstate_bv which we copied (directly from the cpu or
28695 * from the state in task struct) to the user buffers.
28696 */
28697- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28698+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28699
28700 /*
28701 * For legacy compatible, we always set FP/SSE bits in the bit
28702@@ -193,7 +193,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28703 */
28704 xstate_bv |= XSTATE_FPSSE;
28705
28706- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28707+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28708
28709 return err;
28710 }
28711@@ -202,6 +202,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28712 {
28713 int err;
28714
28715+ buf = (struct xsave_struct __user *)____m(buf);
28716 if (use_xsave())
28717 err = xsave_user(buf);
28718 else if (use_fxsr())
28719@@ -312,6 +313,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28720 */
28721 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28722 {
28723+ buf = (void __user *)____m(buf);
28724 if (use_xsave()) {
28725 if ((unsigned long)buf % 64 || fx_only) {
28726 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
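
The xsave.c changes are sparse address-space hygiene: user pointers keep their __user marker (and are routed through ____m() so UDEREF's shadow mapping applies) instead of decaying to plain pointers. The same checking can be reproduced outside the kernel with sparse's attributes -- a sketch with macro names borrowed from the kernel, which cc compiles normally and `sparse` type-checks:

    /* Compile with cc; type-check with: sparse this_file.c */
    #ifdef __CHECKER__
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif

    static int get_user_u32(unsigned int *dst, const unsigned int __user *src)
    {
            /* The __force cast is the one sanctioned way to strip the
             * address-space marker -- exactly what the xsave.c casts do. */
            *dst = *(const unsigned int __force *)src;
            return 0;
    }

    int main(void)
    {
            unsigned int x = 42, v;
            /* Forging a __user pointer for the demo also needs __force;
             * a plain "&x" would make sparse complain about mixing
             * address spaces, which is the whole point of the marker. */
            return get_user_u32(&v, (const unsigned int __force __user *)&x) || v != 42;
    }
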
28727diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28728index 8a80737..bac4961 100644
28729--- a/arch/x86/kvm/cpuid.c
28730+++ b/arch/x86/kvm/cpuid.c
28731@@ -182,15 +182,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28732 struct kvm_cpuid2 *cpuid,
28733 struct kvm_cpuid_entry2 __user *entries)
28734 {
28735- int r;
28736+ int r, i;
28737
28738 r = -E2BIG;
28739 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28740 goto out;
28741 r = -EFAULT;
28742- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28743- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28744+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28745 goto out;
28746+ for (i = 0; i < cpuid->nent; ++i) {
28747+ struct kvm_cpuid_entry2 cpuid_entry;
28748+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28749+ goto out;
28750+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
28751+ }
28752 vcpu->arch.cpuid_nent = cpuid->nent;
28753 kvm_apic_set_version(vcpu);
28754 kvm_x86_ops->cpuid_update(vcpu);
28755@@ -203,15 +208,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28756 struct kvm_cpuid2 *cpuid,
28757 struct kvm_cpuid_entry2 __user *entries)
28758 {
28759- int r;
28760+ int r, i;
28761
28762 r = -E2BIG;
28763 if (cpuid->nent < vcpu->arch.cpuid_nent)
28764 goto out;
28765 r = -EFAULT;
28766- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
28767- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28768+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28769 goto out;
28770+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
28771+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
28772+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
28773+ goto out;
28774+ }
28775 return 0;
28776
28777 out:
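
Both cpuid ioctl paths are rewritten from a single bulk copy of cpuid->nent * sizeof(entry) bytes into access_ok() plus a per-entry loop, so every copy the fault-handling primitive sees has a fixed, compiler-visible size. The shape of that transformation in plain C, with memcpy() standing in for __copy_from_user():

    #include <errno.h>
    #include <stddef.h>
    #include <string.h>

    struct cpuid_entry { unsigned int function, index, eax, ebx, ecx, edx; };

    #define MAX_ENTRIES 80          /* KVM_MAX_CPUID_ENTRIES analogue */

    static struct cpuid_entry dst[MAX_ENTRIES];

    /* Sketch: memcpy() plays __copy_from_user().  The point is the
     * per-element loop: each copy is sizeof(e) bytes, so no
     * nent * sizeof(e) multiplication ever reaches the copy primitive. */
    static int set_entries_sketch(const struct cpuid_entry *entries, size_t nent)
    {
            size_t i;

            if (nent > MAX_ENTRIES)
                    return -E2BIG;
            for (i = 0; i < nent; i++) {
                    struct cpuid_entry e;

                    memcpy(&e, &entries[i], sizeof(e)); /* one fixed-size copy */
                    dst[i] = e;
            }
            return 0;
    }

    int main(void)
    {
            struct cpuid_entry in[2] = { { .function = 0 }, { .function = 1 } };
            return set_entries_sketch(in, 2);
    }
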
28778diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
28779index b24c2d8..e1e4e259 100644
28780--- a/arch/x86/kvm/emulate.c
28781+++ b/arch/x86/kvm/emulate.c
28782@@ -3503,7 +3503,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
28783 int cr = ctxt->modrm_reg;
28784 u64 efer = 0;
28785
28786- static u64 cr_reserved_bits[] = {
28787+ static const u64 cr_reserved_bits[] = {
28788 0xffffffff00000000ULL,
28789 0, 0, 0, /* CR3 checked later */
28790 CR4_RESERVED_BITS,
28791diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
28792index d52dcf0..cec7e84 100644
28793--- a/arch/x86/kvm/lapic.c
28794+++ b/arch/x86/kvm/lapic.c
28795@@ -55,7 +55,7 @@
28796 #define APIC_BUS_CYCLE_NS 1
28797
28798 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
28799-#define apic_debug(fmt, arg...)
28800+#define apic_debug(fmt, arg...) do {} while (0)
28801
28802 #define APIC_LVT_NUM 6
28803 /* 14 is the version for Xeon and Pentium 8.4.8*/
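
The one-line lapic.c change swaps an empty apic_debug() expansion for do {} while (0). An empty expansion leaves a bare ";" behind -- "if (cond) apic_debug(...);" becomes "if (cond) ;", which gcc flags under -Wempty-body and which reads like a bug -- whereas the do/while form is a real (empty) statement, safe in every statement position. A compact illustration (log_enabled is a local flag invented for the demo):

    #include <stdio.h>

    /* Before: expands to nothing, leaving "if (on) ;" behind. */
    #define debug_empty(fmt, ...)
    /* After (the hunk's form): always one full statement. */
    #define debug_noop(fmt, ...) do {} while (0)

    int main(void)
    {
            int log_enabled = 0;    /* demo flag, not from the patch */

            if (log_enabled)
                    debug_noop("apic state\n");  /* one clean statement */
            else
                    puts("logging disabled");
            return 0;
    }
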
28804diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
28805index fd49c86..77e1aa0 100644
28806--- a/arch/x86/kvm/paging_tmpl.h
28807+++ b/arch/x86/kvm/paging_tmpl.h
28808@@ -343,7 +343,7 @@ retry_walk:
28809 if (unlikely(kvm_is_error_hva(host_addr)))
28810 goto error;
28811
28812- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
28813+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
28814 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
28815 goto error;
28816 walker->ptep_user[walker->level - 1] = ptep_user;
28817diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
28818index 41dd038..de331cf 100644
28819--- a/arch/x86/kvm/svm.c
28820+++ b/arch/x86/kvm/svm.c
28821@@ -3568,7 +3568,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
28822 int cpu = raw_smp_processor_id();
28823
28824 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
28825+
28826+ pax_open_kernel();
28827 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
28828+ pax_close_kernel();
28829+
28830 load_TR_desc();
28831 }
28832
28833@@ -3969,6 +3973,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
28834 #endif
28835 #endif
28836
28837+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28838+ __set_fs(current_thread_info()->addr_limit);
28839+#endif
28840+
28841 reload_tss(vcpu);
28842
28843 local_irq_disable();
28844diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
28845index d4c58d8..eaf2568 100644
28846--- a/arch/x86/kvm/vmx.c
28847+++ b/arch/x86/kvm/vmx.c
28848@@ -1380,12 +1380,12 @@ static void vmcs_write64(unsigned long field, u64 value)
28849 #endif
28850 }
28851
28852-static void vmcs_clear_bits(unsigned long field, u32 mask)
28853+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
28854 {
28855 vmcs_writel(field, vmcs_readl(field) & ~mask);
28856 }
28857
28858-static void vmcs_set_bits(unsigned long field, u32 mask)
28859+static void vmcs_set_bits(unsigned long field, unsigned long mask)
28860 {
28861 vmcs_writel(field, vmcs_readl(field) | mask);
28862 }
28863@@ -1645,7 +1645,11 @@ static void reload_tss(void)
28864 struct desc_struct *descs;
28865
28866 descs = (void *)gdt->address;
28867+
28868+ pax_open_kernel();
28869 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
28870+ pax_close_kernel();
28871+
28872 load_TR_desc();
28873 }
28874
28875@@ -1881,6 +1885,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
28876 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
28877 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
28878
28879+#ifdef CONFIG_PAX_PER_CPU_PGD
28880+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28881+#endif
28882+
28883 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
28884 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
28885 vmx->loaded_vmcs->cpu = cpu;
28886@@ -2170,7 +2178,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
28887 * reads and returns guest's timestamp counter "register"
28888 * guest_tsc = host_tsc + tsc_offset -- 21.3
28889 */
28890-static u64 guest_read_tsc(void)
28891+static u64 __intentional_overflow(-1) guest_read_tsc(void)
28892 {
28893 u64 host_tsc, tsc_offset;
28894
28895@@ -4252,7 +4260,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28896 unsigned long cr4;
28897
28898 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
28899+
28900+#ifndef CONFIG_PAX_PER_CPU_PGD
28901 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28902+#endif
28903
28904 /* Save the most likely value for this task's CR4 in the VMCS. */
28905 cr4 = read_cr4();
28906@@ -4279,7 +4290,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28907 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
28908 vmx->host_idt_base = dt.address;
28909
28910- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
28911+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
28912
28913 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
28914 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
28915@@ -5876,11 +5887,16 @@ static __init int hardware_setup(void)
28916 * page upon invalidation. No need to do anything if the
28917 * processor does not have the APIC_ACCESS_ADDR VMCS field.
28918 */
28919- kvm_x86_ops->set_apic_access_page_addr = NULL;
28920+ pax_open_kernel();
28921+ *(void **)&kvm_x86_ops->set_apic_access_page_addr = NULL;
28922+ pax_close_kernel();
28923 }
28924
28925- if (!cpu_has_vmx_tpr_shadow())
28926- kvm_x86_ops->update_cr8_intercept = NULL;
28927+ if (!cpu_has_vmx_tpr_shadow()) {
28928+ pax_open_kernel();
28929+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28930+ pax_close_kernel();
28931+ }
28932
28933 if (enable_ept && !cpu_has_vmx_ept_2m_page())
28934 kvm_disable_largepages();
28935@@ -5891,13 +5907,15 @@ static __init int hardware_setup(void)
28936 if (!cpu_has_vmx_apicv())
28937 enable_apicv = 0;
28938
28939+ pax_open_kernel();
28940 if (enable_apicv)
28941- kvm_x86_ops->update_cr8_intercept = NULL;
28942+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28943 else {
28944- kvm_x86_ops->hwapic_irr_update = NULL;
28945- kvm_x86_ops->deliver_posted_interrupt = NULL;
28946- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28947+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
28948+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
28949+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28950 }
28951+ pax_close_kernel();
28952
28953 if (nested)
28954 nested_vmx_setup_ctls_msrs();
28955@@ -7846,6 +7864,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28956 "jmp 2f \n\t"
28957 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
28958 "2: "
28959+
28960+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28961+ "ljmp %[cs],$3f\n\t"
28962+ "3: "
28963+#endif
28964+
28965 /* Save guest registers, load host registers, keep flags */
28966 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
28967 "pop %0 \n\t"
28968@@ -7898,6 +7922,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28969 #endif
28970 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
28971 [wordsize]"i"(sizeof(ulong))
28972+
28973+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28974+ ,[cs]"i"(__KERNEL_CS)
28975+#endif
28976+
28977 : "cc", "memory"
28978 #ifdef CONFIG_X86_64
28979 , "rax", "rbx", "rdi", "rsi"
28980@@ -7911,7 +7940,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28981 if (debugctlmsr)
28982 update_debugctlmsr(debugctlmsr);
28983
28984-#ifndef CONFIG_X86_64
28985+#ifdef CONFIG_X86_32
28986 /*
28987 * The sysexit path does not restore ds/es, so we must set them to
28988 * a reasonable value ourselves.
28989@@ -7920,8 +7949,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28990 * may be executed in interrupt context, which saves and restore segments
28991 * around it, nullifying its effect.
28992 */
28993- loadsegment(ds, __USER_DS);
28994- loadsegment(es, __USER_DS);
28995+ loadsegment(ds, __KERNEL_DS);
28996+ loadsegment(es, __KERNEL_DS);
28997+ loadsegment(ss, __KERNEL_DS);
28998+
28999+#ifdef CONFIG_PAX_KERNEXEC
29000+ loadsegment(fs, __KERNEL_PERCPU);
29001+#endif
29002+
29003+#ifdef CONFIG_PAX_MEMORY_UDEREF
29004+ __set_fs(current_thread_info()->addr_limit);
29005+#endif
29006+
29007 #endif
29008
29009 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
29010diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
29011index 64d76c1..e20a4c1 100644
29012--- a/arch/x86/kvm/x86.c
29013+++ b/arch/x86/kvm/x86.c
29014@@ -1882,8 +1882,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
29015 {
29016 struct kvm *kvm = vcpu->kvm;
29017 int lm = is_long_mode(vcpu);
29018- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
29019- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
29020+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
29021+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
29022 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
29023 : kvm->arch.xen_hvm_config.blob_size_32;
29024 u32 page_num = data & ~PAGE_MASK;
29025@@ -2809,6 +2809,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
29026 if (n < msr_list.nmsrs)
29027 goto out;
29028 r = -EFAULT;
29029+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
29030+ goto out;
29031 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
29032 num_msrs_to_save * sizeof(u32)))
29033 goto out;
29034@@ -5745,7 +5747,7 @@ static struct notifier_block pvclock_gtod_notifier = {
29035 };
29036 #endif
29037
29038-int kvm_arch_init(void *opaque)
29039+int kvm_arch_init(const void *opaque)
29040 {
29041 int r;
29042 struct kvm_x86_ops *ops = opaque;
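
The kvm_arch_dev_ioctl() hunk inserts one defensive check: before num_msrs_to_save drives a copy_to_user() length, it is compared against the capacity of the array being copied out, so a corrupted or racing counter can never export more bytes than msrs_to_save actually holds. The generic shape of that guard:

    #include <errno.h>
    #include <string.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static unsigned int msrs_to_save[16];
    static unsigned int num_msrs_to_save = 8;   /* normally <= 16 */

    static int export_msrs(unsigned int *out, size_t out_cap)
    {
            if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
                    return -EFAULT;  /* counter can't be trusted: refuse */
            if (num_msrs_to_save > out_cap)
                    return -E2BIG;
            /* Only now is the counter safe to use as a copy length. */
            memcpy(out, msrs_to_save, num_msrs_to_save * sizeof(out[0]));
            return 0;
    }

    int main(void)
    {
            unsigned int buf[16];
            return export_msrs(buf, ARRAY_SIZE(buf));
    }
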
29043diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
29044index c1c1544..f90c9d5 100644
29045--- a/arch/x86/lguest/boot.c
29046+++ b/arch/x86/lguest/boot.c
29047@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
29048 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
29049 * Launcher to reboot us.
29050 */
29051-static void lguest_restart(char *reason)
29052+static __noreturn void lguest_restart(char *reason)
29053 {
29054 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
29055+ BUG();
29056 }
29057
29058 /*G:050
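
lguest_restart() is annotated __noreturn and followed by BUG(): the hypercall never comes back, and the trailing trap both documents that and satisfies the compiler, which would otherwise warn that a noreturn function can return. The same idiom in plain C11 (the program aborts by design):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdnoreturn.h>

    /* Stand-in for hcall(LHCALL_SHUTDOWN, ...): in this sketch it really
     * does return, which is exactly the case the trailing trap guards. */
    static void fake_hypercall(const char *reason)
    {
            printf("shutdown requested: %s\n", reason);
    }

    static noreturn void restart(const char *reason)
    {
            fake_hypercall(reason);
            /* BUG() analogue: if control ever gets here the contract is
             * broken, so stop hard instead of falling off the end. */
            abort();
    }

    int main(void)
    {
            restart("demo");
    }
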
29059diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
29060index 00933d5..3a64af9 100644
29061--- a/arch/x86/lib/atomic64_386_32.S
29062+++ b/arch/x86/lib/atomic64_386_32.S
29063@@ -48,6 +48,10 @@ BEGIN(read)
29064 movl (v), %eax
29065 movl 4(v), %edx
29066 RET_ENDP
29067+BEGIN(read_unchecked)
29068+ movl (v), %eax
29069+ movl 4(v), %edx
29070+RET_ENDP
29071 #undef v
29072
29073 #define v %esi
29074@@ -55,6 +59,10 @@ BEGIN(set)
29075 movl %ebx, (v)
29076 movl %ecx, 4(v)
29077 RET_ENDP
29078+BEGIN(set_unchecked)
29079+ movl %ebx, (v)
29080+ movl %ecx, 4(v)
29081+RET_ENDP
29082 #undef v
29083
29084 #define v %esi
29085@@ -70,6 +78,20 @@ RET_ENDP
29086 BEGIN(add)
29087 addl %eax, (v)
29088 adcl %edx, 4(v)
29089+
29090+#ifdef CONFIG_PAX_REFCOUNT
29091+ jno 0f
29092+ subl %eax, (v)
29093+ sbbl %edx, 4(v)
29094+ int $4
29095+0:
29096+ _ASM_EXTABLE(0b, 0b)
29097+#endif
29098+
29099+RET_ENDP
29100+BEGIN(add_unchecked)
29101+ addl %eax, (v)
29102+ adcl %edx, 4(v)
29103 RET_ENDP
29104 #undef v
29105
29106@@ -77,6 +99,24 @@ RET_ENDP
29107 BEGIN(add_return)
29108 addl (v), %eax
29109 adcl 4(v), %edx
29110+
29111+#ifdef CONFIG_PAX_REFCOUNT
29112+ into
29113+1234:
29114+ _ASM_EXTABLE(1234b, 2f)
29115+#endif
29116+
29117+ movl %eax, (v)
29118+ movl %edx, 4(v)
29119+
29120+#ifdef CONFIG_PAX_REFCOUNT
29121+2:
29122+#endif
29123+
29124+RET_ENDP
29125+BEGIN(add_return_unchecked)
29126+ addl (v), %eax
29127+ adcl 4(v), %edx
29128 movl %eax, (v)
29129 movl %edx, 4(v)
29130 RET_ENDP
29131@@ -86,6 +126,20 @@ RET_ENDP
29132 BEGIN(sub)
29133 subl %eax, (v)
29134 sbbl %edx, 4(v)
29135+
29136+#ifdef CONFIG_PAX_REFCOUNT
29137+ jno 0f
29138+ addl %eax, (v)
29139+ adcl %edx, 4(v)
29140+ int $4
29141+0:
29142+ _ASM_EXTABLE(0b, 0b)
29143+#endif
29144+
29145+RET_ENDP
29146+BEGIN(sub_unchecked)
29147+ subl %eax, (v)
29148+ sbbl %edx, 4(v)
29149 RET_ENDP
29150 #undef v
29151
29152@@ -96,6 +150,27 @@ BEGIN(sub_return)
29153 sbbl $0, %edx
29154 addl (v), %eax
29155 adcl 4(v), %edx
29156+
29157+#ifdef CONFIG_PAX_REFCOUNT
29158+ into
29159+1234:
29160+ _ASM_EXTABLE(1234b, 2f)
29161+#endif
29162+
29163+ movl %eax, (v)
29164+ movl %edx, 4(v)
29165+
29166+#ifdef CONFIG_PAX_REFCOUNT
29167+2:
29168+#endif
29169+
29170+RET_ENDP
29171+BEGIN(sub_return_unchecked)
29172+ negl %edx
29173+ negl %eax
29174+ sbbl $0, %edx
29175+ addl (v), %eax
29176+ adcl 4(v), %edx
29177 movl %eax, (v)
29178 movl %edx, 4(v)
29179 RET_ENDP
29180@@ -105,6 +180,20 @@ RET_ENDP
29181 BEGIN(inc)
29182 addl $1, (v)
29183 adcl $0, 4(v)
29184+
29185+#ifdef CONFIG_PAX_REFCOUNT
29186+ jno 0f
29187+ subl $1, (v)
29188+ sbbl $0, 4(v)
29189+ int $4
29190+0:
29191+ _ASM_EXTABLE(0b, 0b)
29192+#endif
29193+
29194+RET_ENDP
29195+BEGIN(inc_unchecked)
29196+ addl $1, (v)
29197+ adcl $0, 4(v)
29198 RET_ENDP
29199 #undef v
29200
29201@@ -114,6 +203,26 @@ BEGIN(inc_return)
29202 movl 4(v), %edx
29203 addl $1, %eax
29204 adcl $0, %edx
29205+
29206+#ifdef CONFIG_PAX_REFCOUNT
29207+ into
29208+1234:
29209+ _ASM_EXTABLE(1234b, 2f)
29210+#endif
29211+
29212+ movl %eax, (v)
29213+ movl %edx, 4(v)
29214+
29215+#ifdef CONFIG_PAX_REFCOUNT
29216+2:
29217+#endif
29218+
29219+RET_ENDP
29220+BEGIN(inc_return_unchecked)
29221+ movl (v), %eax
29222+ movl 4(v), %edx
29223+ addl $1, %eax
29224+ adcl $0, %edx
29225 movl %eax, (v)
29226 movl %edx, 4(v)
29227 RET_ENDP
29228@@ -123,6 +232,20 @@ RET_ENDP
29229 BEGIN(dec)
29230 subl $1, (v)
29231 sbbl $0, 4(v)
29232+
29233+#ifdef CONFIG_PAX_REFCOUNT
29234+ jno 0f
29235+ addl $1, (v)
29236+ adcl $0, 4(v)
29237+ int $4
29238+0:
29239+ _ASM_EXTABLE(0b, 0b)
29240+#endif
29241+
29242+RET_ENDP
29243+BEGIN(dec_unchecked)
29244+ subl $1, (v)
29245+ sbbl $0, 4(v)
29246 RET_ENDP
29247 #undef v
29248
29249@@ -132,6 +255,26 @@ BEGIN(dec_return)
29250 movl 4(v), %edx
29251 subl $1, %eax
29252 sbbl $0, %edx
29253+
29254+#ifdef CONFIG_PAX_REFCOUNT
29255+ into
29256+1234:
29257+ _ASM_EXTABLE(1234b, 2f)
29258+#endif
29259+
29260+ movl %eax, (v)
29261+ movl %edx, 4(v)
29262+
29263+#ifdef CONFIG_PAX_REFCOUNT
29264+2:
29265+#endif
29266+
29267+RET_ENDP
29268+BEGIN(dec_return_unchecked)
29269+ movl (v), %eax
29270+ movl 4(v), %edx
29271+ subl $1, %eax
29272+ sbbl $0, %edx
29273 movl %eax, (v)
29274 movl %edx, 4(v)
29275 RET_ENDP
29276@@ -143,6 +286,13 @@ BEGIN(add_unless)
29277 adcl %edx, %edi
29278 addl (v), %eax
29279 adcl 4(v), %edx
29280+
29281+#ifdef CONFIG_PAX_REFCOUNT
29282+ into
29283+1234:
29284+ _ASM_EXTABLE(1234b, 2f)
29285+#endif
29286+
29287 cmpl %eax, %ecx
29288 je 3f
29289 1:
29290@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29291 1:
29292 addl $1, %eax
29293 adcl $0, %edx
29294+
29295+#ifdef CONFIG_PAX_REFCOUNT
29296+ into
29297+1234:
29298+ _ASM_EXTABLE(1234b, 2f)
29299+#endif
29300+
29301 movl %eax, (v)
29302 movl %edx, 4(v)
29303 movl $1, %eax
29304@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29305 movl 4(v), %edx
29306 subl $1, %eax
29307 sbbl $0, %edx
29308+
29309+#ifdef CONFIG_PAX_REFCOUNT
29310+ into
29311+1234:
29312+ _ASM_EXTABLE(1234b, 1f)
29313+#endif
29314+
29315 js 1f
29316 movl %eax, (v)
29317 movl %edx, 4(v)
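
Every checked atomic64 op in this file gains the same PAX_REFCOUNT epilogue: perform the 64-bit add/sub, and if the signed result overflowed, undo it and raise #OF (the jno/int $4 or into sequences), whose handler kills the offending task before a wrapped reference count can be exploited. Equivalent detect-and-refuse logic in C, using the GCC/Clang __builtin_add_overflow in place of the overflow flag -- note this sketch is not atomic, whereas the asm keeps its locked read-modify-write:

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int64_t counter = INT64_MAX;     /* about to wrap */

    /* Checked increment: on signed overflow, keep the old value and
     * report, mirroring the "jno 0f; undo; int $4" sequence above
     * (the asm applies then undoes; here we never apply). */
    static void atomic64_add_checked(int64_t delta, int64_t *v)
    {
            int64_t result;

            if (__builtin_add_overflow(*v, delta, &result)) {
                    fprintf(stderr, "refcount overflow caught, value held\n");
                    exit(EXIT_FAILURE);     /* stand-in for the #OF handler */
            }
            *v = result;
    }

    int main(void)
    {
            atomic64_add_checked(1, &counter);  /* traps: would wrap */
            printf("%" PRId64 "\n", counter);   /* not reached       */
            return 0;
    }
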
29318diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29319index f5cc9eb..51fa319 100644
29320--- a/arch/x86/lib/atomic64_cx8_32.S
29321+++ b/arch/x86/lib/atomic64_cx8_32.S
29322@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
29323 CFI_STARTPROC
29324
29325 read64 %ecx
29326+ pax_force_retaddr
29327 ret
29328 CFI_ENDPROC
29329 ENDPROC(atomic64_read_cx8)
29330
29331+ENTRY(atomic64_read_unchecked_cx8)
29332+ CFI_STARTPROC
29333+
29334+ read64 %ecx
29335+ pax_force_retaddr
29336+ ret
29337+ CFI_ENDPROC
29338+ENDPROC(atomic64_read_unchecked_cx8)
29339+
29340 ENTRY(atomic64_set_cx8)
29341 CFI_STARTPROC
29342
29343@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
29344 cmpxchg8b (%esi)
29345 jne 1b
29346
29347+ pax_force_retaddr
29348 ret
29349 CFI_ENDPROC
29350 ENDPROC(atomic64_set_cx8)
29351
29352+ENTRY(atomic64_set_unchecked_cx8)
29353+ CFI_STARTPROC
29354+
29355+1:
29356+/* we don't need LOCK_PREFIX since aligned 64-bit writes
29357+ * are atomic on 586 and newer */
29358+ cmpxchg8b (%esi)
29359+ jne 1b
29360+
29361+ pax_force_retaddr
29362+ ret
29363+ CFI_ENDPROC
29364+ENDPROC(atomic64_set_unchecked_cx8)
29365+
29366 ENTRY(atomic64_xchg_cx8)
29367 CFI_STARTPROC
29368
29369@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
29370 cmpxchg8b (%esi)
29371 jne 1b
29372
29373+ pax_force_retaddr
29374 ret
29375 CFI_ENDPROC
29376 ENDPROC(atomic64_xchg_cx8)
29377
29378-.macro addsub_return func ins insc
29379-ENTRY(atomic64_\func\()_return_cx8)
29380+.macro addsub_return func ins insc unchecked=""
29381+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29382 CFI_STARTPROC
29383 SAVE ebp
29384 SAVE ebx
29385@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29386 movl %edx, %ecx
29387 \ins\()l %esi, %ebx
29388 \insc\()l %edi, %ecx
29389+
29390+.ifb \unchecked
29391+#ifdef CONFIG_PAX_REFCOUNT
29392+ into
29393+2:
29394+ _ASM_EXTABLE(2b, 3f)
29395+#endif
29396+.endif
29397+
29398 LOCK_PREFIX
29399 cmpxchg8b (%ebp)
29400 jne 1b
29401-
29402-10:
29403 movl %ebx, %eax
29404 movl %ecx, %edx
29405+
29406+.ifb \unchecked
29407+#ifdef CONFIG_PAX_REFCOUNT
29408+3:
29409+#endif
29410+.endif
29411+
29412 RESTORE edi
29413 RESTORE esi
29414 RESTORE ebx
29415 RESTORE ebp
29416+ pax_force_retaddr
29417 ret
29418 CFI_ENDPROC
29419-ENDPROC(atomic64_\func\()_return_cx8)
29420+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29421 .endm
29422
29423 addsub_return add add adc
29424 addsub_return sub sub sbb
29425+addsub_return add add adc _unchecked
29426+addsub_return sub sub sbb _unchecked
29427
29428-.macro incdec_return func ins insc
29429-ENTRY(atomic64_\func\()_return_cx8)
29430+.macro incdec_return func ins insc unchecked=""
29431+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29432 CFI_STARTPROC
29433 SAVE ebx
29434
29435@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
29436 movl %edx, %ecx
29437 \ins\()l $1, %ebx
29438 \insc\()l $0, %ecx
29439+
29440+.ifb \unchecked
29441+#ifdef CONFIG_PAX_REFCOUNT
29442+ into
29443+2:
29444+ _ASM_EXTABLE(2b, 3f)
29445+#endif
29446+.endif
29447+
29448 LOCK_PREFIX
29449 cmpxchg8b (%esi)
29450 jne 1b
29451
29452-10:
29453 movl %ebx, %eax
29454 movl %ecx, %edx
29455+
29456+.ifb \unchecked
29457+#ifdef CONFIG_PAX_REFCOUNT
29458+3:
29459+#endif
29460+.endif
29461+
29462 RESTORE ebx
29463+ pax_force_retaddr
29464 ret
29465 CFI_ENDPROC
29466-ENDPROC(atomic64_\func\()_return_cx8)
29467+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29468 .endm
29469
29470 incdec_return inc add adc
29471 incdec_return dec sub sbb
29472+incdec_return inc add adc _unchecked
29473+incdec_return dec sub sbb _unchecked
29474
29475 ENTRY(atomic64_dec_if_positive_cx8)
29476 CFI_STARTPROC
29477@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29478 movl %edx, %ecx
29479 subl $1, %ebx
29480 sbb $0, %ecx
29481+
29482+#ifdef CONFIG_PAX_REFCOUNT
29483+ into
29484+1234:
29485+ _ASM_EXTABLE(1234b, 2f)
29486+#endif
29487+
29488 js 2f
29489 LOCK_PREFIX
29490 cmpxchg8b (%esi)
29491@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
29492 movl %ebx, %eax
29493 movl %ecx, %edx
29494 RESTORE ebx
29495+ pax_force_retaddr
29496 ret
29497 CFI_ENDPROC
29498 ENDPROC(atomic64_dec_if_positive_cx8)
29499@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
29500 movl %edx, %ecx
29501 addl %ebp, %ebx
29502 adcl %edi, %ecx
29503+
29504+#ifdef CONFIG_PAX_REFCOUNT
29505+ into
29506+1234:
29507+ _ASM_EXTABLE(1234b, 3f)
29508+#endif
29509+
29510 LOCK_PREFIX
29511 cmpxchg8b (%esi)
29512 jne 1b
29513@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
29514 CFI_ADJUST_CFA_OFFSET -8
29515 RESTORE ebx
29516 RESTORE ebp
29517+ pax_force_retaddr
29518 ret
29519 4:
29520 cmpl %edx, 4(%esp)
29521@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
29522 xorl %ecx, %ecx
29523 addl $1, %ebx
29524 adcl %edx, %ecx
29525+
29526+#ifdef CONFIG_PAX_REFCOUNT
29527+ into
29528+1234:
29529+ _ASM_EXTABLE(1234b, 3f)
29530+#endif
29531+
29532 LOCK_PREFIX
29533 cmpxchg8b (%esi)
29534 jne 1b
29535@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
29536 movl $1, %eax
29537 3:
29538 RESTORE ebx
29539+ pax_force_retaddr
29540 ret
29541 CFI_ENDPROC
29542 ENDPROC(atomic64_inc_not_zero_cx8)
29543diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
29544index e78b8eee..7e173a8 100644
29545--- a/arch/x86/lib/checksum_32.S
29546+++ b/arch/x86/lib/checksum_32.S
29547@@ -29,7 +29,8 @@
29548 #include <asm/dwarf2.h>
29549 #include <asm/errno.h>
29550 #include <asm/asm.h>
29551-
29552+#include <asm/segment.h>
29553+
29554 /*
29555 * computes a partial checksum, e.g. for TCP/UDP fragments
29556 */
29557@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
29558
29559 #define ARGBASE 16
29560 #define FP 12
29561-
29562-ENTRY(csum_partial_copy_generic)
29563+
29564+ENTRY(csum_partial_copy_generic_to_user)
29565 CFI_STARTPROC
29566+
29567+#ifdef CONFIG_PAX_MEMORY_UDEREF
29568+ pushl_cfi %gs
29569+ popl_cfi %es
29570+ jmp csum_partial_copy_generic
29571+#endif
29572+
29573+ENTRY(csum_partial_copy_generic_from_user)
29574+
29575+#ifdef CONFIG_PAX_MEMORY_UDEREF
29576+ pushl_cfi %gs
29577+ popl_cfi %ds
29578+#endif
29579+
29580+ENTRY(csum_partial_copy_generic)
29581 subl $4,%esp
29582 CFI_ADJUST_CFA_OFFSET 4
29583 pushl_cfi %edi
29584@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
29585 jmp 4f
29586 SRC(1: movw (%esi), %bx )
29587 addl $2, %esi
29588-DST( movw %bx, (%edi) )
29589+DST( movw %bx, %es:(%edi) )
29590 addl $2, %edi
29591 addw %bx, %ax
29592 adcl $0, %eax
29593@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
29594 SRC(1: movl (%esi), %ebx )
29595 SRC( movl 4(%esi), %edx )
29596 adcl %ebx, %eax
29597-DST( movl %ebx, (%edi) )
29598+DST( movl %ebx, %es:(%edi) )
29599 adcl %edx, %eax
29600-DST( movl %edx, 4(%edi) )
29601+DST( movl %edx, %es:4(%edi) )
29602
29603 SRC( movl 8(%esi), %ebx )
29604 SRC( movl 12(%esi), %edx )
29605 adcl %ebx, %eax
29606-DST( movl %ebx, 8(%edi) )
29607+DST( movl %ebx, %es:8(%edi) )
29608 adcl %edx, %eax
29609-DST( movl %edx, 12(%edi) )
29610+DST( movl %edx, %es:12(%edi) )
29611
29612 SRC( movl 16(%esi), %ebx )
29613 SRC( movl 20(%esi), %edx )
29614 adcl %ebx, %eax
29615-DST( movl %ebx, 16(%edi) )
29616+DST( movl %ebx, %es:16(%edi) )
29617 adcl %edx, %eax
29618-DST( movl %edx, 20(%edi) )
29619+DST( movl %edx, %es:20(%edi) )
29620
29621 SRC( movl 24(%esi), %ebx )
29622 SRC( movl 28(%esi), %edx )
29623 adcl %ebx, %eax
29624-DST( movl %ebx, 24(%edi) )
29625+DST( movl %ebx, %es:24(%edi) )
29626 adcl %edx, %eax
29627-DST( movl %edx, 28(%edi) )
29628+DST( movl %edx, %es:28(%edi) )
29629
29630 lea 32(%esi), %esi
29631 lea 32(%edi), %edi
29632@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
29633 shrl $2, %edx # This clears CF
29634 SRC(3: movl (%esi), %ebx )
29635 adcl %ebx, %eax
29636-DST( movl %ebx, (%edi) )
29637+DST( movl %ebx, %es:(%edi) )
29638 lea 4(%esi), %esi
29639 lea 4(%edi), %edi
29640 dec %edx
29641@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
29642 jb 5f
29643 SRC( movw (%esi), %cx )
29644 leal 2(%esi), %esi
29645-DST( movw %cx, (%edi) )
29646+DST( movw %cx, %es:(%edi) )
29647 leal 2(%edi), %edi
29648 je 6f
29649 shll $16,%ecx
29650 SRC(5: movb (%esi), %cl )
29651-DST( movb %cl, (%edi) )
29652+DST( movb %cl, %es:(%edi) )
29653 6: addl %ecx, %eax
29654 adcl $0, %eax
29655 7:
29656@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
29657
29658 6001:
29659 movl ARGBASE+20(%esp), %ebx # src_err_ptr
29660- movl $-EFAULT, (%ebx)
29661+ movl $-EFAULT, %ss:(%ebx)
29662
29663 # zero the complete destination - computing the rest
29664 # is too much work
29665@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
29666
29667 6002:
29668 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29669- movl $-EFAULT,(%ebx)
29670+ movl $-EFAULT,%ss:(%ebx)
29671 jmp 5000b
29672
29673 .previous
29674
29675+ pushl_cfi %ss
29676+ popl_cfi %ds
29677+ pushl_cfi %ss
29678+ popl_cfi %es
29679 popl_cfi %ebx
29680 CFI_RESTORE ebx
29681 popl_cfi %esi
29682@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
29683 popl_cfi %ecx # equivalent to addl $4,%esp
29684 ret
29685 CFI_ENDPROC
29686-ENDPROC(csum_partial_copy_generic)
29687+ENDPROC(csum_partial_copy_generic_to_user)
29688
29689 #else
29690
29691 /* Version for PentiumII/PPro */
29692
29693 #define ROUND1(x) \
29694+ nop; nop; nop; \
29695 SRC(movl x(%esi), %ebx ) ; \
29696 addl %ebx, %eax ; \
29697- DST(movl %ebx, x(%edi) ) ;
29698+ DST(movl %ebx, %es:x(%edi)) ;
29699
29700 #define ROUND(x) \
29701+ nop; nop; nop; \
29702 SRC(movl x(%esi), %ebx ) ; \
29703 adcl %ebx, %eax ; \
29704- DST(movl %ebx, x(%edi) ) ;
29705+ DST(movl %ebx, %es:x(%edi)) ;
29706
29707 #define ARGBASE 12
29708-
29709-ENTRY(csum_partial_copy_generic)
29710+
29711+ENTRY(csum_partial_copy_generic_to_user)
29712 CFI_STARTPROC
29713+
29714+#ifdef CONFIG_PAX_MEMORY_UDEREF
29715+ pushl_cfi %gs
29716+ popl_cfi %es
29717+ jmp csum_partial_copy_generic
29718+#endif
29719+
29720+ENTRY(csum_partial_copy_generic_from_user)
29721+
29722+#ifdef CONFIG_PAX_MEMORY_UDEREF
29723+ pushl_cfi %gs
29724+ popl_cfi %ds
29725+#endif
29726+
29727+ENTRY(csum_partial_copy_generic)
29728 pushl_cfi %ebx
29729 CFI_REL_OFFSET ebx, 0
29730 pushl_cfi %edi
29731@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
29732 subl %ebx, %edi
29733 lea -1(%esi),%edx
29734 andl $-32,%edx
29735- lea 3f(%ebx,%ebx), %ebx
29736+ lea 3f(%ebx,%ebx,2), %ebx
29737 testl %esi, %esi
29738 jmp *%ebx
29739 1: addl $64,%esi
29740@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
29741 jb 5f
29742 SRC( movw (%esi), %dx )
29743 leal 2(%esi), %esi
29744-DST( movw %dx, (%edi) )
29745+DST( movw %dx, %es:(%edi) )
29746 leal 2(%edi), %edi
29747 je 6f
29748 shll $16,%edx
29749 5:
29750 SRC( movb (%esi), %dl )
29751-DST( movb %dl, (%edi) )
29752+DST( movb %dl, %es:(%edi) )
29753 6: addl %edx, %eax
29754 adcl $0, %eax
29755 7:
29756 .section .fixup, "ax"
29757 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
29758- movl $-EFAULT, (%ebx)
29759+ movl $-EFAULT, %ss:(%ebx)
29760 # zero the complete destination (computing the rest is too much work)
29761 movl ARGBASE+8(%esp),%edi # dst
29762 movl ARGBASE+12(%esp),%ecx # len
29763@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
29764 rep; stosb
29765 jmp 7b
29766 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29767- movl $-EFAULT, (%ebx)
29768+ movl $-EFAULT, %ss:(%ebx)
29769 jmp 7b
29770 .previous
29771
29772+#ifdef CONFIG_PAX_MEMORY_UDEREF
29773+ pushl_cfi %ss
29774+ popl_cfi %ds
29775+ pushl_cfi %ss
29776+ popl_cfi %es
29777+#endif
29778+
29779 popl_cfi %esi
29780 CFI_RESTORE esi
29781 popl_cfi %edi
29782@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
29783 CFI_RESTORE ebx
29784 ret
29785 CFI_ENDPROC
29786-ENDPROC(csum_partial_copy_generic)
29787+ENDPROC(csum_partial_copy_generic_to_user)
29788
29789 #undef ROUND
29790 #undef ROUND1
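
Under UDEREF on i386 the user and kernel address spaces are separated by segmentation, so csum_partial_copy_generic grows _to_user/_from_user wrappers that load the user segment (%gs) into %es or %ds before falling through into the shared body, and every destination access in the body gains an explicit %es: override. A C-with-inline-asm sketch of such an override load -- purely illustrative, since in userspace %es and %ds alias the same flat segment:

    #include <stdio.h>

    /* Read a 32-bit word through an explicit %es segment override, the
     * addressing the DST() macros above switch to.  In the UDEREF kernel
     * %es selects the user segment while %ds stays kernel; here it is a
     * plain load. */
    static unsigned int load_via_es(const unsigned int *p)
    {
            unsigned int v;
            __asm__ volatile("movl %%es:(%1), %0" : "=r"(v) : "r"(p) : "memory");
            return v;
    }

    int main(void)
    {
            unsigned int x = 0xdeadbeef;
            printf("%#x\n", load_via_es(&x));
            return 0;
    }
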
29791diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
29792index f2145cf..cea889d 100644
29793--- a/arch/x86/lib/clear_page_64.S
29794+++ b/arch/x86/lib/clear_page_64.S
29795@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
29796 movl $4096/8,%ecx
29797 xorl %eax,%eax
29798 rep stosq
29799+ pax_force_retaddr
29800 ret
29801 CFI_ENDPROC
29802 ENDPROC(clear_page_c)
29803@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
29804 movl $4096,%ecx
29805 xorl %eax,%eax
29806 rep stosb
29807+ pax_force_retaddr
29808 ret
29809 CFI_ENDPROC
29810 ENDPROC(clear_page_c_e)
29811@@ -43,6 +45,7 @@ ENTRY(clear_page)
29812 leaq 64(%rdi),%rdi
29813 jnz .Lloop
29814 nop
29815+ pax_force_retaddr
29816 ret
29817 CFI_ENDPROC
29818 .Lclear_page_end:
29819@@ -58,7 +61,7 @@ ENDPROC(clear_page)
29820
29821 #include <asm/cpufeature.h>
29822
29823- .section .altinstr_replacement,"ax"
29824+ .section .altinstr_replacement,"a"
29825 1: .byte 0xeb /* jmp <disp8> */
29826 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
29827 2: .byte 0xeb /* jmp <disp8> */
29828diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
29829index 40a1725..5d12ac4 100644
29830--- a/arch/x86/lib/cmpxchg16b_emu.S
29831+++ b/arch/x86/lib/cmpxchg16b_emu.S
29832@@ -8,6 +8,7 @@
29833 #include <linux/linkage.h>
29834 #include <asm/dwarf2.h>
29835 #include <asm/percpu.h>
29836+#include <asm/alternative-asm.h>
29837
29838 .text
29839
29840@@ -46,12 +47,14 @@ CFI_STARTPROC
29841 CFI_REMEMBER_STATE
29842 popfq_cfi
29843 mov $1, %al
29844+ pax_force_retaddr
29845 ret
29846
29847 CFI_RESTORE_STATE
29848 .Lnot_same:
29849 popfq_cfi
29850 xor %al,%al
29851+ pax_force_retaddr
29852 ret
29853
29854 CFI_ENDPROC
29855diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
29856index 176cca6..e0d658e 100644
29857--- a/arch/x86/lib/copy_page_64.S
29858+++ b/arch/x86/lib/copy_page_64.S
29859@@ -9,6 +9,7 @@ copy_page_rep:
29860 CFI_STARTPROC
29861 movl $4096/8, %ecx
29862 rep movsq
29863+ pax_force_retaddr
29864 ret
29865 CFI_ENDPROC
29866 ENDPROC(copy_page_rep)
29867@@ -24,8 +25,8 @@ ENTRY(copy_page)
29868 CFI_ADJUST_CFA_OFFSET 2*8
29869 movq %rbx, (%rsp)
29870 CFI_REL_OFFSET rbx, 0
29871- movq %r12, 1*8(%rsp)
29872- CFI_REL_OFFSET r12, 1*8
29873+ movq %r13, 1*8(%rsp)
29874+ CFI_REL_OFFSET r13, 1*8
29875
29876 movl $(4096/64)-5, %ecx
29877 .p2align 4
29878@@ -38,7 +39,7 @@ ENTRY(copy_page)
29879 movq 0x8*4(%rsi), %r9
29880 movq 0x8*5(%rsi), %r10
29881 movq 0x8*6(%rsi), %r11
29882- movq 0x8*7(%rsi), %r12
29883+ movq 0x8*7(%rsi), %r13
29884
29885 prefetcht0 5*64(%rsi)
29886
29887@@ -49,7 +50,7 @@ ENTRY(copy_page)
29888 movq %r9, 0x8*4(%rdi)
29889 movq %r10, 0x8*5(%rdi)
29890 movq %r11, 0x8*6(%rdi)
29891- movq %r12, 0x8*7(%rdi)
29892+ movq %r13, 0x8*7(%rdi)
29893
29894 leaq 64 (%rsi), %rsi
29895 leaq 64 (%rdi), %rdi
29896@@ -68,7 +69,7 @@ ENTRY(copy_page)
29897 movq 0x8*4(%rsi), %r9
29898 movq 0x8*5(%rsi), %r10
29899 movq 0x8*6(%rsi), %r11
29900- movq 0x8*7(%rsi), %r12
29901+ movq 0x8*7(%rsi), %r13
29902
29903 movq %rax, 0x8*0(%rdi)
29904 movq %rbx, 0x8*1(%rdi)
29905@@ -77,7 +78,7 @@ ENTRY(copy_page)
29906 movq %r9, 0x8*4(%rdi)
29907 movq %r10, 0x8*5(%rdi)
29908 movq %r11, 0x8*6(%rdi)
29909- movq %r12, 0x8*7(%rdi)
29910+ movq %r13, 0x8*7(%rdi)
29911
29912 leaq 64(%rdi), %rdi
29913 leaq 64(%rsi), %rsi
29914@@ -85,10 +86,11 @@ ENTRY(copy_page)
29915
29916 movq (%rsp), %rbx
29917 CFI_RESTORE rbx
29918- movq 1*8(%rsp), %r12
29919- CFI_RESTORE r12
29920+ movq 1*8(%rsp), %r13
29921+ CFI_RESTORE r13
29922 addq $2*8, %rsp
29923 CFI_ADJUST_CFA_OFFSET -2*8
29924+ pax_force_retaddr
29925 ret
29926 .Lcopy_page_end:
29927 CFI_ENDPROC
29928@@ -99,7 +101,7 @@ ENDPROC(copy_page)
29929
29930 #include <asm/cpufeature.h>
29931
29932- .section .altinstr_replacement,"ax"
29933+ .section .altinstr_replacement,"a"
29934 1: .byte 0xeb /* jmp <disp8> */
29935 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
29936 2:
29937diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
29938index dee945d..a84067b 100644
29939--- a/arch/x86/lib/copy_user_64.S
29940+++ b/arch/x86/lib/copy_user_64.S
29941@@ -18,31 +18,7 @@
29942 #include <asm/alternative-asm.h>
29943 #include <asm/asm.h>
29944 #include <asm/smap.h>
29945-
29946-/*
29947- * By placing feature2 after feature1 in altinstructions section, we logically
29948- * implement:
29949- * If CPU has feature2, jmp to alt2 is used
29950- * else if CPU has feature1, jmp to alt1 is used
29951- * else jmp to orig is used.
29952- */
29953- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
29954-0:
29955- .byte 0xe9 /* 32bit jump */
29956- .long \orig-1f /* by default jump to orig */
29957-1:
29958- .section .altinstr_replacement,"ax"
29959-2: .byte 0xe9 /* near jump with 32bit immediate */
29960- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
29961-3: .byte 0xe9 /* near jump with 32bit immediate */
29962- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
29963- .previous
29964-
29965- .section .altinstructions,"a"
29966- altinstruction_entry 0b,2b,\feature1,5,5
29967- altinstruction_entry 0b,3b,\feature2,5,5
29968- .previous
29969- .endm
29970+#include <asm/pgtable.h>
29971
29972 .macro ALIGN_DESTINATION
29973 #ifdef FIX_ALIGNMENT
29974@@ -70,52 +46,6 @@
29975 #endif
29976 .endm
29977
29978-/* Standard copy_to_user with segment limit checking */
29979-ENTRY(_copy_to_user)
29980- CFI_STARTPROC
29981- GET_THREAD_INFO(%rax)
29982- movq %rdi,%rcx
29983- addq %rdx,%rcx
29984- jc bad_to_user
29985- cmpq TI_addr_limit(%rax),%rcx
29986- ja bad_to_user
29987- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29988- copy_user_generic_unrolled,copy_user_generic_string, \
29989- copy_user_enhanced_fast_string
29990- CFI_ENDPROC
29991-ENDPROC(_copy_to_user)
29992-
29993-/* Standard copy_from_user with segment limit checking */
29994-ENTRY(_copy_from_user)
29995- CFI_STARTPROC
29996- GET_THREAD_INFO(%rax)
29997- movq %rsi,%rcx
29998- addq %rdx,%rcx
29999- jc bad_from_user
30000- cmpq TI_addr_limit(%rax),%rcx
30001- ja bad_from_user
30002- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
30003- copy_user_generic_unrolled,copy_user_generic_string, \
30004- copy_user_enhanced_fast_string
30005- CFI_ENDPROC
30006-ENDPROC(_copy_from_user)
30007-
30008- .section .fixup,"ax"
30009- /* must zero dest */
30010-ENTRY(bad_from_user)
30011-bad_from_user:
30012- CFI_STARTPROC
30013- movl %edx,%ecx
30014- xorl %eax,%eax
30015- rep
30016- stosb
30017-bad_to_user:
30018- movl %edx,%eax
30019- ret
30020- CFI_ENDPROC
30021-ENDPROC(bad_from_user)
30022- .previous
30023-
30024 /*
30025 * copy_user_generic_unrolled - memory copy with exception handling.
30026 * This version is for CPUs like P4 that don't have efficient micro
30027@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
30028 */
30029 ENTRY(copy_user_generic_unrolled)
30030 CFI_STARTPROC
30031+ ASM_PAX_OPEN_USERLAND
30032 ASM_STAC
30033 cmpl $8,%edx
30034 jb 20f /* less then 8 bytes, go to byte copy loop */
30035@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
30036 jnz 21b
30037 23: xor %eax,%eax
30038 ASM_CLAC
30039+ ASM_PAX_CLOSE_USERLAND
30040+ pax_force_retaddr
30041 ret
30042
30043 .section .fixup,"ax"
30044@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
30045 */
30046 ENTRY(copy_user_generic_string)
30047 CFI_STARTPROC
30048+ ASM_PAX_OPEN_USERLAND
30049 ASM_STAC
30050 cmpl $8,%edx
30051 jb 2f /* less than 8 bytes, go to byte copy loop */
30052@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
30053 movsb
30054 xorl %eax,%eax
30055 ASM_CLAC
30056+ ASM_PAX_CLOSE_USERLAND
30057+ pax_force_retaddr
30058 ret
30059
30060 .section .fixup,"ax"
30061@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
30062 */
30063 ENTRY(copy_user_enhanced_fast_string)
30064 CFI_STARTPROC
30065+ ASM_PAX_OPEN_USERLAND
30066 ASM_STAC
30067 movl %edx,%ecx
30068 1: rep
30069 movsb
30070 xorl %eax,%eax
30071 ASM_CLAC
30072+ ASM_PAX_CLOSE_USERLAND
30073+ pax_force_retaddr
30074 ret
30075
30076 .section .fixup,"ax"
30077diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
30078index 6a4f43c..c70fb52 100644
30079--- a/arch/x86/lib/copy_user_nocache_64.S
30080+++ b/arch/x86/lib/copy_user_nocache_64.S
30081@@ -8,6 +8,7 @@
30082
30083 #include <linux/linkage.h>
30084 #include <asm/dwarf2.h>
30085+#include <asm/alternative-asm.h>
30086
30087 #define FIX_ALIGNMENT 1
30088
30089@@ -16,6 +17,7 @@
30090 #include <asm/thread_info.h>
30091 #include <asm/asm.h>
30092 #include <asm/smap.h>
30093+#include <asm/pgtable.h>
30094
30095 .macro ALIGN_DESTINATION
30096 #ifdef FIX_ALIGNMENT
30097@@ -49,6 +51,16 @@
30098 */
30099 ENTRY(__copy_user_nocache)
30100 CFI_STARTPROC
30101+
30102+#ifdef CONFIG_PAX_MEMORY_UDEREF
30103+ mov pax_user_shadow_base,%rcx
30104+ cmp %rcx,%rsi
30105+ jae 1f
30106+ add %rcx,%rsi
30107+1:
30108+#endif
30109+
30110+ ASM_PAX_OPEN_USERLAND
30111 ASM_STAC
30112 cmpl $8,%edx
30113 jb 20f /* less then 8 bytes, go to byte copy loop */
30114@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
30115 jnz 21b
30116 23: xorl %eax,%eax
30117 ASM_CLAC
30118+ ASM_PAX_CLOSE_USERLAND
30119 sfence
30120+ pax_force_retaddr
30121 ret
30122
30123 .section .fixup,"ax"
30124diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
30125index 2419d5f..fe52d0e 100644
30126--- a/arch/x86/lib/csum-copy_64.S
30127+++ b/arch/x86/lib/csum-copy_64.S
30128@@ -9,6 +9,7 @@
30129 #include <asm/dwarf2.h>
30130 #include <asm/errno.h>
30131 #include <asm/asm.h>
30132+#include <asm/alternative-asm.h>
30133
30134 /*
30135 * Checksum copy with exception handling.
30136@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
30137 CFI_ADJUST_CFA_OFFSET 7*8
30138 movq %rbx, 2*8(%rsp)
30139 CFI_REL_OFFSET rbx, 2*8
30140- movq %r12, 3*8(%rsp)
30141- CFI_REL_OFFSET r12, 3*8
30142+ movq %r15, 3*8(%rsp)
30143+ CFI_REL_OFFSET r15, 3*8
30144 movq %r14, 4*8(%rsp)
30145 CFI_REL_OFFSET r14, 4*8
30146 movq %r13, 5*8(%rsp)
30147@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
30148 movl %edx, %ecx
30149
30150 xorl %r9d, %r9d
30151- movq %rcx, %r12
30152+ movq %rcx, %r15
30153
30154- shrq $6, %r12
30155+ shrq $6, %r15
30156 jz .Lhandle_tail /* < 64 */
30157
30158 clc
30159
30160 /* main loop. clear in 64 byte blocks */
30161 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
30162- /* r11: temp3, rdx: temp4, r12 loopcnt */
30163+ /* r11: temp3, rdx: temp4, r15 loopcnt */
30164 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
30165 .p2align 4
30166 .Lloop:
30167@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
30168 adcq %r14, %rax
30169 adcq %r13, %rax
30170
30171- decl %r12d
30172+ decl %r15d
30173
30174 dest
30175 movq %rbx, (%rsi)
30176@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
30177 .Lende:
30178 movq 2*8(%rsp), %rbx
30179 CFI_RESTORE rbx
30180- movq 3*8(%rsp), %r12
30181- CFI_RESTORE r12
30182+ movq 3*8(%rsp), %r15
30183+ CFI_RESTORE r15
30184 movq 4*8(%rsp), %r14
30185 CFI_RESTORE r14
30186 movq 5*8(%rsp), %r13
30187@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30188 CFI_RESTORE rbp
30189 addq $7*8, %rsp
30190 CFI_ADJUST_CFA_OFFSET -7*8
30191+ pax_force_retaddr
30192 ret
30193 CFI_RESTORE_STATE
30194
30195diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30196index 1318f75..44c30fd 100644
30197--- a/arch/x86/lib/csum-wrappers_64.c
30198+++ b/arch/x86/lib/csum-wrappers_64.c
30199@@ -52,10 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30200 len -= 2;
30201 }
30202 }
30203+ pax_open_userland();
30204 stac();
30205- isum = csum_partial_copy_generic((__force const void *)src,
30206+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30207 dst, len, isum, errp, NULL);
30208 clac();
30209+ pax_close_userland();
30210 if (unlikely(*errp))
30211 goto out_err;
30212
30213@@ -109,10 +111,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30214 }
30215
30216 *errp = 0;
30217+ pax_open_userland();
30218 stac();
30219- ret = csum_partial_copy_generic(src, (void __force *)dst,
30220+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30221 len, isum, NULL, errp);
30222 clac();
30223+ pax_close_userland();
30224 return ret;
30225 }
30226 EXPORT_SYMBOL(csum_partial_copy_to_user);
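
These wrappers show the C-side discipline for touching userland under UDEREF: map the pointer through ____m(), and bracket the access with pax_open_userland()/pax_close_userland(), nested inside the existing stac()/clac() SMAP bracket. The pairing is the important part; a sketch where open_userland()/close_userland() are hypothetical stubs for the real primitives:

    #include <string.h>

    static int userland_open;   /* models the per-CPU open/closed state */

    static void open_userland(void)  { userland_open = 1; }  /* pax_open_userland()  */
    static void close_userland(void) { userland_open = 0; }  /* pax_close_userland() */

    /* Every raw userland access sits inside an open/close pair, the same
     * bracketing the hunk adds around csum_partial_copy_generic(). */
    static int copy_from_user_sketch(void *dst, const void *usrc, size_t n)
    {
            open_userland();
            memcpy(dst, usrc, n);   /* the access that needed the window */
            close_userland();
            return 0;
    }

    int main(void)
    {
            char src[8] = "payload", dst[8];
            return copy_from_user_sketch(dst, src, sizeof(src));
    }
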
30227diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30228index a451235..a74bfa3 100644
30229--- a/arch/x86/lib/getuser.S
30230+++ b/arch/x86/lib/getuser.S
30231@@ -33,17 +33,40 @@
30232 #include <asm/thread_info.h>
30233 #include <asm/asm.h>
30234 #include <asm/smap.h>
30235+#include <asm/segment.h>
30236+#include <asm/pgtable.h>
30237+#include <asm/alternative-asm.h>
30238+
30239+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30240+#define __copyuser_seg gs;
30241+#else
30242+#define __copyuser_seg
30243+#endif
30244
30245 .text
30246 ENTRY(__get_user_1)
30247 CFI_STARTPROC
30248+
30249+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30250 GET_THREAD_INFO(%_ASM_DX)
30251 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30252 jae bad_get_user
30253+
30254+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30255+ mov pax_user_shadow_base,%_ASM_DX
30256+ cmp %_ASM_DX,%_ASM_AX
30257+ jae 1234f
30258+ add %_ASM_DX,%_ASM_AX
30259+1234:
30260+#endif
30261+
30262+#endif
30263+
30264 ASM_STAC
30265-1: movzbl (%_ASM_AX),%edx
30266+1: __copyuser_seg movzbl (%_ASM_AX),%edx
30267 xor %eax,%eax
30268 ASM_CLAC
30269+ pax_force_retaddr
30270 ret
30271 CFI_ENDPROC
30272 ENDPROC(__get_user_1)
30273@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30274 ENTRY(__get_user_2)
30275 CFI_STARTPROC
30276 add $1,%_ASM_AX
30277+
30278+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30279 jc bad_get_user
30280 GET_THREAD_INFO(%_ASM_DX)
30281 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30282 jae bad_get_user
30283+
30284+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30285+ mov pax_user_shadow_base,%_ASM_DX
30286+ cmp %_ASM_DX,%_ASM_AX
30287+ jae 1234f
30288+ add %_ASM_DX,%_ASM_AX
30289+1234:
30290+#endif
30291+
30292+#endif
30293+
30294 ASM_STAC
30295-2: movzwl -1(%_ASM_AX),%edx
30296+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30297 xor %eax,%eax
30298 ASM_CLAC
30299+ pax_force_retaddr
30300 ret
30301 CFI_ENDPROC
30302 ENDPROC(__get_user_2)
30303@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30304 ENTRY(__get_user_4)
30305 CFI_STARTPROC
30306 add $3,%_ASM_AX
30307+
30308+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30309 jc bad_get_user
30310 GET_THREAD_INFO(%_ASM_DX)
30311 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30312 jae bad_get_user
30313+
30314+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30315+ mov pax_user_shadow_base,%_ASM_DX
30316+ cmp %_ASM_DX,%_ASM_AX
30317+ jae 1234f
30318+ add %_ASM_DX,%_ASM_AX
30319+1234:
30320+#endif
30321+
30322+#endif
30323+
30324 ASM_STAC
30325-3: movl -3(%_ASM_AX),%edx
30326+3: __copyuser_seg movl -3(%_ASM_AX),%edx
30327 xor %eax,%eax
30328 ASM_CLAC
30329+ pax_force_retaddr
30330 ret
30331 CFI_ENDPROC
30332 ENDPROC(__get_user_4)
30333@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30334 GET_THREAD_INFO(%_ASM_DX)
30335 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30336 jae bad_get_user
30337+
30338+#ifdef CONFIG_PAX_MEMORY_UDEREF
30339+ mov pax_user_shadow_base,%_ASM_DX
30340+ cmp %_ASM_DX,%_ASM_AX
30341+ jae 1234f
30342+ add %_ASM_DX,%_ASM_AX
30343+1234:
30344+#endif
30345+
30346 ASM_STAC
30347 4: movq -7(%_ASM_AX),%rdx
30348 xor %eax,%eax
30349 ASM_CLAC
30350+ pax_force_retaddr
30351 ret
30352 #else
30353 add $7,%_ASM_AX
30354@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30355 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30356 jae bad_get_user_8
30357 ASM_STAC
30358-4: movl -7(%_ASM_AX),%edx
30359-5: movl -3(%_ASM_AX),%ecx
30360+4: __copyuser_seg movl -7(%_ASM_AX),%edx
30361+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30362 xor %eax,%eax
30363 ASM_CLAC
30364+ pax_force_retaddr
30365 ret
30366 #endif
30367 CFI_ENDPROC
30368@@ -113,6 +175,7 @@ bad_get_user:
30369 xor %edx,%edx
30370 mov $(-EFAULT),%_ASM_AX
30371 ASM_CLAC
30372+ pax_force_retaddr
30373 ret
30374 CFI_ENDPROC
30375 END(bad_get_user)
30376@@ -124,6 +187,7 @@ bad_get_user_8:
30377 xor %ecx,%ecx
30378 mov $(-EFAULT),%_ASM_AX
30379 ASM_CLAC
30380+ pax_force_retaddr
30381 ret
30382 CFI_ENDPROC
30383 END(bad_get_user_8)
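
On amd64 with UDEREF, the block added to each __get_user_N rebases a genuine userland pointer into the shadow mapping before dereferencing it (on i386 the gs override from __copyuser_seg does the equivalent job). A C model of the rebase, mirroring the cmp/jae/add sequence; the shadow base value here is an assumption for illustration:

    #include <stdint.h>

    /* Hypothetical stand-in for the kernel's pax_user_shadow_base variable. */
    static const uintptr_t pax_user_shadow_base = 0xffff880000000000ULL;

    /* Mirrors: mov pax_user_shadow_base,%rdx; cmp %rdx,%rax; jae 1234f; add %rdx,%rax */
    static inline uintptr_t uderef_rebase(uintptr_t addr)
    {
            if (addr < pax_user_shadow_base)        /* real userland pointer */
                    addr += pax_user_shadow_base;   /* shift into the shadow area */
            return addr;                            /* kernel-range input is left alone */
    }
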
30384diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30385index 1313ae6..84f25ea 100644
30386--- a/arch/x86/lib/insn.c
30387+++ b/arch/x86/lib/insn.c
30388@@ -20,8 +20,10 @@
30389
30390 #ifdef __KERNEL__
30391 #include <linux/string.h>
30392+#include <asm/pgtable_types.h>
30393 #else
30394 #include <string.h>
30395+#define ktla_ktva(addr) addr
30396 #endif
30397 #include <asm/inat.h>
30398 #include <asm/insn.h>
30399@@ -53,9 +55,9 @@
30400 void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
30401 {
30402 memset(insn, 0, sizeof(*insn));
30403- insn->kaddr = kaddr;
30404- insn->end_kaddr = kaddr + buf_len;
30405- insn->next_byte = kaddr;
30406+ insn->kaddr = ktla_ktva(kaddr);
30407+ insn->end_kaddr = insn->kaddr + buf_len;
30408+ insn->next_byte = insn->kaddr;
30409 insn->x86_64 = x86_64 ? 1 : 0;
30410 insn->opnd_bytes = 4;
30411 if (x86_64)
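
The insn.c hunk exists because, under KERNEXEC, kernel text can be reached at more than one address and the decoder must read it through the alias that is actually mapped for data access; ktla_ktva() performs that kernel-text-linear to virtual-alias conversion, and the user-space build of the decoder stubs it out to the identity, as the hunk shows. The shape of the macro, with the offset left as an assumption (the real value is config- and arch-dependent):

    /* Illustrative shape only; KTLA_KTVA_OFFSET is a made-up placeholder. */
    #ifdef __KERNEL__
    # define ktla_ktva(addr)    ((addr) + KTLA_KTVA_OFFSET)
    #else
    # define ktla_ktva(addr)    (addr)  /* identity, as in the hunk above */
    #endif
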
30412diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30413index 05a95e7..326f2fa 100644
30414--- a/arch/x86/lib/iomap_copy_64.S
30415+++ b/arch/x86/lib/iomap_copy_64.S
30416@@ -17,6 +17,7 @@
30417
30418 #include <linux/linkage.h>
30419 #include <asm/dwarf2.h>
30420+#include <asm/alternative-asm.h>
30421
30422 /*
30423 * override generic version in lib/iomap_copy.c
30424@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30425 CFI_STARTPROC
30426 movl %edx,%ecx
30427 rep movsd
30428+ pax_force_retaddr
30429 ret
30430 CFI_ENDPROC
30431 ENDPROC(__iowrite32_copy)
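
pax_force_retaddr, inserted before nearly every ret in these assembly files, is KERNEXEC's return-address hardening: it forces the saved return address on the stack to have its top bit set (one plugin variant is literally btsq $63,(%rsp), another ORs in a mask register), so even an overwritten return address cannot point into userland. A one-line C model of the effect:

    /* Models the effect of pax_force_retaddr on the saved return address. */
    static inline void force_retaddr_model(unsigned long *saved_ret)
    {
            *saved_ret |= 1UL << 63;    /* equivalent of: btsq $63,(%rsp) */
    }
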
30432diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30433index 56313a3..0db417e 100644
30434--- a/arch/x86/lib/memcpy_64.S
30435+++ b/arch/x86/lib/memcpy_64.S
30436@@ -24,7 +24,7 @@
30437 * This gets patched over the unrolled variant (below) via the
30438 * alternative instructions framework:
30439 */
30440- .section .altinstr_replacement, "ax", @progbits
30441+ .section .altinstr_replacement, "a", @progbits
30442 .Lmemcpy_c:
30443 movq %rdi, %rax
30444 movq %rdx, %rcx
30445@@ -33,6 +33,7 @@
30446 rep movsq
30447 movl %edx, %ecx
30448 rep movsb
30449+ pax_force_retaddr
30450 ret
30451 .Lmemcpy_e:
30452 .previous
30453@@ -44,11 +45,12 @@
30454 * This gets patched over the unrolled variant (below) via the
30455 * alternative instructions framework:
30456 */
30457- .section .altinstr_replacement, "ax", @progbits
30458+ .section .altinstr_replacement, "a", @progbits
30459 .Lmemcpy_c_e:
30460 movq %rdi, %rax
30461 movq %rdx, %rcx
30462 rep movsb
30463+ pax_force_retaddr
30464 ret
30465 .Lmemcpy_e_e:
30466 .previous
30467@@ -136,6 +138,7 @@ ENTRY(memcpy)
30468 movq %r9, 1*8(%rdi)
30469 movq %r10, -2*8(%rdi, %rdx)
30470 movq %r11, -1*8(%rdi, %rdx)
30471+ pax_force_retaddr
30472 retq
30473 .p2align 4
30474 .Lless_16bytes:
30475@@ -148,6 +151,7 @@ ENTRY(memcpy)
30476 movq -1*8(%rsi, %rdx), %r9
30477 movq %r8, 0*8(%rdi)
30478 movq %r9, -1*8(%rdi, %rdx)
30479+ pax_force_retaddr
30480 retq
30481 .p2align 4
30482 .Lless_8bytes:
30483@@ -161,6 +165,7 @@ ENTRY(memcpy)
30484 movl -4(%rsi, %rdx), %r8d
30485 movl %ecx, (%rdi)
30486 movl %r8d, -4(%rdi, %rdx)
30487+ pax_force_retaddr
30488 retq
30489 .p2align 4
30490 .Lless_3bytes:
30491@@ -179,6 +184,7 @@ ENTRY(memcpy)
30492 movb %cl, (%rdi)
30493
30494 .Lend:
30495+ pax_force_retaddr
30496 retq
30497 CFI_ENDPROC
30498 ENDPROC(memcpy)
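
The recurring section-flag change from "ax" to "a" is also KERNEXEC-driven: .altinstr_replacement only holds template bytes that the alternatives machinery copies over the live code at boot, so the section is read as data and never executed in place, and it need not be mapped executable. A sketch of the patching step, with hypothetical names modeled loosely on the kernel's struct alt_instr:

    #include <string.h>

    /* Illustrative entry layout; field names are assumptions. */
    struct alt_entry {
            unsigned char *instr;           /* live code to patch */
            unsigned char *replacement;     /* bytes in .altinstr_replacement */
            unsigned char replacementlen;
    };

    static void apply_one_alternative(struct alt_entry *a)
    {
            /* The replacement section is only a memcpy source, hence "a"
             * (alloc) rather than "ax" (alloc + execute). */
            memcpy(a->instr, a->replacement, a->replacementlen);
    }
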
30499diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
30500index 65268a6..dd1de11 100644
30501--- a/arch/x86/lib/memmove_64.S
30502+++ b/arch/x86/lib/memmove_64.S
30503@@ -202,14 +202,16 @@ ENTRY(memmove)
30504 movb (%rsi), %r11b
30505 movb %r11b, (%rdi)
30506 13:
30507+ pax_force_retaddr
30508 retq
30509 CFI_ENDPROC
30510
30511- .section .altinstr_replacement,"ax"
30512+ .section .altinstr_replacement,"a"
30513 .Lmemmove_begin_forward_efs:
30514 /* Forward moving data. */
30515 movq %rdx, %rcx
30516 rep movsb
30517+ pax_force_retaddr
30518 retq
30519 .Lmemmove_end_forward_efs:
30520 .previous
30521diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
30522index 2dcb380..2eb79fe 100644
30523--- a/arch/x86/lib/memset_64.S
30524+++ b/arch/x86/lib/memset_64.S
30525@@ -16,7 +16,7 @@
30526 *
30527 * rax original destination
30528 */
30529- .section .altinstr_replacement, "ax", @progbits
30530+ .section .altinstr_replacement, "a", @progbits
30531 .Lmemset_c:
30532 movq %rdi,%r9
30533 movq %rdx,%rcx
30534@@ -30,6 +30,7 @@
30535 movl %edx,%ecx
30536 rep stosb
30537 movq %r9,%rax
30538+ pax_force_retaddr
30539 ret
30540 .Lmemset_e:
30541 .previous
30542@@ -45,13 +46,14 @@
30543 *
30544 * rax original destination
30545 */
30546- .section .altinstr_replacement, "ax", @progbits
30547+ .section .altinstr_replacement, "a", @progbits
30548 .Lmemset_c_e:
30549 movq %rdi,%r9
30550 movb %sil,%al
30551 movq %rdx,%rcx
30552 rep stosb
30553 movq %r9,%rax
30554+ pax_force_retaddr
30555 ret
30556 .Lmemset_e_e:
30557 .previous
30558@@ -118,6 +120,7 @@ ENTRY(__memset)
30559
30560 .Lende:
30561 movq %r10,%rax
30562+ pax_force_retaddr
30563 ret
30564
30565 CFI_RESTORE_STATE
30566diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
30567index c9f2d9b..e7fd2c0 100644
30568--- a/arch/x86/lib/mmx_32.c
30569+++ b/arch/x86/lib/mmx_32.c
30570@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30571 {
30572 void *p;
30573 int i;
30574+ unsigned long cr0;
30575
30576 if (unlikely(in_interrupt()))
30577 return __memcpy(to, from, len);
30578@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30579 kernel_fpu_begin();
30580
30581 __asm__ __volatile__ (
30582- "1: prefetch (%0)\n" /* This set is 28 bytes */
30583- " prefetch 64(%0)\n"
30584- " prefetch 128(%0)\n"
30585- " prefetch 192(%0)\n"
30586- " prefetch 256(%0)\n"
30587+ "1: prefetch (%1)\n" /* This set is 28 bytes */
30588+ " prefetch 64(%1)\n"
30589+ " prefetch 128(%1)\n"
30590+ " prefetch 192(%1)\n"
30591+ " prefetch 256(%1)\n"
30592 "2: \n"
30593 ".section .fixup, \"ax\"\n"
30594- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30595+ "3: \n"
30596+
30597+#ifdef CONFIG_PAX_KERNEXEC
30598+ " movl %%cr0, %0\n"
30599+ " movl %0, %%eax\n"
30600+ " andl $0xFFFEFFFF, %%eax\n"
30601+ " movl %%eax, %%cr0\n"
30602+#endif
30603+
30604+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30605+
30606+#ifdef CONFIG_PAX_KERNEXEC
30607+ " movl %0, %%cr0\n"
30608+#endif
30609+
30610 " jmp 2b\n"
30611 ".previous\n"
30612 _ASM_EXTABLE(1b, 3b)
30613- : : "r" (from));
30614+ : "=&r" (cr0) : "r" (from) : "ax");
30615
30616 for ( ; i > 5; i--) {
30617 __asm__ __volatile__ (
30618- "1: prefetch 320(%0)\n"
30619- "2: movq (%0), %%mm0\n"
30620- " movq 8(%0), %%mm1\n"
30621- " movq 16(%0), %%mm2\n"
30622- " movq 24(%0), %%mm3\n"
30623- " movq %%mm0, (%1)\n"
30624- " movq %%mm1, 8(%1)\n"
30625- " movq %%mm2, 16(%1)\n"
30626- " movq %%mm3, 24(%1)\n"
30627- " movq 32(%0), %%mm0\n"
30628- " movq 40(%0), %%mm1\n"
30629- " movq 48(%0), %%mm2\n"
30630- " movq 56(%0), %%mm3\n"
30631- " movq %%mm0, 32(%1)\n"
30632- " movq %%mm1, 40(%1)\n"
30633- " movq %%mm2, 48(%1)\n"
30634- " movq %%mm3, 56(%1)\n"
30635+ "1: prefetch 320(%1)\n"
30636+ "2: movq (%1), %%mm0\n"
30637+ " movq 8(%1), %%mm1\n"
30638+ " movq 16(%1), %%mm2\n"
30639+ " movq 24(%1), %%mm3\n"
30640+ " movq %%mm0, (%2)\n"
30641+ " movq %%mm1, 8(%2)\n"
30642+ " movq %%mm2, 16(%2)\n"
30643+ " movq %%mm3, 24(%2)\n"
30644+ " movq 32(%1), %%mm0\n"
30645+ " movq 40(%1), %%mm1\n"
30646+ " movq 48(%1), %%mm2\n"
30647+ " movq 56(%1), %%mm3\n"
30648+ " movq %%mm0, 32(%2)\n"
30649+ " movq %%mm1, 40(%2)\n"
30650+ " movq %%mm2, 48(%2)\n"
30651+ " movq %%mm3, 56(%2)\n"
30652 ".section .fixup, \"ax\"\n"
30653- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30654+ "3:\n"
30655+
30656+#ifdef CONFIG_PAX_KERNEXEC
30657+ " movl %%cr0, %0\n"
30658+ " movl %0, %%eax\n"
30659+ " andl $0xFFFEFFFF, %%eax\n"
30660+ " movl %%eax, %%cr0\n"
30661+#endif
30662+
30663+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30664+
30665+#ifdef CONFIG_PAX_KERNEXEC
30666+ " movl %0, %%cr0\n"
30667+#endif
30668+
30669 " jmp 2b\n"
30670 ".previous\n"
30671 _ASM_EXTABLE(1b, 3b)
30672- : : "r" (from), "r" (to) : "memory");
30673+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30674
30675 from += 64;
30676 to += 64;
30677@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
30678 static void fast_copy_page(void *to, void *from)
30679 {
30680 int i;
30681+ unsigned long cr0;
30682
30683 kernel_fpu_begin();
30684
30685@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
30686 * but that is for later. -AV
30687 */
30688 __asm__ __volatile__(
30689- "1: prefetch (%0)\n"
30690- " prefetch 64(%0)\n"
30691- " prefetch 128(%0)\n"
30692- " prefetch 192(%0)\n"
30693- " prefetch 256(%0)\n"
30694+ "1: prefetch (%1)\n"
30695+ " prefetch 64(%1)\n"
30696+ " prefetch 128(%1)\n"
30697+ " prefetch 192(%1)\n"
30698+ " prefetch 256(%1)\n"
30699 "2: \n"
30700 ".section .fixup, \"ax\"\n"
30701- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30702+ "3: \n"
30703+
30704+#ifdef CONFIG_PAX_KERNEXEC
30705+ " movl %%cr0, %0\n"
30706+ " movl %0, %%eax\n"
30707+ " andl $0xFFFEFFFF, %%eax\n"
30708+ " movl %%eax, %%cr0\n"
30709+#endif
30710+
30711+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30712+
30713+#ifdef CONFIG_PAX_KERNEXEC
30714+ " movl %0, %%cr0\n"
30715+#endif
30716+
30717 " jmp 2b\n"
30718 ".previous\n"
30719- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30720+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30721
30722 for (i = 0; i < (4096-320)/64; i++) {
30723 __asm__ __volatile__ (
30724- "1: prefetch 320(%0)\n"
30725- "2: movq (%0), %%mm0\n"
30726- " movntq %%mm0, (%1)\n"
30727- " movq 8(%0), %%mm1\n"
30728- " movntq %%mm1, 8(%1)\n"
30729- " movq 16(%0), %%mm2\n"
30730- " movntq %%mm2, 16(%1)\n"
30731- " movq 24(%0), %%mm3\n"
30732- " movntq %%mm3, 24(%1)\n"
30733- " movq 32(%0), %%mm4\n"
30734- " movntq %%mm4, 32(%1)\n"
30735- " movq 40(%0), %%mm5\n"
30736- " movntq %%mm5, 40(%1)\n"
30737- " movq 48(%0), %%mm6\n"
30738- " movntq %%mm6, 48(%1)\n"
30739- " movq 56(%0), %%mm7\n"
30740- " movntq %%mm7, 56(%1)\n"
30741+ "1: prefetch 320(%1)\n"
30742+ "2: movq (%1), %%mm0\n"
30743+ " movntq %%mm0, (%2)\n"
30744+ " movq 8(%1), %%mm1\n"
30745+ " movntq %%mm1, 8(%2)\n"
30746+ " movq 16(%1), %%mm2\n"
30747+ " movntq %%mm2, 16(%2)\n"
30748+ " movq 24(%1), %%mm3\n"
30749+ " movntq %%mm3, 24(%2)\n"
30750+ " movq 32(%1), %%mm4\n"
30751+ " movntq %%mm4, 32(%2)\n"
30752+ " movq 40(%1), %%mm5\n"
30753+ " movntq %%mm5, 40(%2)\n"
30754+ " movq 48(%1), %%mm6\n"
30755+ " movntq %%mm6, 48(%2)\n"
30756+ " movq 56(%1), %%mm7\n"
30757+ " movntq %%mm7, 56(%2)\n"
30758 ".section .fixup, \"ax\"\n"
30759- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30760+ "3:\n"
30761+
30762+#ifdef CONFIG_PAX_KERNEXEC
30763+ " movl %%cr0, %0\n"
30764+ " movl %0, %%eax\n"
30765+ " andl $0xFFFEFFFF, %%eax\n"
30766+ " movl %%eax, %%cr0\n"
30767+#endif
30768+
30769+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30770+
30771+#ifdef CONFIG_PAX_KERNEXEC
30772+ " movl %0, %%cr0\n"
30773+#endif
30774+
30775 " jmp 2b\n"
30776 ".previous\n"
30777- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30778+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30779
30780 from += 64;
30781 to += 64;
30782@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30783 static void fast_copy_page(void *to, void *from)
30784 {
30785 int i;
30786+ unsigned long cr0;
30787
30788 kernel_fpu_begin();
30789
30790 __asm__ __volatile__ (
30791- "1: prefetch (%0)\n"
30792- " prefetch 64(%0)\n"
30793- " prefetch 128(%0)\n"
30794- " prefetch 192(%0)\n"
30795- " prefetch 256(%0)\n"
30796+ "1: prefetch (%1)\n"
30797+ " prefetch 64(%1)\n"
30798+ " prefetch 128(%1)\n"
30799+ " prefetch 192(%1)\n"
30800+ " prefetch 256(%1)\n"
30801 "2: \n"
30802 ".section .fixup, \"ax\"\n"
30803- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30804+ "3: \n"
30805+
30806+#ifdef CONFIG_PAX_KERNEXEC
30807+ " movl %%cr0, %0\n"
30808+ " movl %0, %%eax\n"
30809+ " andl $0xFFFEFFFF, %%eax\n"
30810+ " movl %%eax, %%cr0\n"
30811+#endif
30812+
30813+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30814+
30815+#ifdef CONFIG_PAX_KERNEXEC
30816+ " movl %0, %%cr0\n"
30817+#endif
30818+
30819 " jmp 2b\n"
30820 ".previous\n"
30821- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30822+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30823
30824 for (i = 0; i < 4096/64; i++) {
30825 __asm__ __volatile__ (
30826- "1: prefetch 320(%0)\n"
30827- "2: movq (%0), %%mm0\n"
30828- " movq 8(%0), %%mm1\n"
30829- " movq 16(%0), %%mm2\n"
30830- " movq 24(%0), %%mm3\n"
30831- " movq %%mm0, (%1)\n"
30832- " movq %%mm1, 8(%1)\n"
30833- " movq %%mm2, 16(%1)\n"
30834- " movq %%mm3, 24(%1)\n"
30835- " movq 32(%0), %%mm0\n"
30836- " movq 40(%0), %%mm1\n"
30837- " movq 48(%0), %%mm2\n"
30838- " movq 56(%0), %%mm3\n"
30839- " movq %%mm0, 32(%1)\n"
30840- " movq %%mm1, 40(%1)\n"
30841- " movq %%mm2, 48(%1)\n"
30842- " movq %%mm3, 56(%1)\n"
30843+ "1: prefetch 320(%1)\n"
30844+ "2: movq (%1), %%mm0\n"
30845+ " movq 8(%1), %%mm1\n"
30846+ " movq 16(%1), %%mm2\n"
30847+ " movq 24(%1), %%mm3\n"
30848+ " movq %%mm0, (%2)\n"
30849+ " movq %%mm1, 8(%2)\n"
30850+ " movq %%mm2, 16(%2)\n"
30851+ " movq %%mm3, 24(%2)\n"
30852+ " movq 32(%1), %%mm0\n"
30853+ " movq 40(%1), %%mm1\n"
30854+ " movq 48(%1), %%mm2\n"
30855+ " movq 56(%1), %%mm3\n"
30856+ " movq %%mm0, 32(%2)\n"
30857+ " movq %%mm1, 40(%2)\n"
30858+ " movq %%mm2, 48(%2)\n"
30859+ " movq %%mm3, 56(%2)\n"
30860 ".section .fixup, \"ax\"\n"
30861- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30862+ "3:\n"
30863+
30864+#ifdef CONFIG_PAX_KERNEXEC
30865+ " movl %%cr0, %0\n"
30866+ " movl %0, %%eax\n"
30867+ " andl $0xFFFEFFFF, %%eax\n"
30868+ " movl %%eax, %%cr0\n"
30869+#endif
30870+
30871+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30872+
30873+#ifdef CONFIG_PAX_KERNEXEC
30874+ " movl %0, %%cr0\n"
30875+#endif
30876+
30877 " jmp 2b\n"
30878 ".previous\n"
30879 _ASM_EXTABLE(1b, 3b)
30880- : : "r" (from), "r" (to) : "memory");
30881+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30882
30883 from += 64;
30884 to += 64;
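
Every fixup in mmx_32.c performs a self-modifying write: movw $0x1AEB,1b stores the bytes EB 1A, a short jmp over the faulting prefetch (hence the "jmp on 26 bytes" comment). KERNEXEC keeps kernel text read-only, so the patch brackets that store with a CR0 dance: the 0xFFFEFFFF mask clears bit 16, CR0.WP, which lets ring 0 write through read-only pages, and the saved CR0 is restored right after. A ring-0-only C sketch of the toggle:

    /* Kernel-context sketch only: CR0 cannot be touched outside ring 0. */
    static inline unsigned long cr0_wp_off(void)
    {
            unsigned long cr0;

            asm volatile("mov %%cr0, %0" : "=r"(cr0));
            asm volatile("mov %0, %%cr0" : : "r"(cr0 & 0xFFFEFFFFUL)); /* clear WP, bit 16 */
            return cr0;
    }

    static inline void cr0_restore(unsigned long cr0)
    {
            asm volatile("mov %0, %%cr0" : : "r"(cr0));
    }
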
30885diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
30886index f6d13ee..d789440 100644
30887--- a/arch/x86/lib/msr-reg.S
30888+++ b/arch/x86/lib/msr-reg.S
30889@@ -3,6 +3,7 @@
30890 #include <asm/dwarf2.h>
30891 #include <asm/asm.h>
30892 #include <asm/msr.h>
30893+#include <asm/alternative-asm.h>
30894
30895 #ifdef CONFIG_X86_64
30896 /*
30897@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
30898 movl %edi, 28(%r10)
30899 popq_cfi %rbp
30900 popq_cfi %rbx
30901+ pax_force_retaddr
30902 ret
30903 3:
30904 CFI_RESTORE_STATE
30905diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
30906index fc6ba17..14ad9a5 100644
30907--- a/arch/x86/lib/putuser.S
30908+++ b/arch/x86/lib/putuser.S
30909@@ -16,7 +16,9 @@
30910 #include <asm/errno.h>
30911 #include <asm/asm.h>
30912 #include <asm/smap.h>
30913-
30914+#include <asm/segment.h>
30915+#include <asm/pgtable.h>
30916+#include <asm/alternative-asm.h>
30917
30918 /*
30919 * __put_user_X
30920@@ -30,57 +32,125 @@
30921 * as they get called from within inline assembly.
30922 */
30923
30924-#define ENTER CFI_STARTPROC ; \
30925- GET_THREAD_INFO(%_ASM_BX)
30926-#define EXIT ASM_CLAC ; \
30927- ret ; \
30928+#define ENTER CFI_STARTPROC
30929+#define EXIT ASM_CLAC ; \
30930+ pax_force_retaddr ; \
30931+ ret ; \
30932 CFI_ENDPROC
30933
30934+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30935+#define _DEST %_ASM_CX,%_ASM_BX
30936+#else
30937+#define _DEST %_ASM_CX
30938+#endif
30939+
30940+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30941+#define __copyuser_seg gs;
30942+#else
30943+#define __copyuser_seg
30944+#endif
30945+
30946 .text
30947 ENTRY(__put_user_1)
30948 ENTER
30949+
30950+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30951+ GET_THREAD_INFO(%_ASM_BX)
30952 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
30953 jae bad_put_user
30954+
30955+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30956+ mov pax_user_shadow_base,%_ASM_BX
30957+ cmp %_ASM_BX,%_ASM_CX
30958+ jb 1234f
30959+ xor %ebx,%ebx
30960+1234:
30961+#endif
30962+
30963+#endif
30964+
30965 ASM_STAC
30966-1: movb %al,(%_ASM_CX)
30967+1: __copyuser_seg movb %al,(_DEST)
30968 xor %eax,%eax
30969 EXIT
30970 ENDPROC(__put_user_1)
30971
30972 ENTRY(__put_user_2)
30973 ENTER
30974+
30975+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30976+ GET_THREAD_INFO(%_ASM_BX)
30977 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30978 sub $1,%_ASM_BX
30979 cmp %_ASM_BX,%_ASM_CX
30980 jae bad_put_user
30981+
30982+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30983+ mov pax_user_shadow_base,%_ASM_BX
30984+ cmp %_ASM_BX,%_ASM_CX
30985+ jb 1234f
30986+ xor %ebx,%ebx
30987+1234:
30988+#endif
30989+
30990+#endif
30991+
30992 ASM_STAC
30993-2: movw %ax,(%_ASM_CX)
30994+2: __copyuser_seg movw %ax,(_DEST)
30995 xor %eax,%eax
30996 EXIT
30997 ENDPROC(__put_user_2)
30998
30999 ENTRY(__put_user_4)
31000 ENTER
31001+
31002+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31003+ GET_THREAD_INFO(%_ASM_BX)
31004 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31005 sub $3,%_ASM_BX
31006 cmp %_ASM_BX,%_ASM_CX
31007 jae bad_put_user
31008+
31009+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31010+ mov pax_user_shadow_base,%_ASM_BX
31011+ cmp %_ASM_BX,%_ASM_CX
31012+ jb 1234f
31013+ xor %ebx,%ebx
31014+1234:
31015+#endif
31016+
31017+#endif
31018+
31019 ASM_STAC
31020-3: movl %eax,(%_ASM_CX)
31021+3: __copyuser_seg movl %eax,(_DEST)
31022 xor %eax,%eax
31023 EXIT
31024 ENDPROC(__put_user_4)
31025
31026 ENTRY(__put_user_8)
31027 ENTER
31028+
31029+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31030+ GET_THREAD_INFO(%_ASM_BX)
31031 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31032 sub $7,%_ASM_BX
31033 cmp %_ASM_BX,%_ASM_CX
31034 jae bad_put_user
31035+
31036+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31037+ mov pax_user_shadow_base,%_ASM_BX
31038+ cmp %_ASM_BX,%_ASM_CX
31039+ jb 1234f
31040+ xor %ebx,%ebx
31041+1234:
31042+#endif
31043+
31044+#endif
31045+
31046 ASM_STAC
31047-4: mov %_ASM_AX,(%_ASM_CX)
31048+4: __copyuser_seg mov %_ASM_AX,(_DEST)
31049 #ifdef CONFIG_X86_32
31050-5: movl %edx,4(%_ASM_CX)
31051+5: __copyuser_seg movl %edx,4(_DEST)
31052 #endif
31053 xor %eax,%eax
31054 EXIT
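
putuser takes the complementary route to getuser: rather than rewriting the pointer in place, the amd64 UDEREF variant defines _DEST as the two-register form %_ASM_CX,%_ASM_BX and loads %_ASM_BX with either pax_user_shadow_base (pointer still below the base) or zero (already in kernel range), so one mov ...,(_DEST) addressing mode serves both cases. A C model of the index selection; the base value is again an assumed placeholder:

    #include <stdint.h>

    static const uintptr_t pax_user_shadow_base = 0xffff880000000000ULL;

    /* Mirrors: mov base,%rbx; cmp %rbx,%rcx; jb 1234f; xor %ebx,%ebx; 1234: */
    static inline uintptr_t dest_index(uintptr_t addr)
    {
            return addr < pax_user_shadow_base ? pax_user_shadow_base : 0;
    }
    /* effective destination = addr + dest_index(addr), i.e. (%rcx,%rbx) */
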
31055diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
31056index 5dff5f0..cadebf4 100644
31057--- a/arch/x86/lib/rwsem.S
31058+++ b/arch/x86/lib/rwsem.S
31059@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
31060 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31061 CFI_RESTORE __ASM_REG(dx)
31062 restore_common_regs
31063+ pax_force_retaddr
31064 ret
31065 CFI_ENDPROC
31066 ENDPROC(call_rwsem_down_read_failed)
31067@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
31068 movq %rax,%rdi
31069 call rwsem_down_write_failed
31070 restore_common_regs
31071+ pax_force_retaddr
31072 ret
31073 CFI_ENDPROC
31074 ENDPROC(call_rwsem_down_write_failed)
31075@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
31076 movq %rax,%rdi
31077 call rwsem_wake
31078 restore_common_regs
31079-1: ret
31080+1: pax_force_retaddr
31081+ ret
31082 CFI_ENDPROC
31083 ENDPROC(call_rwsem_wake)
31084
31085@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
31086 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31087 CFI_RESTORE __ASM_REG(dx)
31088 restore_common_regs
31089+ pax_force_retaddr
31090 ret
31091 CFI_ENDPROC
31092 ENDPROC(call_rwsem_downgrade_wake)
31093diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
31094index b30b5eb..2b57052 100644
31095--- a/arch/x86/lib/thunk_64.S
31096+++ b/arch/x86/lib/thunk_64.S
31097@@ -9,6 +9,7 @@
31098 #include <asm/dwarf2.h>
31099 #include <asm/calling.h>
31100 #include <asm/asm.h>
31101+#include <asm/alternative-asm.h>
31102
31103 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
31104 .macro THUNK name, func, put_ret_addr_in_rdi=0
31105@@ -16,11 +17,11 @@
31106 \name:
31107 CFI_STARTPROC
31108
31109- /* this one pushes 9 elems, the next one would be %rIP */
31110- SAVE_ARGS
31111+ /* this one pushes 15+1 elems, the next one would be %rIP */
31112+ SAVE_ARGS 8
31113
31114 .if \put_ret_addr_in_rdi
31115- movq_cfi_restore 9*8, rdi
31116+ movq_cfi_restore RIP, rdi
31117 .endif
31118
31119 call \func
31120@@ -47,9 +48,10 @@
31121
31122 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
31123 CFI_STARTPROC
31124- SAVE_ARGS
31125+ SAVE_ARGS 8
31126 restore:
31127- RESTORE_ARGS
31128+ RESTORE_ARGS 1,8
31129+ pax_force_retaddr
31130 ret
31131 CFI_ENDPROC
31132 _ASM_NOKPROBE(restore)
31133diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
31134index e2f5e21..4b22130 100644
31135--- a/arch/x86/lib/usercopy_32.c
31136+++ b/arch/x86/lib/usercopy_32.c
31137@@ -42,11 +42,13 @@ do { \
31138 int __d0; \
31139 might_fault(); \
31140 __asm__ __volatile__( \
31141+ __COPYUSER_SET_ES \
31142 ASM_STAC "\n" \
31143 "0: rep; stosl\n" \
31144 " movl %2,%0\n" \
31145 "1: rep; stosb\n" \
31146 "2: " ASM_CLAC "\n" \
31147+ __COPYUSER_RESTORE_ES \
31148 ".section .fixup,\"ax\"\n" \
31149 "3: lea 0(%2,%0,4),%0\n" \
31150 " jmp 2b\n" \
31151@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
31152
31153 #ifdef CONFIG_X86_INTEL_USERCOPY
31154 static unsigned long
31155-__copy_user_intel(void __user *to, const void *from, unsigned long size)
31156+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
31157 {
31158 int d0, d1;
31159 __asm__ __volatile__(
31160@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31161 " .align 2,0x90\n"
31162 "3: movl 0(%4), %%eax\n"
31163 "4: movl 4(%4), %%edx\n"
31164- "5: movl %%eax, 0(%3)\n"
31165- "6: movl %%edx, 4(%3)\n"
31166+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
31167+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
31168 "7: movl 8(%4), %%eax\n"
31169 "8: movl 12(%4),%%edx\n"
31170- "9: movl %%eax, 8(%3)\n"
31171- "10: movl %%edx, 12(%3)\n"
31172+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
31173+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
31174 "11: movl 16(%4), %%eax\n"
31175 "12: movl 20(%4), %%edx\n"
31176- "13: movl %%eax, 16(%3)\n"
31177- "14: movl %%edx, 20(%3)\n"
31178+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
31179+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31180 "15: movl 24(%4), %%eax\n"
31181 "16: movl 28(%4), %%edx\n"
31182- "17: movl %%eax, 24(%3)\n"
31183- "18: movl %%edx, 28(%3)\n"
31184+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31185+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31186 "19: movl 32(%4), %%eax\n"
31187 "20: movl 36(%4), %%edx\n"
31188- "21: movl %%eax, 32(%3)\n"
31189- "22: movl %%edx, 36(%3)\n"
31190+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31191+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31192 "23: movl 40(%4), %%eax\n"
31193 "24: movl 44(%4), %%edx\n"
31194- "25: movl %%eax, 40(%3)\n"
31195- "26: movl %%edx, 44(%3)\n"
31196+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31197+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31198 "27: movl 48(%4), %%eax\n"
31199 "28: movl 52(%4), %%edx\n"
31200- "29: movl %%eax, 48(%3)\n"
31201- "30: movl %%edx, 52(%3)\n"
31202+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31203+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31204 "31: movl 56(%4), %%eax\n"
31205 "32: movl 60(%4), %%edx\n"
31206- "33: movl %%eax, 56(%3)\n"
31207- "34: movl %%edx, 60(%3)\n"
31208+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31209+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31210 " addl $-64, %0\n"
31211 " addl $64, %4\n"
31212 " addl $64, %3\n"
31213@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31214 " shrl $2, %0\n"
31215 " andl $3, %%eax\n"
31216 " cld\n"
31217+ __COPYUSER_SET_ES
31218 "99: rep; movsl\n"
31219 "36: movl %%eax, %0\n"
31220 "37: rep; movsb\n"
31221 "100:\n"
31222+ __COPYUSER_RESTORE_ES
31223+ ".section .fixup,\"ax\"\n"
31224+ "101: lea 0(%%eax,%0,4),%0\n"
31225+ " jmp 100b\n"
31226+ ".previous\n"
31227+ _ASM_EXTABLE(1b,100b)
31228+ _ASM_EXTABLE(2b,100b)
31229+ _ASM_EXTABLE(3b,100b)
31230+ _ASM_EXTABLE(4b,100b)
31231+ _ASM_EXTABLE(5b,100b)
31232+ _ASM_EXTABLE(6b,100b)
31233+ _ASM_EXTABLE(7b,100b)
31234+ _ASM_EXTABLE(8b,100b)
31235+ _ASM_EXTABLE(9b,100b)
31236+ _ASM_EXTABLE(10b,100b)
31237+ _ASM_EXTABLE(11b,100b)
31238+ _ASM_EXTABLE(12b,100b)
31239+ _ASM_EXTABLE(13b,100b)
31240+ _ASM_EXTABLE(14b,100b)
31241+ _ASM_EXTABLE(15b,100b)
31242+ _ASM_EXTABLE(16b,100b)
31243+ _ASM_EXTABLE(17b,100b)
31244+ _ASM_EXTABLE(18b,100b)
31245+ _ASM_EXTABLE(19b,100b)
31246+ _ASM_EXTABLE(20b,100b)
31247+ _ASM_EXTABLE(21b,100b)
31248+ _ASM_EXTABLE(22b,100b)
31249+ _ASM_EXTABLE(23b,100b)
31250+ _ASM_EXTABLE(24b,100b)
31251+ _ASM_EXTABLE(25b,100b)
31252+ _ASM_EXTABLE(26b,100b)
31253+ _ASM_EXTABLE(27b,100b)
31254+ _ASM_EXTABLE(28b,100b)
31255+ _ASM_EXTABLE(29b,100b)
31256+ _ASM_EXTABLE(30b,100b)
31257+ _ASM_EXTABLE(31b,100b)
31258+ _ASM_EXTABLE(32b,100b)
31259+ _ASM_EXTABLE(33b,100b)
31260+ _ASM_EXTABLE(34b,100b)
31261+ _ASM_EXTABLE(35b,100b)
31262+ _ASM_EXTABLE(36b,100b)
31263+ _ASM_EXTABLE(37b,100b)
31264+ _ASM_EXTABLE(99b,101b)
31265+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
31266+ : "1"(to), "2"(from), "0"(size)
31267+ : "eax", "edx", "memory");
31268+ return size;
31269+}
31270+
31271+static unsigned long
31272+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31273+{
31274+ int d0, d1;
31275+ __asm__ __volatile__(
31276+ " .align 2,0x90\n"
31277+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31278+ " cmpl $67, %0\n"
31279+ " jbe 3f\n"
31280+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31281+ " .align 2,0x90\n"
31282+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31283+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31284+ "5: movl %%eax, 0(%3)\n"
31285+ "6: movl %%edx, 4(%3)\n"
31286+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31287+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31288+ "9: movl %%eax, 8(%3)\n"
31289+ "10: movl %%edx, 12(%3)\n"
31290+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31291+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31292+ "13: movl %%eax, 16(%3)\n"
31293+ "14: movl %%edx, 20(%3)\n"
31294+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31295+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31296+ "17: movl %%eax, 24(%3)\n"
31297+ "18: movl %%edx, 28(%3)\n"
31298+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31299+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31300+ "21: movl %%eax, 32(%3)\n"
31301+ "22: movl %%edx, 36(%3)\n"
31302+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31303+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31304+ "25: movl %%eax, 40(%3)\n"
31305+ "26: movl %%edx, 44(%3)\n"
31306+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31307+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31308+ "29: movl %%eax, 48(%3)\n"
31309+ "30: movl %%edx, 52(%3)\n"
31310+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31311+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31312+ "33: movl %%eax, 56(%3)\n"
31313+ "34: movl %%edx, 60(%3)\n"
31314+ " addl $-64, %0\n"
31315+ " addl $64, %4\n"
31316+ " addl $64, %3\n"
31317+ " cmpl $63, %0\n"
31318+ " ja 1b\n"
31319+ "35: movl %0, %%eax\n"
31320+ " shrl $2, %0\n"
31321+ " andl $3, %%eax\n"
31322+ " cld\n"
31323+ "99: rep; "__copyuser_seg" movsl\n"
31324+ "36: movl %%eax, %0\n"
31325+ "37: rep; "__copyuser_seg" movsb\n"
31326+ "100:\n"
31327 ".section .fixup,\"ax\"\n"
31328 "101: lea 0(%%eax,%0,4),%0\n"
31329 " jmp 100b\n"
31330@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31331 int d0, d1;
31332 __asm__ __volatile__(
31333 " .align 2,0x90\n"
31334- "0: movl 32(%4), %%eax\n"
31335+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31336 " cmpl $67, %0\n"
31337 " jbe 2f\n"
31338- "1: movl 64(%4), %%eax\n"
31339+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31340 " .align 2,0x90\n"
31341- "2: movl 0(%4), %%eax\n"
31342- "21: movl 4(%4), %%edx\n"
31343+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31344+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31345 " movl %%eax, 0(%3)\n"
31346 " movl %%edx, 4(%3)\n"
31347- "3: movl 8(%4), %%eax\n"
31348- "31: movl 12(%4),%%edx\n"
31349+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31350+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31351 " movl %%eax, 8(%3)\n"
31352 " movl %%edx, 12(%3)\n"
31353- "4: movl 16(%4), %%eax\n"
31354- "41: movl 20(%4), %%edx\n"
31355+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31356+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31357 " movl %%eax, 16(%3)\n"
31358 " movl %%edx, 20(%3)\n"
31359- "10: movl 24(%4), %%eax\n"
31360- "51: movl 28(%4), %%edx\n"
31361+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31362+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31363 " movl %%eax, 24(%3)\n"
31364 " movl %%edx, 28(%3)\n"
31365- "11: movl 32(%4), %%eax\n"
31366- "61: movl 36(%4), %%edx\n"
31367+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31368+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31369 " movl %%eax, 32(%3)\n"
31370 " movl %%edx, 36(%3)\n"
31371- "12: movl 40(%4), %%eax\n"
31372- "71: movl 44(%4), %%edx\n"
31373+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31374+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31375 " movl %%eax, 40(%3)\n"
31376 " movl %%edx, 44(%3)\n"
31377- "13: movl 48(%4), %%eax\n"
31378- "81: movl 52(%4), %%edx\n"
31379+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31380+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31381 " movl %%eax, 48(%3)\n"
31382 " movl %%edx, 52(%3)\n"
31383- "14: movl 56(%4), %%eax\n"
31384- "91: movl 60(%4), %%edx\n"
31385+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31386+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31387 " movl %%eax, 56(%3)\n"
31388 " movl %%edx, 60(%3)\n"
31389 " addl $-64, %0\n"
31390@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31391 " shrl $2, %0\n"
31392 " andl $3, %%eax\n"
31393 " cld\n"
31394- "6: rep; movsl\n"
31395+ "6: rep; "__copyuser_seg" movsl\n"
31396 " movl %%eax,%0\n"
31397- "7: rep; movsb\n"
31398+ "7: rep; "__copyuser_seg" movsb\n"
31399 "8:\n"
31400 ".section .fixup,\"ax\"\n"
31401 "9: lea 0(%%eax,%0,4),%0\n"
31402@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31403
31404 __asm__ __volatile__(
31405 " .align 2,0x90\n"
31406- "0: movl 32(%4), %%eax\n"
31407+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31408 " cmpl $67, %0\n"
31409 " jbe 2f\n"
31410- "1: movl 64(%4), %%eax\n"
31411+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31412 " .align 2,0x90\n"
31413- "2: movl 0(%4), %%eax\n"
31414- "21: movl 4(%4), %%edx\n"
31415+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31416+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31417 " movnti %%eax, 0(%3)\n"
31418 " movnti %%edx, 4(%3)\n"
31419- "3: movl 8(%4), %%eax\n"
31420- "31: movl 12(%4),%%edx\n"
31421+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31422+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31423 " movnti %%eax, 8(%3)\n"
31424 " movnti %%edx, 12(%3)\n"
31425- "4: movl 16(%4), %%eax\n"
31426- "41: movl 20(%4), %%edx\n"
31427+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31428+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31429 " movnti %%eax, 16(%3)\n"
31430 " movnti %%edx, 20(%3)\n"
31431- "10: movl 24(%4), %%eax\n"
31432- "51: movl 28(%4), %%edx\n"
31433+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31434+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31435 " movnti %%eax, 24(%3)\n"
31436 " movnti %%edx, 28(%3)\n"
31437- "11: movl 32(%4), %%eax\n"
31438- "61: movl 36(%4), %%edx\n"
31439+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31440+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31441 " movnti %%eax, 32(%3)\n"
31442 " movnti %%edx, 36(%3)\n"
31443- "12: movl 40(%4), %%eax\n"
31444- "71: movl 44(%4), %%edx\n"
31445+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31446+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31447 " movnti %%eax, 40(%3)\n"
31448 " movnti %%edx, 44(%3)\n"
31449- "13: movl 48(%4), %%eax\n"
31450- "81: movl 52(%4), %%edx\n"
31451+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31452+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31453 " movnti %%eax, 48(%3)\n"
31454 " movnti %%edx, 52(%3)\n"
31455- "14: movl 56(%4), %%eax\n"
31456- "91: movl 60(%4), %%edx\n"
31457+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31458+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31459 " movnti %%eax, 56(%3)\n"
31460 " movnti %%edx, 60(%3)\n"
31461 " addl $-64, %0\n"
31462@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31463 " shrl $2, %0\n"
31464 " andl $3, %%eax\n"
31465 " cld\n"
31466- "6: rep; movsl\n"
31467+ "6: rep; "__copyuser_seg" movsl\n"
31468 " movl %%eax,%0\n"
31469- "7: rep; movsb\n"
31470+ "7: rep; "__copyuser_seg" movsb\n"
31471 "8:\n"
31472 ".section .fixup,\"ax\"\n"
31473 "9: lea 0(%%eax,%0,4),%0\n"
31474@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
31475
31476 __asm__ __volatile__(
31477 " .align 2,0x90\n"
31478- "0: movl 32(%4), %%eax\n"
31479+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31480 " cmpl $67, %0\n"
31481 " jbe 2f\n"
31482- "1: movl 64(%4), %%eax\n"
31483+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31484 " .align 2,0x90\n"
31485- "2: movl 0(%4), %%eax\n"
31486- "21: movl 4(%4), %%edx\n"
31487+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31488+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31489 " movnti %%eax, 0(%3)\n"
31490 " movnti %%edx, 4(%3)\n"
31491- "3: movl 8(%4), %%eax\n"
31492- "31: movl 12(%4),%%edx\n"
31493+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31494+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31495 " movnti %%eax, 8(%3)\n"
31496 " movnti %%edx, 12(%3)\n"
31497- "4: movl 16(%4), %%eax\n"
31498- "41: movl 20(%4), %%edx\n"
31499+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31500+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31501 " movnti %%eax, 16(%3)\n"
31502 " movnti %%edx, 20(%3)\n"
31503- "10: movl 24(%4), %%eax\n"
31504- "51: movl 28(%4), %%edx\n"
31505+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31506+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31507 " movnti %%eax, 24(%3)\n"
31508 " movnti %%edx, 28(%3)\n"
31509- "11: movl 32(%4), %%eax\n"
31510- "61: movl 36(%4), %%edx\n"
31511+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31512+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31513 " movnti %%eax, 32(%3)\n"
31514 " movnti %%edx, 36(%3)\n"
31515- "12: movl 40(%4), %%eax\n"
31516- "71: movl 44(%4), %%edx\n"
31517+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31518+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31519 " movnti %%eax, 40(%3)\n"
31520 " movnti %%edx, 44(%3)\n"
31521- "13: movl 48(%4), %%eax\n"
31522- "81: movl 52(%4), %%edx\n"
31523+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31524+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31525 " movnti %%eax, 48(%3)\n"
31526 " movnti %%edx, 52(%3)\n"
31527- "14: movl 56(%4), %%eax\n"
31528- "91: movl 60(%4), %%edx\n"
31529+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31530+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31531 " movnti %%eax, 56(%3)\n"
31532 " movnti %%edx, 60(%3)\n"
31533 " addl $-64, %0\n"
31534@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
31535 " shrl $2, %0\n"
31536 " andl $3, %%eax\n"
31537 " cld\n"
31538- "6: rep; movsl\n"
31539+ "6: rep; "__copyuser_seg" movsl\n"
31540 " movl %%eax,%0\n"
31541- "7: rep; movsb\n"
31542+ "7: rep; "__copyuser_seg" movsb\n"
31543 "8:\n"
31544 ".section .fixup,\"ax\"\n"
31545 "9: lea 0(%%eax,%0,4),%0\n"
31546@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
31547 */
31548 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
31549 unsigned long size);
31550-unsigned long __copy_user_intel(void __user *to, const void *from,
31551+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
31552+ unsigned long size);
31553+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
31554 unsigned long size);
31555 unsigned long __copy_user_zeroing_intel_nocache(void *to,
31556 const void __user *from, unsigned long size);
31557 #endif /* CONFIG_X86_INTEL_USERCOPY */
31558
31559 /* Generic arbitrary sized copy. */
31560-#define __copy_user(to, from, size) \
31561+#define __copy_user(to, from, size, prefix, set, restore) \
31562 do { \
31563 int __d0, __d1, __d2; \
31564 __asm__ __volatile__( \
31565+ set \
31566 " cmp $7,%0\n" \
31567 " jbe 1f\n" \
31568 " movl %1,%0\n" \
31569 " negl %0\n" \
31570 " andl $7,%0\n" \
31571 " subl %0,%3\n" \
31572- "4: rep; movsb\n" \
31573+ "4: rep; "prefix"movsb\n" \
31574 " movl %3,%0\n" \
31575 " shrl $2,%0\n" \
31576 " andl $3,%3\n" \
31577 " .align 2,0x90\n" \
31578- "0: rep; movsl\n" \
31579+ "0: rep; "prefix"movsl\n" \
31580 " movl %3,%0\n" \
31581- "1: rep; movsb\n" \
31582+ "1: rep; "prefix"movsb\n" \
31583 "2:\n" \
31584+ restore \
31585 ".section .fixup,\"ax\"\n" \
31586 "5: addl %3,%0\n" \
31587 " jmp 2b\n" \
31588@@ -538,14 +650,14 @@ do { \
31589 " negl %0\n" \
31590 " andl $7,%0\n" \
31591 " subl %0,%3\n" \
31592- "4: rep; movsb\n" \
31593+ "4: rep; "__copyuser_seg"movsb\n" \
31594 " movl %3,%0\n" \
31595 " shrl $2,%0\n" \
31596 " andl $3,%3\n" \
31597 " .align 2,0x90\n" \
31598- "0: rep; movsl\n" \
31599+ "0: rep; "__copyuser_seg"movsl\n" \
31600 " movl %3,%0\n" \
31601- "1: rep; movsb\n" \
31602+ "1: rep; "__copyuser_seg"movsb\n" \
31603 "2:\n" \
31604 ".section .fixup,\"ax\"\n" \
31605 "5: addl %3,%0\n" \
31606@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
31607 {
31608 stac();
31609 if (movsl_is_ok(to, from, n))
31610- __copy_user(to, from, n);
31611+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
31612 else
31613- n = __copy_user_intel(to, from, n);
31614+ n = __generic_copy_to_user_intel(to, from, n);
31615 clac();
31616 return n;
31617 }
31618@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
31619 {
31620 stac();
31621 if (movsl_is_ok(to, from, n))
31622- __copy_user(to, from, n);
31623+ __copy_user(to, from, n, __copyuser_seg, "", "");
31624 else
31625- n = __copy_user_intel((void __user *)to,
31626- (const void *)from, n);
31627+ n = __generic_copy_from_user_intel(to, from, n);
31628 clac();
31629 return n;
31630 }
31631@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
31632 if (n > 64 && cpu_has_xmm2)
31633 n = __copy_user_intel_nocache(to, from, n);
31634 else
31635- __copy_user(to, from, n);
31636+ __copy_user(to, from, n, __copyuser_seg, "", "");
31637 #else
31638- __copy_user(to, from, n);
31639+ __copy_user(to, from, n, __copyuser_seg, "", "");
31640 #endif
31641 clac();
31642 return n;
31643 }
31644 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
31645
31646-/**
31647- * copy_to_user: - Copy a block of data into user space.
31648- * @to: Destination address, in user space.
31649- * @from: Source address, in kernel space.
31650- * @n: Number of bytes to copy.
31651- *
31652- * Context: User context only. This function may sleep.
31653- *
31654- * Copy data from kernel space to user space.
31655- *
31656- * Returns number of bytes that could not be copied.
31657- * On success, this will be zero.
31658- */
31659-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31660+#ifdef CONFIG_PAX_MEMORY_UDEREF
31661+void __set_fs(mm_segment_t x)
31662 {
31663- if (access_ok(VERIFY_WRITE, to, n))
31664- n = __copy_to_user(to, from, n);
31665- return n;
31666+ switch (x.seg) {
31667+ case 0:
31668+ loadsegment(gs, 0);
31669+ break;
31670+ case TASK_SIZE_MAX:
31671+ loadsegment(gs, __USER_DS);
31672+ break;
31673+ case -1UL:
31674+ loadsegment(gs, __KERNEL_DS);
31675+ break;
31676+ default:
31677+ BUG();
31678+ }
31679 }
31680-EXPORT_SYMBOL(_copy_to_user);
31681+EXPORT_SYMBOL(__set_fs);
31682
31683-/**
31684- * copy_from_user: - Copy a block of data from user space.
31685- * @to: Destination address, in kernel space.
31686- * @from: Source address, in user space.
31687- * @n: Number of bytes to copy.
31688- *
31689- * Context: User context only. This function may sleep.
31690- *
31691- * Copy data from user space to kernel space.
31692- *
31693- * Returns number of bytes that could not be copied.
31694- * On success, this will be zero.
31695- *
31696- * If some data could not be copied, this function will pad the copied
31697- * data to the requested size using zero bytes.
31698- */
31699-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31700+void set_fs(mm_segment_t x)
31701 {
31702- if (access_ok(VERIFY_READ, from, n))
31703- n = __copy_from_user(to, from, n);
31704- else
31705- memset(to, 0, n);
31706- return n;
31707+ current_thread_info()->addr_limit = x;
31708+ __set_fs(x);
31709 }
31710-EXPORT_SYMBOL(_copy_from_user);
31711+EXPORT_SYMBOL(set_fs);
31712+#endif
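
The "__copyuser_seg" rewrites throughout usercopy_32.c lean on C's adjacent-string-literal concatenation: on i386 with UDEREF the macro expands to a segment-override prefix string, so each user-side access goes through a segment whose limit stops at the userland boundary, and it expands to the empty string otherwise. Note the asymmetry in the hunks: an override prefix only affects the DS-based source of a string instruction, while the ES-based destination of movs/stos cannot be overridden, which is why the to-user paths instead swap %es wholesale via __COPYUSER_SET_ES/__COPYUSER_RESTORE_ES. A runnable illustration of the string pasting (the macro value is an assumption modeled on the patch):

    #include <stdio.h>

    #define __copyuser_seg "gs;"    /* assumed i386+UDEREF expansion; "" otherwise */

    int main(void)
    {
            /* Adjacent literals merge at translation time, so the template
             * gains a %gs override in front of movsl. */
            const char tmpl[] = "6: rep; " __copyuser_seg "movsl\n";
            printf("%s", tmpl);     /* prints: 6: rep; gs;movsl */
            return 0;
    }
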
31713diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31714index c905e89..01ab928 100644
31715--- a/arch/x86/lib/usercopy_64.c
31716+++ b/arch/x86/lib/usercopy_64.c
31717@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31718 might_fault();
31719 /* no memory constraint because it doesn't change any memory gcc knows
31720 about */
31721+ pax_open_userland();
31722 stac();
31723 asm volatile(
31724 " testq %[size8],%[size8]\n"
31725@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31726 _ASM_EXTABLE(0b,3b)
31727 _ASM_EXTABLE(1b,2b)
31728 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31729- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31730+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31731 [zero] "r" (0UL), [eight] "r" (8UL));
31732 clac();
31733+ pax_close_userland();
31734 return size;
31735 }
31736 EXPORT_SYMBOL(__clear_user);
31737@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31738 }
31739 EXPORT_SYMBOL(clear_user);
31740
31741-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31742+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31743 {
31744- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31745- return copy_user_generic((__force void *)to, (__force void *)from, len);
31746- }
31747- return len;
31748+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31749+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31750+ return len;
31751 }
31752 EXPORT_SYMBOL(copy_in_user);
31753
31754@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
31755 * it is not necessary to optimize tail handling.
31756 */
31757 __visible unsigned long
31758-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31759+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
31760 {
31761 char c;
31762 unsigned zero_len;
31763
31764+ clac();
31765+ pax_close_userland();
31766 for (; len; --len, to++) {
31767 if (__get_user_nocheck(c, from++, sizeof(char)))
31768 break;
31769@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31770 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
31771 if (__put_user_nocheck(c, to++, sizeof(char)))
31772 break;
31773- clac();
31774 return len;
31775 }
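
The prototype changes in usercopy_64.c widen len from unsigned to unsigned long for a reason worth spelling out: on x86-64 a size_t count squeezed through a 32-bit parameter is silently truncated modulo 2^32, so both the copy length and any bounds check operate on the wrong value. A two-line demonstration of the truncation:

    #include <stdio.h>

    int main(void)
    {
            unsigned long requested = 5UL << 30;            /* 5 GiB */
            unsigned int through_u32 = (unsigned int)requested;
            printf("%lu -> %u\n", requested, through_u32);  /* 5368709120 -> 1073741824 */
            return 0;
    }
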
31776diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31777index ecfdc46..55b9309 100644
31778--- a/arch/x86/mm/Makefile
31779+++ b/arch/x86/mm/Makefile
31780@@ -32,3 +32,7 @@ obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31781 obj-$(CONFIG_MEMTEST) += memtest.o
31782
31783 obj-$(CONFIG_X86_INTEL_MPX) += mpx.o
31784+
31785+quote:="
31786+obj-$(CONFIG_X86_64) += uderef_64.o
31787+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
31788diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31789index 903ec1e..c4166b2 100644
31790--- a/arch/x86/mm/extable.c
31791+++ b/arch/x86/mm/extable.c
31792@@ -6,12 +6,24 @@
31793 static inline unsigned long
31794 ex_insn_addr(const struct exception_table_entry *x)
31795 {
31796- return (unsigned long)&x->insn + x->insn;
31797+ unsigned long reloc = 0;
31798+
31799+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31800+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31801+#endif
31802+
31803+ return (unsigned long)&x->insn + x->insn + reloc;
31804 }
31805 static inline unsigned long
31806 ex_fixup_addr(const struct exception_table_entry *x)
31807 {
31808- return (unsigned long)&x->fixup + x->fixup;
31809+ unsigned long reloc = 0;
31810+
31811+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31812+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31813+#endif
31814+
31815+ return (unsigned long)&x->fixup + x->fixup + reloc;
31816 }
31817
31818 int fixup_exception(struct pt_regs *regs)
31819@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
31820 unsigned long new_ip;
31821
31822 #ifdef CONFIG_PNPBIOS
31823- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
31824+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
31825 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
31826 extern u32 pnp_bios_is_utter_crap;
31827 pnp_bios_is_utter_crap = 1;
31828@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
31829 i += 4;
31830 p->fixup -= i;
31831 i += 4;
31832+
31833+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31834+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
31835+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31836+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31837+#endif
31838+
31839 }
31840 }
31841
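
The extable helpers decode entries stored as 32-bit self-relative offsets: the address is recovered as &x->insn + x->insn, so the table is position-independent as long as code and table move together. The hunk above adds a correction term for i386 KERNEXEC, where relocation leaves the build-time offsets stale by a constant delta, and the matching sort_extable() change applies the inverse adjustment alongside a BUILD_BUG_ON that ties it to CONFIG_BUILDTIME_EXTABLE_SORT. A sketch of the self-relative scheme:

    #include <stdint.h>

    struct extable_entry { int32_t insn, fixup; };  /* self-relative offsets */

    static uintptr_t ex_insn_addr_model(const struct extable_entry *x)
    {
            /* Stored value is "target minus the address of this field". */
            return (uintptr_t)&x->insn + (uintptr_t)(intptr_t)x->insn;
    }
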
31842diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
31843index e3ff27a..f38f7c0 100644
31844--- a/arch/x86/mm/fault.c
31845+++ b/arch/x86/mm/fault.c
31846@@ -13,12 +13,19 @@
31847 #include <linux/hugetlb.h> /* hstate_index_to_shift */
31848 #include <linux/prefetch.h> /* prefetchw */
31849 #include <linux/context_tracking.h> /* exception_enter(), ... */
31850+#include <linux/unistd.h>
31851+#include <linux/compiler.h>
31852
31853 #include <asm/traps.h> /* dotraplinkage, ... */
31854 #include <asm/pgalloc.h> /* pgd_*(), ... */
31855 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
31856 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
31857 #include <asm/vsyscall.h> /* emulate_vsyscall */
31858+#include <asm/tlbflush.h>
31859+
31860+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31861+#include <asm/stacktrace.h>
31862+#endif
31863
31864 #define CREATE_TRACE_POINTS
31865 #include <asm/trace/exceptions.h>
31866@@ -59,7 +66,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
31867 int ret = 0;
31868
31869 /* kprobe_running() needs smp_processor_id() */
31870- if (kprobes_built_in() && !user_mode_vm(regs)) {
31871+ if (kprobes_built_in() && !user_mode(regs)) {
31872 preempt_disable();
31873 if (kprobe_running() && kprobe_fault_handler(regs, 14))
31874 ret = 1;
31875@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
31876 return !instr_lo || (instr_lo>>1) == 1;
31877 case 0x00:
31878 /* Prefetch instruction is 0x0F0D or 0x0F18 */
31879- if (probe_kernel_address(instr, opcode))
31880+ if (user_mode(regs)) {
31881+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31882+ return 0;
31883+ } else if (probe_kernel_address(instr, opcode))
31884 return 0;
31885
31886 *prefetch = (instr_lo == 0xF) &&
31887@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
31888 while (instr < max_instr) {
31889 unsigned char opcode;
31890
31891- if (probe_kernel_address(instr, opcode))
31892+ if (user_mode(regs)) {
31893+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31894+ break;
31895+ } else if (probe_kernel_address(instr, opcode))
31896 break;
31897
31898 instr++;
31899@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
31900 force_sig_info(si_signo, &info, tsk);
31901 }
31902
31903+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31904+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
31905+#endif
31906+
31907+#ifdef CONFIG_PAX_EMUTRAMP
31908+static int pax_handle_fetch_fault(struct pt_regs *regs);
31909+#endif
31910+
31911+#ifdef CONFIG_PAX_PAGEEXEC
31912+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
31913+{
31914+ pgd_t *pgd;
31915+ pud_t *pud;
31916+ pmd_t *pmd;
31917+
31918+ pgd = pgd_offset(mm, address);
31919+ if (!pgd_present(*pgd))
31920+ return NULL;
31921+ pud = pud_offset(pgd, address);
31922+ if (!pud_present(*pud))
31923+ return NULL;
31924+ pmd = pmd_offset(pud, address);
31925+ if (!pmd_present(*pmd))
31926+ return NULL;
31927+ return pmd;
31928+}
31929+#endif
31930+
31931 DEFINE_SPINLOCK(pgd_lock);
31932 LIST_HEAD(pgd_list);
31933
31934@@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
31935 for (address = VMALLOC_START & PMD_MASK;
31936 address >= TASK_SIZE && address < FIXADDR_TOP;
31937 address += PMD_SIZE) {
31938+
31939+#ifdef CONFIG_PAX_PER_CPU_PGD
31940+ unsigned long cpu;
31941+#else
31942 struct page *page;
31943+#endif
31944
31945 spin_lock(&pgd_lock);
31946+
31947+#ifdef CONFIG_PAX_PER_CPU_PGD
31948+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31949+ pgd_t *pgd = get_cpu_pgd(cpu, user);
31950+ pmd_t *ret;
31951+
31952+ ret = vmalloc_sync_one(pgd, address);
31953+ if (!ret)
31954+ break;
31955+ pgd = get_cpu_pgd(cpu, kernel);
31956+#else
31957 list_for_each_entry(page, &pgd_list, lru) {
31958+ pgd_t *pgd;
31959 spinlock_t *pgt_lock;
31960 pmd_t *ret;
31961
31962@@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
31963 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31964
31965 spin_lock(pgt_lock);
31966- ret = vmalloc_sync_one(page_address(page), address);
31967+ pgd = page_address(page);
31968+#endif
31969+
31970+ ret = vmalloc_sync_one(pgd, address);
31971+
31972+#ifndef CONFIG_PAX_PER_CPU_PGD
31973 spin_unlock(pgt_lock);
31974+#endif
31975
31976 if (!ret)
31977 break;
31978@@ -281,6 +345,12 @@ static noinline int vmalloc_fault(unsigned long address)
31979 * an interrupt in the middle of a task switch..
31980 */
31981 pgd_paddr = read_cr3();
31982+
31983+#ifdef CONFIG_PAX_PER_CPU_PGD
31984+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
31985+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
31986+#endif
31987+
31988 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
31989 if (!pmd_k)
31990 return -1;
31991@@ -377,11 +447,25 @@ static noinline int vmalloc_fault(unsigned long address)
31992 * happen within a race in page table update. In the later
31993 * case just flush:
31994 */
31995- pgd = pgd_offset(current->active_mm, address);
31996+
31997 pgd_ref = pgd_offset_k(address);
31998 if (pgd_none(*pgd_ref))
31999 return -1;
32000
32001+#ifdef CONFIG_PAX_PER_CPU_PGD
32002+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
32003+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
32004+ if (pgd_none(*pgd)) {
32005+ set_pgd(pgd, *pgd_ref);
32006+ arch_flush_lazy_mmu_mode();
32007+ } else {
32008+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
32009+ }
32010+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
32011+#else
32012+ pgd = pgd_offset(current->active_mm, address);
32013+#endif
32014+
32015 if (pgd_none(*pgd)) {
32016 set_pgd(pgd, *pgd_ref);
32017 arch_flush_lazy_mmu_mode();
32018@@ -548,7 +632,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
32019 static int is_errata100(struct pt_regs *regs, unsigned long address)
32020 {
32021 #ifdef CONFIG_X86_64
32022- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
32023+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
32024 return 1;
32025 #endif
32026 return 0;
32027@@ -575,9 +659,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
32028 }
32029
32030 static const char nx_warning[] = KERN_CRIT
32031-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
32032+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
32033 static const char smep_warning[] = KERN_CRIT
32034-"unable to execute userspace code (SMEP?) (uid: %d)\n";
32035+"unable to execute userspace code (SMEP?) (uid: %d, task: %s, pid: %d)\n";
32036
32037 static void
32038 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32039@@ -586,7 +670,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32040 if (!oops_may_print())
32041 return;
32042
32043- if (error_code & PF_INSTR) {
32044+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
32045 unsigned int level;
32046 pgd_t *pgd;
32047 pte_t *pte;
32048@@ -597,13 +681,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32049 pte = lookup_address_in_pgd(pgd, address, &level);
32050
32051 if (pte && pte_present(*pte) && !pte_exec(*pte))
32052- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
32053+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
32054 if (pte && pte_present(*pte) && pte_exec(*pte) &&
32055 (pgd_flags(*pgd) & _PAGE_USER) &&
32056 (read_cr4() & X86_CR4_SMEP))
32057- printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
32058+ printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
32059 }
32060
32061+#ifdef CONFIG_PAX_KERNEXEC
32062+ if (init_mm.start_code <= address && address < init_mm.end_code) {
32063+ if (current->signal->curr_ip)
32064+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
32065+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
32066+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32067+ else
32068+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
32069+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32070+ }
32071+#endif
32072+
32073 printk(KERN_ALERT "BUG: unable to handle kernel ");
32074 if (address < PAGE_SIZE)
32075 printk(KERN_CONT "NULL pointer dereference");
32076@@ -782,6 +878,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
32077 return;
32078 }
32079 #endif
32080+
32081+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32082+ if (pax_is_fetch_fault(regs, error_code, address)) {
32083+
32084+#ifdef CONFIG_PAX_EMUTRAMP
32085+ switch (pax_handle_fetch_fault(regs)) {
32086+ case 2:
32087+ return;
32088+ }
32089+#endif
32090+
32091+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32092+ do_group_exit(SIGKILL);
32093+ }
32094+#endif
32095+
32096 /* Kernel addresses are always protection faults: */
32097 if (address >= TASK_SIZE)
32098 error_code |= PF_PROT;
32099@@ -864,7 +976,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
32100 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
32101 printk(KERN_ERR
32102 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
32103- tsk->comm, tsk->pid, address);
32104+ tsk->comm, task_pid_nr(tsk), address);
32105 code = BUS_MCEERR_AR;
32106 }
32107 #endif
32108@@ -916,6 +1028,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
32109 return 1;
32110 }
32111
32112+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32113+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
32114+{
32115+ pte_t *pte;
32116+ pmd_t *pmd;
32117+ spinlock_t *ptl;
32118+ unsigned char pte_mask;
32119+
32120+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
32121+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
32122+ return 0;
32123+
32124+ /* PaX: it's our fault, let's handle it if we can */
32125+
32126+ /* PaX: take a look at read faults before acquiring any locks */
32127+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
32128+ /* instruction fetch attempt from a protected page in user mode */
32129+ up_read(&mm->mmap_sem);
32130+
32131+#ifdef CONFIG_PAX_EMUTRAMP
32132+ switch (pax_handle_fetch_fault(regs)) {
32133+ case 2:
32134+ return 1;
32135+ }
32136+#endif
32137+
32138+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32139+ do_group_exit(SIGKILL);
32140+ }
32141+
32142+ pmd = pax_get_pmd(mm, address);
32143+ if (unlikely(!pmd))
32144+ return 0;
32145+
32146+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
32147+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
32148+ pte_unmap_unlock(pte, ptl);
32149+ return 0;
32150+ }
32151+
32152+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
32153+ /* write attempt to a protected page in user mode */
32154+ pte_unmap_unlock(pte, ptl);
32155+ return 0;
32156+ }
32157+
32158+#ifdef CONFIG_SMP
32159+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
32160+#else
32161+ if (likely(address > get_limit(regs->cs)))
32162+#endif
32163+ {
32164+ set_pte(pte, pte_mkread(*pte));
32165+ __flush_tlb_one(address);
32166+ pte_unmap_unlock(pte, ptl);
32167+ up_read(&mm->mmap_sem);
32168+ return 1;
32169+ }
32170+
32171+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
32172+
32173+ /*
32174+ * PaX: fill DTLB with user rights and retry
32175+ */
32176+ __asm__ __volatile__ (
32177+ "orb %2,(%1)\n"
32178+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
32179+/*
32180+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
32181+ * (and AMD's) TLBs: namely, they do not cache PTEs that would raise *any*
32182+ * page fault when examined during a TLB load attempt. This is true not only
32183+ * for PTEs holding a non-present entry but also for present entries that will
32184+ * raise a page fault (such as those set up by PaX, or by the copy-on-write
32185+ * mechanism). In effect it means that we do *not* need to flush the TLBs
32186+ * for our target pages since their PTEs are simply not in the TLBs at all.
32187+ *
32188+ * The best thing about omitting it is that we gain around 15-20% speed in the
32189+ * fast path of the page fault handler and can get rid of tracing since we
32190+ * can no longer flush unintended entries.
32191+ */
32192+ "invlpg (%0)\n"
32193+#endif
32194+ __copyuser_seg"testb $0,(%0)\n"
32195+ "xorb %3,(%1)\n"
32196+ :
32197+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32198+ : "memory", "cc");
32199+ pte_unmap_unlock(pte, ptl);
32200+ up_read(&mm->mmap_sem);
32201+ return 1;
32202+}
32203+#endif
32204+
32205 /*
32206 * Handle a spurious fault caused by a stale TLB entry.
32207 *
32208@@ -1001,6 +1206,9 @@ int show_unhandled_signals = 1;
32209 static inline int
32210 access_error(unsigned long error_code, struct vm_area_struct *vma)
32211 {
32212+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32213+ return 1;
32214+
32215 if (error_code & PF_WRITE) {
32216 /* write, present and write, not present: */
32217 if (unlikely(!(vma->vm_flags & VM_WRITE)))
32218@@ -1035,7 +1243,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
32219 if (error_code & PF_USER)
32220 return false;
32221
32222- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
32223+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
32224 return false;
32225
32226 return true;
32227@@ -1063,6 +1271,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32228 tsk = current;
32229 mm = tsk->mm;
32230
32231+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32232+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32233+ if (!search_exception_tables(regs->ip)) {
32234+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32235+ bad_area_nosemaphore(regs, error_code, address);
32236+ return;
32237+ }
32238+ if (address < pax_user_shadow_base) {
32239+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32240+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
32241+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
32242+ } else
32243+ address -= pax_user_shadow_base;
32244+ }
32245+#endif
32246+
32247 /*
32248 * Detect and handle instructions that would cause a page fault for
32249 * both a tracked kernel page and a userspace page.
32250@@ -1140,7 +1364,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32251 * User-mode registers count as a user access even for any
32252 * potential system fault or CPU buglet:
32253 */
32254- if (user_mode_vm(regs)) {
32255+ if (user_mode(regs)) {
32256 local_irq_enable();
32257 error_code |= PF_USER;
32258 flags |= FAULT_FLAG_USER;
32259@@ -1187,6 +1411,11 @@ retry:
32260 might_sleep();
32261 }
32262
32263+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32264+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32265+ return;
32266+#endif
32267+
32268 vma = find_vma(mm, address);
32269 if (unlikely(!vma)) {
32270 bad_area(regs, error_code, address);
32271@@ -1198,18 +1427,24 @@ retry:
32272 bad_area(regs, error_code, address);
32273 return;
32274 }
32275- if (error_code & PF_USER) {
32276- /*
32277- * Accessing the stack below %sp is always a bug.
32278- * The large cushion allows instructions like enter
32279- * and pusha to work. ("enter $65535, $31" pushes
32280- * 32 pointers and then decrements %sp by 65535.)
32281- */
32282- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32283- bad_area(regs, error_code, address);
32284- return;
32285- }
32286+ /*
32287+ * Accessing the stack below %sp is always a bug.
32288+ * The large cushion allows instructions like enter
32289+ * and pusha to work. ("enter $65535, $31" pushes
32290+ * 32 pointers and then decrements %sp by 65535.)
32291+ */
32292+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32293+ bad_area(regs, error_code, address);
32294+ return;
32295 }
32296+
32297+#ifdef CONFIG_PAX_SEGMEXEC
32298+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32299+ bad_area(regs, error_code, address);
32300+ return;
32301+ }
32302+#endif
32303+
32304 if (unlikely(expand_stack(vma, address))) {
32305 bad_area(regs, error_code, address);
32306 return;
32307@@ -1329,3 +1564,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32308 }
32309 NOKPROBE_SYMBOL(trace_do_page_fault);
32310 #endif /* CONFIG_TRACING */
32311+
32312+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32313+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32314+{
32315+ struct mm_struct *mm = current->mm;
32316+ unsigned long ip = regs->ip;
32317+
32318+ if (v8086_mode(regs))
32319+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32320+
32321+#ifdef CONFIG_PAX_PAGEEXEC
32322+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32323+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32324+ return true;
32325+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32326+ return true;
32327+ return false;
32328+ }
32329+#endif
32330+
32331+#ifdef CONFIG_PAX_SEGMEXEC
32332+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32333+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32334+ return true;
32335+ return false;
32336+ }
32337+#endif
32338+
32339+ return false;
32340+}
32341+#endif
32342+
32343+#ifdef CONFIG_PAX_EMUTRAMP
32344+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32345+{
32346+ int err;
32347+
32348+ do { /* PaX: libffi trampoline emulation */
32349+ unsigned char mov, jmp;
32350+ unsigned int addr1, addr2;
32351+
32352+#ifdef CONFIG_X86_64
32353+ if ((regs->ip + 9) >> 32)
32354+ break;
32355+#endif
32356+
32357+ err = get_user(mov, (unsigned char __user *)regs->ip);
32358+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32359+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32360+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32361+
32362+ if (err)
32363+ break;
32364+
32365+ if (mov == 0xB8 && jmp == 0xE9) {
32366+ regs->ax = addr1;
32367+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32368+ return 2;
32369+ }
32370+ } while (0);
32371+
32372+ do { /* PaX: gcc trampoline emulation #1 */
32373+ unsigned char mov1, mov2;
32374+ unsigned short jmp;
32375+ unsigned int addr1, addr2;
32376+
32377+#ifdef CONFIG_X86_64
32378+ if ((regs->ip + 11) >> 32)
32379+ break;
32380+#endif
32381+
32382+ err = get_user(mov1, (unsigned char __user *)regs->ip);
32383+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32384+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32385+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32386+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32387+
32388+ if (err)
32389+ break;
32390+
32391+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32392+ regs->cx = addr1;
32393+ regs->ax = addr2;
32394+ regs->ip = addr2;
32395+ return 2;
32396+ }
32397+ } while (0);
32398+
32399+ do { /* PaX: gcc trampoline emulation #2 */
32400+ unsigned char mov, jmp;
32401+ unsigned int addr1, addr2;
32402+
32403+#ifdef CONFIG_X86_64
32404+ if ((regs->ip + 9) >> 32)
32405+ break;
32406+#endif
32407+
32408+ err = get_user(mov, (unsigned char __user *)regs->ip);
32409+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32410+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32411+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32412+
32413+ if (err)
32414+ break;
32415+
32416+ if (mov == 0xB9 && jmp == 0xE9) {
32417+ regs->cx = addr1;
32418+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32419+ return 2;
32420+ }
32421+ } while (0);
32422+
32423+ return 1; /* PaX in action */
32424+}
32425+
32426+#ifdef CONFIG_X86_64
32427+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
32428+{
32429+ int err;
32430+
32431+ do { /* PaX: libffi trampoline emulation */
32432+ unsigned short mov1, mov2, jmp1;
32433+ unsigned char stcclc, jmp2;
32434+ unsigned long addr1, addr2;
32435+
32436+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32437+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32438+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32439+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32440+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
32441+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
32442+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
32443+
32444+ if (err)
32445+ break;
32446+
32447+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32448+ regs->r11 = addr1;
32449+ regs->r10 = addr2;
32450+ if (stcclc == 0xF8)
32451+ regs->flags &= ~X86_EFLAGS_CF;
32452+ else
32453+ regs->flags |= X86_EFLAGS_CF;
32454+ regs->ip = addr1;
32455+ return 2;
32456+ }
32457+ } while (0);
32458+
32459+ do { /* PaX: gcc trampoline emulation #1 */
32460+ unsigned short mov1, mov2, jmp1;
32461+ unsigned char jmp2;
32462+ unsigned int addr1;
32463+ unsigned long addr2;
32464+
32465+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32466+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
32467+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
32468+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
32469+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
32470+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
32471+
32472+ if (err)
32473+ break;
32474+
32475+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32476+ regs->r11 = addr1;
32477+ regs->r10 = addr2;
32478+ regs->ip = addr1;
32479+ return 2;
32480+ }
32481+ } while (0);
32482+
32483+ do { /* PaX: gcc trampoline emulation #2 */
32484+ unsigned short mov1, mov2, jmp1;
32485+ unsigned char jmp2;
32486+ unsigned long addr1, addr2;
32487+
32488+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32489+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32490+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32491+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32492+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
32493+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
32494+
32495+ if (err)
32496+ break;
32497+
32498+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32499+ regs->r11 = addr1;
32500+ regs->r10 = addr2;
32501+ regs->ip = addr1;
32502+ return 2;
32503+ }
32504+ } while (0);
32505+
32506+ return 1; /* PaX in action */
32507+}
32508+#endif
32509+
32510+/*
32511+ * PaX: decide what to do with offenders (regs->ip = fault address)
32512+ *
32513+ * returns 1 when task should be killed
32514+ * 2 when gcc trampoline was detected
32515+ */
32516+static int pax_handle_fetch_fault(struct pt_regs *regs)
32517+{
32518+ if (v8086_mode(regs))
32519+ return 1;
32520+
32521+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
32522+ return 1;
32523+
32524+#ifdef CONFIG_X86_32
32525+ return pax_handle_fetch_fault_32(regs);
32526+#else
32527+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
32528+ return pax_handle_fetch_fault_32(regs);
32529+ else
32530+ return pax_handle_fetch_fault_64(regs);
32531+#endif
32532+}
32533+#endif
32534+
32535+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32536+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
32537+{
32538+ long i;
32539+
32540+ printk(KERN_ERR "PAX: bytes at PC: ");
32541+ for (i = 0; i < 20; i++) {
32542+ unsigned char c;
32543+ if (get_user(c, (unsigned char __force_user *)pc+i))
32544+ printk(KERN_CONT "?? ");
32545+ else
32546+ printk(KERN_CONT "%02x ", c);
32547+ }
32548+ printk("\n");
32549+
32550+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
32551+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
32552+ unsigned long c;
32553+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
32554+#ifdef CONFIG_X86_32
32555+ printk(KERN_CONT "???????? ");
32556+#else
32557+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
32558+ printk(KERN_CONT "???????? ???????? ");
32559+ else
32560+ printk(KERN_CONT "???????????????? ");
32561+#endif
32562+ } else {
32563+#ifdef CONFIG_X86_64
32564+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
32565+ printk(KERN_CONT "%08x ", (unsigned int)c);
32566+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
32567+ } else
32568+#endif
32569+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
32570+ }
32571+ }
32572+ printk("\n");
32573+}
32574+#endif
32575+
32576+/**
32577+ * probe_kernel_write(): safely attempt to write to a location
32578+ * @dst: address to write to
32579+ * @src: pointer to the data that shall be written
32580+ * @size: size of the data chunk
32581+ *
32582+ * Safely write to address @dst from the buffer at @src. If a kernel fault
32583+ * happens, handle that and return -EFAULT.
32584+ */
32585+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
32586+{
32587+ long ret;
32588+ mm_segment_t old_fs = get_fs();
32589+
32590+ set_fs(KERNEL_DS);
32591+ pagefault_disable();
32592+ pax_open_kernel();
32593+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
32594+ pax_close_kernel();
32595+ pagefault_enable();
32596+ set_fs(old_fs);
32597+
32598+ return ret ? -EFAULT : 0;
32599+}
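
Aside, not part of the patch: pax_handle_fetch_fault_32() above recognizes a gcc nested-function trampoline by its exact byte pattern — "mov $imm32, %ecx" (0xB9 imm32) followed by "jmp rel32" (0xE9 rel32) — and emulates it instead of letting the fetch from a non-executable page proceed. A minimal userspace C sketch of that decode step (hypothetical names; assumes a little-endian host, as on x86):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* returns the emulated post-jump ip, or 0 if the bytes are not this
 * trampoline; mirrors the mov == 0xB9 && jmp == 0xE9 case in the patch */
static uint32_t decode_gcc_trampoline(const uint8_t buf[10], uint32_t ip,
                                      uint32_t *ecx)
{
	uint32_t imm, rel;

	if (buf[0] != 0xB9 || buf[5] != 0xE9)
		return 0;
	memcpy(&imm, buf + 1, 4);	/* mov's imm32, little-endian */
	memcpy(&rel, buf + 6, 4);	/* jmp's rel32 displacement */
	*ecx = imm;
	return ip + rel + 10;		/* rel32 is relative to the next insn */
}

int main(void)
{
	/* mov $0x11223344, %ecx ; jmp to 0x16 bytes past the trampoline */
	uint8_t tramp[10] = { 0xB9, 0x44, 0x33, 0x22, 0x11,
			      0xE9, 0x16, 0x00, 0x00, 0x00 };
	uint32_t ecx = 0;
	uint32_t target = decode_gcc_trampoline(tramp, 0x1000, &ecx);

	printf("ecx=%#x target=%#x\n", ecx, target);	/* 0x11223344, 0x1020 */
	return 0;
}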
32600diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
32601index 224b142..c2c9423 100644
32602--- a/arch/x86/mm/gup.c
32603+++ b/arch/x86/mm/gup.c
32604@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
32605 addr = start;
32606 len = (unsigned long) nr_pages << PAGE_SHIFT;
32607 end = start + len;
32608- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
32609+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32610 (void __user *)start, len)))
32611 return 0;
32612
32613@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
32614 goto slow_irqon;
32615 #endif
32616
32617+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32618+ (void __user *)start, len)))
32619+ return 0;
32620+
32621 /*
32622 * XXX: batch / limit 'nr', to avoid large irq off latency
32623 * needs some instrumenting to determine the common sizes used by
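
Aside, not part of the patch: both gup.c hunks above reject the request up front when the user address range fails validation, since the fast GUP path may not prefault. A user range check of this kind has to be overflow-safe; a hedged sketch of the underlying test (hypothetical name):

#include <stdbool.h>

/* true iff [addr, addr + len) lies entirely below 'limit', phrased so
 * that addr + len is never computed and thus cannot wrap around */
static bool range_below_limit(unsigned long addr, unsigned long len,
                              unsigned long limit)
{
	return len <= limit && addr <= limit - len;
}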
32624diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
32625index 4500142..53a363c 100644
32626--- a/arch/x86/mm/highmem_32.c
32627+++ b/arch/x86/mm/highmem_32.c
32628@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
32629 idx = type + KM_TYPE_NR*smp_processor_id();
32630 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32631 BUG_ON(!pte_none(*(kmap_pte-idx)));
32632+
32633+ pax_open_kernel();
32634 set_pte(kmap_pte-idx, mk_pte(page, prot));
32635+ pax_close_kernel();
32636+
32637 arch_flush_lazy_mmu_mode();
32638
32639 return (void *)vaddr;
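
Aside, a sketch rather than the patch's own code: the pax_open_kernel()/pax_close_kernel() bracket added above makes otherwise read-only page-table memory writable just for the duration of the set_pte(). Assuming the CONFIG_PAX_KERNEXEC implementation elsewhere in this patch toggles the write-protect bit in CR0 (which is how PaX does it on x86), the idea is roughly:

#include <linux/preempt.h>
#include <asm/processor-flags.h>	/* X86_CR0_WP */
#include <asm/special_insns.h>		/* read_cr0()/write_cr0() */

static inline unsigned long sketch_open_kernel(void)	/* hypothetical name */
{
	unsigned long cr0;

	preempt_disable();
	cr0 = read_cr0() ^ X86_CR0_WP;	/* clear WP: ring 0 may write RO pages */
	write_cr0(cr0);
	return cr0;
}

static inline void sketch_close_kernel(unsigned long cr0)
{
	write_cr0(cr0 ^ X86_CR0_WP);	/* set WP again */
	preempt_enable();
}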
32640diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
32641index 006cc91..bf05a83 100644
32642--- a/arch/x86/mm/hugetlbpage.c
32643+++ b/arch/x86/mm/hugetlbpage.c
32644@@ -86,23 +86,24 @@ int pud_huge(pud_t pud)
32645 #ifdef CONFIG_HUGETLB_PAGE
32646 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
32647 unsigned long addr, unsigned long len,
32648- unsigned long pgoff, unsigned long flags)
32649+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32650 {
32651 struct hstate *h = hstate_file(file);
32652 struct vm_unmapped_area_info info;
32653-
32654+
32655 info.flags = 0;
32656 info.length = len;
32657 info.low_limit = current->mm->mmap_legacy_base;
32658 info.high_limit = TASK_SIZE;
32659 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32660 info.align_offset = 0;
32661+ info.threadstack_offset = offset;
32662 return vm_unmapped_area(&info);
32663 }
32664
32665 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32666 unsigned long addr0, unsigned long len,
32667- unsigned long pgoff, unsigned long flags)
32668+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32669 {
32670 struct hstate *h = hstate_file(file);
32671 struct vm_unmapped_area_info info;
32672@@ -114,6 +115,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32673 info.high_limit = current->mm->mmap_base;
32674 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32675 info.align_offset = 0;
32676+ info.threadstack_offset = offset;
32677 addr = vm_unmapped_area(&info);
32678
32679 /*
32680@@ -126,6 +128,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32681 VM_BUG_ON(addr != -ENOMEM);
32682 info.flags = 0;
32683 info.low_limit = TASK_UNMAPPED_BASE;
32684+
32685+#ifdef CONFIG_PAX_RANDMMAP
32686+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32687+ info.low_limit += current->mm->delta_mmap;
32688+#endif
32689+
32690 info.high_limit = TASK_SIZE;
32691 addr = vm_unmapped_area(&info);
32692 }
32693@@ -140,10 +148,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32694 struct hstate *h = hstate_file(file);
32695 struct mm_struct *mm = current->mm;
32696 struct vm_area_struct *vma;
32697+ unsigned long pax_task_size = TASK_SIZE;
32698+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32699
32700 if (len & ~huge_page_mask(h))
32701 return -EINVAL;
32702- if (len > TASK_SIZE)
32703+
32704+#ifdef CONFIG_PAX_SEGMEXEC
32705+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32706+ pax_task_size = SEGMEXEC_TASK_SIZE;
32707+#endif
32708+
32709+ pax_task_size -= PAGE_SIZE;
32710+
32711+ if (len > pax_task_size)
32712 return -ENOMEM;
32713
32714 if (flags & MAP_FIXED) {
32715@@ -152,19 +170,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32716 return addr;
32717 }
32718
32719+#ifdef CONFIG_PAX_RANDMMAP
32720+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32721+#endif
32722+
32723 if (addr) {
32724 addr = ALIGN(addr, huge_page_size(h));
32725 vma = find_vma(mm, addr);
32726- if (TASK_SIZE - len >= addr &&
32727- (!vma || addr + len <= vma->vm_start))
32728+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32729 return addr;
32730 }
32731 if (mm->get_unmapped_area == arch_get_unmapped_area)
32732 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32733- pgoff, flags);
32734+ pgoff, flags, offset);
32735 else
32736 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32737- pgoff, flags);
32738+ pgoff, flags, offset);
32739 }
32740 #endif /* CONFIG_HUGETLB_PAGE */
32741
32742diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32743index 079c3b6..7069023 100644
32744--- a/arch/x86/mm/init.c
32745+++ b/arch/x86/mm/init.c
32746@@ -4,6 +4,7 @@
32747 #include <linux/swap.h>
32748 #include <linux/memblock.h>
32749 #include <linux/bootmem.h> /* for max_low_pfn */
32750+#include <linux/tboot.h>
32751
32752 #include <asm/cacheflush.h>
32753 #include <asm/e820.h>
32754@@ -17,6 +18,8 @@
32755 #include <asm/proto.h>
32756 #include <asm/dma.h> /* for MAX_DMA_PFN */
32757 #include <asm/microcode.h>
32758+#include <asm/desc.h>
32759+#include <asm/bios_ebda.h>
32760
32761 /*
32762 * We need to define the tracepoints somewhere, and tlb.c
32763@@ -596,7 +599,18 @@ void __init init_mem_mapping(void)
32764 early_ioremap_page_table_range_init();
32765 #endif
32766
32767+#ifdef CONFIG_PAX_PER_CPU_PGD
32768+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32769+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32770+ KERNEL_PGD_PTRS);
32771+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32772+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32773+ KERNEL_PGD_PTRS);
32774+ load_cr3(get_cpu_pgd(0, kernel));
32775+#else
32776 load_cr3(swapper_pg_dir);
32777+#endif
32778+
32779 __flush_tlb_all();
32780
32781 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32782@@ -612,10 +626,40 @@ void __init init_mem_mapping(void)
32783 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
32784 * mmio resources as well as potential bios/acpi data regions.
32785 */
32786+
32787+#ifdef CONFIG_GRKERNSEC_KMEM
32788+static unsigned int ebda_start __read_only;
32789+static unsigned int ebda_end __read_only;
32790+#endif
32791+
32792 int devmem_is_allowed(unsigned long pagenr)
32793 {
32794- if (pagenr < 256)
32795+#ifdef CONFIG_GRKERNSEC_KMEM
32796+ /* allow BDA */
32797+ if (!pagenr)
32798 return 1;
32799+ /* allow EBDA */
32800+ if (pagenr >= ebda_start && pagenr < ebda_end)
32801+ return 1;
32802+ /* if tboot is in use, allow access to its hardcoded serial log range */
32803+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32804+ return 1;
32805+#else
32806+ if (!pagenr)
32807+ return 1;
32808+#ifdef CONFIG_VM86
32809+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32810+ return 1;
32811+#endif
32812+#endif
32813+
32814+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32815+ return 1;
32816+#ifdef CONFIG_GRKERNSEC_KMEM
32817+ /* throw out everything else below 1MB */
32818+ if (pagenr <= 256)
32819+ return 0;
32820+#endif
32821 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32822 return 0;
32823 if (!page_is_ram(pagenr))
32824@@ -661,8 +705,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
32825 #endif
32826 }
32827
32828+#ifdef CONFIG_GRKERNSEC_KMEM
32829+static inline void gr_init_ebda(void)
32830+{
32831+ unsigned int ebda_addr;
32832+ unsigned int ebda_size = 0;
32833+
32834+ ebda_addr = get_bios_ebda();
32835+ if (ebda_addr) {
32836+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
32837+ ebda_size <<= 10;
32838+ }
32839+ if (ebda_addr && ebda_size) {
32840+ ebda_start = ebda_addr >> PAGE_SHIFT;
32841+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
32842+ } else {
32843+ ebda_start = 0x9f000 >> PAGE_SHIFT;
32844+ ebda_end = 0xa0000 >> PAGE_SHIFT;
32845+ }
32846+}
32847+#else
32848+static inline void gr_init_ebda(void) { }
32849+#endif
32850+
32851 void free_initmem(void)
32852 {
32853+#ifdef CONFIG_PAX_KERNEXEC
32854+#ifdef CONFIG_X86_32
32855+ /* PaX: limit KERNEL_CS to actual size */
32856+ unsigned long addr, limit;
32857+ struct desc_struct d;
32858+ int cpu;
32859+#else
32860+ pgd_t *pgd;
32861+ pud_t *pud;
32862+ pmd_t *pmd;
32863+ unsigned long addr, end;
32864+#endif
32865+#endif
32866+
32867+ gr_init_ebda();
32868+
32869+#ifdef CONFIG_PAX_KERNEXEC
32870+#ifdef CONFIG_X86_32
32871+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
32872+ limit = (limit - 1UL) >> PAGE_SHIFT;
32873+
32874+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
32875+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32876+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
32877+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
32878+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
32879+ }
32880+
32881+ /* PaX: make KERNEL_CS read-only */
32882+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
32883+ if (!paravirt_enabled())
32884+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
32885+/*
32886+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
32887+ pgd = pgd_offset_k(addr);
32888+ pud = pud_offset(pgd, addr);
32889+ pmd = pmd_offset(pud, addr);
32890+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32891+ }
32892+*/
32893+#ifdef CONFIG_X86_PAE
32894+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
32895+/*
32896+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
32897+ pgd = pgd_offset_k(addr);
32898+ pud = pud_offset(pgd, addr);
32899+ pmd = pmd_offset(pud, addr);
32900+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32901+ }
32902+*/
32903+#endif
32904+
32905+#ifdef CONFIG_MODULES
32906+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
32907+#endif
32908+
32909+#else
32910+ /* PaX: make kernel code/rodata read-only, rest non-executable */
32911+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
32912+ pgd = pgd_offset_k(addr);
32913+ pud = pud_offset(pgd, addr);
32914+ pmd = pmd_offset(pud, addr);
32915+ if (!pmd_present(*pmd))
32916+ continue;
32917+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
32918+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32919+ else
32920+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32921+ }
32922+
32923+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
32924+ end = addr + KERNEL_IMAGE_SIZE;
32925+ for (; addr < end; addr += PMD_SIZE) {
32926+ pgd = pgd_offset_k(addr);
32927+ pud = pud_offset(pgd, addr);
32928+ pmd = pmd_offset(pud, addr);
32929+ if (!pmd_present(*pmd))
32930+ continue;
32931+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
32932+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32933+ }
32934+#endif
32935+
32936+ flush_tlb_all();
32937+#endif
32938+
32939 free_init_pages("unused kernel",
32940 (unsigned long)(&__init_begin),
32941 (unsigned long)(&__init_end));
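
Aside, not part of the patch: the GRKERNSEC_KMEM branch of devmem_is_allowed() above amounts to a small whitelist over page numbers (4 KiB pages, so 0x100 pages = 1 MiB). Restated as a standalone predicate — tboot's serial-log window omitted for brevity, ebda_start/ebda_end standing in for what gr_init_ebda() computes:

#include <stdbool.h>

#define ISA_FIRST_PAGE 0x0a0UL	/* ISA_START_ADDRESS >> PAGE_SHIFT */
#define ISA_END_PAGE   0x100UL	/* ISA_END_ADDRESS   >> PAGE_SHIFT */

static bool kmem_low_page_allowed(unsigned long pagenr,
                                  unsigned long ebda_start,
                                  unsigned long ebda_end)
{
	if (pagenr == 0)					/* BIOS data area */
		return true;
	if (pagenr >= ebda_start && pagenr < ebda_end)		/* EBDA */
		return true;
	if (pagenr >= ISA_FIRST_PAGE && pagenr < ISA_END_PAGE)	/* ISA hole */
		return true;
	/* everything else at or below the 1 MiB mark is rejected; higher
	 * pages still face the iomem/page_is_ram checks in the real code */
	return pagenr > 0x100;
}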
32942diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
32943index c8140e1..59257fc 100644
32944--- a/arch/x86/mm/init_32.c
32945+++ b/arch/x86/mm/init_32.c
32946@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
32947 bool __read_mostly __vmalloc_start_set = false;
32948
32949 /*
32950- * Creates a middle page table and puts a pointer to it in the
32951- * given global directory entry. This only returns the gd entry
32952- * in non-PAE compilation mode, since the middle layer is folded.
32953- */
32954-static pmd_t * __init one_md_table_init(pgd_t *pgd)
32955-{
32956- pud_t *pud;
32957- pmd_t *pmd_table;
32958-
32959-#ifdef CONFIG_X86_PAE
32960- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
32961- pmd_table = (pmd_t *)alloc_low_page();
32962- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
32963- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
32964- pud = pud_offset(pgd, 0);
32965- BUG_ON(pmd_table != pmd_offset(pud, 0));
32966-
32967- return pmd_table;
32968- }
32969-#endif
32970- pud = pud_offset(pgd, 0);
32971- pmd_table = pmd_offset(pud, 0);
32972-
32973- return pmd_table;
32974-}
32975-
32976-/*
32977 * Create a page table and place a pointer to it in a middle page
32978 * directory entry:
32979 */
32980@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
32981 pte_t *page_table = (pte_t *)alloc_low_page();
32982
32983 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
32984+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32985+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
32986+#else
32987 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
32988+#endif
32989 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
32990 }
32991
32992 return pte_offset_kernel(pmd, 0);
32993 }
32994
32995+static pmd_t * __init one_md_table_init(pgd_t *pgd)
32996+{
32997+ pud_t *pud;
32998+ pmd_t *pmd_table;
32999+
33000+ pud = pud_offset(pgd, 0);
33001+ pmd_table = pmd_offset(pud, 0);
33002+
33003+ return pmd_table;
33004+}
33005+
33006 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
33007 {
33008 int pgd_idx = pgd_index(vaddr);
33009@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33010 int pgd_idx, pmd_idx;
33011 unsigned long vaddr;
33012 pgd_t *pgd;
33013+ pud_t *pud;
33014 pmd_t *pmd;
33015 pte_t *pte = NULL;
33016 unsigned long count = page_table_range_init_count(start, end);
33017@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33018 pgd = pgd_base + pgd_idx;
33019
33020 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
33021- pmd = one_md_table_init(pgd);
33022- pmd = pmd + pmd_index(vaddr);
33023+ pud = pud_offset(pgd, vaddr);
33024+ pmd = pmd_offset(pud, vaddr);
33025+
33026+#ifdef CONFIG_X86_PAE
33027+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33028+#endif
33029+
33030 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
33031 pmd++, pmd_idx++) {
33032 pte = page_table_kmap_check(one_page_table_init(pmd),
33033@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33034 }
33035 }
33036
33037-static inline int is_kernel_text(unsigned long addr)
33038+static inline int is_kernel_text(unsigned long start, unsigned long end)
33039 {
33040- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
33041- return 1;
33042- return 0;
33043+ if ((start >= ktla_ktva((unsigned long)_etext) ||
33044+ end <= ktla_ktva((unsigned long)_stext)) &&
33045+ (start >= ktla_ktva((unsigned long)_einittext) ||
33046+ end <= ktla_ktva((unsigned long)_sinittext)) &&
33047+
33048+#ifdef CONFIG_ACPI_SLEEP
33049+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
33050+#endif
33051+
33052+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
33053+ return 0;
33054+ return 1;
33055 }
33056
33057 /*
33058@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
33059 unsigned long last_map_addr = end;
33060 unsigned long start_pfn, end_pfn;
33061 pgd_t *pgd_base = swapper_pg_dir;
33062- int pgd_idx, pmd_idx, pte_ofs;
33063+ unsigned int pgd_idx, pmd_idx, pte_ofs;
33064 unsigned long pfn;
33065 pgd_t *pgd;
33066+ pud_t *pud;
33067 pmd_t *pmd;
33068 pte_t *pte;
33069 unsigned pages_2m, pages_4k;
33070@@ -291,8 +295,13 @@ repeat:
33071 pfn = start_pfn;
33072 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33073 pgd = pgd_base + pgd_idx;
33074- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
33075- pmd = one_md_table_init(pgd);
33076+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
33077+ pud = pud_offset(pgd, 0);
33078+ pmd = pmd_offset(pud, 0);
33079+
33080+#ifdef CONFIG_X86_PAE
33081+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33082+#endif
33083
33084 if (pfn >= end_pfn)
33085 continue;
33086@@ -304,14 +313,13 @@ repeat:
33087 #endif
33088 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
33089 pmd++, pmd_idx++) {
33090- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
33091+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
33092
33093 /*
33094 * Map with big pages if possible, otherwise
33095 * create normal page tables:
33096 */
33097 if (use_pse) {
33098- unsigned int addr2;
33099 pgprot_t prot = PAGE_KERNEL_LARGE;
33100 /*
33101 * first pass will use the same initial
33102@@ -322,11 +330,7 @@ repeat:
33103 _PAGE_PSE);
33104
33105 pfn &= PMD_MASK >> PAGE_SHIFT;
33106- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
33107- PAGE_OFFSET + PAGE_SIZE-1;
33108-
33109- if (is_kernel_text(addr) ||
33110- is_kernel_text(addr2))
33111+ if (is_kernel_text(address, address + PMD_SIZE))
33112 prot = PAGE_KERNEL_LARGE_EXEC;
33113
33114 pages_2m++;
33115@@ -343,7 +347,7 @@ repeat:
33116 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33117 pte += pte_ofs;
33118 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
33119- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
33120+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
33121 pgprot_t prot = PAGE_KERNEL;
33122 /*
33123 * first pass will use the same initial
33124@@ -351,7 +355,7 @@ repeat:
33125 */
33126 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
33127
33128- if (is_kernel_text(addr))
33129+ if (is_kernel_text(address, address + PAGE_SIZE))
33130 prot = PAGE_KERNEL_EXEC;
33131
33132 pages_4k++;
33133@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
33134
33135 pud = pud_offset(pgd, va);
33136 pmd = pmd_offset(pud, va);
33137- if (!pmd_present(*pmd))
33138+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
33139 break;
33140
33141 /* should not be large page here */
33142@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
33143
33144 static void __init pagetable_init(void)
33145 {
33146- pgd_t *pgd_base = swapper_pg_dir;
33147-
33148- permanent_kmaps_init(pgd_base);
33149+ permanent_kmaps_init(swapper_pg_dir);
33150 }
33151
33152-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
33153+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL);
33154 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33155
33156 /* user-defined highmem size */
33157@@ -787,10 +789,10 @@ void __init mem_init(void)
33158 ((unsigned long)&__init_end -
33159 (unsigned long)&__init_begin) >> 10,
33160
33161- (unsigned long)&_etext, (unsigned long)&_edata,
33162- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
33163+ (unsigned long)&_sdata, (unsigned long)&_edata,
33164+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
33165
33166- (unsigned long)&_text, (unsigned long)&_etext,
33167+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
33168 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
33169
33170 /*
33171@@ -884,6 +886,7 @@ void set_kernel_text_rw(void)
33172 if (!kernel_set_to_readonly)
33173 return;
33174
33175+ start = ktla_ktva(start);
33176 pr_debug("Set kernel text: %lx - %lx for read write\n",
33177 start, start+size);
33178
33179@@ -898,6 +901,7 @@ void set_kernel_text_ro(void)
33180 if (!kernel_set_to_readonly)
33181 return;
33182
33183+ start = ktla_ktva(start);
33184 pr_debug("Set kernel text: %lx - %lx for read only\n",
33185 start, start+size);
33186
33187@@ -926,6 +930,7 @@ void mark_rodata_ro(void)
33188 unsigned long start = PFN_ALIGN(_text);
33189 unsigned long size = PFN_ALIGN(_etext) - start;
33190
33191+ start = ktla_ktva(start);
33192 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33193 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33194 size >> 10);
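
Aside, not part of the patch: the rewritten is_kernel_text() above turns a point query into a range query; it returns 1 unless [start, end) misses every protected region, where "missing" is the usual negation of half-open interval overlap. The building block in isolation:

#include <stdbool.h>

/* half-open intervals [a0, a1) and [b0, b1) overlap iff each one
 * starts before the other ends */
static bool overlaps(unsigned long a0, unsigned long a1,
                     unsigned long b0, unsigned long b1)
{
	return a0 < b1 && b0 < a1;
}

Each "start >= region_end || end <= region_start" clause in the patched function is essentially !overlaps(start, end, region_start, region_end).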
33195diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33196index 30eb05a..ae671ac 100644
33197--- a/arch/x86/mm/init_64.c
33198+++ b/arch/x86/mm/init_64.c
33199@@ -150,7 +150,7 @@ early_param("gbpages", parse_direct_gbpages_on);
33200 * around without checking the pgd every time.
33201 */
33202
33203-pteval_t __supported_pte_mask __read_mostly = ~0;
33204+pteval_t __supported_pte_mask __read_only = ~_PAGE_NX;
33205 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33206
33207 int force_personality32;
33208@@ -183,7 +183,12 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33209
33210 for (address = start; address <= end; address += PGDIR_SIZE) {
33211 const pgd_t *pgd_ref = pgd_offset_k(address);
33212+
33213+#ifdef CONFIG_PAX_PER_CPU_PGD
33214+ unsigned long cpu;
33215+#else
33216 struct page *page;
33217+#endif
33218
33219 /*
33220 * When it is called after memory hot remove, pgd_none()
33221@@ -194,6 +199,25 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33222 continue;
33223
33224 spin_lock(&pgd_lock);
33225+
33226+#ifdef CONFIG_PAX_PER_CPU_PGD
33227+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33228+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33229+
33230+ if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33231+ BUG_ON(pgd_page_vaddr(*pgd)
33232+ != pgd_page_vaddr(*pgd_ref));
33233+
33234+ if (removed) {
33235+ if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
33236+ pgd_clear(pgd);
33237+ } else {
33238+ if (pgd_none(*pgd))
33239+ set_pgd(pgd, *pgd_ref);
33240+ }
33241+
33242+ pgd = pgd_offset_cpu(cpu, kernel, address);
33243+#else
33244 list_for_each_entry(page, &pgd_list, lru) {
33245 pgd_t *pgd;
33246 spinlock_t *pgt_lock;
33247@@ -202,6 +226,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33248 /* the pgt_lock only for Xen */
33249 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33250 spin_lock(pgt_lock);
33251+#endif
33252
33253 if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33254 BUG_ON(pgd_page_vaddr(*pgd)
33255@@ -215,7 +240,10 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33256 set_pgd(pgd, *pgd_ref);
33257 }
33258
33259+#ifndef CONFIG_PAX_PER_CPU_PGD
33260 spin_unlock(pgt_lock);
33261+#endif
33262+
33263 }
33264 spin_unlock(&pgd_lock);
33265 }
33266@@ -248,7 +276,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33267 {
33268 if (pgd_none(*pgd)) {
33269 pud_t *pud = (pud_t *)spp_getpage();
33270- pgd_populate(&init_mm, pgd, pud);
33271+ pgd_populate_kernel(&init_mm, pgd, pud);
33272 if (pud != pud_offset(pgd, 0))
33273 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33274 pud, pud_offset(pgd, 0));
33275@@ -260,7 +288,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33276 {
33277 if (pud_none(*pud)) {
33278 pmd_t *pmd = (pmd_t *) spp_getpage();
33279- pud_populate(&init_mm, pud, pmd);
33280+ pud_populate_kernel(&init_mm, pud, pmd);
33281 if (pmd != pmd_offset(pud, 0))
33282 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33283 pmd, pmd_offset(pud, 0));
33284@@ -289,7 +317,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33285 pmd = fill_pmd(pud, vaddr);
33286 pte = fill_pte(pmd, vaddr);
33287
33288+ pax_open_kernel();
33289 set_pte(pte, new_pte);
33290+ pax_close_kernel();
33291
33292 /*
33293 * It's enough to flush this one mapping.
33294@@ -351,14 +381,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33295 pgd = pgd_offset_k((unsigned long)__va(phys));
33296 if (pgd_none(*pgd)) {
33297 pud = (pud_t *) spp_getpage();
33298- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33299- _PAGE_USER));
33300+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33301 }
33302 pud = pud_offset(pgd, (unsigned long)__va(phys));
33303 if (pud_none(*pud)) {
33304 pmd = (pmd_t *) spp_getpage();
33305- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33306- _PAGE_USER));
33307+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33308 }
33309 pmd = pmd_offset(pud, phys);
33310 BUG_ON(!pmd_none(*pmd));
33311@@ -599,7 +627,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33312 prot);
33313
33314 spin_lock(&init_mm.page_table_lock);
33315- pud_populate(&init_mm, pud, pmd);
33316+ pud_populate_kernel(&init_mm, pud, pmd);
33317 spin_unlock(&init_mm.page_table_lock);
33318 }
33319 __flush_tlb_all();
33320@@ -640,7 +668,7 @@ kernel_physical_mapping_init(unsigned long start,
33321 page_size_mask);
33322
33323 spin_lock(&init_mm.page_table_lock);
33324- pgd_populate(&init_mm, pgd, pud);
33325+ pgd_populate_kernel(&init_mm, pgd, pud);
33326 spin_unlock(&init_mm.page_table_lock);
33327 pgd_changed = true;
33328 }
33329diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33330index 9ca35fc..4b2b7b7 100644
33331--- a/arch/x86/mm/iomap_32.c
33332+++ b/arch/x86/mm/iomap_32.c
33333@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33334 type = kmap_atomic_idx_push();
33335 idx = type + KM_TYPE_NR * smp_processor_id();
33336 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33337+
33338+ pax_open_kernel();
33339 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33340+ pax_close_kernel();
33341+
33342 arch_flush_lazy_mmu_mode();
33343
33344 return (void *)vaddr;
33345diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33346index fdf617c..b9e85bc 100644
33347--- a/arch/x86/mm/ioremap.c
33348+++ b/arch/x86/mm/ioremap.c
33349@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
33350 unsigned long i;
33351
33352 for (i = 0; i < nr_pages; ++i)
33353- if (pfn_valid(start_pfn + i) &&
33354- !PageReserved(pfn_to_page(start_pfn + i)))
33355+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
33356+ !PageReserved(pfn_to_page(start_pfn + i))))
33357 return 1;
33358
33359 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
33360@@ -283,7 +283,7 @@ EXPORT_SYMBOL(ioremap_prot);
33361 *
33362 * Caller must ensure there is only one unmapping for the same pointer.
33363 */
33364-void iounmap(volatile void __iomem *addr)
33365+void iounmap(const volatile void __iomem *addr)
33366 {
33367 struct vm_struct *p, *o;
33368
33369@@ -332,30 +332,29 @@ EXPORT_SYMBOL(iounmap);
33370 */
33371 void *xlate_dev_mem_ptr(phys_addr_t phys)
33372 {
33373- void *addr;
33374- unsigned long start = phys & PAGE_MASK;
33375-
33376 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33377- if (page_is_ram(start >> PAGE_SHIFT))
33378+ if (page_is_ram(phys >> PAGE_SHIFT))
33379+#ifdef CONFIG_HIGHMEM
33380+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33381+#endif
33382 return __va(phys);
33383
33384- addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
33385- if (addr)
33386- addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
33387-
33388- return addr;
33389+ return (void __force *)ioremap_cache(phys, PAGE_SIZE);
33390 }
33391
33392 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
33393 {
33394 if (page_is_ram(phys >> PAGE_SHIFT))
33395+#ifdef CONFIG_HIGHMEM
33396+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33397+#endif
33398 return;
33399
33400 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33401 return;
33402 }
33403
33404-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33405+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33406
33407 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33408 {
33409@@ -391,8 +390,7 @@ void __init early_ioremap_init(void)
33410 early_ioremap_setup();
33411
33412 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33413- memset(bm_pte, 0, sizeof(bm_pte));
33414- pmd_populate_kernel(&init_mm, pmd, bm_pte);
33415+ pmd_populate_user(&init_mm, pmd, bm_pte);
33416
33417 /*
33418 * The boot-ioremap range spans multiple pmds, for which
33419diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33420index b4f2e7e..96c9c3e 100644
33421--- a/arch/x86/mm/kmemcheck/kmemcheck.c
33422+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33423@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33424 * memory (e.g. tracked pages)? For now, we need this to avoid
33425 * invoking kmemcheck for PnP BIOS calls.
33426 */
33427- if (regs->flags & X86_VM_MASK)
33428+ if (v8086_mode(regs))
33429 return false;
33430- if (regs->cs != __KERNEL_CS)
33431+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
33432 return false;
33433
33434 pte = kmemcheck_pte_lookup(address);
33435diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
33436index df4552b..12c129c 100644
33437--- a/arch/x86/mm/mmap.c
33438+++ b/arch/x86/mm/mmap.c
33439@@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void)
33440 * Leave an at least ~128 MB hole with possible stack randomization.
33441 */
33442 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
33443-#define MAX_GAP (TASK_SIZE/6*5)
33444+#define MAX_GAP (pax_task_size/6*5)
33445
33446 static int mmap_is_legacy(void)
33447 {
33448@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
33449 return rnd << PAGE_SHIFT;
33450 }
33451
33452-static unsigned long mmap_base(void)
33453+static unsigned long mmap_base(struct mm_struct *mm)
33454 {
33455 unsigned long gap = rlimit(RLIMIT_STACK);
33456+ unsigned long pax_task_size = TASK_SIZE;
33457+
33458+#ifdef CONFIG_PAX_SEGMEXEC
33459+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33460+ pax_task_size = SEGMEXEC_TASK_SIZE;
33461+#endif
33462
33463 if (gap < MIN_GAP)
33464 gap = MIN_GAP;
33465 else if (gap > MAX_GAP)
33466 gap = MAX_GAP;
33467
33468- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
33469+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
33470 }
33471
33472 /*
33473 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
33474 * does, but not when emulating X86_32
33475 */
33476-static unsigned long mmap_legacy_base(void)
33477+static unsigned long mmap_legacy_base(struct mm_struct *mm)
33478 {
33479- if (mmap_is_ia32())
33480+ if (mmap_is_ia32()) {
33481+
33482+#ifdef CONFIG_PAX_SEGMEXEC
33483+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33484+ return SEGMEXEC_TASK_UNMAPPED_BASE;
33485+ else
33486+#endif
33487+
33488 return TASK_UNMAPPED_BASE;
33489- else
33490+ } else
33491 return TASK_UNMAPPED_BASE + mmap_rnd();
33492 }
33493
33494@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
33495 */
33496 void arch_pick_mmap_layout(struct mm_struct *mm)
33497 {
33498- mm->mmap_legacy_base = mmap_legacy_base();
33499- mm->mmap_base = mmap_base();
33500+ mm->mmap_legacy_base = mmap_legacy_base(mm);
33501+ mm->mmap_base = mmap_base(mm);
33502+
33503+#ifdef CONFIG_PAX_RANDMMAP
33504+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
33505+ mm->mmap_legacy_base += mm->delta_mmap;
33506+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
33507+ }
33508+#endif
33509
33510 if (mmap_is_legacy()) {
33511 mm->mmap_base = mm->mmap_legacy_base;
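
Aside, not part of the patch: mmap_base() above clamps the stack gap between MIN_GAP (128 MiB plus stack randomization) and MAX_GAP (5/6 of the — possibly SEGMEXEC-reduced — task size) before subtracting it and the mmap randomization from the top of the address space. The arithmetic, as a self-contained sketch (hypothetical names, 4 KiB pages assumed):

#define SKETCH_PAGE_SIZE 4096UL
#define SKETCH_PAGE_ALIGN(x) (((x) + SKETCH_PAGE_SIZE - 1) & ~(SKETCH_PAGE_SIZE - 1))

static unsigned long sketch_mmap_base(unsigned long task_size,
                                      unsigned long stack_rlimit,
                                      unsigned long stack_rnd,
                                      unsigned long mmap_rnd)
{
	unsigned long min_gap = 128UL * 1024 * 1024 + stack_rnd;
	unsigned long max_gap = task_size / 6 * 5;
	unsigned long gap = stack_rlimit;

	if (gap < min_gap)
		gap = min_gap;
	else if (gap > max_gap)
		gap = max_gap;

	return SKETCH_PAGE_ALIGN(task_size - gap - mmap_rnd);
}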
33512diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
33513index 0057a7a..95c7edd 100644
33514--- a/arch/x86/mm/mmio-mod.c
33515+++ b/arch/x86/mm/mmio-mod.c
33516@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
33517 break;
33518 default:
33519 {
33520- unsigned char *ip = (unsigned char *)instptr;
33521+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
33522 my_trace->opcode = MMIO_UNKNOWN_OP;
33523 my_trace->width = 0;
33524 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
33525@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
33526 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33527 void __iomem *addr)
33528 {
33529- static atomic_t next_id;
33530+ static atomic_unchecked_t next_id;
33531 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
33532 /* These are page-unaligned. */
33533 struct mmiotrace_map map = {
33534@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33535 .private = trace
33536 },
33537 .phys = offset,
33538- .id = atomic_inc_return(&next_id)
33539+ .id = atomic_inc_return_unchecked(&next_id)
33540 };
33541 map.map_id = trace->id;
33542
33543@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
33544 ioremap_trace_core(offset, size, addr);
33545 }
33546
33547-static void iounmap_trace_core(volatile void __iomem *addr)
33548+static void iounmap_trace_core(const volatile void __iomem *addr)
33549 {
33550 struct mmiotrace_map map = {
33551 .phys = 0,
33552@@ -328,7 +328,7 @@ not_enabled:
33553 }
33554 }
33555
33556-void mmiotrace_iounmap(volatile void __iomem *addr)
33557+void mmiotrace_iounmap(const volatile void __iomem *addr)
33558 {
33559 might_sleep();
33560 if (is_enabled()) /* recheck and proper locking in *_core() */
33561diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
33562index 1a88370..3f598b5 100644
33563--- a/arch/x86/mm/numa.c
33564+++ b/arch/x86/mm/numa.c
33565@@ -499,7 +499,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
33566 }
33567 }
33568
33569-static int __init numa_register_memblks(struct numa_meminfo *mi)
33570+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
33571 {
33572 unsigned long uninitialized_var(pfn_align);
33573 int i, nid;
33574diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
33575index 536ea2f..f42c293 100644
33576--- a/arch/x86/mm/pageattr.c
33577+++ b/arch/x86/mm/pageattr.c
33578@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33579 */
33580 #ifdef CONFIG_PCI_BIOS
33581 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
33582- pgprot_val(forbidden) |= _PAGE_NX;
33583+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33584 #endif
33585
33586 /*
33587@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33588 * Does not cover __inittext since that is gone later on. On
33589 * 64bit we do not enforce !NX on the low mapping
33590 */
33591- if (within(address, (unsigned long)_text, (unsigned long)_etext))
33592- pgprot_val(forbidden) |= _PAGE_NX;
33593+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
33594+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33595
33596+#ifdef CONFIG_DEBUG_RODATA
33597 /*
33598 * The .rodata section needs to be read-only. Using the pfn
33599 * catches all aliases.
33600@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33601 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
33602 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
33603 pgprot_val(forbidden) |= _PAGE_RW;
33604+#endif
33605
33606 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
33607 /*
33608@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33609 }
33610 #endif
33611
33612+#ifdef CONFIG_PAX_KERNEXEC
33613+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
33614+ pgprot_val(forbidden) |= _PAGE_RW;
33615+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33616+ }
33617+#endif
33618+
33619 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
33620
33621 return prot;
33622@@ -440,23 +449,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
33623 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
33624 {
33625 /* change init_mm */
33626+ pax_open_kernel();
33627 set_pte_atomic(kpte, pte);
33628+
33629 #ifdef CONFIG_X86_32
33630 if (!SHARED_KERNEL_PMD) {
33631+
33632+#ifdef CONFIG_PAX_PER_CPU_PGD
33633+ unsigned long cpu;
33634+#else
33635 struct page *page;
33636+#endif
33637
33638+#ifdef CONFIG_PAX_PER_CPU_PGD
33639+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33640+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
33641+#else
33642 list_for_each_entry(page, &pgd_list, lru) {
33643- pgd_t *pgd;
33644+ pgd_t *pgd = (pgd_t *)page_address(page);
33645+#endif
33646+
33647 pud_t *pud;
33648 pmd_t *pmd;
33649
33650- pgd = (pgd_t *)page_address(page) + pgd_index(address);
33651+ pgd += pgd_index(address);
33652 pud = pud_offset(pgd, address);
33653 pmd = pmd_offset(pud, address);
33654 set_pte_atomic((pte_t *)pmd, pte);
33655 }
33656 }
33657 #endif
33658+ pax_close_kernel();
33659 }
33660
33661 static int
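
Aside, a sketch rather than the kernel's code: static_protections() above accumulates a "forbidden" bitmask and strips it from the requested protection at the end; the KERNEXEC hunk simply adds RW and NX to that mask for kernel text, pinning it read-only and executable. The skeleton of the pattern, with illustrative bit values rather than the kernel's:

#include <stdint.h>
#include <stdbool.h>

#define P_RW 0x1ULL	/* illustrative stand-ins, not _PAGE_RW/_PAGE_NX */
#define P_NX 0x2ULL

static uint64_t clamp_protections(uint64_t prot, bool in_kernel_text)
{
	uint64_t forbidden = 0;

	if (in_kernel_text)			/* the KERNEXEC case above */
		forbidden |= P_RW | P_NX;	/* text stays RO and executable */

	return prot & ~forbidden;
}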
33662diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33663index 7ac6869..c0ba541 100644
33664--- a/arch/x86/mm/pat.c
33665+++ b/arch/x86/mm/pat.c
33666@@ -89,7 +89,7 @@ static inline enum page_cache_mode get_page_memtype(struct page *pg)
33667 unsigned long pg_flags = pg->flags & _PGMT_MASK;
33668
33669 if (pg_flags == _PGMT_DEFAULT)
33670- return -1;
33671+ return _PAGE_CACHE_MODE_NUM;
33672 else if (pg_flags == _PGMT_WC)
33673 return _PAGE_CACHE_MODE_WC;
33674 else if (pg_flags == _PGMT_UC_MINUS)
33675@@ -346,7 +346,7 @@ static int reserve_ram_pages_type(u64 start, u64 end,
33676
33677 page = pfn_to_page(pfn);
33678 type = get_page_memtype(page);
33679- if (type != -1) {
33680+ if (type != _PAGE_CACHE_MODE_NUM) {
33681 pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
33682 start, end - 1, type, req_type);
33683 if (new_type)
33684@@ -498,7 +498,7 @@ int free_memtype(u64 start, u64 end)
33685
33686 if (!entry) {
33687 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33688- current->comm, current->pid, start, end - 1);
33689+ current->comm, task_pid_nr(current), start, end - 1);
33690 return -EINVAL;
33691 }
33692
33693@@ -532,10 +532,10 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
33694 page = pfn_to_page(paddr >> PAGE_SHIFT);
33695 rettype = get_page_memtype(page);
33696 /*
33697- * -1 from get_page_memtype() implies RAM page is in its
33698+ * _PAGE_CACHE_MODE_NUM from get_page_memtype() implies RAM page is in its
33699 * default state and not reserved, and hence of type WB
33700 */
33701- if (rettype == -1)
33702+ if (rettype == _PAGE_CACHE_MODE_NUM)
33703 rettype = _PAGE_CACHE_MODE_WB;
33704
33705 return rettype;
33706@@ -628,8 +628,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33707
33708 while (cursor < to) {
33709 if (!devmem_is_allowed(pfn)) {
33710- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
33711- current->comm, from, to - 1);
33712+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
33713+ current->comm, from, to - 1, cursor);
33714 return 0;
33715 }
33716 cursor += PAGE_SIZE;
33717@@ -700,7 +700,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
33718 if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
33719 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33720 "for [mem %#010Lx-%#010Lx]\n",
33721- current->comm, current->pid,
33722+ current->comm, task_pid_nr(current),
33723 cattr_name(pcm),
33724 base, (unsigned long long)(base + size-1));
33725 return -EINVAL;
33726@@ -735,7 +735,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33727 pcm = lookup_memtype(paddr);
33728 if (want_pcm != pcm) {
33729 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33730- current->comm, current->pid,
33731+ current->comm, task_pid_nr(current),
33732 cattr_name(want_pcm),
33733 (unsigned long long)paddr,
33734 (unsigned long long)(paddr + size - 1),
33735@@ -757,7 +757,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33736 free_memtype(paddr, paddr + size);
33737 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33738 " for [mem %#010Lx-%#010Lx], got %s\n",
33739- current->comm, current->pid,
33740+ current->comm, task_pid_nr(current),
33741 cattr_name(want_pcm),
33742 (unsigned long long)paddr,
33743 (unsigned long long)(paddr + size - 1),
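Two recurring substitutions in the pat.c hunks above: get_page_memtype() now returns enum page_cache_mode, so the old -1 sentinel is replaced with the out-of-range enumerator _PAGE_CACHE_MODE_NUM, and the diagnostics print task_pid_nr(current) instead of dereferencing current->pid directly (the helper reports the PID as seen from the init namespace). A small self-contained sketch of the sentinel pattern; the enumerators mirror the kernel's names but the program is standalone:

    #include <stdio.h>

    enum page_cache_mode {
            _PAGE_CACHE_MODE_WB,
            _PAGE_CACHE_MODE_WC,
            _PAGE_CACHE_MODE_UC_MINUS,
            _PAGE_CACHE_MODE_NUM    /* one past the last real mode */
    };

    static enum page_cache_mode get_memtype(int reserved)
    {
            /* Unreserved RAM carries no recorded type: report the sentinel
             * instead of -1, which no longer fits the enum return type. */
            return reserved ? _PAGE_CACHE_MODE_WC : _PAGE_CACHE_MODE_NUM;
    }

    int main(void)
    {
            if (get_memtype(0) == _PAGE_CACHE_MODE_NUM)
                    printf("default state, treat as WB\n");
            return 0;
    }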
33744diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33745index 6582adc..fcc5d0b 100644
33746--- a/arch/x86/mm/pat_rbtree.c
33747+++ b/arch/x86/mm/pat_rbtree.c
33748@@ -161,7 +161,7 @@ success:
33749
33750 failure:
33751 printk(KERN_INFO "%s:%d conflicting memory types "
33752- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33753+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33754 end, cattr_name(found_type), cattr_name(match->type));
33755 return -EBUSY;
33756 }
33757diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33758index 9f0614d..92ae64a 100644
33759--- a/arch/x86/mm/pf_in.c
33760+++ b/arch/x86/mm/pf_in.c
33761@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33762 int i;
33763 enum reason_type rv = OTHERS;
33764
33765- p = (unsigned char *)ins_addr;
33766+ p = (unsigned char *)ktla_ktva(ins_addr);
33767 p += skip_prefix(p, &prf);
33768 p += get_opcode(p, &opcode);
33769
33770@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33771 struct prefix_bits prf;
33772 int i;
33773
33774- p = (unsigned char *)ins_addr;
33775+ p = (unsigned char *)ktla_ktva(ins_addr);
33776 p += skip_prefix(p, &prf);
33777 p += get_opcode(p, &opcode);
33778
33779@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33780 struct prefix_bits prf;
33781 int i;
33782
33783- p = (unsigned char *)ins_addr;
33784+ p = (unsigned char *)ktla_ktva(ins_addr);
33785 p += skip_prefix(p, &prf);
33786 p += get_opcode(p, &opcode);
33787
33788@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33789 struct prefix_bits prf;
33790 int i;
33791
33792- p = (unsigned char *)ins_addr;
33793+ p = (unsigned char *)ktla_ktva(ins_addr);
33794 p += skip_prefix(p, &prf);
33795 p += get_opcode(p, &opcode);
33796 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33797@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33798 struct prefix_bits prf;
33799 int i;
33800
33801- p = (unsigned char *)ins_addr;
33802+ p = (unsigned char *)ktla_ktva(ins_addr);
33803 p += skip_prefix(p, &prf);
33804 p += get_opcode(p, &opcode);
33805 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
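Every instruction decode in pf_in.c now goes through ktla_ktva(): under KERNEXEC the kernel text segment is rebased, so a raw text address has to be translated back to its data alias before the instruction bytes can be fetched. A sketch of the translation, assuming a constant rebasing delta; the delta value is illustrative only:

    #include <stdio.h>

    /* Hypothetical rebasing offset; the real value depends on the
     * KERNEXEC segment layout. */
    #define KERNEXEC_DELTA  0x10000000UL
    #define ktla_ktva(addr) ((addr) + KERNEXEC_DELTA)

    int main(void)
    {
            unsigned long ins_addr = 0xc1000000UL;  /* example text address */

            /* Decode through the data alias, not the rebased text address. */
            printf("fetch instruction bytes at %#lx\n", ktla_ktva(ins_addr));
            return 0;
    }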
33806diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
33807index 6fb6927..634b0f7 100644
33808--- a/arch/x86/mm/pgtable.c
33809+++ b/arch/x86/mm/pgtable.c
33810@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd)
33811 list_del(&page->lru);
33812 }
33813
33814-#define UNSHARED_PTRS_PER_PGD \
33815- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33816+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33817+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
33818
33819+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
33820+{
33821+ unsigned int count = USER_PGD_PTRS;
33822
33823+ if (!pax_user_shadow_base)
33824+ return;
33825+
33826+ while (count--)
33827+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
33828+}
33829+#endif
33830+
33831+#ifdef CONFIG_PAX_PER_CPU_PGD
33832+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
33833+{
33834+ unsigned int count = USER_PGD_PTRS;
33835+
33836+ while (count--) {
33837+ pgd_t pgd;
33838+
33839+#ifdef CONFIG_X86_64
33840+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
33841+#else
33842+ pgd = *src++;
33843+#endif
33844+
33845+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33846+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
33847+#endif
33848+
33849+ *dst++ = pgd;
33850+ }
33851+
33852+}
33853+#endif
33854+
33855+#ifdef CONFIG_X86_64
33856+#define pxd_t pud_t
33857+#define pyd_t pgd_t
33858+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
33859+#define pgtable_pxd_page_ctor(page) true
33860+#define pgtable_pxd_page_dtor(page) do {} while (0)
33861+#define pxd_free(mm, pud) pud_free((mm), (pud))
33862+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
33863+#define pyd_offset(mm, address) pgd_offset((mm), (address))
33864+#define PYD_SIZE PGDIR_SIZE
33865+#else
33866+#define pxd_t pmd_t
33867+#define pyd_t pud_t
33868+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
33869+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
33870+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
33871+#define pxd_free(mm, pud) pmd_free((mm), (pud))
33872+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
33873+#define pyd_offset(mm, address) pud_offset((mm), (address))
33874+#define PYD_SIZE PUD_SIZE
33875+#endif
33876+
33877+#ifdef CONFIG_PAX_PER_CPU_PGD
33878+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
33879+static inline void pgd_dtor(pgd_t *pgd) {}
33880+#else
33881 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
33882 {
33883 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
33884@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
33885 pgd_list_del(pgd);
33886 spin_unlock(&pgd_lock);
33887 }
33888+#endif
33889
33890 /*
33891 * List of all pgd's needed for non-PAE so it can invalidate entries
33892@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
33893 * -- nyc
33894 */
33895
33896-#ifdef CONFIG_X86_PAE
33897+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
33898 /*
33899 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
33900 * updating the top-level pagetable entries to guarantee the
33901@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
33902 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
33903 * and initialize the kernel pmds here.
33904 */
33905-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
33906+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33907
33908 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33909 {
33910@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33911 */
33912 flush_tlb_mm(mm);
33913 }
33914+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
33915+#define PREALLOCATED_PXDS USER_PGD_PTRS
33916 #else /* !CONFIG_X86_PAE */
33917
33918 /* No need to prepopulate any pagetable entries in non-PAE modes. */
33919-#define PREALLOCATED_PMDS 0
33920+#define PREALLOCATED_PXDS 0
33921
33922 #endif /* CONFIG_X86_PAE */
33923
33924-static void free_pmds(pmd_t *pmds[])
33925+static void free_pxds(pxd_t *pxds[])
33926 {
33927 int i;
33928
33929- for(i = 0; i < PREALLOCATED_PMDS; i++)
33930- if (pmds[i]) {
33931- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
33932- free_page((unsigned long)pmds[i]);
33933+ for(i = 0; i < PREALLOCATED_PXDS; i++)
33934+ if (pxds[i]) {
33935+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
33936+ free_page((unsigned long)pxds[i]);
33937 }
33938 }
33939
33940-static int preallocate_pmds(pmd_t *pmds[])
33941+static int preallocate_pxds(pxd_t *pxds[])
33942 {
33943 int i;
33944 bool failed = false;
33945
33946- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33947- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
33948- if (!pmd)
33949+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33950+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
33951+ if (!pxd)
33952 failed = true;
33953- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
33954- free_page((unsigned long)pmd);
33955- pmd = NULL;
33956+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
33957+ free_page((unsigned long)pxd);
33958+ pxd = NULL;
33959 failed = true;
33960 }
33961- pmds[i] = pmd;
33962+ pxds[i] = pxd;
33963 }
33964
33965 if (failed) {
33966- free_pmds(pmds);
33967+ free_pxds(pxds);
33968 return -ENOMEM;
33969 }
33970
33971@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[])
33972 * preallocate which never got a corresponding vma will need to be
33973 * freed manually.
33974 */
33975-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
33976+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
33977 {
33978 int i;
33979
33980- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33981+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33982 pgd_t pgd = pgdp[i];
33983
33984 if (pgd_val(pgd) != 0) {
33985- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
33986+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
33987
33988- pgdp[i] = native_make_pgd(0);
33989+ set_pgd(pgdp + i, native_make_pgd(0));
33990
33991- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
33992- pmd_free(mm, pmd);
33993+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
33994+ pxd_free(mm, pxd);
33995 }
33996 }
33997 }
33998
33999-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
34000+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
34001 {
34002- pud_t *pud;
34003+ pyd_t *pyd;
34004 int i;
34005
34006- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
34007+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
34008 return;
34009
34010- pud = pud_offset(pgd, 0);
34011-
34012- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
34013- pmd_t *pmd = pmds[i];
34014+#ifdef CONFIG_X86_64
34015+ pyd = pyd_offset(mm, 0L);
34016+#else
34017+ pyd = pyd_offset(pgd, 0L);
34018+#endif
34019
34020+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
34021+ pxd_t *pxd = pxds[i];
34022 if (i >= KERNEL_PGD_BOUNDARY)
34023- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
34024- sizeof(pmd_t) * PTRS_PER_PMD);
34025+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
34026+ sizeof(pxd_t) * PTRS_PER_PMD);
34027
34028- pud_populate(mm, pud, pmd);
34029+ pyd_populate(mm, pyd, pxd);
34030 }
34031 }
34032
34033 pgd_t *pgd_alloc(struct mm_struct *mm)
34034 {
34035 pgd_t *pgd;
34036- pmd_t *pmds[PREALLOCATED_PMDS];
34037+ pxd_t *pxds[PREALLOCATED_PXDS];
34038
34039 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
34040
34041@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34042
34043 mm->pgd = pgd;
34044
34045- if (preallocate_pmds(pmds) != 0)
34046+ if (preallocate_pxds(pxds) != 0)
34047 goto out_free_pgd;
34048
34049 if (paravirt_pgd_alloc(mm) != 0)
34050- goto out_free_pmds;
34051+ goto out_free_pxds;
34052
34053 /*
34054 * Make sure that pre-populating the pmds is atomic with
34055@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34056 spin_lock(&pgd_lock);
34057
34058 pgd_ctor(mm, pgd);
34059- pgd_prepopulate_pmd(mm, pgd, pmds);
34060+ pgd_prepopulate_pxd(mm, pgd, pxds);
34061
34062 spin_unlock(&pgd_lock);
34063
34064 return pgd;
34065
34066-out_free_pmds:
34067- free_pmds(pmds);
34068+out_free_pxds:
34069+ free_pxds(pxds);
34070 out_free_pgd:
34071 free_page((unsigned long)pgd);
34072 out:
34073@@ -313,7 +380,7 @@ out:
34074
34075 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
34076 {
34077- pgd_mop_up_pmds(mm, pgd);
34078+ pgd_mop_up_pxds(mm, pgd);
34079 pgd_dtor(pgd);
34080 paravirt_pgd_free(mm, pgd);
34081 free_page((unsigned long)pgd);
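The pgtable.c rework above folds two page-table level pairs into one code path: on 64-bit the preallocation logic handles puds hanging off the pgd, on 32-bit pmds hanging off the pud, and the pxd_t/pyd_t macro aliases let free_pxds(), preallocate_pxds() and friends be written once. A standalone model of the aliasing trick (SIXTYFOUR is a stand-in for CONFIG_X86_64):

    #include <stdio.h>

    #ifdef SIXTYFOUR
    typedef struct { unsigned long v; } pud_t;
    #define pxd_t pud_t     /* the level one below the top (pgd) */
    #else
    typedef struct { unsigned long v; } pmd_t;
    #define pxd_t pmd_t     /* the level one below the top (pud) */
    #endif

    static void prepopulate(pxd_t *slots, int n)
    {
            /* Identical loop body no matter which level pxd_t names. */
            for (int i = 0; i < n; i++)
                    slots[i].v = 0;
    }

    int main(void)
    {
            pxd_t s[4];

            prepopulate(s, 4);
            printf("prepopulated %zu-byte entries\n", sizeof(pxd_t));
            return 0;
    }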
34082diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
34083index 75cc097..79a097f 100644
34084--- a/arch/x86/mm/pgtable_32.c
34085+++ b/arch/x86/mm/pgtable_32.c
34086@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
34087 return;
34088 }
34089 pte = pte_offset_kernel(pmd, vaddr);
34090+
34091+ pax_open_kernel();
34092 if (pte_val(pteval))
34093 set_pte_at(&init_mm, vaddr, pte, pteval);
34094 else
34095 pte_clear(&init_mm, vaddr, pte);
34096+ pax_close_kernel();
34097
34098 /*
34099 * It's enough to flush this one mapping.
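set_pte_vaddr() above gains the same pax_open_kernel()/pax_close_kernel() bracket: KERNEXEC keeps kernel page tables read-only, so every legitimate write has to open an explicit window first. A toy model of the bracket, assuming only that the real helpers toggle hardware write protection (e.g. CR0.WP), which this stub merely simulates with a flag:

    #include <assert.h>
    #include <stdio.h>

    static int kernel_writable;

    static void pax_open_kernel(void)  { kernel_writable = 1; }
    static void pax_close_kernel(void) { kernel_writable = 0; }

    static unsigned long pte;

    static void set_pte_checked(unsigned long val)
    {
            assert(kernel_writable);    /* writes only inside the bracket */
            pte = val;
    }

    int main(void)
    {
            pax_open_kernel();
            set_pte_checked(0x1234);
            pax_close_kernel();
            printf("pte = %#lx\n", pte);
            return 0;
    }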
34100diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
34101index e666cbb..61788c45 100644
34102--- a/arch/x86/mm/physaddr.c
34103+++ b/arch/x86/mm/physaddr.c
34104@@ -10,7 +10,7 @@
34105 #ifdef CONFIG_X86_64
34106
34107 #ifdef CONFIG_DEBUG_VIRTUAL
34108-unsigned long __phys_addr(unsigned long x)
34109+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34110 {
34111 unsigned long y = x - __START_KERNEL_map;
34112
34113@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
34114 #else
34115
34116 #ifdef CONFIG_DEBUG_VIRTUAL
34117-unsigned long __phys_addr(unsigned long x)
34118+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34119 {
34120 unsigned long phys_addr = x - PAGE_OFFSET;
34121 /* VMALLOC_* aren't constants */
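__phys_addr() is annotated __intentional_overflow(-1), which — going by the size_overflow plugin's conventions — marks its subtraction as deliberately wrapping so the plugin does not flag it. A standalone model of why the math wraps for inputs below the kernel map base:

    #include <stdio.h>
    #include <stdint.h>

    #define START_KERNEL_MAP 0xffffffff80000000ULL

    int main(void)
    {
            uint64_t x = 0xffffffff81234000ULL; /* example kernel address */
            uint64_t y = x - START_KERNEL_MAP;  /* wraps (well-defined) for low x */

            printf("offset into kernel map = %#llx\n", (unsigned long long)y);
            return 0;
    }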
34122diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
34123index 90555bf..f5f1828 100644
34124--- a/arch/x86/mm/setup_nx.c
34125+++ b/arch/x86/mm/setup_nx.c
34126@@ -5,8 +5,10 @@
34127 #include <asm/pgtable.h>
34128 #include <asm/proto.h>
34129
34130+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34131 static int disable_nx;
34132
34133+#ifndef CONFIG_PAX_PAGEEXEC
34134 /*
34135 * noexec = on|off
34136 *
34137@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
34138 return 0;
34139 }
34140 early_param("noexec", noexec_setup);
34141+#endif
34142+
34143+#endif
34144
34145 void x86_configure_nx(void)
34146 {
34147+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34148 if (cpu_has_nx && !disable_nx)
34149 __supported_pte_mask |= _PAGE_NX;
34150 else
34151+#endif
34152 __supported_pte_mask &= ~_PAGE_NX;
34153 }
34154
34155diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
34156index ee61c36..e6fedeb 100644
34157--- a/arch/x86/mm/tlb.c
34158+++ b/arch/x86/mm/tlb.c
34159@@ -48,7 +48,11 @@ void leave_mm(int cpu)
34160 BUG();
34161 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
34162 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
34163+
34164+#ifndef CONFIG_PAX_PER_CPU_PGD
34165 load_cr3(swapper_pg_dir);
34166+#endif
34167+
34168 /*
34169 * This gets called in the idle path where RCU
34170 * functions differently. Tracing normally
34171diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
34172new file mode 100644
34173index 0000000..dace51c
34174--- /dev/null
34175+++ b/arch/x86/mm/uderef_64.c
34176@@ -0,0 +1,37 @@
34177+#include <linux/mm.h>
34178+#include <asm/pgtable.h>
34179+#include <asm/uaccess.h>
34180+
34181+#ifdef CONFIG_PAX_MEMORY_UDEREF
34182+/* PaX: due to the special call convention these functions must
34183+ * - remain leaf functions under all configurations,
34184+ * - never be called directly, only dereferenced from the wrappers.
34185+ */
34186+void __pax_open_userland(void)
34187+{
34188+ unsigned int cpu;
34189+
34190+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34191+ return;
34192+
34193+ cpu = raw_get_cpu();
34194+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34195+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34196+ raw_put_cpu_no_resched();
34197+}
34198+EXPORT_SYMBOL(__pax_open_userland);
34199+
34200+void __pax_close_userland(void)
34201+{
34202+ unsigned int cpu;
34203+
34204+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34205+ return;
34206+
34207+ cpu = raw_get_cpu();
34208+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34209+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34210+ raw_put_cpu_no_resched();
34211+}
34212+EXPORT_SYMBOL(__pax_close_userland);
34213+#endif
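The new uderef_64.c pairs with the tlb.c change above: with per-CPU PGDs, leave_mm() must not reload swapper_pg_dir, and __pax_open_userland()/__pax_close_userland() switch CR3 between the per-CPU user and kernel PGDs instead. Each PGD is tagged with its own PCID, and the write sets CR3 bit 63 (the architectural no-flush bit), so the switch keeps tagged TLB entries. A sketch of how such CR3 values compose; the PCID constants are illustrative:

    #include <stdio.h>
    #include <stdint.h>

    #define PCID_KERNEL   0ULL              /* illustrative context IDs */
    #define PCID_USER     1ULL
    #define PCID_NOFLUSH  (1ULL << 63)      /* architectural CR3 no-flush bit */

    static uint64_t make_cr3(uint64_t pgd_pa, uint64_t pcid)
    {
            /* Physical PGD base, context ID in the low bits, and the
             * no-flush bit so the switch skips the TLB flush. */
            return pgd_pa | pcid | PCID_NOFLUSH;
    }

    int main(void)
    {
            printf("open userland : cr3=%#llx\n",
                   (unsigned long long)make_cr3(0x2000, PCID_USER));
            printf("close userland: cr3=%#llx\n",
                   (unsigned long long)make_cr3(0x1000, PCID_KERNEL));
            return 0;
    }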
34214diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34215index 6440221..f84b5c7 100644
34216--- a/arch/x86/net/bpf_jit.S
34217+++ b/arch/x86/net/bpf_jit.S
34218@@ -9,6 +9,7 @@
34219 */
34220 #include <linux/linkage.h>
34221 #include <asm/dwarf2.h>
34222+#include <asm/alternative-asm.h>
34223
34224 /*
34225 * Calling convention :
34226@@ -38,6 +39,7 @@ sk_load_word_positive_offset:
34227 jle bpf_slow_path_word
34228 mov (SKBDATA,%rsi),%eax
34229 bswap %eax /* ntohl() */
34230+ pax_force_retaddr
34231 ret
34232
34233 sk_load_half:
34234@@ -55,6 +57,7 @@ sk_load_half_positive_offset:
34235 jle bpf_slow_path_half
34236 movzwl (SKBDATA,%rsi),%eax
34237 rol $8,%ax # ntohs()
34238+ pax_force_retaddr
34239 ret
34240
34241 sk_load_byte:
34242@@ -69,6 +72,7 @@ sk_load_byte_positive_offset:
34243 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34244 jle bpf_slow_path_byte
34245 movzbl (SKBDATA,%rsi),%eax
34246+ pax_force_retaddr
34247 ret
34248
34249 /* rsi contains offset and can be scratched */
34250@@ -90,6 +94,7 @@ bpf_slow_path_word:
34251 js bpf_error
34252 mov - MAX_BPF_STACK + 32(%rbp),%eax
34253 bswap %eax
34254+ pax_force_retaddr
34255 ret
34256
34257 bpf_slow_path_half:
34258@@ -98,12 +103,14 @@ bpf_slow_path_half:
34259 mov - MAX_BPF_STACK + 32(%rbp),%ax
34260 rol $8,%ax
34261 movzwl %ax,%eax
34262+ pax_force_retaddr
34263 ret
34264
34265 bpf_slow_path_byte:
34266 bpf_slow_path_common(1)
34267 js bpf_error
34268 movzbl - MAX_BPF_STACK + 32(%rbp),%eax
34269+ pax_force_retaddr
34270 ret
34271
34272 #define sk_negative_common(SIZE) \
34273@@ -126,6 +133,7 @@ sk_load_word_negative_offset:
34274 sk_negative_common(4)
34275 mov (%rax), %eax
34276 bswap %eax
34277+ pax_force_retaddr
34278 ret
34279
34280 bpf_slow_path_half_neg:
34281@@ -137,6 +145,7 @@ sk_load_half_negative_offset:
34282 mov (%rax),%ax
34283 rol $8,%ax
34284 movzwl %ax,%eax
34285+ pax_force_retaddr
34286 ret
34287
34288 bpf_slow_path_byte_neg:
34289@@ -146,6 +155,7 @@ sk_load_byte_negative_offset:
34290 .globl sk_load_byte_negative_offset
34291 sk_negative_common(1)
34292 movzbl (%rax), %eax
34293+ pax_force_retaddr
34294 ret
34295
34296 bpf_error:
34297@@ -156,4 +166,5 @@ bpf_error:
34298 mov - MAX_BPF_STACK + 16(%rbp),%r14
34299 mov - MAX_BPF_STACK + 24(%rbp),%r15
34300 leaveq
34301+ pax_force_retaddr
34302 ret
34303diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34304index 9875143..00f6656 100644
34305--- a/arch/x86/net/bpf_jit_comp.c
34306+++ b/arch/x86/net/bpf_jit_comp.c
34307@@ -13,7 +13,11 @@
34308 #include <linux/if_vlan.h>
34309 #include <asm/cacheflush.h>
34310
34311+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
34312+int bpf_jit_enable __read_only;
34313+#else
34314 int bpf_jit_enable __read_mostly;
34315+#endif
34316
34317 /*
34318 * assembly code in arch/x86/net/bpf_jit.S
34319@@ -174,7 +178,9 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
34320 static void jit_fill_hole(void *area, unsigned int size)
34321 {
34322 /* fill whole space with int3 instructions */
34323+ pax_open_kernel();
34324 memset(area, 0xcc, size);
34325+ pax_close_kernel();
34326 }
34327
34328 struct jit_context {
34329@@ -896,7 +902,9 @@ common_load:
34330 pr_err("bpf_jit_compile fatal error\n");
34331 return -EFAULT;
34332 }
34333+ pax_open_kernel();
34334 memcpy(image + proglen, temp, ilen);
34335+ pax_close_kernel();
34336 }
34337 proglen += ilen;
34338 addrs[i] = proglen;
34339@@ -968,7 +976,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
34340
34341 if (image) {
34342 bpf_flush_icache(header, image + proglen);
34343- set_memory_ro((unsigned long)header, header->pages);
34344 prog->bpf_func = (void *)image;
34345 prog->jited = true;
34346 }
34347@@ -981,12 +988,8 @@ void bpf_jit_free(struct bpf_prog *fp)
34348 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
34349 struct bpf_binary_header *header = (void *)addr;
34350
34351- if (!fp->jited)
34352- goto free_filter;
34353+ if (fp->jited)
34354+ bpf_jit_binary_free(header);
34355
34356- set_memory_rw(addr, header->pages);
34357- bpf_jit_binary_free(header);
34358-
34359-free_filter:
34360 bpf_prog_unlock_free(fp);
34361 }
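The BPF JIT hunks follow the same pattern: bpf_jit_enable becomes __read_only under CONFIG_GRKERNSEC_BPF_HARDEN, writes into the JIT image go through the pax_open_kernel() bracket, and the explicit set_memory_ro()/set_memory_rw() calls are dropped, presumably because the image is already read-only under KERNEXEC. jit_fill_hole() itself pads unused space with 0xCC so a stray jump into the hole traps on int3 instead of executing leftover bytes; a standalone model:

    #include <stdio.h>
    #include <string.h>

    static void jit_fill_hole(void *area, unsigned int size)
    {
            /* 0xCC is the x86 int3 breakpoint opcode. */
            memset(area, 0xcc, size);
    }

    int main(void)
    {
            unsigned char image[16];

            jit_fill_hole(image, sizeof(image));
            printf("pad byte: %#x\n", image[0]);
            return 0;
    }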
34362diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
34363index 5d04be5..2beeaa2 100644
34364--- a/arch/x86/oprofile/backtrace.c
34365+++ b/arch/x86/oprofile/backtrace.c
34366@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
34367 struct stack_frame_ia32 *fp;
34368 unsigned long bytes;
34369
34370- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34371+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34372 if (bytes != 0)
34373 return NULL;
34374
34375- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
34376+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
34377
34378 oprofile_add_trace(bufhead[0].return_address);
34379
34380@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
34381 struct stack_frame bufhead[2];
34382 unsigned long bytes;
34383
34384- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34385+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34386 if (bytes != 0)
34387 return NULL;
34388
34389@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
34390 {
34391 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
34392
34393- if (!user_mode_vm(regs)) {
34394+ if (!user_mode(regs)) {
34395 unsigned long stack = kernel_stack_pointer(regs);
34396 if (depth)
34397 dump_trace(NULL, regs, (unsigned long *)stack, 0,
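The oprofile backtrace hunks add __force_user/__force_kernel casts: under UDEREF user and kernel pointers live in different sparse address spaces, and copy_from_user_nmi() expects a user-space-qualified pointer. A model of the annotation, assuming sparse-style attributes as the kernel defines them; under plain GCC the macros expand to nothing, and the copy routine here is a stub standing in for the kernel's copy_from_user_nmi():

    #include <stdio.h>
    #include <string.h>

    #ifdef __CHECKER__
    #define __user   __attribute__((noderef, address_space(1)))
    #define __force  __attribute__((force))
    #else
    #define __user
    #define __force
    #endif
    #define __force_user __force __user

    /* Stub: returns the number of bytes NOT copied, 0 on success. */
    static unsigned long copy_from_user_nmi(void *dst, const void __user *src,
                                            unsigned long n)
    {
            memcpy(dst, (const void __force *)src, n);
            return 0;
    }

    int main(void)
    {
            unsigned long frame[2] = { 0xdeadbeef, 0xcafef00d };
            unsigned long buf[2];

            /* The frame address arrives untyped; the cast tells the
             * checker it really addresses userland. */
            copy_from_user_nmi(buf, (const void __force_user *)frame,
                               sizeof(buf));
            printf("next_frame=%#lx return_address=%#lx\n", buf[0], buf[1]);
            return 0;
    }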
34398diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
34399index 1d2e639..f6ef82a 100644
34400--- a/arch/x86/oprofile/nmi_int.c
34401+++ b/arch/x86/oprofile/nmi_int.c
34402@@ -23,6 +23,7 @@
34403 #include <asm/nmi.h>
34404 #include <asm/msr.h>
34405 #include <asm/apic.h>
34406+#include <asm/pgtable.h>
34407
34408 #include "op_counter.h"
34409 #include "op_x86_model.h"
34410@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
34411 if (ret)
34412 return ret;
34413
34414- if (!model->num_virt_counters)
34415- model->num_virt_counters = model->num_counters;
34416+ if (!model->num_virt_counters) {
34417+ pax_open_kernel();
34418+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
34419+ pax_close_kernel();
34420+ }
34421
34422 mux_init(ops);
34423
34424diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
34425index 50d86c0..7985318 100644
34426--- a/arch/x86/oprofile/op_model_amd.c
34427+++ b/arch/x86/oprofile/op_model_amd.c
34428@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
34429 num_counters = AMD64_NUM_COUNTERS;
34430 }
34431
34432- op_amd_spec.num_counters = num_counters;
34433- op_amd_spec.num_controls = num_counters;
34434- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34435+ pax_open_kernel();
34436+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
34437+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
34438+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34439+ pax_close_kernel();
34440
34441 return 0;
34442 }
34443diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
34444index d90528e..0127e2b 100644
34445--- a/arch/x86/oprofile/op_model_ppro.c
34446+++ b/arch/x86/oprofile/op_model_ppro.c
34447@@ -19,6 +19,7 @@
34448 #include <asm/msr.h>
34449 #include <asm/apic.h>
34450 #include <asm/nmi.h>
34451+#include <asm/pgtable.h>
34452
34453 #include "op_x86_model.h"
34454 #include "op_counter.h"
34455@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
34456
34457 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
34458
34459- op_arch_perfmon_spec.num_counters = num_counters;
34460- op_arch_perfmon_spec.num_controls = num_counters;
34461+ pax_open_kernel();
34462+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
34463+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
34464+ pax_close_kernel();
34465 }
34466
34467 static int arch_perfmon_init(struct oprofile_operations *ignore)
34468diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
34469index 71e8a67..6a313bb 100644
34470--- a/arch/x86/oprofile/op_x86_model.h
34471+++ b/arch/x86/oprofile/op_x86_model.h
34472@@ -52,7 +52,7 @@ struct op_x86_model_spec {
34473 void (*switch_ctrl)(struct op_x86_model_spec const *model,
34474 struct op_msrs const * const msrs);
34475 #endif
34476-};
34477+} __do_const;
34478
34479 struct op_counter_config;
34480
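The oprofile spec structures are constified (__do_const), so their function-pointer tables become read-only; the few legitimate one-time writes, such as patching num_counters at init, strip the const qualifier inside a pax_open_kernel() bracket. A well-defined standalone model of that pattern, where readers see a const view of an object that is actually mutable:

    #include <stdio.h>

    struct op_spec { unsigned int num_counters; };

    static struct op_spec spec_storage = { 0 };
    static const struct op_spec *const op_spec = &spec_storage;

    int main(void)
    {
            /* pax_open_kernel();  -- the real bracket re-enables writes */
            *(unsigned int *)&op_spec->num_counters = 4;
            /* pax_close_kernel(); */
            printf("num_counters = %u\n", op_spec->num_counters);
            return 0;
    }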
34481diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
34482index 44b9271..4c5a988 100644
34483--- a/arch/x86/pci/intel_mid_pci.c
34484+++ b/arch/x86/pci/intel_mid_pci.c
34485@@ -258,7 +258,7 @@ int __init intel_mid_pci_init(void)
34486 pci_mmcfg_late_init();
34487 pcibios_enable_irq = intel_mid_pci_irq_enable;
34488 pcibios_disable_irq = intel_mid_pci_irq_disable;
34489- pci_root_ops = intel_mid_pci_ops;
34490+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
34491 pci_soc_mode = 1;
34492 /* Continue with standard init */
34493 return 1;
34494diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
34495index 5dc6ca5..25c03f5 100644
34496--- a/arch/x86/pci/irq.c
34497+++ b/arch/x86/pci/irq.c
34498@@ -51,7 +51,7 @@ struct irq_router {
34499 struct irq_router_handler {
34500 u16 vendor;
34501 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
34502-};
34503+} __do_const;
34504
34505 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
34506 void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
34507@@ -791,7 +791,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
34508 return 0;
34509 }
34510
34511-static __initdata struct irq_router_handler pirq_routers[] = {
34512+static __initconst const struct irq_router_handler pirq_routers[] = {
34513 { PCI_VENDOR_ID_INTEL, intel_router_probe },
34514 { PCI_VENDOR_ID_AL, ali_router_probe },
34515 { PCI_VENDOR_ID_ITE, ite_router_probe },
34516@@ -818,7 +818,7 @@ static struct pci_dev *pirq_router_dev;
34517 static void __init pirq_find_router(struct irq_router *r)
34518 {
34519 struct irq_routing_table *rt = pirq_table;
34520- struct irq_router_handler *h;
34521+ const struct irq_router_handler *h;
34522
34523 #ifdef CONFIG_PCI_BIOS
34524 if (!rt->signature) {
34525@@ -1091,7 +1091,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
34526 return 0;
34527 }
34528
34529-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
34530+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
34531 {
34532 .callback = fix_broken_hp_bios_irq9,
34533 .ident = "HP Pavilion N5400 Series Laptop",
34534diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
34535index 9b83b90..4112152 100644
34536--- a/arch/x86/pci/pcbios.c
34537+++ b/arch/x86/pci/pcbios.c
34538@@ -79,7 +79,7 @@ union bios32 {
34539 static struct {
34540 unsigned long address;
34541 unsigned short segment;
34542-} bios32_indirect __initdata = { 0, __KERNEL_CS };
34543+} bios32_indirect __initconst = { 0, __PCIBIOS_CS };
34544
34545 /*
34546 * Returns the entry point for the given service, NULL on error
34547@@ -92,37 +92,80 @@ static unsigned long __init bios32_service(unsigned long service)
34548 unsigned long length; /* %ecx */
34549 unsigned long entry; /* %edx */
34550 unsigned long flags;
34551+ struct desc_struct d, *gdt;
34552
34553 local_irq_save(flags);
34554- __asm__("lcall *(%%edi); cld"
34555+
34556+ gdt = get_cpu_gdt_table(smp_processor_id());
34557+
34558+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
34559+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34560+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
34561+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34562+
34563+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
34564 : "=a" (return_code),
34565 "=b" (address),
34566 "=c" (length),
34567 "=d" (entry)
34568 : "0" (service),
34569 "1" (0),
34570- "D" (&bios32_indirect));
34571+ "D" (&bios32_indirect),
34572+ "r"(__PCIBIOS_DS)
34573+ : "memory");
34574+
34575+ pax_open_kernel();
34576+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
34577+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
34578+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
34579+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
34580+ pax_close_kernel();
34581+
34582 local_irq_restore(flags);
34583
34584 switch (return_code) {
34585- case 0:
34586- return address + entry;
34587- case 0x80: /* Not present */
34588- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34589- return 0;
34590- default: /* Shouldn't happen */
34591- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34592- service, return_code);
34593+ case 0: {
34594+ int cpu;
34595+ unsigned char flags;
34596+
34597+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
34598+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
34599+ printk(KERN_WARNING "bios32_service: not valid\n");
34600 return 0;
34601+ }
34602+ address = address + PAGE_OFFSET;
34603+ length += 16UL; /* some BIOSs underreport this... */
34604+ flags = 4;
34605+ if (length >= 64*1024*1024) {
34606+ length >>= PAGE_SHIFT;
34607+ flags |= 8;
34608+ }
34609+
34610+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
34611+ gdt = get_cpu_gdt_table(cpu);
34612+ pack_descriptor(&d, address, length, 0x9b, flags);
34613+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34614+ pack_descriptor(&d, address, length, 0x93, flags);
34615+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34616+ }
34617+ return entry;
34618+ }
34619+ case 0x80: /* Not present */
34620+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34621+ return 0;
34622+ default: /* Shouldn't happen */
34623+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34624+ service, return_code);
34625+ return 0;
34626 }
34627 }
34628
34629 static struct {
34630 unsigned long address;
34631 unsigned short segment;
34632-} pci_indirect = { 0, __KERNEL_CS };
34633+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
34634
34635-static int pci_bios_present;
34636+static int pci_bios_present __read_only;
34637
34638 static int __init check_pcibios(void)
34639 {
34640@@ -131,11 +174,13 @@ static int __init check_pcibios(void)
34641 unsigned long flags, pcibios_entry;
34642
34643 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
34644- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
34645+ pci_indirect.address = pcibios_entry;
34646
34647 local_irq_save(flags);
34648- __asm__(
34649- "lcall *(%%edi); cld\n\t"
34650+ __asm__("movw %w6, %%ds\n\t"
34651+ "lcall *%%ss:(%%edi); cld\n\t"
34652+ "push %%ss\n\t"
34653+ "pop %%ds\n\t"
34654 "jc 1f\n\t"
34655 "xor %%ah, %%ah\n"
34656 "1:"
34657@@ -144,7 +189,8 @@ static int __init check_pcibios(void)
34658 "=b" (ebx),
34659 "=c" (ecx)
34660 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
34661- "D" (&pci_indirect)
34662+ "D" (&pci_indirect),
34663+ "r" (__PCIBIOS_DS)
34664 : "memory");
34665 local_irq_restore(flags);
34666
34667@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34668
34669 switch (len) {
34670 case 1:
34671- __asm__("lcall *(%%esi); cld\n\t"
34672+ __asm__("movw %w6, %%ds\n\t"
34673+ "lcall *%%ss:(%%esi); cld\n\t"
34674+ "push %%ss\n\t"
34675+ "pop %%ds\n\t"
34676 "jc 1f\n\t"
34677 "xor %%ah, %%ah\n"
34678 "1:"
34679@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34680 : "1" (PCIBIOS_READ_CONFIG_BYTE),
34681 "b" (bx),
34682 "D" ((long)reg),
34683- "S" (&pci_indirect));
34684+ "S" (&pci_indirect),
34685+ "r" (__PCIBIOS_DS));
34686 /*
34687 * Zero-extend the result beyond 8 bits, do not trust the
34688 * BIOS having done it:
34689@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34690 *value &= 0xff;
34691 break;
34692 case 2:
34693- __asm__("lcall *(%%esi); cld\n\t"
34694+ __asm__("movw %w6, %%ds\n\t"
34695+ "lcall *%%ss:(%%esi); cld\n\t"
34696+ "push %%ss\n\t"
34697+ "pop %%ds\n\t"
34698 "jc 1f\n\t"
34699 "xor %%ah, %%ah\n"
34700 "1:"
34701@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34702 : "1" (PCIBIOS_READ_CONFIG_WORD),
34703 "b" (bx),
34704 "D" ((long)reg),
34705- "S" (&pci_indirect));
34706+ "S" (&pci_indirect),
34707+ "r" (__PCIBIOS_DS));
34708 /*
34709 * Zero-extend the result beyond 16 bits, do not trust the
34710 * BIOS having done it:
34711@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34712 *value &= 0xffff;
34713 break;
34714 case 4:
34715- __asm__("lcall *(%%esi); cld\n\t"
34716+ __asm__("movw %w6, %%ds\n\t"
34717+ "lcall *%%ss:(%%esi); cld\n\t"
34718+ "push %%ss\n\t"
34719+ "pop %%ds\n\t"
34720 "jc 1f\n\t"
34721 "xor %%ah, %%ah\n"
34722 "1:"
34723@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34724 : "1" (PCIBIOS_READ_CONFIG_DWORD),
34725 "b" (bx),
34726 "D" ((long)reg),
34727- "S" (&pci_indirect));
34728+ "S" (&pci_indirect),
34729+ "r" (__PCIBIOS_DS));
34730 break;
34731 }
34732
34733@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34734
34735 switch (len) {
34736 case 1:
34737- __asm__("lcall *(%%esi); cld\n\t"
34738+ __asm__("movw %w6, %%ds\n\t"
34739+ "lcall *%%ss:(%%esi); cld\n\t"
34740+ "push %%ss\n\t"
34741+ "pop %%ds\n\t"
34742 "jc 1f\n\t"
34743 "xor %%ah, %%ah\n"
34744 "1:"
34745@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34746 "c" (value),
34747 "b" (bx),
34748 "D" ((long)reg),
34749- "S" (&pci_indirect));
34750+ "S" (&pci_indirect),
34751+ "r" (__PCIBIOS_DS));
34752 break;
34753 case 2:
34754- __asm__("lcall *(%%esi); cld\n\t"
34755+ __asm__("movw %w6, %%ds\n\t"
34756+ "lcall *%%ss:(%%esi); cld\n\t"
34757+ "push %%ss\n\t"
34758+ "pop %%ds\n\t"
34759 "jc 1f\n\t"
34760 "xor %%ah, %%ah\n"
34761 "1:"
34762@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34763 "c" (value),
34764 "b" (bx),
34765 "D" ((long)reg),
34766- "S" (&pci_indirect));
34767+ "S" (&pci_indirect),
34768+ "r" (__PCIBIOS_DS));
34769 break;
34770 case 4:
34771- __asm__("lcall *(%%esi); cld\n\t"
34772+ __asm__("movw %w6, %%ds\n\t"
34773+ "lcall *%%ss:(%%esi); cld\n\t"
34774+ "push %%ss\n\t"
34775+ "pop %%ds\n\t"
34776 "jc 1f\n\t"
34777 "xor %%ah, %%ah\n"
34778 "1:"
34779@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34780 "c" (value),
34781 "b" (bx),
34782 "D" ((long)reg),
34783- "S" (&pci_indirect));
34784+ "S" (&pci_indirect),
34785+ "r" (__PCIBIOS_DS));
34786 break;
34787 }
34788
34789@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34790
34791 DBG("PCI: Fetching IRQ routing table... ");
34792 __asm__("push %%es\n\t"
34793+ "movw %w8, %%ds\n\t"
34794 "push %%ds\n\t"
34795 "pop %%es\n\t"
34796- "lcall *(%%esi); cld\n\t"
34797+ "lcall *%%ss:(%%esi); cld\n\t"
34798 "pop %%es\n\t"
34799+ "push %%ss\n\t"
34800+ "pop %%ds\n"
34801 "jc 1f\n\t"
34802 "xor %%ah, %%ah\n"
34803 "1:"
34804@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34805 "1" (0),
34806 "D" ((long) &opt),
34807 "S" (&pci_indirect),
34808- "m" (opt)
34809+ "m" (opt),
34810+ "r" (__PCIBIOS_DS)
34811 : "memory");
34812 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
34813 if (ret & 0xff00)
34814@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34815 {
34816 int ret;
34817
34818- __asm__("lcall *(%%esi); cld\n\t"
34819+ __asm__("movw %w5, %%ds\n\t"
34820+ "lcall *%%ss:(%%esi); cld\n\t"
34821+ "push %%ss\n\t"
34822+ "pop %%ds\n"
34823 "jc 1f\n\t"
34824 "xor %%ah, %%ah\n"
34825 "1:"
34826@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34827 : "0" (PCIBIOS_SET_PCI_HW_INT),
34828 "b" ((dev->bus->number << 8) | dev->devfn),
34829 "c" ((irq << 8) | (pin + 10)),
34830- "S" (&pci_indirect));
34831+ "S" (&pci_indirect),
34832+ "r" (__PCIBIOS_DS));
34833 return !(ret & 0xff00);
34834 }
34835 EXPORT_SYMBOL(pcibios_set_irq_routing);
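The pcbios.c rework stops calling the BIOS32/PCI BIOS entry points with the flat __KERNEL_CS/%ds: dedicated __PCIBIOS_CS/__PCIBIOS_DS descriptors are packed to cover just the reported BIOS window, %ds is swapped around each lcall, and the window length is padded and converted to page granularity when it exceeds the 20-bit byte limit. A standalone sketch of that sizing arithmetic, with illustrative base and size values:

    #include <stdio.h>

    int main(void)
    {
            unsigned long address = 0xc00e0000UL; /* example: base + PAGE_OFFSET */
            unsigned long length  = 0x20000UL;    /* example window size */
            unsigned char flags   = 4;            /* 32-bit segment */

            length += 16;                         /* some BIOSes underreport */
            if (length >= 64UL * 1024 * 1024) {
                    length >>= 12;                /* express the limit in pages */
                    flags |= 8;                   /* 4K granularity bit */
            }
            printf("descriptor: base=%#lx limit=%#lx flags=%#x\n",
                   address, length, flags);
            return 0;
    }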
34836diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
34837index 40e7cda..c7e6672 100644
34838--- a/arch/x86/platform/efi/efi_32.c
34839+++ b/arch/x86/platform/efi/efi_32.c
34840@@ -61,11 +61,22 @@ void __init efi_call_phys_prolog(void)
34841 {
34842 struct desc_ptr gdt_descr;
34843
34844+#ifdef CONFIG_PAX_KERNEXEC
34845+ struct desc_struct d;
34846+#endif
34847+
34848 local_irq_save(efi_rt_eflags);
34849
34850 load_cr3(initial_page_table);
34851 __flush_tlb_all();
34852
34853+#ifdef CONFIG_PAX_KERNEXEC
34854+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
34855+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34856+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
34857+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34858+#endif
34859+
34860 gdt_descr.address = __pa(get_cpu_gdt_table(0));
34861 gdt_descr.size = GDT_SIZE - 1;
34862 load_gdt(&gdt_descr);
34863@@ -75,11 +86,24 @@ void __init efi_call_phys_epilog(void)
34864 {
34865 struct desc_ptr gdt_descr;
34866
34867+#ifdef CONFIG_PAX_KERNEXEC
34868+ struct desc_struct d;
34869+
34870+ memset(&d, 0, sizeof d);
34871+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34872+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34873+#endif
34874+
34875 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
34876 gdt_descr.size = GDT_SIZE - 1;
34877 load_gdt(&gdt_descr);
34878
34879+#ifdef CONFIG_PAX_PER_CPU_PGD
34880+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34881+#else
34882 load_cr3(swapper_pg_dir);
34883+#endif
34884+
34885 __flush_tlb_all();
34886
34887 local_irq_restore(efi_rt_eflags);
34888diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
34889index 17e80d8..9fa6e41 100644
34890--- a/arch/x86/platform/efi/efi_64.c
34891+++ b/arch/x86/platform/efi/efi_64.c
34892@@ -98,6 +98,11 @@ void __init efi_call_phys_prolog(void)
34893 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
34894 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
34895 }
34896+
34897+#ifdef CONFIG_PAX_PER_CPU_PGD
34898+ load_cr3(swapper_pg_dir);
34899+#endif
34900+
34901 __flush_tlb_all();
34902 }
34903
34904@@ -115,6 +120,11 @@ void __init efi_call_phys_epilog(void)
34905 for (pgd = 0; pgd < n_pgds; pgd++)
34906 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
34907 kfree(save_pgd);
34908+
34909+#ifdef CONFIG_PAX_PER_CPU_PGD
34910+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34911+#endif
34912+
34913 __flush_tlb_all();
34914 local_irq_restore(efi_flags);
34915 early_code_mapping_set_exec(0);
34916@@ -145,8 +155,23 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
34917 unsigned npages;
34918 pgd_t *pgd;
34919
34920- if (efi_enabled(EFI_OLD_MEMMAP))
34921+ if (efi_enabled(EFI_OLD_MEMMAP)) {
34922+ /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
34923+ * able to execute the EFI services.
34924+ */
34925+ if (__supported_pte_mask & _PAGE_NX) {
34926+ unsigned long addr = (unsigned long) __va(0);
34927+ pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);
34928+
34929+ pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
34930+#ifdef CONFIG_PAX_PER_CPU_PGD
34931+ set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
34932+#endif
34933+ set_pgd(pgd_offset_k(addr), pe);
34934+ }
34935+
34936 return 0;
34937+ }
34938
34939 efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
34940 pgd = __va(efi_scratch.efi_pgt);
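In efi_64.c, booting with efi=old_map means firmware code runs out of the low 1:1 mapping, so when NX is supported the patch clears _PAGE_NX on that top-level entry (and mirrors the change into the per-CPU PGD). A minimal model of the bit manipulation, assuming x86-64's NX bit is bit 63 of a table entry:

    #include <stdio.h>
    #include <stdint.h>

    #define _PAGE_NX (1ULL << 63)

    int main(void)
    {
            uint64_t pgd_entry = 0x1000 | _PAGE_NX; /* example NX-protected entry */

            pgd_entry &= ~_PAGE_NX;                 /* firmware must execute here */
            printf("entry=%#llx\n", (unsigned long long)pgd_entry);
            return 0;
    }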
34941diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
34942index 040192b..7d3300f 100644
34943--- a/arch/x86/platform/efi/efi_stub_32.S
34944+++ b/arch/x86/platform/efi/efi_stub_32.S
34945@@ -6,7 +6,9 @@
34946 */
34947
34948 #include <linux/linkage.h>
34949+#include <linux/init.h>
34950 #include <asm/page_types.h>
34951+#include <asm/segment.h>
34952
34953 /*
34954 * efi_call_phys(void *, ...) is a function with variable parameters.
34955@@ -20,7 +22,7 @@
34956 * service functions will comply with gcc calling convention, too.
34957 */
34958
34959-.text
34960+__INIT
34961 ENTRY(efi_call_phys)
34962 /*
34963 * 0. The function can only be called in Linux kernel. So CS has been
34964@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
34965 * The mapping of lower virtual memory has been created in prolog and
34966 * epilog.
34967 */
34968- movl $1f, %edx
34969- subl $__PAGE_OFFSET, %edx
34970- jmp *%edx
34971+#ifdef CONFIG_PAX_KERNEXEC
34972+ movl $(__KERNEXEC_EFI_DS), %edx
34973+ mov %edx, %ds
34974+ mov %edx, %es
34975+ mov %edx, %ss
34976+ addl $2f,(1f)
34977+ ljmp *(1f)
34978+
34979+__INITDATA
34980+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
34981+.previous
34982+
34983+2:
34984+ subl $2b,(1b)
34985+#else
34986+ jmp 1f-__PAGE_OFFSET
34987 1:
34988+#endif
34989
34990 /*
34991 * 2. Now on the top of stack is the return
34992@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
34993 * parameter 2, ..., param n. To make things easy, we save the return
34994 * address of efi_call_phys in a global variable.
34995 */
34996- popl %edx
34997- movl %edx, saved_return_addr
34998- /* get the function pointer into ECX*/
34999- popl %ecx
35000- movl %ecx, efi_rt_function_ptr
35001- movl $2f, %edx
35002- subl $__PAGE_OFFSET, %edx
35003- pushl %edx
35004+ popl (saved_return_addr)
35005+ popl (efi_rt_function_ptr)
35006
35007 /*
35008 * 3. Clear PG bit in %CR0.
35009@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
35010 /*
35011 * 5. Call the physical function.
35012 */
35013- jmp *%ecx
35014+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
35015
35016-2:
35017 /*
35018 * 6. After EFI runtime service returns, control will return to
35019 * following instruction. We'd better readjust stack pointer first.
35020@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
35021 movl %cr0, %edx
35022 orl $0x80000000, %edx
35023 movl %edx, %cr0
35024- jmp 1f
35025-1:
35026+
35027 /*
35028 * 8. Now restore the virtual mode from flat mode by
35029 * adding EIP with PAGE_OFFSET.
35030 */
35031- movl $1f, %edx
35032- jmp *%edx
35033+#ifdef CONFIG_PAX_KERNEXEC
35034+ movl $(__KERNEL_DS), %edx
35035+ mov %edx, %ds
35036+ mov %edx, %es
35037+ mov %edx, %ss
35038+ ljmp $(__KERNEL_CS),$1f
35039+#else
35040+ jmp 1f+__PAGE_OFFSET
35041+#endif
35042 1:
35043
35044 /*
35045 * 9. Balance the stack. And because EAX contain the return value,
35046 * we'd better not clobber it.
35047 */
35048- leal efi_rt_function_ptr, %edx
35049- movl (%edx), %ecx
35050- pushl %ecx
35051+ pushl (efi_rt_function_ptr)
35052
35053 /*
35054- * 10. Push the saved return address onto the stack and return.
35055+ * 10. Return to the saved return address.
35056 */
35057- leal saved_return_addr, %edx
35058- movl (%edx), %ecx
35059- pushl %ecx
35060- ret
35061+ jmpl *(saved_return_addr)
35062 ENDPROC(efi_call_phys)
35063 .previous
35064
35065-.data
35066+__INITDATA
35067 saved_return_addr:
35068 .long 0
35069 efi_rt_function_ptr:
35070diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
35071index 86d0f9e..6d499f4 100644
35072--- a/arch/x86/platform/efi/efi_stub_64.S
35073+++ b/arch/x86/platform/efi/efi_stub_64.S
35074@@ -11,6 +11,7 @@
35075 #include <asm/msr.h>
35076 #include <asm/processor-flags.h>
35077 #include <asm/page_types.h>
35078+#include <asm/alternative-asm.h>
35079
35080 #define SAVE_XMM \
35081 mov %rsp, %rax; \
35082@@ -88,6 +89,7 @@ ENTRY(efi_call)
35083 RESTORE_PGT
35084 addq $48, %rsp
35085 RESTORE_XMM
35086+ pax_force_retaddr 0, 1
35087 ret
35088 ENDPROC(efi_call)
35089
35090diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
35091index 1bbedc4..eb795b5 100644
35092--- a/arch/x86/platform/intel-mid/intel-mid.c
35093+++ b/arch/x86/platform/intel-mid/intel-mid.c
35094@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
35095 {
35096 };
35097
35098-static void intel_mid_reboot(void)
35099+static void __noreturn intel_mid_reboot(void)
35100 {
35101 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
35102+ BUG();
35103 }
35104
35105 static unsigned long __init intel_mid_calibrate_tsc(void)
35106diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35107index 3c1c386..59a68ed 100644
35108--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35109+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35110@@ -13,6 +13,6 @@
35111 /* For every CPU addition a new get_<cpuname>_ops interface needs
35112 * to be added.
35113 */
35114-extern void *get_penwell_ops(void);
35115-extern void *get_cloverview_ops(void);
35116-extern void *get_tangier_ops(void);
35117+extern const void *get_penwell_ops(void);
35118+extern const void *get_cloverview_ops(void);
35119+extern const void *get_tangier_ops(void);
35120diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
35121index 23381d2..8ddc10e 100644
35122--- a/arch/x86/platform/intel-mid/mfld.c
35123+++ b/arch/x86/platform/intel-mid/mfld.c
35124@@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void)
35125 pm_power_off = mfld_power_off;
35126 }
35127
35128-void *get_penwell_ops(void)
35129+const void *get_penwell_ops(void)
35130 {
35131 return &penwell_ops;
35132 }
35133
35134-void *get_cloverview_ops(void)
35135+const void *get_cloverview_ops(void)
35136 {
35137 return &penwell_ops;
35138 }
35139diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
35140index aaca917..66eadbc 100644
35141--- a/arch/x86/platform/intel-mid/mrfl.c
35142+++ b/arch/x86/platform/intel-mid/mrfl.c
35143@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
35144 .arch_setup = tangier_arch_setup,
35145 };
35146
35147-void *get_tangier_ops(void)
35148+const void *get_tangier_ops(void)
35149 {
35150 return &tangier_ops;
35151 }
35152diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
35153index d6ee929..3637cb5 100644
35154--- a/arch/x86/platform/olpc/olpc_dt.c
35155+++ b/arch/x86/platform/olpc/olpc_dt.c
35156@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
35157 return res;
35158 }
35159
35160-static struct of_pdt_ops prom_olpc_ops __initdata = {
35161+static struct of_pdt_ops prom_olpc_ops __initconst = {
35162 .nextprop = olpc_dt_nextprop,
35163 .getproplen = olpc_dt_getproplen,
35164 .getproperty = olpc_dt_getproperty,
35165diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
35166index 6ec7910..ecdbb11 100644
35167--- a/arch/x86/power/cpu.c
35168+++ b/arch/x86/power/cpu.c
35169@@ -137,11 +137,8 @@ static void do_fpu_end(void)
35170 static void fix_processor_context(void)
35171 {
35172 int cpu = smp_processor_id();
35173- struct tss_struct *t = &per_cpu(init_tss, cpu);
35174-#ifdef CONFIG_X86_64
35175- struct desc_struct *desc = get_cpu_gdt_table(cpu);
35176- tss_desc tss;
35177-#endif
35178+ struct tss_struct *t = init_tss + cpu;
35179+
35180 set_tss_desc(cpu, t); /*
35181 * This just modifies memory; should not be
35182 * necessary. But... This is necessary, because
35183@@ -150,10 +147,6 @@ static void fix_processor_context(void)
35184 */
35185
35186 #ifdef CONFIG_X86_64
35187- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
35188- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
35189- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
35190-
35191 syscall_init(); /* This sets MSR_*STAR and related */
35192 #endif
35193 load_TR_desc(); /* This does ltr */
35194diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
35195index bad628a..a102610 100644
35196--- a/arch/x86/realmode/init.c
35197+++ b/arch/x86/realmode/init.c
35198@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
35199 __va(real_mode_header->trampoline_header);
35200
35201 #ifdef CONFIG_X86_32
35202- trampoline_header->start = __pa_symbol(startup_32_smp);
35203+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
35204+
35205+#ifdef CONFIG_PAX_KERNEXEC
35206+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
35207+#endif
35208+
35209+ trampoline_header->boot_cs = __BOOT_CS;
35210 trampoline_header->gdt_limit = __BOOT_DS + 7;
35211 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
35212 #else
35213@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
35214 *trampoline_cr4_features = read_cr4();
35215
35216 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
35217- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
35218+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
35219 trampoline_pgd[511] = init_level4_pgt[511].pgd;
35220 #endif
35221 }
35222diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
35223index 7c0d7be..d24dc88 100644
35224--- a/arch/x86/realmode/rm/Makefile
35225+++ b/arch/x86/realmode/rm/Makefile
35226@@ -67,5 +67,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
35227
35228 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
35229 -I$(srctree)/arch/x86/boot
35230+ifdef CONSTIFY_PLUGIN
35231+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
35232+endif
35233 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
35234 GCOV_PROFILE := n
35235diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
35236index a28221d..93c40f1 100644
35237--- a/arch/x86/realmode/rm/header.S
35238+++ b/arch/x86/realmode/rm/header.S
35239@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
35240 #endif
35241 /* APM/BIOS reboot */
35242 .long pa_machine_real_restart_asm
35243-#ifdef CONFIG_X86_64
35244+#ifdef CONFIG_X86_32
35245+ .long __KERNEL_CS
35246+#else
35247 .long __KERNEL32_CS
35248 #endif
35249 END(real_mode_header)
35250diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
35251index 48ddd76..c26749f 100644
35252--- a/arch/x86/realmode/rm/trampoline_32.S
35253+++ b/arch/x86/realmode/rm/trampoline_32.S
35254@@ -24,6 +24,12 @@
35255 #include <asm/page_types.h>
35256 #include "realmode.h"
35257
35258+#ifdef CONFIG_PAX_KERNEXEC
35259+#define ta(X) (X)
35260+#else
35261+#define ta(X) (pa_ ## X)
35262+#endif
35263+
35264 .text
35265 .code16
35266
35267@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
35268
35269 cli # We should be safe anyway
35270
35271- movl tr_start, %eax # where we need to go
35272-
35273 movl $0xA5A5A5A5, trampoline_status
35274 # write marker for master knows we're running
35275
35276@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
35277 movw $1, %dx # protected mode (PE) bit
35278 lmsw %dx # into protected mode
35279
35280- ljmpl $__BOOT_CS, $pa_startup_32
35281+ ljmpl *(trampoline_header)
35282
35283 .section ".text32","ax"
35284 .code32
35285@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
35286 .balign 8
35287 GLOBAL(trampoline_header)
35288 tr_start: .space 4
35289- tr_gdt_pad: .space 2
35290+ tr_boot_cs: .space 2
35291 tr_gdt: .space 6
35292 END(trampoline_header)
35293
35294diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
35295index dac7b20..72dbaca 100644
35296--- a/arch/x86/realmode/rm/trampoline_64.S
35297+++ b/arch/x86/realmode/rm/trampoline_64.S
35298@@ -93,6 +93,7 @@ ENTRY(startup_32)
35299 movl %edx, %gs
35300
35301 movl pa_tr_cr4, %eax
35302+ andl $~X86_CR4_PCIDE, %eax
35303 movl %eax, %cr4 # Enable PAE mode
35304
35305 # Setup trampoline 4 level pagetables
35306@@ -106,7 +107,7 @@ ENTRY(startup_32)
35307 wrmsr
35308
35309 # Enable paging and in turn activate Long Mode
35310- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
35311+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
35312 movl %eax, %cr0
35313
35314 /*
35315diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
35316index 9e7e147..25a4158 100644
35317--- a/arch/x86/realmode/rm/wakeup_asm.S
35318+++ b/arch/x86/realmode/rm/wakeup_asm.S
35319@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
35320 lgdtl pmode_gdt
35321
35322 /* This really couldn't... */
35323- movl pmode_entry, %eax
35324 movl pmode_cr0, %ecx
35325 movl %ecx, %cr0
35326- ljmpl $__KERNEL_CS, $pa_startup_32
35327- /* -> jmp *%eax in trampoline_32.S */
35328+
35329+ ljmpl *pmode_entry
35330 #else
35331 jmp trampoline_start
35332 #endif
35333diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
35334index 604a37e..e49702a 100644
35335--- a/arch/x86/tools/Makefile
35336+++ b/arch/x86/tools/Makefile
35337@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
35338
35339 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
35340
35341-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
35342+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
35343 hostprogs-y += relocs
35344 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
35345 PHONY += relocs
35346diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
35347index 0c2fae8..88036b7 100644
35348--- a/arch/x86/tools/relocs.c
35349+++ b/arch/x86/tools/relocs.c
35350@@ -1,5 +1,7 @@
35351 /* This is included from relocs_32/64.c */
35352
35353+#include "../../../include/generated/autoconf.h"
35354+
35355 #define ElfW(type) _ElfW(ELF_BITS, type)
35356 #define _ElfW(bits, type) __ElfW(bits, type)
35357 #define __ElfW(bits, type) Elf##bits##_##type
35358@@ -11,6 +13,7 @@
35359 #define Elf_Sym ElfW(Sym)
35360
35361 static Elf_Ehdr ehdr;
35362+static Elf_Phdr *phdr;
35363
35364 struct relocs {
35365 uint32_t *offset;
35366@@ -386,9 +389,39 @@ static void read_ehdr(FILE *fp)
35367 }
35368 }
35369
35370+static void read_phdrs(FILE *fp)
35371+{
35372+ unsigned int i;
35373+
35374+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
35375+ if (!phdr) {
35376+ die("Unable to allocate %d program headers\n",
35377+ ehdr.e_phnum);
35378+ }
35379+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
35380+ die("Seek to %d failed: %s\n",
35381+ ehdr.e_phoff, strerror(errno));
35382+ }
35383+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
35384+ die("Cannot read ELF program headers: %s\n",
35385+ strerror(errno));
35386+ }
35387+ for(i = 0; i < ehdr.e_phnum; i++) {
35388+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
35389+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
35390+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
35391+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
35392+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
35393+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
35394+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
35395+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
35396+ }
35397+
35398+}
35399+
35400 static void read_shdrs(FILE *fp)
35401 {
35402- int i;
35403+ unsigned int i;
35404 Elf_Shdr shdr;
35405
35406 secs = calloc(ehdr.e_shnum, sizeof(struct section));
35407@@ -423,7 +456,7 @@ static void read_shdrs(FILE *fp)
35408
35409 static void read_strtabs(FILE *fp)
35410 {
35411- int i;
35412+ unsigned int i;
35413 for (i = 0; i < ehdr.e_shnum; i++) {
35414 struct section *sec = &secs[i];
35415 if (sec->shdr.sh_type != SHT_STRTAB) {
35416@@ -448,7 +481,7 @@ static void read_strtabs(FILE *fp)
35417
35418 static void read_symtabs(FILE *fp)
35419 {
35420- int i,j;
35421+ unsigned int i,j;
35422 for (i = 0; i < ehdr.e_shnum; i++) {
35423 struct section *sec = &secs[i];
35424 if (sec->shdr.sh_type != SHT_SYMTAB) {
35425@@ -479,9 +512,11 @@ static void read_symtabs(FILE *fp)
35426 }
35427
35428
35429-static void read_relocs(FILE *fp)
35430+static void read_relocs(FILE *fp, int use_real_mode)
35431 {
35432- int i,j;
35433+ unsigned int i,j;
35434+ uint32_t base;
35435+
35436 for (i = 0; i < ehdr.e_shnum; i++) {
35437 struct section *sec = &secs[i];
35438 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35439@@ -501,9 +536,22 @@ static void read_relocs(FILE *fp)
35440 die("Cannot read symbol table: %s\n",
35441 strerror(errno));
35442 }
35443+ base = 0;
35444+
35445+#ifdef CONFIG_X86_32
35446+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
35447+ if (phdr[j].p_type != PT_LOAD )
35448+ continue;
35449+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
35450+ continue;
35451+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
35452+ break;
35453+ }
35454+#endif
35455+
35456 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
35457 Elf_Rel *rel = &sec->reltab[j];
35458- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
35459+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
35460 rel->r_info = elf_xword_to_cpu(rel->r_info);
35461 #if (SHT_REL_TYPE == SHT_RELA)
35462 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
35463@@ -515,7 +563,7 @@ static void read_relocs(FILE *fp)
35464
35465 static void print_absolute_symbols(void)
35466 {
35467- int i;
35468+ unsigned int i;
35469 const char *format;
35470
35471 if (ELF_BITS == 64)
35472@@ -528,7 +576,7 @@ static void print_absolute_symbols(void)
35473 for (i = 0; i < ehdr.e_shnum; i++) {
35474 struct section *sec = &secs[i];
35475 char *sym_strtab;
35476- int j;
35477+ unsigned int j;
35478
35479 if (sec->shdr.sh_type != SHT_SYMTAB) {
35480 continue;
35481@@ -555,7 +603,7 @@ static void print_absolute_symbols(void)
35482
35483 static void print_absolute_relocs(void)
35484 {
35485- int i, printed = 0;
35486+ unsigned int i, printed = 0;
35487 const char *format;
35488
35489 if (ELF_BITS == 64)
35490@@ -568,7 +616,7 @@ static void print_absolute_relocs(void)
35491 struct section *sec_applies, *sec_symtab;
35492 char *sym_strtab;
35493 Elf_Sym *sh_symtab;
35494- int j;
35495+ unsigned int j;
35496 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35497 continue;
35498 }
35499@@ -645,13 +693,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
35500 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
35501 Elf_Sym *sym, const char *symname))
35502 {
35503- int i;
35504+ unsigned int i;
35505 /* Walk through the relocations */
35506 for (i = 0; i < ehdr.e_shnum; i++) {
35507 char *sym_strtab;
35508 Elf_Sym *sh_symtab;
35509 struct section *sec_applies, *sec_symtab;
35510- int j;
35511+ unsigned int j;
35512 struct section *sec = &secs[i];
35513
35514 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35515@@ -830,6 +878,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35516 {
35517 unsigned r_type = ELF32_R_TYPE(rel->r_info);
35518 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
35519+ char *sym_strtab = sec->link->link->strtab;
35520+
35521+	/* Don't relocate actual per-cpu variables; they are absolute indices, not addresses */
35522+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
35523+ return 0;
35524+
35525+#ifdef CONFIG_PAX_KERNEXEC
35526+	/* Don't relocate actual code; it is relocated implicitly by the base address of KERNEL_CS */
35527+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
35528+ return 0;
35529+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
35530+ return 0;
35531+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
35532+ return 0;
35533+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
35534+ return 0;
35535+#endif
35536
35537 switch (r_type) {
35538 case R_386_NONE:
35539@@ -968,7 +1033,7 @@ static int write32_as_text(uint32_t v, FILE *f)
35540
35541 static void emit_relocs(int as_text, int use_real_mode)
35542 {
35543- int i;
35544+ unsigned int i;
35545 int (*write_reloc)(uint32_t, FILE *) = write32;
35546 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35547 const char *symname);
35548@@ -1078,10 +1143,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
35549 {
35550 regex_init(use_real_mode);
35551 read_ehdr(fp);
35552+ read_phdrs(fp);
35553 read_shdrs(fp);
35554 read_strtabs(fp);
35555 read_symtabs(fp);
35556- read_relocs(fp);
35557+ read_relocs(fp, use_real_mode);
35558 if (ELF_BITS == 64)
35559 percpu_init();
35560 if (show_absolute_syms) {
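The read_phdrs() helper added above follows the same shape as the existing read_shdrs(): allocate e_phnum records, seek to e_phoff, read them in one pass, then convert every field to host byte order. A minimal user-space sketch of the same idea, assuming a 32-bit little-endian ELF whose Ehdr has already been converted (as read_ehdr() does in relocs.c); load_phdrs is a hypothetical name:

	/* Sketch: read ELF32 program headers and byte-swap them to host order. */
	#include <elf.h>
	#include <endian.h>
	#include <stdio.h>
	#include <stdlib.h>

	static Elf32_Phdr *load_phdrs(FILE *fp, const Elf32_Ehdr *eh)
	{
		Elf32_Phdr *ph = calloc(eh->e_phnum, sizeof(*ph));
		unsigned int i;

		if (!ph)
			return NULL;
		if (fseek(fp, (long)eh->e_phoff, SEEK_SET) < 0 ||
		    fread(ph, sizeof(*ph), eh->e_phnum, fp) != (size_t)eh->e_phnum) {
			free(ph);
			return NULL;
		}
		for (i = 0; i < eh->e_phnum; i++) {
			ph[i].p_type   = le32toh(ph[i].p_type);
			ph[i].p_offset = le32toh(ph[i].p_offset);
			ph[i].p_vaddr  = le32toh(ph[i].p_vaddr);
			ph[i].p_paddr  = le32toh(ph[i].p_paddr);
			ph[i].p_filesz = le32toh(ph[i].p_filesz);
			ph[i].p_memsz  = le32toh(ph[i].p_memsz);
			ph[i].p_flags  = le32toh(ph[i].p_flags);
			ph[i].p_align  = le32toh(ph[i].p_align);
		}
		return ph;
	}

The headers are needed so that the 32-bit KERNEXEC hunk in read_relocs() above can rebase r_offset by CONFIG_PAGE_OFFSET + p_paddr - p_vaddr for sections that fall inside a PT_LOAD segment.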
35561diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
35562index f40281e..92728c9 100644
35563--- a/arch/x86/um/mem_32.c
35564+++ b/arch/x86/um/mem_32.c
35565@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
35566 gate_vma.vm_start = FIXADDR_USER_START;
35567 gate_vma.vm_end = FIXADDR_USER_END;
35568 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
35569- gate_vma.vm_page_prot = __P101;
35570+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
35571
35572 return 0;
35573 }
35574diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
35575index 80ffa5b..a33bd15 100644
35576--- a/arch/x86/um/tls_32.c
35577+++ b/arch/x86/um/tls_32.c
35578@@ -260,7 +260,7 @@ out:
35579 if (unlikely(task == current &&
35580 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
35581 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
35582- "without flushed TLS.", current->pid);
35583+ "without flushed TLS.", task_pid_nr(current));
35584 }
35585
35586 return 0;
35587diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
35588index 5a4affe..9e2d522 100644
35589--- a/arch/x86/vdso/Makefile
35590+++ b/arch/x86/vdso/Makefile
35591@@ -174,7 +174,7 @@ quiet_cmd_vdso = VDSO $@
35592 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
35593 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
35594
35595-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35596+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35597 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
35598 GCOV_PROFILE := n
35599
35600diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
35601index 0224987..c7d65a5 100644
35602--- a/arch/x86/vdso/vdso2c.h
35603+++ b/arch/x86/vdso/vdso2c.h
35604@@ -12,7 +12,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
35605 unsigned long load_size = -1; /* Work around bogus warning */
35606 unsigned long mapping_size;
35607 ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
35608- int i;
35609+ unsigned int i;
35610 unsigned long j;
35611 ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
35612 *alt_sec = NULL;
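The int → unsigned int conversions that recur throughout this patch (here and in relocs.c above) give loop indices the same signedness as the ELF counts they are compared against. With 16-bit fields like e_shnum the integer promotions make a plain int harmless, but against a genuinely unsigned width the signed side is silently converted, which is the classic pitfall:

	/* A negative int converted to size_t becomes a huge value,
	 * inverting the apparent meaning of the comparison. */
	#include <stdio.h>
	#include <stddef.h>

	int main(void)
	{
		size_t count = 3;
		int i = -1;

		if (i < count)	/* i is converted to size_t here */
			printf("in range\n");
		else
			printf("out of range: i became %zu\n", (size_t)i);
		return 0;
	}

This prints the "out of range" branch; unsigned indices avoid the trap and silence -Wsign-compare.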
35613diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
35614index e904c27..b9eaa03 100644
35615--- a/arch/x86/vdso/vdso32-setup.c
35616+++ b/arch/x86/vdso/vdso32-setup.c
35617@@ -14,6 +14,7 @@
35618 #include <asm/cpufeature.h>
35619 #include <asm/processor.h>
35620 #include <asm/vdso.h>
35621+#include <asm/mman.h>
35622
35623 #ifdef CONFIG_COMPAT_VDSO
35624 #define VDSO_DEFAULT 0
35625diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
35626index 1c9f750..cfddb1a 100644
35627--- a/arch/x86/vdso/vma.c
35628+++ b/arch/x86/vdso/vma.c
35629@@ -19,10 +19,7 @@
35630 #include <asm/page.h>
35631 #include <asm/hpet.h>
35632 #include <asm/desc.h>
35633-
35634-#if defined(CONFIG_X86_64)
35635-unsigned int __read_mostly vdso64_enabled = 1;
35636-#endif
35637+#include <asm/mman.h>
35638
35639 void __init init_vdso_image(const struct vdso_image *image)
35640 {
35641@@ -101,6 +98,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35642 .pages = no_pages,
35643 };
35644
35645+#ifdef CONFIG_PAX_RANDMMAP
35646+ if (mm->pax_flags & MF_PAX_RANDMMAP)
35647+ calculate_addr = false;
35648+#endif
35649+
35650 if (calculate_addr) {
35651 addr = vdso_addr(current->mm->start_stack,
35652 image->size - image->sym_vvar_start);
35653@@ -111,14 +113,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35654 down_write(&mm->mmap_sem);
35655
35656 addr = get_unmapped_area(NULL, addr,
35657- image->size - image->sym_vvar_start, 0, 0);
35658+ image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
35659 if (IS_ERR_VALUE(addr)) {
35660 ret = addr;
35661 goto up_fail;
35662 }
35663
35664 text_start = addr - image->sym_vvar_start;
35665- current->mm->context.vdso = (void __user *)text_start;
35666+ mm->context.vdso = text_start;
35667
35668 /*
35669 * MAYWRITE to allow gdb to COW and set breakpoints
35670@@ -163,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35671 hpet_address >> PAGE_SHIFT,
35672 PAGE_SIZE,
35673 pgprot_noncached(PAGE_READONLY));
35674-
35675- if (ret)
35676- goto up_fail;
35677 }
35678 #endif
35679
35680 up_fail:
35681 if (ret)
35682- current->mm->context.vdso = NULL;
35683+ current->mm->context.vdso = 0;
35684
35685 up_write(&mm->mmap_sem);
35686 return ret;
35687@@ -191,8 +190,8 @@ static int load_vdso32(void)
35688
35689 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
35690 current_thread_info()->sysenter_return =
35691- current->mm->context.vdso +
35692- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
35693+ (void __force_user *)(current->mm->context.vdso +
35694+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
35695
35696 return 0;
35697 }
35698@@ -201,9 +200,6 @@ static int load_vdso32(void)
35699 #ifdef CONFIG_X86_64
35700 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35701 {
35702- if (!vdso64_enabled)
35703- return 0;
35704-
35705 return map_vdso(&vdso_image_64, true);
35706 }
35707
35708@@ -212,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
35709 int uses_interp)
35710 {
35711 #ifdef CONFIG_X86_X32_ABI
35712- if (test_thread_flag(TIF_X32)) {
35713- if (!vdso64_enabled)
35714- return 0;
35715-
35716+ if (test_thread_flag(TIF_X32))
35717 return map_vdso(&vdso_image_x32, true);
35718- }
35719 #endif
35720
35721 return load_vdso32();
35722@@ -231,15 +223,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35723 #endif
35724
35725 #ifdef CONFIG_X86_64
35726-static __init int vdso_setup(char *s)
35727-{
35728- vdso64_enabled = simple_strtoul(s, NULL, 0);
35729- return 0;
35730-}
35731-__setup("vdso=", vdso_setup);
35732-#endif
35733-
35734-#ifdef CONFIG_X86_64
35735 static void vgetcpu_cpu_init(void *arg)
35736 {
35737 int cpu = smp_processor_id();
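With the vdso= boot parameter and vdso64_enabled removed above, the vDSO is always mapped, and under PAX_RANDMMAP its placement is left to get_unmapped_area() rather than the stack-relative hint. User space never needs a fixed address either way: the kernel publishes the mapping through the auxiliary vector, which a program can query directly:

	/* Prints the vDSO base handed to this process via the aux vector. */
	#include <stdio.h>
	#include <sys/auxv.h>

	int main(void)
	{
		unsigned long base = getauxval(AT_SYSINFO_EHDR);

		if (base)
			printf("vDSO mapped at %#lx\n", base);
		else
			printf("no vDSO in the auxiliary vector\n");
		return 0;
	}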
35738diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
35739index e88fda8..76ce7ce 100644
35740--- a/arch/x86/xen/Kconfig
35741+++ b/arch/x86/xen/Kconfig
35742@@ -9,6 +9,7 @@ config XEN
35743 select XEN_HAVE_PVMMU
35744 depends on X86_64 || (X86_32 && X86_PAE)
35745 depends on X86_TSC
35746+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
35747 help
35748 This is the Linux Xen port. Enabling this will allow the
35749 kernel to boot in a paravirtualized environment under the
35750diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
35751index 78a881b..9994bbb 100644
35752--- a/arch/x86/xen/enlighten.c
35753+++ b/arch/x86/xen/enlighten.c
35754@@ -125,8 +125,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
35755
35756 struct shared_info xen_dummy_shared_info;
35757
35758-void *xen_initial_gdt;
35759-
35760 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
35761 __read_mostly int xen_have_vector_callback;
35762 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
35763@@ -544,8 +542,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
35764 {
35765 unsigned long va = dtr->address;
35766 unsigned int size = dtr->size + 1;
35767- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35768- unsigned long frames[pages];
35769+ unsigned long frames[65536 / PAGE_SIZE];
35770 int f;
35771
35772 /*
35773@@ -593,8 +590,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35774 {
35775 unsigned long va = dtr->address;
35776 unsigned int size = dtr->size + 1;
35777- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35778- unsigned long frames[pages];
35779+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
35780 int f;
35781
35782 /*
35783@@ -602,7 +598,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35784 * 8-byte entries, or 16 4k pages..
35785 */
35786
35787- BUG_ON(size > 65536);
35788+ BUG_ON(size > GDT_SIZE);
35789 BUG_ON(va & ~PAGE_MASK);
35790
35791 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
35792@@ -991,7 +987,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
35793 return 0;
35794 }
35795
35796-static void set_xen_basic_apic_ops(void)
35797+static void __init set_xen_basic_apic_ops(void)
35798 {
35799 apic->read = xen_apic_read;
35800 apic->write = xen_apic_write;
35801@@ -1291,30 +1287,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
35802 #endif
35803 };
35804
35805-static void xen_reboot(int reason)
35806+static __noreturn void xen_reboot(int reason)
35807 {
35808 struct sched_shutdown r = { .reason = reason };
35809
35810- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
35811- BUG();
35812+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
35813+ BUG();
35814 }
35815
35816-static void xen_restart(char *msg)
35817+static __noreturn void xen_restart(char *msg)
35818 {
35819 xen_reboot(SHUTDOWN_reboot);
35820 }
35821
35822-static void xen_emergency_restart(void)
35823+static __noreturn void xen_emergency_restart(void)
35824 {
35825 xen_reboot(SHUTDOWN_reboot);
35826 }
35827
35828-static void xen_machine_halt(void)
35829+static __noreturn void xen_machine_halt(void)
35830 {
35831 xen_reboot(SHUTDOWN_poweroff);
35832 }
35833
35834-static void xen_machine_power_off(void)
35835+static __noreturn void xen_machine_power_off(void)
35836 {
35837 if (pm_power_off)
35838 pm_power_off();
35839@@ -1467,8 +1463,11 @@ static void __ref xen_setup_gdt(int cpu)
35840 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
35841 pv_cpu_ops.load_gdt = xen_load_gdt_boot;
35842
35843- setup_stack_canary_segment(0);
35844- switch_to_new_gdt(0);
35845+ setup_stack_canary_segment(cpu);
35846+#ifdef CONFIG_X86_64
35847+ load_percpu_segment(cpu);
35848+#endif
35849+ switch_to_new_gdt(cpu);
35850
35851 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
35852 pv_cpu_ops.load_gdt = xen_load_gdt;
35853@@ -1583,7 +1582,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
35854 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
35855
35856 /* Work out if we support NX */
35857- x86_configure_nx();
35858+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
35859+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
35860+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
35861+ unsigned l, h;
35862+
35863+ __supported_pte_mask |= _PAGE_NX;
35864+ rdmsr(MSR_EFER, l, h);
35865+ l |= EFER_NX;
35866+ wrmsr(MSR_EFER, l, h);
35867+ }
35868+#endif
35869
35870 /* Get mfn list */
35871 xen_build_dynamic_phys_to_machine();
35872@@ -1611,13 +1620,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
35873
35874 machine_ops = xen_machine_ops;
35875
35876- /*
35877- * The only reliable way to retain the initial address of the
35878- * percpu gdt_page is to remember it here, so we can go and
35879- * mark it RW later, when the initial percpu area is freed.
35880- */
35881- xen_initial_gdt = &per_cpu(gdt_page, 0);
35882-
35883 xen_smp_init();
35884
35885 #ifdef CONFIG_ACPI_NUMA
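Both xen_load_gdt() variants above drop a variable-length array whose size came from the descriptor-table limit and use a fixed worst-case bound instead, with BUG_ON() rejecting anything larger. The shape of the transformation, with hypothetical constants standing in for GDT_SIZE and PAGE_SIZE:

	/* Sketch: replace a stack VLA with a fixed worst-case array. */
	#include <assert.h>

	#define PAGE_SZ   4096
	#define MAX_BYTES 65536	/* stands in for GDT_SIZE */

	static void pin_frames(unsigned long addr, unsigned int size)
	{
		/* was: unsigned long frames[(size + PAGE_SZ - 1) / PAGE_SZ]; */
		unsigned long frames[(MAX_BYTES + PAGE_SZ - 1) / PAGE_SZ];
		unsigned int i, n = (size + PAGE_SZ - 1) / PAGE_SZ;

		assert(size <= MAX_BYTES);	/* the kernel uses BUG_ON() */
		for (i = 0; i < n; i++)
			frames[i] = addr + (unsigned long)i * PAGE_SZ;
	}

	int main(void)
	{
		pin_frames(0x100000UL, 8192);
		return 0;
	}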
35886diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
35887index 5c1f9ac..0e15f5c 100644
35888--- a/arch/x86/xen/mmu.c
35889+++ b/arch/x86/xen/mmu.c
35890@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
35891 return val;
35892 }
35893
35894-static pteval_t pte_pfn_to_mfn(pteval_t val)
35895+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
35896 {
35897 if (val & _PAGE_PRESENT) {
35898 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
35899@@ -1836,7 +1836,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35900 * L3_k[511] -> level2_fixmap_pgt */
35901 convert_pfn_mfn(level3_kernel_pgt);
35902
35903+ convert_pfn_mfn(level3_vmalloc_start_pgt);
35904+ convert_pfn_mfn(level3_vmalloc_end_pgt);
35905+ convert_pfn_mfn(level3_vmemmap_pgt);
35906 /* L3_k[511][506] -> level1_fixmap_pgt */
35907+ /* L3_k[511][507] -> level1_vsyscall_pgt */
35908 convert_pfn_mfn(level2_fixmap_pgt);
35909 }
35910 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
35911@@ -1861,11 +1865,16 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35912 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
35913 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
35914 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
35915+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
35916+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
35917+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
35918 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
35919 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
35920+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
35921 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
35922 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
35923 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
35924+ set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO);
35925
35926 /* Pin down new L4 */
35927 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
35928@@ -2049,6 +2058,7 @@ static void __init xen_post_allocator_init(void)
35929 pv_mmu_ops.set_pud = xen_set_pud;
35930 #if PAGETABLE_LEVELS == 4
35931 pv_mmu_ops.set_pgd = xen_set_pgd;
35932+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
35933 #endif
35934
35935 /* This will work as long as patching hasn't happened yet
35936@@ -2127,6 +2137,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
35937 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
35938 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
35939 .set_pgd = xen_set_pgd_hyper,
35940+ .set_pgd_batched = xen_set_pgd_hyper,
35941
35942 .alloc_pud = xen_alloc_pmd_init,
35943 .release_pud = xen_release_pmd_init,
35944diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
35945index 4c071ae..00e7049 100644
35946--- a/arch/x86/xen/smp.c
35947+++ b/arch/x86/xen/smp.c
35948@@ -288,17 +288,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
35949
35950 if (xen_pv_domain()) {
35951 if (!xen_feature(XENFEAT_writable_page_tables))
35952- /* We've switched to the "real" per-cpu gdt, so make
35953- * sure the old memory can be recycled. */
35954- make_lowmem_page_readwrite(xen_initial_gdt);
35955-
35956 #ifdef CONFIG_X86_32
35957 /*
35958 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
35959 * expects __USER_DS
35960 */
35961- loadsegment(ds, __USER_DS);
35962- loadsegment(es, __USER_DS);
35963+ loadsegment(ds, __KERNEL_DS);
35964+ loadsegment(es, __KERNEL_DS);
35965 #endif
35966
35967 xen_filter_cpu_maps();
35968@@ -379,7 +375,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35969 #ifdef CONFIG_X86_32
35970 /* Note: PVH is not yet supported on x86_32. */
35971 ctxt->user_regs.fs = __KERNEL_PERCPU;
35972- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
35973+ savesegment(gs, ctxt->user_regs.gs);
35974 #endif
35975 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
35976
35977@@ -387,8 +383,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35978 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
35979 ctxt->flags = VGCF_IN_KERNEL;
35980 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
35981- ctxt->user_regs.ds = __USER_DS;
35982- ctxt->user_regs.es = __USER_DS;
35983+ ctxt->user_regs.ds = __KERNEL_DS;
35984+ ctxt->user_regs.es = __KERNEL_DS;
35985 ctxt->user_regs.ss = __KERNEL_DS;
35986
35987 xen_copy_trap_info(ctxt->trap_ctxt);
35988@@ -446,14 +442,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
35989 int rc;
35990
35991 per_cpu(current_task, cpu) = idle;
35992+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
35993 #ifdef CONFIG_X86_32
35994 irq_ctx_init(cpu);
35995 #else
35996 clear_tsk_thread_flag(idle, TIF_FORK);
35997 #endif
35998- per_cpu(kernel_stack, cpu) =
35999- (unsigned long)task_stack_page(idle) -
36000- KERNEL_STACK_OFFSET + THREAD_SIZE;
36001+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
36002
36003 xen_setup_runstate_info(cpu);
36004 xen_setup_timer(cpu);
36005@@ -732,7 +727,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
36006
36007 void __init xen_smp_init(void)
36008 {
36009- smp_ops = xen_smp_ops;
36010+	memcpy((void *)&smp_ops, &xen_smp_ops, sizeof(smp_ops));
36011 xen_fill_possible_map();
36012 }
36013
36014diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
36015index fd92a64..1f72641 100644
36016--- a/arch/x86/xen/xen-asm_32.S
36017+++ b/arch/x86/xen/xen-asm_32.S
36018@@ -99,7 +99,7 @@ ENTRY(xen_iret)
36019 pushw %fs
36020 movl $(__KERNEL_PERCPU), %eax
36021 movl %eax, %fs
36022- movl %fs:xen_vcpu, %eax
36023+ mov PER_CPU_VAR(xen_vcpu), %eax
36024 POP_FS
36025 #else
36026 movl %ss:xen_vcpu, %eax
36027diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
36028index 674b2225..f1f5dc1 100644
36029--- a/arch/x86/xen/xen-head.S
36030+++ b/arch/x86/xen/xen-head.S
36031@@ -39,6 +39,17 @@ ENTRY(startup_xen)
36032 #ifdef CONFIG_X86_32
36033 mov %esi,xen_start_info
36034 mov $init_thread_union+THREAD_SIZE,%esp
36035+#ifdef CONFIG_SMP
36036+ movl $cpu_gdt_table,%edi
36037+ movl $__per_cpu_load,%eax
36038+ movw %ax,__KERNEL_PERCPU + 2(%edi)
36039+ rorl $16,%eax
36040+ movb %al,__KERNEL_PERCPU + 4(%edi)
36041+ movb %ah,__KERNEL_PERCPU + 7(%edi)
36042+ movl $__per_cpu_end - 1,%eax
36043+ subl $__per_cpu_start,%eax
36044+ movw %ax,__KERNEL_PERCPU + 0(%edi)
36045+#endif
36046 #else
36047 mov %rsi,xen_start_info
36048 mov $init_thread_union+THREAD_SIZE,%rsp
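The CONFIG_SMP block added to the 32-bit startup path above patches the __KERNEL_PERCPU entry of cpu_gdt_table in place: the per-cpu area's limit goes into descriptor bytes 0-1 and its base into bytes 2-4 and 7 (after rorl has swapped the halves of %eax). The same layout expressed in C, per the standard x86 segment-descriptor encoding:

	/* Writes base/limit into an 8-byte x86 segment descriptor at the
	 * same offsets the assembly pokes; bytes 5-6 (access byte, flags
	 * and limit[19:16]) are deliberately left untouched. */
	#include <stdint.h>

	static void set_base_limit(uint8_t desc[8], uint32_t base, uint32_t limit)
	{
		desc[0] = limit & 0xff;		/* limit[7:0]  */
		desc[1] = (limit >> 8) & 0xff;	/* limit[15:8] */
		desc[2] = base & 0xff;		/* base[7:0]   */
		desc[3] = (base >> 8) & 0xff;	/* base[15:8]  */
		desc[4] = (base >> 16) & 0xff;	/* base[23:16] */
		desc[7] = (base >> 24) & 0xff;	/* base[31:24] */
	}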
36049diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
36050index 5686bd9..0c8b6ee 100644
36051--- a/arch/x86/xen/xen-ops.h
36052+++ b/arch/x86/xen/xen-ops.h
36053@@ -10,8 +10,6 @@
36054 extern const char xen_hypervisor_callback[];
36055 extern const char xen_failsafe_callback[];
36056
36057-extern void *xen_initial_gdt;
36058-
36059 struct trap_info;
36060 void xen_copy_trap_info(struct trap_info *traps);
36061
36062diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
36063index 525bd3d..ef888b1 100644
36064--- a/arch/xtensa/variants/dc232b/include/variant/core.h
36065+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
36066@@ -119,9 +119,9 @@
36067 ----------------------------------------------------------------------*/
36068
36069 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
36070-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
36071 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
36072 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
36073+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36074
36075 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
36076 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
36077diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
36078index 2f33760..835e50a 100644
36079--- a/arch/xtensa/variants/fsf/include/variant/core.h
36080+++ b/arch/xtensa/variants/fsf/include/variant/core.h
36081@@ -11,6 +11,7 @@
36082 #ifndef _XTENSA_CORE_H
36083 #define _XTENSA_CORE_H
36084
36085+#include <linux/const.h>
36086
36087 /****************************************************************************
36088 Parameters Useful for Any Code, USER or PRIVILEGED
36089@@ -112,9 +113,9 @@
36090 ----------------------------------------------------------------------*/
36091
36092 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
36093-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
36094 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
36095 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
36096+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36097
36098 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
36099 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
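Deriving XCHAL_DCACHE_LINESIZE from the line width with _AC(1,UL) keeps the constant usable from both C (where it becomes 1UL) and assembly (where a UL suffix would not parse). <linux/const.h>, newly included above, defines the macro essentially as:

	/* From include/uapi/linux/const.h (abridged): the suffix Y is
	 * token-pasted in C and dropped entirely in assembly. */
	#ifdef __ASSEMBLY__
	#define _AC(X, Y)	X
	#else
	#define __AC(X, Y)	(X##Y)
	#define _AC(X, Y)	__AC(X, Y)
	#endif

	/* So (_AC(1,UL) << 5) is (1UL << 5) == 32, the D-cache line size. */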
36100diff --git a/block/bio.c b/block/bio.c
36101index 471d738..bd3da0d 100644
36102--- a/block/bio.c
36103+++ b/block/bio.c
36104@@ -1169,7 +1169,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
36105 /*
36106 * Overflow, abort
36107 */
36108- if (end < start)
36109+ if (end < start || end - start > INT_MAX - nr_pages)
36110 return ERR_PTR(-EINVAL);
36111
36112 nr_pages += end - start;
36113@@ -1303,7 +1303,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
36114 /*
36115 * Overflow, abort
36116 */
36117- if (end < start)
36118+ if (end < start || end - start > INT_MAX - nr_pages)
36119 return ERR_PTR(-EINVAL);
36120
36121 nr_pages += end - start;
36122@@ -1565,7 +1565,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
36123 const int read = bio_data_dir(bio) == READ;
36124 struct bio_map_data *bmd = bio->bi_private;
36125 int i;
36126- char *p = bmd->sgvecs[0].iov_base;
36127+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
36128
36129 bio_for_each_segment_all(bvec, bio, i) {
36130 char *addr = page_address(bvec->bv_page);
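The strengthened check in bio_copy_user_iov()/__bio_map_user_iov() above rejects not just a wrapped range (end < start) but any page count that would push the int nr_pages accumulator past INT_MAX. The general idiom tests "sum + add > LIMIT" without ever computing the possibly overflowing sum:

	/* Overflow-safe accumulation check: true if sum + add would
	 * exceed INT_MAX (sum assumed non-negative). */
	#include <limits.h>
	#include <stdbool.h>
	#include <stdio.h>

	static bool would_overflow(int sum, unsigned long add)
	{
		return add > (unsigned long)(INT_MAX - sum);
	}

	int main(void)
	{
		printf("%d\n", would_overflow(10, 5));		/* 0: fits */
		printf("%d\n", would_overflow(INT_MAX - 1, 2));	/* 1: wraps */
		return 0;
	}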
36131diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
36132index 0736729..2ec3b48 100644
36133--- a/block/blk-iopoll.c
36134+++ b/block/blk-iopoll.c
36135@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
36136 }
36137 EXPORT_SYMBOL(blk_iopoll_complete);
36138
36139-static void blk_iopoll_softirq(struct softirq_action *h)
36140+static __latent_entropy void blk_iopoll_softirq(void)
36141 {
36142 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
36143 int rearm = 0, budget = blk_iopoll_budget;
36144diff --git a/block/blk-map.c b/block/blk-map.c
36145index f890d43..97b0482 100644
36146--- a/block/blk-map.c
36147+++ b/block/blk-map.c
36148@@ -300,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
36149 if (!len || !kbuf)
36150 return -EINVAL;
36151
36152- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
36153+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
36154 if (do_copy)
36155 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
36156 else
36157diff --git a/block/blk-mq.c b/block/blk-mq.c
36158index 447f533..da01de2 100644
36159--- a/block/blk-mq.c
36160+++ b/block/blk-mq.c
36161@@ -1456,7 +1456,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
36162
36163 do {
36164 page = alloc_pages_node(set->numa_node,
36165- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
36166+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
36167 this_order);
36168 if (page)
36169 break;
36170@@ -1478,8 +1478,6 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
36171 left -= to_do * rq_size;
36172 for (j = 0; j < to_do; j++) {
36173 tags->rqs[i] = p;
36174- tags->rqs[i]->atomic_flags = 0;
36175- tags->rqs[i]->cmd_flags = 0;
36176 if (set->ops->init_request) {
36177 if (set->ops->init_request(set->driver_data,
36178 tags->rqs[i], hctx_idx, i,
36179diff --git a/block/blk-softirq.c b/block/blk-softirq.c
36180index 53b1737..08177d2e 100644
36181--- a/block/blk-softirq.c
36182+++ b/block/blk-softirq.c
36183@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
36184 * Softirq action handler - move entries to local list and loop over them
36185 * while passing them to the queue registered handler.
36186 */
36187-static void blk_done_softirq(struct softirq_action *h)
36188+static __latent_entropy void blk_done_softirq(void)
36189 {
36190 struct list_head *cpu_list, local_list;
36191
36192diff --git a/block/bsg.c b/block/bsg.c
36193index 276e869..6fe4c61 100644
36194--- a/block/bsg.c
36195+++ b/block/bsg.c
36196@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
36197 struct sg_io_v4 *hdr, struct bsg_device *bd,
36198 fmode_t has_write_perm)
36199 {
36200+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36201+ unsigned char *cmdptr;
36202+
36203 if (hdr->request_len > BLK_MAX_CDB) {
36204 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
36205 if (!rq->cmd)
36206 return -ENOMEM;
36207- }
36208+ cmdptr = rq->cmd;
36209+ } else
36210+ cmdptr = tmpcmd;
36211
36212- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
36213+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
36214 hdr->request_len))
36215 return -EFAULT;
36216
36217+ if (cmdptr != rq->cmd)
36218+ memcpy(rq->cmd, cmdptr, hdr->request_len);
36219+
36220 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
36221 if (blk_verify_command(rq->cmd, has_write_perm))
36222 return -EPERM;
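The tmpcmd staging above keeps copy_from_user() from writing straight into the command buffer embedded in struct request: for short commands the user bytes land in a stack buffer first and are memcpy()ed over only once the copy has fully succeeded, which presumably also keeps the user copy out of the slab-resident object for the benefit of PaX's USERCOPY checking. A user-space rendition of the pattern (names hypothetical):

	/* Stage untrusted input locally; commit to the live buffer
	 * only after the whole copy succeeds. */
	#include <string.h>

	#define CMD_MAX 32

	/* copy_in stands in for copy_from_user(): nonzero means failure. */
	static int commit_cmd(unsigned char *live_cmd,
			      int (*copy_in)(void *, const void *, size_t),
			      const void *user_src, size_t len)
	{
		unsigned char tmp[CMD_MAX];

		if (len > sizeof(tmp))
			return -1;
		if (copy_in(tmp, user_src, len))
			return -1;		/* live_cmd never touched */
		memcpy(live_cmd, tmp, len);	/* all-or-nothing commit */
		return 0;
	}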
36223diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
36224index f678c73..f35aa18 100644
36225--- a/block/compat_ioctl.c
36226+++ b/block/compat_ioctl.c
36227@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
36228 cgc = compat_alloc_user_space(sizeof(*cgc));
36229 cgc32 = compat_ptr(arg);
36230
36231- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
36232+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
36233 get_user(data, &cgc32->buffer) ||
36234 put_user(compat_ptr(data), &cgc->buffer) ||
36235 copy_in_user(&cgc->buflen, &cgc32->buflen,
36236@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
36237 err |= __get_user(f->spec1, &uf->spec1);
36238 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
36239 err |= __get_user(name, &uf->name);
36240- f->name = compat_ptr(name);
36241+ f->name = (void __force_kernel *)compat_ptr(name);
36242 if (err) {
36243 err = -EFAULT;
36244 goto out;
36245diff --git a/block/genhd.c b/block/genhd.c
36246index 0a536dc..b8f7aca 100644
36247--- a/block/genhd.c
36248+++ b/block/genhd.c
36249@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
36250
36251 /*
36252 * Register device numbers dev..(dev+range-1)
36253- * range must be nonzero
36254+ * No-op if @range is zero.
36255 * The hash chain is sorted on range, so that subranges can override.
36256 */
36257 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
36258 struct kobject *(*probe)(dev_t, int *, void *),
36259 int (*lock)(dev_t, void *), void *data)
36260 {
36261- kobj_map(bdev_map, devt, range, module, probe, lock, data);
36262+ if (range)
36263+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
36264 }
36265
36266 EXPORT_SYMBOL(blk_register_region);
36267
36268+/* undo blk_register_region(); no-op if @range is zero */
36269 void blk_unregister_region(dev_t devt, unsigned long range)
36270 {
36271- kobj_unmap(bdev_map, devt, range);
36272+ if (range)
36273+ kobj_unmap(bdev_map, devt, range);
36274 }
36275
36276 EXPORT_SYMBOL(blk_unregister_region);
36277diff --git a/block/partitions/efi.c b/block/partitions/efi.c
36278index 56d08fd..2e07090 100644
36279--- a/block/partitions/efi.c
36280+++ b/block/partitions/efi.c
36281@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
36282 if (!gpt)
36283 return NULL;
36284
36285+ if (!le32_to_cpu(gpt->num_partition_entries))
36286+ return NULL;
36287+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
36288+ if (!pte)
36289+ return NULL;
36290+
36291 count = le32_to_cpu(gpt->num_partition_entries) *
36292 le32_to_cpu(gpt->sizeof_partition_entry);
36293- if (!count)
36294- return NULL;
36295- pte = kmalloc(count, GFP_KERNEL);
36296- if (!pte)
36297- return NULL;
36298-
36299 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
36300 (u8 *) pte, count) < count) {
36301 kfree(pte);
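Replacing kmalloc(n * size) with kcalloc(n, size) moves the multiplication behind an overflow check, so a GPT header advertising a huge num_partition_entries can no longer wrap count into a small allocation that the subsequent read_lba() would overrun. The guard kcalloc() applies is essentially:

	/* Sketch of the multiplication guard behind kcalloc()/calloc(). */
	#include <stdint.h>
	#include <stdlib.h>

	static void *checked_calloc(size_t n, size_t size)
	{
		if (size != 0 && n > SIZE_MAX / size)
			return NULL;	/* n * size would wrap */
		return calloc(n, size);
	}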
36302diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
36303index 28163fa..07190a06 100644
36304--- a/block/scsi_ioctl.c
36305+++ b/block/scsi_ioctl.c
36306@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
36307 return put_user(0, p);
36308 }
36309
36310-static int sg_get_timeout(struct request_queue *q)
36311+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
36312 {
36313 return jiffies_to_clock_t(q->sg_timeout);
36314 }
36315@@ -227,8 +227,20 @@ EXPORT_SYMBOL(blk_verify_command);
36316 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
36317 struct sg_io_hdr *hdr, fmode_t mode)
36318 {
36319- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
36320+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36321+ unsigned char *cmdptr;
36322+
36323+ if (rq->cmd != rq->__cmd)
36324+ cmdptr = rq->cmd;
36325+ else
36326+ cmdptr = tmpcmd;
36327+
36328+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
36329 return -EFAULT;
36330+
36331+ if (cmdptr != rq->cmd)
36332+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
36333+
36334 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
36335 return -EPERM;
36336
36337@@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36338 int err;
36339 unsigned int in_len, out_len, bytes, opcode, cmdlen;
36340 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
36341+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36342+ unsigned char *cmdptr;
36343
36344 if (!sic)
36345 return -EINVAL;
36346@@ -469,9 +483,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36347 */
36348 err = -EFAULT;
36349 rq->cmd_len = cmdlen;
36350- if (copy_from_user(rq->cmd, sic->data, cmdlen))
36351+
36352+ if (rq->cmd != rq->__cmd)
36353+ cmdptr = rq->cmd;
36354+ else
36355+ cmdptr = tmpcmd;
36356+
36357+ if (copy_from_user(cmdptr, sic->data, cmdlen))
36358 goto error;
36359
36360+ if (rq->cmd != cmdptr)
36361+ memcpy(rq->cmd, cmdptr, cmdlen);
36362+
36363 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
36364 goto error;
36365
36366diff --git a/crypto/cryptd.c b/crypto/cryptd.c
36367index 650afac1..f3307de 100644
36368--- a/crypto/cryptd.c
36369+++ b/crypto/cryptd.c
36370@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
36371
36372 struct cryptd_blkcipher_request_ctx {
36373 crypto_completion_t complete;
36374-};
36375+} __no_const;
36376
36377 struct cryptd_hash_ctx {
36378 struct crypto_shash *child;
36379@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
36380
36381 struct cryptd_aead_request_ctx {
36382 crypto_completion_t complete;
36383-};
36384+} __no_const;
36385
36386 static void cryptd_queue_worker(struct work_struct *work);
36387
36388diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
36389index c305d41..a96de79 100644
36390--- a/crypto/pcrypt.c
36391+++ b/crypto/pcrypt.c
36392@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
36393 int ret;
36394
36395 pinst->kobj.kset = pcrypt_kset;
36396- ret = kobject_add(&pinst->kobj, NULL, name);
36397+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
36398 if (!ret)
36399 kobject_uevent(&pinst->kobj, KOBJ_ADD);
36400
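kobject_add() takes a printf-style format string, so handing it the caller-supplied instance name directly lets any % sequences in that name be interpreted as conversion directives; routing it through "%s" makes the name pure data. The same bug in miniature:

	/* Format-string bug: user input must be data, never the format. */
	#include <stdio.h>

	static void log_name(const char *name)
	{
		/* BAD:  printf(name);  a name containing %s or %n is
		 *       parsed as conversion directives. */
		printf("%s\n", name);	/* GOOD: name is plain data */
	}

	int main(void)
	{
		log_name("pcrypt-%n-oops");
		return 0;
	}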
36401diff --git a/crypto/zlib.c b/crypto/zlib.c
36402index 0eefa9d..0fa3d29 100644
36403--- a/crypto/zlib.c
36404+++ b/crypto/zlib.c
36405@@ -95,10 +95,10 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params,
36406 zlib_comp_exit(ctx);
36407
36408 window_bits = tb[ZLIB_COMP_WINDOWBITS]
36409- ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS])
36410+ ? nla_get_s32(tb[ZLIB_COMP_WINDOWBITS])
36411 : MAX_WBITS;
36412 mem_level = tb[ZLIB_COMP_MEMLEVEL]
36413- ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL])
36414+ ? nla_get_s32(tb[ZLIB_COMP_MEMLEVEL])
36415 : DEF_MEM_LEVEL;
36416
36417 workspacesize = zlib_deflate_workspacesize(window_bits, mem_level);
36418diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
36419index 6921c7f..78e1af7 100644
36420--- a/drivers/acpi/acpica/hwxfsleep.c
36421+++ b/drivers/acpi/acpica/hwxfsleep.c
36422@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
36423 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
36424
36425 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
36426- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36427- acpi_hw_extended_sleep},
36428- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36429- acpi_hw_extended_wake_prep},
36430- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
36431+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36432+ .extended_function = acpi_hw_extended_sleep},
36433+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36434+ .extended_function = acpi_hw_extended_wake_prep},
36435+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
36436+ .extended_function = acpi_hw_extended_wake}
36437 };
36438
36439 /*
36440diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
36441index 16129c7..8b675cd 100644
36442--- a/drivers/acpi/apei/apei-internal.h
36443+++ b/drivers/acpi/apei/apei-internal.h
36444@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
36445 struct apei_exec_ins_type {
36446 u32 flags;
36447 apei_exec_ins_func_t run;
36448-};
36449+} __do_const;
36450
36451 struct apei_exec_context {
36452 u32 ip;
36453diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
36454index e82d097..0c855c1 100644
36455--- a/drivers/acpi/apei/ghes.c
36456+++ b/drivers/acpi/apei/ghes.c
36457@@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
36458 const struct acpi_hest_generic *generic,
36459 const struct acpi_hest_generic_status *estatus)
36460 {
36461- static atomic_t seqno;
36462+ static atomic_unchecked_t seqno;
36463 unsigned int curr_seqno;
36464 char pfx_seq[64];
36465
36466@@ -489,7 +489,7 @@ static void __ghes_print_estatus(const char *pfx,
36467 else
36468 pfx = KERN_ERR;
36469 }
36470- curr_seqno = atomic_inc_return(&seqno);
36471+ curr_seqno = atomic_inc_return_unchecked(&seqno);
36472 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
36473 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
36474 pfx_seq, generic->header.source_id);
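Under the REFCOUNT hardening this patch set introduces, plain atomic_t operations trap on overflow to stop reference-count wraparound exploits; counters that are genuinely allowed to wrap, like this error-report sequence number, are switched to atomic_unchecked_t to opt out. A kernel-context sketch of the pattern (not standalone; the _unchecked API is provided by the patch itself):

	/* A wrapping sequence counter declared _unchecked so the
	 * REFCOUNT overflow trap never fires on rollover. */
	static atomic_unchecked_t seqno = ATOMIC_INIT(0);

	static unsigned int next_seqno(void)
	{
		return atomic_inc_return_unchecked(&seqno);	/* may wrap */
	}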
36475diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
36476index a83e3c6..c3d617f 100644
36477--- a/drivers/acpi/bgrt.c
36478+++ b/drivers/acpi/bgrt.c
36479@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
36480 if (!bgrt_image)
36481 return -ENODEV;
36482
36483- bin_attr_image.private = bgrt_image;
36484- bin_attr_image.size = bgrt_image_size;
36485+ pax_open_kernel();
36486+ *(void **)&bin_attr_image.private = bgrt_image;
36487+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
36488+ pax_close_kernel();
36489
36490 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
36491 if (!bgrt_kobj)
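With the patch's CONSTIFY/KERNEXEC protections, objects like bin_attr_image live in read-only memory, so the init code above briefly opens a write window with pax_open_kernel(), writes through casts that strip the const qualifier, and seals the window again with pax_close_kernel(). The recurring shape, as a kernel-context sketch (ro_obj, payload and payload_size are placeholders):

	/* PaX write-to-rodata pattern used throughout this patch. */
	pax_open_kernel();
	*(void **)&ro_obj.private = payload;	/* cast defeats const */
	*(size_t *)&ro_obj.size   = payload_size;
	pax_close_kernel();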
36492diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
36493index 9b693d5..8953d54 100644
36494--- a/drivers/acpi/blacklist.c
36495+++ b/drivers/acpi/blacklist.c
36496@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
36497 u32 is_critical_error;
36498 };
36499
36500-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
36501+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
36502
36503 /*
36504 * POLICY: If *anything* doesn't work, put it on the blacklist.
36505@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
36506 return 0;
36507 }
36508
36509-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
36510+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
36511 {
36512 .callback = dmi_disable_osi_vista,
36513 .ident = "Fujitsu Siemens",
36514diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
36515index c68e724..e863008 100644
36516--- a/drivers/acpi/custom_method.c
36517+++ b/drivers/acpi/custom_method.c
36518@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
36519 struct acpi_table_header table;
36520 acpi_status status;
36521
36522+#ifdef CONFIG_GRKERNSEC_KMEM
36523+ return -EPERM;
36524+#endif
36525+
36526 if (!(*ppos)) {
36527 /* parse the table header to get the table length */
36528 if (count <= sizeof(struct acpi_table_header))
36529diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
36530index c0d44d3..5ad8f9a 100644
36531--- a/drivers/acpi/device_pm.c
36532+++ b/drivers/acpi/device_pm.c
36533@@ -1025,6 +1025,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
36534
36535 #endif /* CONFIG_PM_SLEEP */
36536
36537+static void acpi_dev_pm_detach(struct device *dev, bool power_off);
36538+
36539 static struct dev_pm_domain acpi_general_pm_domain = {
36540 .ops = {
36541 #ifdef CONFIG_PM
36542@@ -1043,6 +1045,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
36543 #endif
36544 #endif
36545 },
36546+ .detach = acpi_dev_pm_detach
36547 };
36548
36549 /**
36550@@ -1112,7 +1115,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
36551 acpi_device_wakeup(adev, ACPI_STATE_S0, false);
36552 }
36553
36554- dev->pm_domain->detach = acpi_dev_pm_detach;
36555 return 0;
36556 }
36557 EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
36558diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
36559index b27ab7a..275b1b6 100644
36560--- a/drivers/acpi/processor_idle.c
36561+++ b/drivers/acpi/processor_idle.c
36562@@ -952,7 +952,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
36563 {
36564 int i, count = CPUIDLE_DRIVER_STATE_START;
36565 struct acpi_processor_cx *cx;
36566- struct cpuidle_state *state;
36567+ cpuidle_state_no_const *state;
36568 struct cpuidle_driver *drv = &acpi_idle_driver;
36569
36570 if (!pr->flags.power_setup_done)
36571diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
36572index 13e577c..cef11ee 100644
36573--- a/drivers/acpi/sysfs.c
36574+++ b/drivers/acpi/sysfs.c
36575@@ -423,11 +423,11 @@ static u32 num_counters;
36576 static struct attribute **all_attrs;
36577 static u32 acpi_gpe_count;
36578
36579-static struct attribute_group interrupt_stats_attr_group = {
36580+static attribute_group_no_const interrupt_stats_attr_group = {
36581 .name = "interrupts",
36582 };
36583
36584-static struct kobj_attribute *counter_attrs;
36585+static kobj_attribute_no_const *counter_attrs;
36586
36587 static void delete_gpe_attr_array(void)
36588 {
36589diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
36590index 61a9c07..ea98fa1 100644
36591--- a/drivers/ata/libahci.c
36592+++ b/drivers/ata/libahci.c
36593@@ -1252,7 +1252,7 @@ int ahci_kick_engine(struct ata_port *ap)
36594 }
36595 EXPORT_SYMBOL_GPL(ahci_kick_engine);
36596
36597-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36598+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36599 struct ata_taskfile *tf, int is_cmd, u16 flags,
36600 unsigned long timeout_msec)
36601 {
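The __intentional_overflow(-1) annotation here (and on pte_pfn_to_mfn() and sg_get_timeout() earlier) is a marker consumed by the patch set's size_overflow gcc plugin: it declares that arithmetic involving the function may legitimately wrap, with -1 designating the return value. Its definition is not shown in this section; as an assumption based on the plugin's conventions it is roughly:

	/* Rough shape (assumed): expands to a plugin-visible attribute
	 * when the size_overflow plugin is enabled, otherwise to nothing.
	 * SIZE_OVERFLOW_PLUGIN is a placeholder guard. */
	#ifdef SIZE_OVERFLOW_PLUGIN
	#define __intentional_overflow(...) \
		__attribute__((intentional_overflow(__VA_ARGS__)))
	#else
	#define __intentional_overflow(...)
	#endif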
36602diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
36603index 00f2f74..efd8b7d 100644
36604--- a/drivers/ata/libata-core.c
36605+++ b/drivers/ata/libata-core.c
36606@@ -99,7 +99,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
36607 static void ata_dev_xfermask(struct ata_device *dev);
36608 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
36609
36610-atomic_t ata_print_id = ATOMIC_INIT(0);
36611+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
36612
36613 struct ata_force_param {
36614 const char *name;
36615@@ -4842,7 +4842,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
36616 struct ata_port *ap;
36617 unsigned int tag;
36618
36619- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36620+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36621 ap = qc->ap;
36622
36623 qc->flags = 0;
36624@@ -4858,7 +4858,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
36625 struct ata_port *ap;
36626 struct ata_link *link;
36627
36628- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36629+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36630 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
36631 ap = qc->ap;
36632 link = qc->dev->link;
36633@@ -5962,6 +5962,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36634 return;
36635
36636 spin_lock(&lock);
36637+ pax_open_kernel();
36638
36639 for (cur = ops->inherits; cur; cur = cur->inherits) {
36640 void **inherit = (void **)cur;
36641@@ -5975,8 +5976,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36642 if (IS_ERR(*pp))
36643 *pp = NULL;
36644
36645- ops->inherits = NULL;
36646+ *(struct ata_port_operations **)&ops->inherits = NULL;
36647
36648+ pax_close_kernel();
36649 spin_unlock(&lock);
36650 }
36651
36652@@ -6172,7 +6174,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
36653
36654 /* give ports names and add SCSI hosts */
36655 for (i = 0; i < host->n_ports; i++) {
36656- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
36657+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
36658 host->ports[i]->local_port_no = i + 1;
36659 }
36660
36661diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
36662index 6abd17a..9961bf7 100644
36663--- a/drivers/ata/libata-scsi.c
36664+++ b/drivers/ata/libata-scsi.c
36665@@ -4169,7 +4169,7 @@ int ata_sas_port_init(struct ata_port *ap)
36666
36667 if (rc)
36668 return rc;
36669- ap->print_id = atomic_inc_return(&ata_print_id);
36670+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
36671 return 0;
36672 }
36673 EXPORT_SYMBOL_GPL(ata_sas_port_init);
36674diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
36675index 5f4e0cc..ff2c347 100644
36676--- a/drivers/ata/libata.h
36677+++ b/drivers/ata/libata.h
36678@@ -53,7 +53,7 @@ enum {
36679 ATA_DNXFER_QUIET = (1 << 31),
36680 };
36681
36682-extern atomic_t ata_print_id;
36683+extern atomic_unchecked_t ata_print_id;
36684 extern int atapi_passthru16;
36685 extern int libata_fua;
36686 extern int libata_noacpi;
36687diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
36688index a9b0c82..207d97d 100644
36689--- a/drivers/ata/pata_arasan_cf.c
36690+++ b/drivers/ata/pata_arasan_cf.c
36691@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
36692 /* Handle platform specific quirks */
36693 if (quirk) {
36694 if (quirk & CF_BROKEN_PIO) {
36695- ap->ops->set_piomode = NULL;
36696+ pax_open_kernel();
36697+ *(void **)&ap->ops->set_piomode = NULL;
36698+ pax_close_kernel();
36699 ap->pio_mask = 0;
36700 }
36701 if (quirk & CF_BROKEN_MWDMA)
36702diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
36703index f9b983a..887b9d8 100644
36704--- a/drivers/atm/adummy.c
36705+++ b/drivers/atm/adummy.c
36706@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
36707 vcc->pop(vcc, skb);
36708 else
36709 dev_kfree_skb_any(skb);
36710- atomic_inc(&vcc->stats->tx);
36711+ atomic_inc_unchecked(&vcc->stats->tx);
36712
36713 return 0;
36714 }
36715diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
36716index f1a9198..f466a4a 100644
36717--- a/drivers/atm/ambassador.c
36718+++ b/drivers/atm/ambassador.c
36719@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
36720 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
36721
36722 // VC layer stats
36723- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36724+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36725
36726 // free the descriptor
36727 kfree (tx_descr);
36728@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36729 dump_skb ("<<<", vc, skb);
36730
36731 // VC layer stats
36732- atomic_inc(&atm_vcc->stats->rx);
36733+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36734 __net_timestamp(skb);
36735 // end of our responsibility
36736 atm_vcc->push (atm_vcc, skb);
36737@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36738 } else {
36739 PRINTK (KERN_INFO, "dropped over-size frame");
36740 // should we count this?
36741- atomic_inc(&atm_vcc->stats->rx_drop);
36742+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36743 }
36744
36745 } else {
36746@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
36747 }
36748
36749 if (check_area (skb->data, skb->len)) {
36750- atomic_inc(&atm_vcc->stats->tx_err);
36751+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
36752 return -ENOMEM; // ?
36753 }
36754
36755diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
36756index 480fa6f..947067c 100644
36757--- a/drivers/atm/atmtcp.c
36758+++ b/drivers/atm/atmtcp.c
36759@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36760 if (vcc->pop) vcc->pop(vcc,skb);
36761 else dev_kfree_skb(skb);
36762 if (dev_data) return 0;
36763- atomic_inc(&vcc->stats->tx_err);
36764+ atomic_inc_unchecked(&vcc->stats->tx_err);
36765 return -ENOLINK;
36766 }
36767 size = skb->len+sizeof(struct atmtcp_hdr);
36768@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36769 if (!new_skb) {
36770 if (vcc->pop) vcc->pop(vcc,skb);
36771 else dev_kfree_skb(skb);
36772- atomic_inc(&vcc->stats->tx_err);
36773+ atomic_inc_unchecked(&vcc->stats->tx_err);
36774 return -ENOBUFS;
36775 }
36776 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
36777@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36778 if (vcc->pop) vcc->pop(vcc,skb);
36779 else dev_kfree_skb(skb);
36780 out_vcc->push(out_vcc,new_skb);
36781- atomic_inc(&vcc->stats->tx);
36782- atomic_inc(&out_vcc->stats->rx);
36783+ atomic_inc_unchecked(&vcc->stats->tx);
36784+ atomic_inc_unchecked(&out_vcc->stats->rx);
36785 return 0;
36786 }
36787
36788@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36789 read_unlock(&vcc_sklist_lock);
36790 if (!out_vcc) {
36791 result = -EUNATCH;
36792- atomic_inc(&vcc->stats->tx_err);
36793+ atomic_inc_unchecked(&vcc->stats->tx_err);
36794 goto done;
36795 }
36796 skb_pull(skb,sizeof(struct atmtcp_hdr));
36797@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36798 __net_timestamp(new_skb);
36799 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
36800 out_vcc->push(out_vcc,new_skb);
36801- atomic_inc(&vcc->stats->tx);
36802- atomic_inc(&out_vcc->stats->rx);
36803+ atomic_inc_unchecked(&vcc->stats->tx);
36804+ atomic_inc_unchecked(&out_vcc->stats->rx);
36805 done:
36806 if (vcc->pop) vcc->pop(vcc,skb);
36807 else dev_kfree_skb(skb);
36808diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
36809index c7fab3e..68d0965 100644
36810--- a/drivers/atm/eni.c
36811+++ b/drivers/atm/eni.c
36812@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
36813 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
36814 vcc->dev->number);
36815 length = 0;
36816- atomic_inc(&vcc->stats->rx_err);
36817+ atomic_inc_unchecked(&vcc->stats->rx_err);
36818 }
36819 else {
36820 length = ATM_CELL_SIZE-1; /* no HEC */
36821@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36822 size);
36823 }
36824 eff = length = 0;
36825- atomic_inc(&vcc->stats->rx_err);
36826+ atomic_inc_unchecked(&vcc->stats->rx_err);
36827 }
36828 else {
36829 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
36830@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36831 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
36832 vcc->dev->number,vcc->vci,length,size << 2,descr);
36833 length = eff = 0;
36834- atomic_inc(&vcc->stats->rx_err);
36835+ atomic_inc_unchecked(&vcc->stats->rx_err);
36836 }
36837 }
36838 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
36839@@ -770,7 +770,7 @@ rx_dequeued++;
36840 vcc->push(vcc,skb);
36841 pushed++;
36842 }
36843- atomic_inc(&vcc->stats->rx);
36844+ atomic_inc_unchecked(&vcc->stats->rx);
36845 }
36846 wake_up(&eni_dev->rx_wait);
36847 }
36848@@ -1230,7 +1230,7 @@ static void dequeue_tx(struct atm_dev *dev)
36849 PCI_DMA_TODEVICE);
36850 if (vcc->pop) vcc->pop(vcc,skb);
36851 else dev_kfree_skb_irq(skb);
36852- atomic_inc(&vcc->stats->tx);
36853+ atomic_inc_unchecked(&vcc->stats->tx);
36854 wake_up(&eni_dev->tx_wait);
36855 dma_complete++;
36856 }
36857diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
36858index 82f2ae0..f205c02 100644
36859--- a/drivers/atm/firestream.c
36860+++ b/drivers/atm/firestream.c
36861@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
36862 }
36863 }
36864
36865- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36866+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36867
36868 fs_dprintk (FS_DEBUG_TXMEM, "i");
36869 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
36870@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36871 #endif
36872 skb_put (skb, qe->p1 & 0xffff);
36873 ATM_SKB(skb)->vcc = atm_vcc;
36874- atomic_inc(&atm_vcc->stats->rx);
36875+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36876 __net_timestamp(skb);
36877 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
36878 atm_vcc->push (atm_vcc, skb);
36879@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36880 kfree (pe);
36881 }
36882 if (atm_vcc)
36883- atomic_inc(&atm_vcc->stats->rx_drop);
36884+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36885 break;
36886 case 0x1f: /* Reassembly abort: no buffers. */
36887 /* Silently increment error counter. */
36888 if (atm_vcc)
36889- atomic_inc(&atm_vcc->stats->rx_drop);
36890+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36891 break;
36892 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
36893 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
36894diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
36895index d5d9eaf..65c0d53 100644
36896--- a/drivers/atm/fore200e.c
36897+++ b/drivers/atm/fore200e.c
36898@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
36899 #endif
36900 /* check error condition */
36901 if (*entry->status & STATUS_ERROR)
36902- atomic_inc(&vcc->stats->tx_err);
36903+ atomic_inc_unchecked(&vcc->stats->tx_err);
36904 else
36905- atomic_inc(&vcc->stats->tx);
36906+ atomic_inc_unchecked(&vcc->stats->tx);
36907 }
36908 }
36909
36910@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36911 if (skb == NULL) {
36912 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
36913
36914- atomic_inc(&vcc->stats->rx_drop);
36915+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36916 return -ENOMEM;
36917 }
36918
36919@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36920
36921 dev_kfree_skb_any(skb);
36922
36923- atomic_inc(&vcc->stats->rx_drop);
36924+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36925 return -ENOMEM;
36926 }
36927
36928 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36929
36930 vcc->push(vcc, skb);
36931- atomic_inc(&vcc->stats->rx);
36932+ atomic_inc_unchecked(&vcc->stats->rx);
36933
36934 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36935
36936@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
36937 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
36938 fore200e->atm_dev->number,
36939 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
36940- atomic_inc(&vcc->stats->rx_err);
36941+ atomic_inc_unchecked(&vcc->stats->rx_err);
36942 }
36943 }
36944
36945@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
36946 goto retry_here;
36947 }
36948
36949- atomic_inc(&vcc->stats->tx_err);
36950+ atomic_inc_unchecked(&vcc->stats->tx_err);
36951
36952 fore200e->tx_sat++;
36953 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
36954diff --git a/drivers/atm/he.c b/drivers/atm/he.c
36955index c39702b..785b73b 100644
36956--- a/drivers/atm/he.c
36957+++ b/drivers/atm/he.c
36958@@ -1689,7 +1689,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36959
36960 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
36961 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
36962- atomic_inc(&vcc->stats->rx_drop);
36963+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36964 goto return_host_buffers;
36965 }
36966
36967@@ -1716,7 +1716,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36968 RBRQ_LEN_ERR(he_dev->rbrq_head)
36969 ? "LEN_ERR" : "",
36970 vcc->vpi, vcc->vci);
36971- atomic_inc(&vcc->stats->rx_err);
36972+ atomic_inc_unchecked(&vcc->stats->rx_err);
36973 goto return_host_buffers;
36974 }
36975
36976@@ -1768,7 +1768,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36977 vcc->push(vcc, skb);
36978 spin_lock(&he_dev->global_lock);
36979
36980- atomic_inc(&vcc->stats->rx);
36981+ atomic_inc_unchecked(&vcc->stats->rx);
36982
36983 return_host_buffers:
36984 ++pdus_assembled;
36985@@ -2094,7 +2094,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
36986 tpd->vcc->pop(tpd->vcc, tpd->skb);
36987 else
36988 dev_kfree_skb_any(tpd->skb);
36989- atomic_inc(&tpd->vcc->stats->tx_err);
36990+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
36991 }
36992 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
36993 return;
36994@@ -2506,7 +2506,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36995 vcc->pop(vcc, skb);
36996 else
36997 dev_kfree_skb_any(skb);
36998- atomic_inc(&vcc->stats->tx_err);
36999+ atomic_inc_unchecked(&vcc->stats->tx_err);
37000 return -EINVAL;
37001 }
37002
37003@@ -2517,7 +2517,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37004 vcc->pop(vcc, skb);
37005 else
37006 dev_kfree_skb_any(skb);
37007- atomic_inc(&vcc->stats->tx_err);
37008+ atomic_inc_unchecked(&vcc->stats->tx_err);
37009 return -EINVAL;
37010 }
37011 #endif
37012@@ -2529,7 +2529,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37013 vcc->pop(vcc, skb);
37014 else
37015 dev_kfree_skb_any(skb);
37016- atomic_inc(&vcc->stats->tx_err);
37017+ atomic_inc_unchecked(&vcc->stats->tx_err);
37018 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37019 return -ENOMEM;
37020 }
37021@@ -2571,7 +2571,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37022 vcc->pop(vcc, skb);
37023 else
37024 dev_kfree_skb_any(skb);
37025- atomic_inc(&vcc->stats->tx_err);
37026+ atomic_inc_unchecked(&vcc->stats->tx_err);
37027 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37028 return -ENOMEM;
37029 }
37030@@ -2602,7 +2602,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37031 __enqueue_tpd(he_dev, tpd, cid);
37032 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37033
37034- atomic_inc(&vcc->stats->tx);
37035+ atomic_inc_unchecked(&vcc->stats->tx);
37036
37037 return 0;
37038 }
37039diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
37040index 1dc0519..1aadaf7 100644
37041--- a/drivers/atm/horizon.c
37042+++ b/drivers/atm/horizon.c
37043@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
37044 {
37045 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
37046 // VC layer stats
37047- atomic_inc(&vcc->stats->rx);
37048+ atomic_inc_unchecked(&vcc->stats->rx);
37049 __net_timestamp(skb);
37050 // end of our responsibility
37051 vcc->push (vcc, skb);
37052@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
37053 dev->tx_iovec = NULL;
37054
37055 // VC layer stats
37056- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
37057+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
37058
37059 // free the skb
37060 hrz_kfree_skb (skb);
37061diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
37062index 2b24ed0..b3d6acc 100644
37063--- a/drivers/atm/idt77252.c
37064+++ b/drivers/atm/idt77252.c
37065@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
37066 else
37067 dev_kfree_skb(skb);
37068
37069- atomic_inc(&vcc->stats->tx);
37070+ atomic_inc_unchecked(&vcc->stats->tx);
37071 }
37072
37073 atomic_dec(&scq->used);
37074@@ -1072,13 +1072,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37075 if ((sb = dev_alloc_skb(64)) == NULL) {
37076 printk("%s: Can't allocate buffers for aal0.\n",
37077 card->name);
37078- atomic_add(i, &vcc->stats->rx_drop);
37079+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37080 break;
37081 }
37082 if (!atm_charge(vcc, sb->truesize)) {
37083 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
37084 card->name);
37085- atomic_add(i - 1, &vcc->stats->rx_drop);
37086+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
37087 dev_kfree_skb(sb);
37088 break;
37089 }
37090@@ -1095,7 +1095,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37091 ATM_SKB(sb)->vcc = vcc;
37092 __net_timestamp(sb);
37093 vcc->push(vcc, sb);
37094- atomic_inc(&vcc->stats->rx);
37095+ atomic_inc_unchecked(&vcc->stats->rx);
37096
37097 cell += ATM_CELL_PAYLOAD;
37098 }
37099@@ -1132,13 +1132,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37100 "(CDC: %08x)\n",
37101 card->name, len, rpp->len, readl(SAR_REG_CDC));
37102 recycle_rx_pool_skb(card, rpp);
37103- atomic_inc(&vcc->stats->rx_err);
37104+ atomic_inc_unchecked(&vcc->stats->rx_err);
37105 return;
37106 }
37107 if (stat & SAR_RSQE_CRC) {
37108 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
37109 recycle_rx_pool_skb(card, rpp);
37110- atomic_inc(&vcc->stats->rx_err);
37111+ atomic_inc_unchecked(&vcc->stats->rx_err);
37112 return;
37113 }
37114 if (skb_queue_len(&rpp->queue) > 1) {
37115@@ -1149,7 +1149,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37116 RXPRINTK("%s: Can't alloc RX skb.\n",
37117 card->name);
37118 recycle_rx_pool_skb(card, rpp);
37119- atomic_inc(&vcc->stats->rx_err);
37120+ atomic_inc_unchecked(&vcc->stats->rx_err);
37121 return;
37122 }
37123 if (!atm_charge(vcc, skb->truesize)) {
37124@@ -1168,7 +1168,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37125 __net_timestamp(skb);
37126
37127 vcc->push(vcc, skb);
37128- atomic_inc(&vcc->stats->rx);
37129+ atomic_inc_unchecked(&vcc->stats->rx);
37130
37131 return;
37132 }
37133@@ -1190,7 +1190,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37134 __net_timestamp(skb);
37135
37136 vcc->push(vcc, skb);
37137- atomic_inc(&vcc->stats->rx);
37138+ atomic_inc_unchecked(&vcc->stats->rx);
37139
37140 if (skb->truesize > SAR_FB_SIZE_3)
37141 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
37142@@ -1301,14 +1301,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
37143 if (vcc->qos.aal != ATM_AAL0) {
37144 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
37145 card->name, vpi, vci);
37146- atomic_inc(&vcc->stats->rx_drop);
37147+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37148 goto drop;
37149 }
37150
37151 if ((sb = dev_alloc_skb(64)) == NULL) {
37152 printk("%s: Can't allocate buffers for AAL0.\n",
37153 card->name);
37154- atomic_inc(&vcc->stats->rx_err);
37155+ atomic_inc_unchecked(&vcc->stats->rx_err);
37156 goto drop;
37157 }
37158
37159@@ -1327,7 +1327,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
37160 ATM_SKB(sb)->vcc = vcc;
37161 __net_timestamp(sb);
37162 vcc->push(vcc, sb);
37163- atomic_inc(&vcc->stats->rx);
37164+ atomic_inc_unchecked(&vcc->stats->rx);
37165
37166 drop:
37167 skb_pull(queue, 64);
37168@@ -1952,13 +1952,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37169
37170 if (vc == NULL) {
37171 printk("%s: NULL connection in send().\n", card->name);
37172- atomic_inc(&vcc->stats->tx_err);
37173+ atomic_inc_unchecked(&vcc->stats->tx_err);
37174 dev_kfree_skb(skb);
37175 return -EINVAL;
37176 }
37177 if (!test_bit(VCF_TX, &vc->flags)) {
37178 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
37179- atomic_inc(&vcc->stats->tx_err);
37180+ atomic_inc_unchecked(&vcc->stats->tx_err);
37181 dev_kfree_skb(skb);
37182 return -EINVAL;
37183 }
37184@@ -1970,14 +1970,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37185 break;
37186 default:
37187 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
37188- atomic_inc(&vcc->stats->tx_err);
37189+ atomic_inc_unchecked(&vcc->stats->tx_err);
37190 dev_kfree_skb(skb);
37191 return -EINVAL;
37192 }
37193
37194 if (skb_shinfo(skb)->nr_frags != 0) {
37195 printk("%s: No scatter-gather yet.\n", card->name);
37196- atomic_inc(&vcc->stats->tx_err);
37197+ atomic_inc_unchecked(&vcc->stats->tx_err);
37198 dev_kfree_skb(skb);
37199 return -EINVAL;
37200 }
37201@@ -1985,7 +1985,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37202
37203 err = queue_skb(card, vc, skb, oam);
37204 if (err) {
37205- atomic_inc(&vcc->stats->tx_err);
37206+ atomic_inc_unchecked(&vcc->stats->tx_err);
37207 dev_kfree_skb(skb);
37208 return err;
37209 }
37210@@ -2008,7 +2008,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
37211 skb = dev_alloc_skb(64);
37212 if (!skb) {
37213 printk("%s: Out of memory in send_oam().\n", card->name);
37214- atomic_inc(&vcc->stats->tx_err);
37215+ atomic_inc_unchecked(&vcc->stats->tx_err);
37216 return -ENOMEM;
37217 }
37218 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
37219diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
37220index 4217f29..88f547a 100644
37221--- a/drivers/atm/iphase.c
37222+++ b/drivers/atm/iphase.c
37223@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
37224 status = (u_short) (buf_desc_ptr->desc_mode);
37225 if (status & (RX_CER | RX_PTE | RX_OFL))
37226 {
37227- atomic_inc(&vcc->stats->rx_err);
37228+ atomic_inc_unchecked(&vcc->stats->rx_err);
37229 IF_ERR(printk("IA: bad packet, dropping it");)
37230 if (status & RX_CER) {
37231 IF_ERR(printk(" cause: packet CRC error\n");)
37232@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
37233 len = dma_addr - buf_addr;
37234 if (len > iadev->rx_buf_sz) {
37235 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
37236- atomic_inc(&vcc->stats->rx_err);
37237+ atomic_inc_unchecked(&vcc->stats->rx_err);
37238 goto out_free_desc;
37239 }
37240
37241@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37242 ia_vcc = INPH_IA_VCC(vcc);
37243 if (ia_vcc == NULL)
37244 {
37245- atomic_inc(&vcc->stats->rx_err);
37246+ atomic_inc_unchecked(&vcc->stats->rx_err);
37247 atm_return(vcc, skb->truesize);
37248 dev_kfree_skb_any(skb);
37249 goto INCR_DLE;
37250@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37251 if ((length > iadev->rx_buf_sz) || (length >
37252 (skb->len - sizeof(struct cpcs_trailer))))
37253 {
37254- atomic_inc(&vcc->stats->rx_err);
37255+ atomic_inc_unchecked(&vcc->stats->rx_err);
37256 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
37257 length, skb->len);)
37258 atm_return(vcc, skb->truesize);
37259@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37260
37261 IF_RX(printk("rx_dle_intr: skb push");)
37262 vcc->push(vcc,skb);
37263- atomic_inc(&vcc->stats->rx);
37264+ atomic_inc_unchecked(&vcc->stats->rx);
37265 iadev->rx_pkt_cnt++;
37266 }
37267 INCR_DLE:
37268@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
37269 {
37270 struct k_sonet_stats *stats;
37271 stats = &PRIV(_ia_dev[board])->sonet_stats;
37272- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
37273- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
37274- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
37275- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
37276- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
37277- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
37278- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
37279- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
37280- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
37281+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
37282+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
37283+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
37284+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
37285+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
37286+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
37287+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
37288+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
37289+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
37290 }
37291 ia_cmds.status = 0;
37292 break;
37293@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37294 if ((desc == 0) || (desc > iadev->num_tx_desc))
37295 {
37296 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
37297- atomic_inc(&vcc->stats->tx);
37298+ atomic_inc_unchecked(&vcc->stats->tx);
37299 if (vcc->pop)
37300 vcc->pop(vcc, skb);
37301 else
37302@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37303 ATM_DESC(skb) = vcc->vci;
37304 skb_queue_tail(&iadev->tx_dma_q, skb);
37305
37306- atomic_inc(&vcc->stats->tx);
37307+ atomic_inc_unchecked(&vcc->stats->tx);
37308 iadev->tx_pkt_cnt++;
37309 /* Increment transaction counter */
37310 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
37311
37312 #if 0
37313 /* add flow control logic */
37314- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
37315+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
37316 if (iavcc->vc_desc_cnt > 10) {
37317 vcc->tx_quota = vcc->tx_quota * 3 / 4;
37318 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
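Note that the ia_ioctl() statistics dump above is converted in lockstep with the writers: atomic_unchecked_t is a distinct type, so a reader left on atomic_read() fails to compile, and the type system guarantees each counter is converted everywhere or nowhere. An illustrative fragment (hypothetical variable, reusing the sketch accessors from earlier):

static void example(void)
{
        atomic_unchecked_t rx_cells = ATOMIC_INIT(0);

        int ok = atomic_read_unchecked(&rx_cells); /* correct accessor */
        /* int bad = atomic_read(&rx_cells); */    /* would not compile */
        (void)ok;
}
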
37319diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
37320index 93eaf8d..b4ca7da 100644
37321--- a/drivers/atm/lanai.c
37322+++ b/drivers/atm/lanai.c
37323@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
37324 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
37325 lanai_endtx(lanai, lvcc);
37326 lanai_free_skb(lvcc->tx.atmvcc, skb);
37327- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
37328+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
37329 }
37330
37331 /* Try to fill the buffer - don't call unless there is backlog */
37332@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
37333 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
37334 __net_timestamp(skb);
37335 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
37336- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
37337+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
37338 out:
37339 lvcc->rx.buf.ptr = end;
37340 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
37341@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37342 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
37343 "vcc %d\n", lanai->number, (unsigned int) s, vci);
37344 lanai->stats.service_rxnotaal5++;
37345- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37346+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37347 return 0;
37348 }
37349 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
37350@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37351 int bytes;
37352 read_unlock(&vcc_sklist_lock);
37353 DPRINTK("got trashed rx pdu on vci %d\n", vci);
37354- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37355+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37356 lvcc->stats.x.aal5.service_trash++;
37357 bytes = (SERVICE_GET_END(s) * 16) -
37358 (((unsigned long) lvcc->rx.buf.ptr) -
37359@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37360 }
37361 if (s & SERVICE_STREAM) {
37362 read_unlock(&vcc_sklist_lock);
37363- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37364+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37365 lvcc->stats.x.aal5.service_stream++;
37366 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
37367 "PDU on VCI %d!\n", lanai->number, vci);
37368@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37369 return 0;
37370 }
37371 DPRINTK("got rx crc error on vci %d\n", vci);
37372- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37373+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37374 lvcc->stats.x.aal5.service_rxcrc++;
37375 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
37376 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
37377diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
37378index 9988ac9..7c52585 100644
37379--- a/drivers/atm/nicstar.c
37380+++ b/drivers/atm/nicstar.c
37381@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37382 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
37383 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
37384 card->index);
37385- atomic_inc(&vcc->stats->tx_err);
37386+ atomic_inc_unchecked(&vcc->stats->tx_err);
37387 dev_kfree_skb_any(skb);
37388 return -EINVAL;
37389 }
37390@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37391 if (!vc->tx) {
37392 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
37393 card->index);
37394- atomic_inc(&vcc->stats->tx_err);
37395+ atomic_inc_unchecked(&vcc->stats->tx_err);
37396 dev_kfree_skb_any(skb);
37397 return -EINVAL;
37398 }
37399@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37400 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
37401 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
37402 card->index);
37403- atomic_inc(&vcc->stats->tx_err);
37404+ atomic_inc_unchecked(&vcc->stats->tx_err);
37405 dev_kfree_skb_any(skb);
37406 return -EINVAL;
37407 }
37408
37409 if (skb_shinfo(skb)->nr_frags != 0) {
37410 printk("nicstar%d: No scatter-gather yet.\n", card->index);
37411- atomic_inc(&vcc->stats->tx_err);
37412+ atomic_inc_unchecked(&vcc->stats->tx_err);
37413 dev_kfree_skb_any(skb);
37414 return -EINVAL;
37415 }
37416@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37417 }
37418
37419 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
37420- atomic_inc(&vcc->stats->tx_err);
37421+ atomic_inc_unchecked(&vcc->stats->tx_err);
37422 dev_kfree_skb_any(skb);
37423 return -EIO;
37424 }
37425- atomic_inc(&vcc->stats->tx);
37426+ atomic_inc_unchecked(&vcc->stats->tx);
37427
37428 return 0;
37429 }
37430@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37431 printk
37432 ("nicstar%d: Can't allocate buffers for aal0.\n",
37433 card->index);
37434- atomic_add(i, &vcc->stats->rx_drop);
37435+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37436 break;
37437 }
37438 if (!atm_charge(vcc, sb->truesize)) {
37439 RXPRINTK
37440 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
37441 card->index);
37442- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37443+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37444 dev_kfree_skb_any(sb);
37445 break;
37446 }
37447@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37448 ATM_SKB(sb)->vcc = vcc;
37449 __net_timestamp(sb);
37450 vcc->push(vcc, sb);
37451- atomic_inc(&vcc->stats->rx);
37452+ atomic_inc_unchecked(&vcc->stats->rx);
37453 cell += ATM_CELL_PAYLOAD;
37454 }
37455
37456@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37457 if (iovb == NULL) {
37458 printk("nicstar%d: Out of iovec buffers.\n",
37459 card->index);
37460- atomic_inc(&vcc->stats->rx_drop);
37461+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37462 recycle_rx_buf(card, skb);
37463 return;
37464 }
37465@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37466 small or large buffer itself. */
37467 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
37468 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
37469- atomic_inc(&vcc->stats->rx_err);
37470+ atomic_inc_unchecked(&vcc->stats->rx_err);
37471 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37472 NS_MAX_IOVECS);
37473 NS_PRV_IOVCNT(iovb) = 0;
37474@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37475 ("nicstar%d: Expected a small buffer, and this is not one.\n",
37476 card->index);
37477 which_list(card, skb);
37478- atomic_inc(&vcc->stats->rx_err);
37479+ atomic_inc_unchecked(&vcc->stats->rx_err);
37480 recycle_rx_buf(card, skb);
37481 vc->rx_iov = NULL;
37482 recycle_iov_buf(card, iovb);
37483@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37484 ("nicstar%d: Expected a large buffer, and this is not one.\n",
37485 card->index);
37486 which_list(card, skb);
37487- atomic_inc(&vcc->stats->rx_err);
37488+ atomic_inc_unchecked(&vcc->stats->rx_err);
37489 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37490 NS_PRV_IOVCNT(iovb));
37491 vc->rx_iov = NULL;
37492@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37493 printk(" - PDU size mismatch.\n");
37494 else
37495 printk(".\n");
37496- atomic_inc(&vcc->stats->rx_err);
37497+ atomic_inc_unchecked(&vcc->stats->rx_err);
37498 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37499 NS_PRV_IOVCNT(iovb));
37500 vc->rx_iov = NULL;
37501@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37502 /* skb points to a small buffer */
37503 if (!atm_charge(vcc, skb->truesize)) {
37504 push_rxbufs(card, skb);
37505- atomic_inc(&vcc->stats->rx_drop);
37506+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37507 } else {
37508 skb_put(skb, len);
37509 dequeue_sm_buf(card, skb);
37510@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37511 ATM_SKB(skb)->vcc = vcc;
37512 __net_timestamp(skb);
37513 vcc->push(vcc, skb);
37514- atomic_inc(&vcc->stats->rx);
37515+ atomic_inc_unchecked(&vcc->stats->rx);
37516 }
37517 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
37518 struct sk_buff *sb;
37519@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37520 if (len <= NS_SMBUFSIZE) {
37521 if (!atm_charge(vcc, sb->truesize)) {
37522 push_rxbufs(card, sb);
37523- atomic_inc(&vcc->stats->rx_drop);
37524+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37525 } else {
37526 skb_put(sb, len);
37527 dequeue_sm_buf(card, sb);
37528@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37529 ATM_SKB(sb)->vcc = vcc;
37530 __net_timestamp(sb);
37531 vcc->push(vcc, sb);
37532- atomic_inc(&vcc->stats->rx);
37533+ atomic_inc_unchecked(&vcc->stats->rx);
37534 }
37535
37536 push_rxbufs(card, skb);
37537@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37538
37539 if (!atm_charge(vcc, skb->truesize)) {
37540 push_rxbufs(card, skb);
37541- atomic_inc(&vcc->stats->rx_drop);
37542+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37543 } else {
37544 dequeue_lg_buf(card, skb);
37545 #ifdef NS_USE_DESTRUCTORS
37546@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37547 ATM_SKB(skb)->vcc = vcc;
37548 __net_timestamp(skb);
37549 vcc->push(vcc, skb);
37550- atomic_inc(&vcc->stats->rx);
37551+ atomic_inc_unchecked(&vcc->stats->rx);
37552 }
37553
37554 push_rxbufs(card, sb);
37555@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37556 printk
37557 ("nicstar%d: Out of huge buffers.\n",
37558 card->index);
37559- atomic_inc(&vcc->stats->rx_drop);
37560+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37561 recycle_iovec_rx_bufs(card,
37562 (struct iovec *)
37563 iovb->data,
37564@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37565 card->hbpool.count++;
37566 } else
37567 dev_kfree_skb_any(hb);
37568- atomic_inc(&vcc->stats->rx_drop);
37569+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37570 } else {
37571 /* Copy the small buffer to the huge buffer */
37572 sb = (struct sk_buff *)iov->iov_base;
37573@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37574 #endif /* NS_USE_DESTRUCTORS */
37575 __net_timestamp(hb);
37576 vcc->push(vcc, hb);
37577- atomic_inc(&vcc->stats->rx);
37578+ atomic_inc_unchecked(&vcc->stats->rx);
37579 }
37580 }
37581
37582diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
37583index 21b0bc6..b5f40ba 100644
37584--- a/drivers/atm/solos-pci.c
37585+++ b/drivers/atm/solos-pci.c
37586@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
37587 }
37588 atm_charge(vcc, skb->truesize);
37589 vcc->push(vcc, skb);
37590- atomic_inc(&vcc->stats->rx);
37591+ atomic_inc_unchecked(&vcc->stats->rx);
37592 break;
37593
37594 case PKT_STATUS:
37595@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
37596 vcc = SKB_CB(oldskb)->vcc;
37597
37598 if (vcc) {
37599- atomic_inc(&vcc->stats->tx);
37600+ atomic_inc_unchecked(&vcc->stats->tx);
37601 solos_pop(vcc, oldskb);
37602 } else {
37603 dev_kfree_skb_irq(oldskb);
37604diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
37605index 0215934..ce9f5b1 100644
37606--- a/drivers/atm/suni.c
37607+++ b/drivers/atm/suni.c
37608@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
37609
37610
37611 #define ADD_LIMITED(s,v) \
37612- atomic_add((v),&stats->s); \
37613- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
37614+ atomic_add_unchecked((v),&stats->s); \
37615+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
37616
37617
37618 static void suni_hz(unsigned long from_timer)
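suni.c's ADD_LIMITED already implements deliberate saturation — add, then clamp to INT_MAX once the signed counter goes negative — and the patch only swaps in the unchecked primitives while preserving that behaviour. For reference, one post-patch expansion of the macro (ADD_LIMITED(tx_cells, n), using the names in suni.c); note it expands to two statements, so it still must not be used as the body of an unbraced if:

atomic_add_unchecked(n, &stats->tx_cells);
if (atomic_read_unchecked(&stats->tx_cells) < 0)
        atomic_set_unchecked(&stats->tx_cells, INT_MAX);
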
37619diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
37620index 5120a96..e2572bd 100644
37621--- a/drivers/atm/uPD98402.c
37622+++ b/drivers/atm/uPD98402.c
37623@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
37624 struct sonet_stats tmp;
37625 int error = 0;
37626
37627- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37628+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37629 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
37630 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
37631 if (zero && !error) {
37632@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
37633
37634
37635 #define ADD_LIMITED(s,v) \
37636- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
37637- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
37638- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37639+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
37640+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
37641+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37642
37643
37644 static void stat_event(struct atm_dev *dev)
37645@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
37646 if (reason & uPD98402_INT_PFM) stat_event(dev);
37647 if (reason & uPD98402_INT_PCO) {
37648 (void) GET(PCOCR); /* clear interrupt cause */
37649- atomic_add(GET(HECCT),
37650+ atomic_add_unchecked(GET(HECCT),
37651 &PRIV(dev)->sonet_stats.uncorr_hcs);
37652 }
37653 if ((reason & uPD98402_INT_RFO) &&
37654@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
37655 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
37656 uPD98402_INT_LOS),PIMR); /* enable them */
37657 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
37658- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37659- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
37660- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
37661+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37662+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
37663+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
37664 return 0;
37665 }
37666
37667diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
37668index 969c3c2..9b72956 100644
37669--- a/drivers/atm/zatm.c
37670+++ b/drivers/atm/zatm.c
37671@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37672 }
37673 if (!size) {
37674 dev_kfree_skb_irq(skb);
37675- if (vcc) atomic_inc(&vcc->stats->rx_err);
37676+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
37677 continue;
37678 }
37679 if (!atm_charge(vcc,skb->truesize)) {
37680@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37681 skb->len = size;
37682 ATM_SKB(skb)->vcc = vcc;
37683 vcc->push(vcc,skb);
37684- atomic_inc(&vcc->stats->rx);
37685+ atomic_inc_unchecked(&vcc->stats->rx);
37686 }
37687 zout(pos & 0xffff,MTA(mbx));
37688 #if 0 /* probably a stupid idea */
37689@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
37690 skb_queue_head(&zatm_vcc->backlog,skb);
37691 break;
37692 }
37693- atomic_inc(&vcc->stats->tx);
37694+ atomic_inc_unchecked(&vcc->stats->tx);
37695 wake_up(&zatm_vcc->tx_wait);
37696 }
37697
37698diff --git a/drivers/base/bus.c b/drivers/base/bus.c
37699index 876bae5..8978785 100644
37700--- a/drivers/base/bus.c
37701+++ b/drivers/base/bus.c
37702@@ -1126,7 +1126,7 @@ int subsys_interface_register(struct subsys_interface *sif)
37703 return -EINVAL;
37704
37705 mutex_lock(&subsys->p->mutex);
37706- list_add_tail(&sif->node, &subsys->p->interfaces);
37707+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
37708 if (sif->add_dev) {
37709 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37710 while ((dev = subsys_dev_iter_next(&iter)))
37711@@ -1151,7 +1151,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
37712 subsys = sif->subsys;
37713
37714 mutex_lock(&subsys->p->mutex);
37715- list_del_init(&sif->node);
37716+ pax_list_del_init((struct list_head *)&sif->node);
37717 if (sif->remove_dev) {
37718 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37719 while ((dev = subsys_dev_iter_next(&iter)))
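The subsys_interface node is embedded in a structure that grsecurity's constify plugin may place in read-only memory, so a plain list_add_tail()/list_del_init() would fault; the pax_list_* wrappers briefly reopen the kernel for writing around the list operation, which is also why the patch casts away the node's effective constness. A hedged sketch of the wrapper's shape, not the exact implementation:

static inline void pax_list_add_tail(struct list_head *new,
                                     struct list_head *head)
{
        pax_open_kernel();      /* temporarily allow writes to r/o data */
        list_add_tail(new, head);
        pax_close_kernel();     /* restore write protection */
}
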
37720diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
37721index 25798db..15f130e 100644
37722--- a/drivers/base/devtmpfs.c
37723+++ b/drivers/base/devtmpfs.c
37724@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
37725 if (!thread)
37726 return 0;
37727
37728- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
37729+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
37730 if (err)
37731 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
37732 else
37733@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
37734 *err = sys_unshare(CLONE_NEWNS);
37735 if (*err)
37736 goto out;
37737- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
37738+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
37739 if (*err)
37740 goto out;
37741- sys_chdir("/.."); /* will traverse into overmounted root */
37742- sys_chroot(".");
37743+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
37744+ sys_chroot((char __force_user *)".");
37745 complete(&setup_done);
37746 while (1) {
37747 spin_lock(&req_lock);
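The devtmpfs changes are annotation-only: under PaX the kernel and user address spaces are distinct to the static checker, so in-kernel callers of syscall entry points that take __user pointers must force the address-space cast. Presumably — this is an assumption about the companion header, which is not part of this hunk — __force_user reduces to a sparse annotation and compiles away entirely:

#ifdef __CHECKER__
# define __force_user   __force __user
#else
# define __force_user
#endif
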
37748diff --git a/drivers/base/node.c b/drivers/base/node.c
37749index a3b82e9..f90a8ce 100644
37750--- a/drivers/base/node.c
37751+++ b/drivers/base/node.c
37752@@ -614,7 +614,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
37753 struct node_attr {
37754 struct device_attribute attr;
37755 enum node_states state;
37756-};
37757+} __do_const;
37758
37759 static ssize_t show_node_state(struct device *dev,
37760 struct device_attribute *attr, char *buf)
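__do_const tags struct node_attr for the constify gcc plugin, which then treats every instance of the type as read-only data; the same annotation appears on the drbd command tables further down. The attribute itself is plugin-defined — a minimal sketch of what it presumably expands to when the plugin is active:

#define __do_const      __attribute__((do_const))
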
37761diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
37762index 0d8780c..0b5df3f 100644
37763--- a/drivers/base/power/domain.c
37764+++ b/drivers/base/power/domain.c
37765@@ -1725,7 +1725,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
37766 {
37767 struct cpuidle_driver *cpuidle_drv;
37768 struct gpd_cpuidle_data *cpuidle_data;
37769- struct cpuidle_state *idle_state;
37770+ cpuidle_state_no_const *idle_state;
37771 int ret = 0;
37772
37773 if (IS_ERR_OR_NULL(genpd) || state < 0)
37774@@ -1793,7 +1793,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
37775 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
37776 {
37777 struct gpd_cpuidle_data *cpuidle_data;
37778- struct cpuidle_state *idle_state;
37779+ cpuidle_state_no_const *idle_state;
37780 int ret = 0;
37781
37782 if (IS_ERR_OR_NULL(genpd))
37783@@ -2222,7 +2222,10 @@ int genpd_dev_pm_attach(struct device *dev)
37784 return ret;
37785 }
37786
37787- dev->pm_domain->detach = genpd_dev_pm_detach;
37788+ pax_open_kernel();
37789+ *(void **)&dev->pm_domain->detach = genpd_dev_pm_detach;
37790+ pax_close_kernel();
37791+
37792 pm_genpd_poweron(pd);
37793
37794 return 0;
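Because the pm_domain ops structure ends up constified, the detach hook can no longer be assigned through its declared type; instead the member's address is cast to void ** and the store is bracketed by pax_open_kernel()/pax_close_kernel(). This three-line idiom recurs throughout the patch whenever a nominally const callback must be set at runtime:

pax_open_kernel();
*(void **)&dev->pm_domain->detach = genpd_dev_pm_detach;
pax_close_kernel();
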
37795diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
37796index d2be3f9..0a3167a 100644
37797--- a/drivers/base/power/sysfs.c
37798+++ b/drivers/base/power/sysfs.c
37799@@ -181,7 +181,7 @@ static ssize_t rtpm_status_show(struct device *dev,
37800 return -EIO;
37801 }
37802 }
37803- return sprintf(buf, p);
37804+ return sprintf(buf, "%s", p);
37805 }
37806
37807 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
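The rtpm_status_show() change is an ordinary format-string fix rather than a PaX conversion: p is data, so it must pass through a "%s" conversion instead of being parsed as the format itself. A small stand-alone illustration of why (the value of p here is hypothetical):

#include <stdio.h>

static void demo(char *buf)
{
        const char *p = "busy 50%s of the time"; /* imagine p held a '%' */

        /* sprintf(buf, p);      would parse "%s" and read a missing vararg */
        sprintf(buf, "%s", p);  /* safe: p is copied verbatim */
}
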
37808diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
37809index c2744b3..08fac19 100644
37810--- a/drivers/base/power/wakeup.c
37811+++ b/drivers/base/power/wakeup.c
37812@@ -32,14 +32,14 @@ static bool pm_abort_suspend __read_mostly;
37813 * They need to be modified together atomically, so it's better to use one
37814 * atomic variable to hold them both.
37815 */
37816-static atomic_t combined_event_count = ATOMIC_INIT(0);
37817+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
37818
37819 #define IN_PROGRESS_BITS (sizeof(int) * 4)
37820 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
37821
37822 static void split_counters(unsigned int *cnt, unsigned int *inpr)
37823 {
37824- unsigned int comb = atomic_read(&combined_event_count);
37825+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
37826
37827 *cnt = (comb >> IN_PROGRESS_BITS);
37828 *inpr = comb & MAX_IN_PROGRESS;
37829@@ -404,7 +404,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
37830 ws->start_prevent_time = ws->last_time;
37831
37832 /* Increment the counter of events in progress. */
37833- cec = atomic_inc_return(&combined_event_count);
37834+ cec = atomic_inc_return_unchecked(&combined_event_count);
37835
37836 trace_wakeup_source_activate(ws->name, cec);
37837 }
37838@@ -530,7 +530,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
37839 * Increment the counter of registered wakeup events and decrement the
37840 * couter of wakeup events in progress simultaneously.
37841 */
37842- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
37843+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
37844 trace_wakeup_source_deactivate(ws->name, cec);
37845
37846 split_counters(&cnt, &inpr);
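combined_event_count packs two fields into one word — the low IN_PROGRESS_BITS hold wakeup events in progress, the high bits the registered-event count — so a single atomic add of MAX_IN_PROGRESS bumps the count (a carry into the high half) while decrementing the in-progress half, exactly as the deactivate path above does. The high half wraps harmlessly, which is why the counter can be unchecked. A worked example of the packing arithmetic:

#include <stdio.h>

#define IN_PROGRESS_BITS 16 /* sizeof(int) * 4 with a 32-bit int */
#define MAX_IN_PROGRESS  ((1u << IN_PROGRESS_BITS) - 1)

int main(void)
{
        unsigned int comb = (2u << IN_PROGRESS_BITS) | 3u; /* 2 registered, 3 in flight */

        comb += MAX_IN_PROGRESS; /* register one event, retire one, in one add */
        printf("cnt=%u inpr=%u\n",
               comb >> IN_PROGRESS_BITS, comb & MAX_IN_PROGRESS);
        return 0; /* prints cnt=3 inpr=2 */
}
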
37847diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
37848index 8d98a32..61d3165 100644
37849--- a/drivers/base/syscore.c
37850+++ b/drivers/base/syscore.c
37851@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
37852 void register_syscore_ops(struct syscore_ops *ops)
37853 {
37854 mutex_lock(&syscore_ops_lock);
37855- list_add_tail(&ops->node, &syscore_ops_list);
37856+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
37857 mutex_unlock(&syscore_ops_lock);
37858 }
37859 EXPORT_SYMBOL_GPL(register_syscore_ops);
37860@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
37861 void unregister_syscore_ops(struct syscore_ops *ops)
37862 {
37863 mutex_lock(&syscore_ops_lock);
37864- list_del(&ops->node);
37865+ pax_list_del((struct list_head *)&ops->node);
37866 mutex_unlock(&syscore_ops_lock);
37867 }
37868 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
37869diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
37870index ff20f19..018f1da 100644
37871--- a/drivers/block/cciss.c
37872+++ b/drivers/block/cciss.c
37873@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
37874 while (!list_empty(&h->reqQ)) {
37875 c = list_entry(h->reqQ.next, CommandList_struct, list);
37876 /* can't do anything if fifo is full */
37877- if ((h->access.fifo_full(h))) {
37878+ if ((h->access->fifo_full(h))) {
37879 dev_warn(&h->pdev->dev, "fifo full\n");
37880 break;
37881 }
37882@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
37883 h->Qdepth--;
37884
37885 /* Tell the controller execute command */
37886- h->access.submit_command(h, c);
37887+ h->access->submit_command(h, c);
37888
37889 /* Put job onto the completed Q */
37890 addQ(&h->cmpQ, c);
37891@@ -3444,17 +3444,17 @@ startio:
37892
37893 static inline unsigned long get_next_completion(ctlr_info_t *h)
37894 {
37895- return h->access.command_completed(h);
37896+ return h->access->command_completed(h);
37897 }
37898
37899 static inline int interrupt_pending(ctlr_info_t *h)
37900 {
37901- return h->access.intr_pending(h);
37902+ return h->access->intr_pending(h);
37903 }
37904
37905 static inline long interrupt_not_for_us(ctlr_info_t *h)
37906 {
37907- return ((h->access.intr_pending(h) == 0) ||
37908+ return ((h->access->intr_pending(h) == 0) ||
37909 (h->interrupts_enabled == 0));
37910 }
37911
37912@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
37913 u32 a;
37914
37915 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
37916- return h->access.command_completed(h);
37917+ return h->access->command_completed(h);
37918
37919 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
37920 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
37921@@ -4044,7 +4044,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
37922 trans_support & CFGTBL_Trans_use_short_tags);
37923
37924 /* Change the access methods to the performant access methods */
37925- h->access = SA5_performant_access;
37926+ h->access = &SA5_performant_access;
37927 h->transMethod = CFGTBL_Trans_Performant;
37928
37929 return;
37930@@ -4318,7 +4318,7 @@ static int cciss_pci_init(ctlr_info_t *h)
37931 if (prod_index < 0)
37932 return -ENODEV;
37933 h->product_name = products[prod_index].product_name;
37934- h->access = *(products[prod_index].access);
37935+ h->access = products[prod_index].access;
37936
37937 if (cciss_board_disabled(h)) {
37938 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
37939@@ -5050,7 +5050,7 @@ reinit_after_soft_reset:
37940 }
37941
37942 /* make sure the board interrupts are off */
37943- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37944+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37945 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
37946 if (rc)
37947 goto clean2;
37948@@ -5100,7 +5100,7 @@ reinit_after_soft_reset:
37949 * fake ones to scoop up any residual completions.
37950 */
37951 spin_lock_irqsave(&h->lock, flags);
37952- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37953+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37954 spin_unlock_irqrestore(&h->lock, flags);
37955 free_irq(h->intr[h->intr_mode], h);
37956 rc = cciss_request_irq(h, cciss_msix_discard_completions,
37957@@ -5120,9 +5120,9 @@ reinit_after_soft_reset:
37958 dev_info(&h->pdev->dev, "Board READY.\n");
37959 dev_info(&h->pdev->dev,
37960 "Waiting for stale completions to drain.\n");
37961- h->access.set_intr_mask(h, CCISS_INTR_ON);
37962+ h->access->set_intr_mask(h, CCISS_INTR_ON);
37963 msleep(10000);
37964- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37965+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37966
37967 rc = controller_reset_failed(h->cfgtable);
37968 if (rc)
37969@@ -5145,7 +5145,7 @@ reinit_after_soft_reset:
37970 cciss_scsi_setup(h);
37971
37972 /* Turn the interrupts on so we can service requests */
37973- h->access.set_intr_mask(h, CCISS_INTR_ON);
37974+ h->access->set_intr_mask(h, CCISS_INTR_ON);
37975
37976 /* Get the firmware version */
37977 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
37978@@ -5217,7 +5217,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
37979 kfree(flush_buf);
37980 if (return_code != IO_OK)
37981 dev_warn(&h->pdev->dev, "Error flushing cache\n");
37982- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37983+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37984 free_irq(h->intr[h->intr_mode], h);
37985 }
37986
37987diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
37988index 7fda30e..2f27946 100644
37989--- a/drivers/block/cciss.h
37990+++ b/drivers/block/cciss.h
37991@@ -101,7 +101,7 @@ struct ctlr_info
37992 /* information about each logical volume */
37993 drive_info_struct *drv[CISS_MAX_LUN];
37994
37995- struct access_method access;
37996+ struct access_method *access;
37997
37998 /* queue and queue Info */
37999 struct list_head reqQ;
38000@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
38001 }
38002
38003 static struct access_method SA5_access = {
38004- SA5_submit_command,
38005- SA5_intr_mask,
38006- SA5_fifo_full,
38007- SA5_intr_pending,
38008- SA5_completed,
38009+ .submit_command = SA5_submit_command,
38010+ .set_intr_mask = SA5_intr_mask,
38011+ .fifo_full = SA5_fifo_full,
38012+ .intr_pending = SA5_intr_pending,
38013+ .command_completed = SA5_completed,
38014 };
38015
38016 static struct access_method SA5B_access = {
38017- SA5_submit_command,
38018- SA5B_intr_mask,
38019- SA5_fifo_full,
38020- SA5B_intr_pending,
38021- SA5_completed,
38022+ .submit_command = SA5_submit_command,
38023+ .set_intr_mask = SA5B_intr_mask,
38024+ .fifo_full = SA5_fifo_full,
38025+ .intr_pending = SA5B_intr_pending,
38026+ .command_completed = SA5_completed,
38027 };
38028
38029 static struct access_method SA5_performant_access = {
38030- SA5_submit_command,
38031- SA5_performant_intr_mask,
38032- SA5_fifo_full,
38033- SA5_performant_intr_pending,
38034- SA5_performant_completed,
38035+ .submit_command = SA5_submit_command,
38036+ .set_intr_mask = SA5_performant_intr_mask,
38037+ .fifo_full = SA5_fifo_full,
38038+ .intr_pending = SA5_performant_intr_pending,
38039+ .command_completed = SA5_performant_completed,
38040 };
38041
38042 struct board_type {
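cciss (and cpqarray just below) stop embedding a per-controller copy of struct access_method and instead point each controller at one shared table, while the tables themselves switch from positional to designated initializers — both steps toward ops tables that can live in read-only memory and survive member reordering. A minimal, self-contained illustration of the same conversion with hypothetical names:

struct ops {
        void (*fire)(int id);
};

static void fire_a(int id) { (void)id; /* device-specific work */ }

/* one shared, const table instead of a mutable copy per device */
static const struct ops a_ops = {
        .fire = fire_a,
};

struct dev {
        const struct ops *ops;  /* was: struct ops ops; */
};

static void use(struct dev *d)
{
        d->ops->fire(0);        /* was: d->ops.fire(0); */
}
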
38043diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
38044index 2b94403..fd6ad1f 100644
38045--- a/drivers/block/cpqarray.c
38046+++ b/drivers/block/cpqarray.c
38047@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38048 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
38049 goto Enomem4;
38050 }
38051- hba[i]->access.set_intr_mask(hba[i], 0);
38052+ hba[i]->access->set_intr_mask(hba[i], 0);
38053 if (request_irq(hba[i]->intr, do_ida_intr,
38054 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
38055 {
38056@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38057 add_timer(&hba[i]->timer);
38058
38059 /* Enable IRQ now that spinlock and rate limit timer are set up */
38060- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38061+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38062
38063 for(j=0; j<NWD; j++) {
38064 struct gendisk *disk = ida_gendisk[i][j];
38065@@ -694,7 +694,7 @@ DBGINFO(
38066 for(i=0; i<NR_PRODUCTS; i++) {
38067 if (board_id == products[i].board_id) {
38068 c->product_name = products[i].product_name;
38069- c->access = *(products[i].access);
38070+ c->access = products[i].access;
38071 break;
38072 }
38073 }
38074@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
38075 hba[ctlr]->intr = intr;
38076 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
38077 hba[ctlr]->product_name = products[j].product_name;
38078- hba[ctlr]->access = *(products[j].access);
38079+ hba[ctlr]->access = products[j].access;
38080 hba[ctlr]->ctlr = ctlr;
38081 hba[ctlr]->board_id = board_id;
38082 hba[ctlr]->pci_dev = NULL; /* not PCI */
38083@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
38084
38085 while((c = h->reqQ) != NULL) {
38086 /* Can't do anything if we're busy */
38087- if (h->access.fifo_full(h) == 0)
38088+ if (h->access->fifo_full(h) == 0)
38089 return;
38090
38091 /* Get the first entry from the request Q */
38092@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
38093 h->Qdepth--;
38094
38095 /* Tell the controller to do our bidding */
38096- h->access.submit_command(h, c);
38097+ h->access->submit_command(h, c);
38098
38099 /* Get onto the completion Q */
38100 addQ(&h->cmpQ, c);
38101@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38102 unsigned long flags;
38103 __u32 a,a1;
38104
38105- istat = h->access.intr_pending(h);
38106+ istat = h->access->intr_pending(h);
38107 /* Is this interrupt for us? */
38108 if (istat == 0)
38109 return IRQ_NONE;
38110@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38111 */
38112 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
38113 if (istat & FIFO_NOT_EMPTY) {
38114- while((a = h->access.command_completed(h))) {
38115+ while((a = h->access->command_completed(h))) {
38116 a1 = a; a &= ~3;
38117 if ((c = h->cmpQ) == NULL)
38118 {
38119@@ -1448,11 +1448,11 @@ static int sendcmd(
38120 /*
38121 * Disable interrupt
38122 */
38123- info_p->access.set_intr_mask(info_p, 0);
38124+ info_p->access->set_intr_mask(info_p, 0);
38125 /* Make sure there is room in the command FIFO */
38126 /* Actually it should be completely empty at this time. */
38127 for (i = 200000; i > 0; i--) {
38128- temp = info_p->access.fifo_full(info_p);
38129+ temp = info_p->access->fifo_full(info_p);
38130 if (temp != 0) {
38131 break;
38132 }
38133@@ -1465,7 +1465,7 @@ DBG(
38134 /*
38135 * Send the cmd
38136 */
38137- info_p->access.submit_command(info_p, c);
38138+ info_p->access->submit_command(info_p, c);
38139 complete = pollcomplete(ctlr);
38140
38141 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
38142@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
38143 * we check the new geometry. Then turn interrupts back on when
38144 * we're done.
38145 */
38146- host->access.set_intr_mask(host, 0);
38147+ host->access->set_intr_mask(host, 0);
38148 getgeometry(ctlr);
38149- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
38150+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
38151
38152 for(i=0; i<NWD; i++) {
38153 struct gendisk *disk = ida_gendisk[ctlr][i];
38154@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
38155 /* Wait (up to 2 seconds) for a command to complete */
38156
38157 for (i = 200000; i > 0; i--) {
38158- done = hba[ctlr]->access.command_completed(hba[ctlr]);
38159+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
38160 if (done == 0) {
38161 udelay(10); /* a short fixed delay */
38162 } else
38163diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
38164index be73e9d..7fbf140 100644
38165--- a/drivers/block/cpqarray.h
38166+++ b/drivers/block/cpqarray.h
38167@@ -99,7 +99,7 @@ struct ctlr_info {
38168 drv_info_t drv[NWD];
38169 struct proc_dir_entry *proc;
38170
38171- struct access_method access;
38172+ struct access_method *access;
38173
38174 cmdlist_t *reqQ;
38175 cmdlist_t *cmpQ;
38176diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
38177index 434c77d..6d3219a 100644
38178--- a/drivers/block/drbd/drbd_bitmap.c
38179+++ b/drivers/block/drbd/drbd_bitmap.c
38180@@ -1036,7 +1036,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
38181 submit_bio(rw, bio);
38182 /* this should not count as user activity and cause the
38183 * resync to throttle -- see drbd_rs_should_slow_down(). */
38184- atomic_add(len >> 9, &device->rs_sect_ev);
38185+ atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
38186 }
38187 }
38188
38189diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
38190index b905e98..0812ed8 100644
38191--- a/drivers/block/drbd/drbd_int.h
38192+++ b/drivers/block/drbd/drbd_int.h
38193@@ -385,7 +385,7 @@ struct drbd_epoch {
38194 struct drbd_connection *connection;
38195 struct list_head list;
38196 unsigned int barrier_nr;
38197- atomic_t epoch_size; /* increased on every request added. */
38198+ atomic_unchecked_t epoch_size; /* increased on every request added. */
38199 atomic_t active; /* increased on every req. added, and dec on every finished. */
38200 unsigned long flags;
38201 };
38202@@ -946,7 +946,7 @@ struct drbd_device {
38203 unsigned int al_tr_number;
38204 int al_tr_cycle;
38205 wait_queue_head_t seq_wait;
38206- atomic_t packet_seq;
38207+ atomic_unchecked_t packet_seq;
38208 unsigned int peer_seq;
38209 spinlock_t peer_seq_lock;
38210 unsigned long comm_bm_set; /* communicated number of set bits. */
38211@@ -955,8 +955,8 @@ struct drbd_device {
38212 struct mutex own_state_mutex;
38213 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
38214 char congestion_reason; /* Why we where congested... */
38215- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38216- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
38217+ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38218+ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
38219 int rs_last_sect_ev; /* counter to compare with */
38220 int rs_last_events; /* counter of read or write "events" (unit sectors)
38221 * on the lower level device when we last looked. */
38222diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
38223index 1fc8342..7e7742b 100644
38224--- a/drivers/block/drbd/drbd_main.c
38225+++ b/drivers/block/drbd/drbd_main.c
38226@@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
38227 p->sector = sector;
38228 p->block_id = block_id;
38229 p->blksize = blksize;
38230- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
38231+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
38232 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
38233 }
38234
38235@@ -1634,7 +1634,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
38236 return -EIO;
38237 p->sector = cpu_to_be64(req->i.sector);
38238 p->block_id = (unsigned long)req;
38239- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
38240+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
38241 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
38242 if (device->state.conn >= C_SYNC_SOURCE &&
38243 device->state.conn <= C_PAUSED_SYNC_T)
38244@@ -1915,8 +1915,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
38245 atomic_set(&device->unacked_cnt, 0);
38246 atomic_set(&device->local_cnt, 0);
38247 atomic_set(&device->pp_in_use_by_net, 0);
38248- atomic_set(&device->rs_sect_in, 0);
38249- atomic_set(&device->rs_sect_ev, 0);
38250+ atomic_set_unchecked(&device->rs_sect_in, 0);
38251+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38252 atomic_set(&device->ap_in_flight, 0);
38253 atomic_set(&device->md_io.in_use, 0);
38254
38255@@ -2684,8 +2684,8 @@ void drbd_destroy_connection(struct kref *kref)
38256 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
38257 struct drbd_resource *resource = connection->resource;
38258
38259- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
38260- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
38261+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
38262+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
38263 kfree(connection->current_epoch);
38264
38265 idr_destroy(&connection->peer_devices);
38266diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
38267index 74df8cf..e41fc24 100644
38268--- a/drivers/block/drbd/drbd_nl.c
38269+++ b/drivers/block/drbd/drbd_nl.c
38270@@ -3637,13 +3637,13 @@ finish:
38271
38272 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
38273 {
38274- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38275+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38276 struct sk_buff *msg;
38277 struct drbd_genlmsghdr *d_out;
38278 unsigned seq;
38279 int err = -ENOMEM;
38280
38281- seq = atomic_inc_return(&drbd_genl_seq);
38282+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
38283 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
38284 if (!msg)
38285 goto failed;
38286diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
38287index d169b4a..481463f 100644
38288--- a/drivers/block/drbd/drbd_receiver.c
38289+++ b/drivers/block/drbd/drbd_receiver.c
38290@@ -870,7 +870,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
38291 struct drbd_device *device = peer_device->device;
38292 int err;
38293
38294- atomic_set(&device->packet_seq, 0);
38295+ atomic_set_unchecked(&device->packet_seq, 0);
38296 device->peer_seq = 0;
38297
38298 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
38299@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38300 do {
38301 next_epoch = NULL;
38302
38303- epoch_size = atomic_read(&epoch->epoch_size);
38304+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
38305
38306 switch (ev & ~EV_CLEANUP) {
38307 case EV_PUT:
38308@@ -1273,7 +1273,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38309 rv = FE_DESTROYED;
38310 } else {
38311 epoch->flags = 0;
38312- atomic_set(&epoch->epoch_size, 0);
38313+ atomic_set_unchecked(&epoch->epoch_size, 0);
38314 /* atomic_set(&epoch->active, 0); is already zero */
38315 if (rv == FE_STILL_LIVE)
38316 rv = FE_RECYCLED;
38317@@ -1550,7 +1550,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38318 conn_wait_active_ee_empty(connection);
38319 drbd_flush(connection);
38320
38321- if (atomic_read(&connection->current_epoch->epoch_size)) {
38322+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38323 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
38324 if (epoch)
38325 break;
38326@@ -1564,11 +1564,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38327 }
38328
38329 epoch->flags = 0;
38330- atomic_set(&epoch->epoch_size, 0);
38331+ atomic_set_unchecked(&epoch->epoch_size, 0);
38332 atomic_set(&epoch->active, 0);
38333
38334 spin_lock(&connection->epoch_lock);
38335- if (atomic_read(&connection->current_epoch->epoch_size)) {
38336+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38337 list_add(&epoch->list, &connection->current_epoch->list);
38338 connection->current_epoch = epoch;
38339 connection->epochs++;
38340@@ -1802,7 +1802,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
38341 list_add_tail(&peer_req->w.list, &device->sync_ee);
38342 spin_unlock_irq(&device->resource->req_lock);
38343
38344- atomic_add(pi->size >> 9, &device->rs_sect_ev);
38345+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
38346 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
38347 return 0;
38348
38349@@ -1900,7 +1900,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
38350 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38351 }
38352
38353- atomic_add(pi->size >> 9, &device->rs_sect_in);
38354+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
38355
38356 return err;
38357 }
38358@@ -2290,7 +2290,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38359
38360 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
38361 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38362- atomic_inc(&connection->current_epoch->epoch_size);
38363+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
38364 err2 = drbd_drain_block(peer_device, pi->size);
38365 if (!err)
38366 err = err2;
38367@@ -2334,7 +2334,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38368
38369 spin_lock(&connection->epoch_lock);
38370 peer_req->epoch = connection->current_epoch;
38371- atomic_inc(&peer_req->epoch->epoch_size);
38372+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
38373 atomic_inc(&peer_req->epoch->active);
38374 spin_unlock(&connection->epoch_lock);
38375
38376@@ -2479,7 +2479,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
38377
38378 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38379 (int)part_stat_read(&disk->part0, sectors[1]) -
38380- atomic_read(&device->rs_sect_ev);
38381+ atomic_read_unchecked(&device->rs_sect_ev);
38382
38383 if (atomic_read(&device->ap_actlog_cnt)
38384 || curr_events - device->rs_last_events > 64) {
38385@@ -2618,7 +2618,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38386 device->use_csums = true;
38387 } else if (pi->cmd == P_OV_REPLY) {
38388 /* track progress, we may need to throttle */
38389- atomic_add(size >> 9, &device->rs_sect_in);
38390+ atomic_add_unchecked(size >> 9, &device->rs_sect_in);
38391 peer_req->w.cb = w_e_end_ov_reply;
38392 dec_rs_pending(device);
38393 /* drbd_rs_begin_io done when we sent this request,
38394@@ -2691,7 +2691,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38395 goto out_free_e;
38396
38397 submit_for_resync:
38398- atomic_add(size >> 9, &device->rs_sect_ev);
38399+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38400
38401 submit:
38402 update_receiver_timing_details(connection, drbd_submit_peer_request);
38403@@ -4564,7 +4564,7 @@ struct data_cmd {
38404 int expect_payload;
38405 size_t pkt_size;
38406 int (*fn)(struct drbd_connection *, struct packet_info *);
38407-};
38408+} __do_const;
38409
38410 static struct data_cmd drbd_cmd_handler[] = {
38411 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
38412@@ -4678,7 +4678,7 @@ static void conn_disconnect(struct drbd_connection *connection)
38413 if (!list_empty(&connection->current_epoch->list))
38414 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
38415 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
38416- atomic_set(&connection->current_epoch->epoch_size, 0);
38417+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
38418 connection->send.seen_any_write_yet = false;
38419
38420 drbd_info(connection, "Connection closed\n");
38421@@ -5182,7 +5182,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
38422 put_ldev(device);
38423 }
38424 dec_rs_pending(device);
38425- atomic_add(blksize >> 9, &device->rs_sect_in);
38426+ atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
38427
38428 return 0;
38429 }
38430@@ -5470,7 +5470,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
38431 struct asender_cmd {
38432 size_t pkt_size;
38433 int (*fn)(struct drbd_connection *connection, struct packet_info *);
38434-};
38435+} __do_const;
38436
38437 static struct asender_cmd asender_tbl[] = {
38438 [P_PING] = { 0, got_Ping },
38439diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
38440index d0fae55..4469096 100644
38441--- a/drivers/block/drbd/drbd_worker.c
38442+++ b/drivers/block/drbd/drbd_worker.c
38443@@ -408,7 +408,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
38444 list_add_tail(&peer_req->w.list, &device->read_ee);
38445 spin_unlock_irq(&device->resource->req_lock);
38446
38447- atomic_add(size >> 9, &device->rs_sect_ev);
38448+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38449 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
38450 return 0;
38451
38452@@ -553,7 +553,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
38453 unsigned int sect_in; /* Number of sectors that came in since the last turn */
38454 int number, mxb;
38455
38456- sect_in = atomic_xchg(&device->rs_sect_in, 0);
38457+ sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
38458 device->rs_in_flight -= sect_in;
38459
38460 rcu_read_lock();
38461@@ -1595,8 +1595,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
38462 struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
38463 struct fifo_buffer *plan;
38464
38465- atomic_set(&device->rs_sect_in, 0);
38466- atomic_set(&device->rs_sect_ev, 0);
38467+ atomic_set_unchecked(&device->rs_sect_in, 0);
38468+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38469 device->rs_in_flight = 0;
38470 device->rs_last_events =
38471 (int)part_stat_read(&disk->part0, sectors[0]) +
38472diff --git a/drivers/block/loop.c b/drivers/block/loop.c
38473index 6cb1beb..bf490f7 100644
38474--- a/drivers/block/loop.c
38475+++ b/drivers/block/loop.c
38476@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
38477
38478 file_start_write(file);
38479 set_fs(get_ds());
38480- bw = file->f_op->write(file, buf, len, &pos);
38481+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
38482 set_fs(old_fs);
38483 file_end_write(file);
38484 if (likely(bw == len))
38485diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
38486index d826bf3..8eb406c 100644
38487--- a/drivers/block/nvme-core.c
38488+++ b/drivers/block/nvme-core.c
38489@@ -76,7 +76,6 @@ static LIST_HEAD(dev_list);
38490 static struct task_struct *nvme_thread;
38491 static struct workqueue_struct *nvme_workq;
38492 static wait_queue_head_t nvme_kthread_wait;
38493-static struct notifier_block nvme_nb;
38494
38495 static void nvme_reset_failed_dev(struct work_struct *ws);
38496 static int nvme_process_cq(struct nvme_queue *nvmeq);
38497@@ -2955,7 +2954,6 @@ static int __init nvme_init(void)
38498 static void __exit nvme_exit(void)
38499 {
38500 pci_unregister_driver(&nvme_driver);
38501- unregister_hotcpu_notifier(&nvme_nb);
38502 unregister_blkdev(nvme_major, "nvme");
38503 destroy_workqueue(nvme_workq);
38504 BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
38505diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
38506index 09e628da..7607aaa 100644
38507--- a/drivers/block/pktcdvd.c
38508+++ b/drivers/block/pktcdvd.c
38509@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
38510
38511 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
38512 {
38513- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
38514+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
38515 }
38516
38517 /*
38518@@ -1890,7 +1890,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
38519 return -EROFS;
38520 }
38521 pd->settings.fp = ti.fp;
38522- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
38523+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
38524
38525 if (ti.nwa_v) {
38526 pd->nwa = be32_to_cpu(ti.next_writable);
38527diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
38528index 8a86b62..f54c87e 100644
38529--- a/drivers/block/rbd.c
38530+++ b/drivers/block/rbd.c
38531@@ -63,7 +63,7 @@
38532 * If the counter is already at its maximum value returns
38533 * -EINVAL without updating it.
38534 */
38535-static int atomic_inc_return_safe(atomic_t *v)
38536+static int __intentional_overflow(-1) atomic_inc_return_safe(atomic_t *v)
38537 {
38538 unsigned int counter;
38539
38540diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
38541index e5565fb..71be10b4 100644
38542--- a/drivers/block/smart1,2.h
38543+++ b/drivers/block/smart1,2.h
38544@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
38545 }
38546
38547 static struct access_method smart4_access = {
38548- smart4_submit_command,
38549- smart4_intr_mask,
38550- smart4_fifo_full,
38551- smart4_intr_pending,
38552- smart4_completed,
38553+ .submit_command = smart4_submit_command,
38554+ .set_intr_mask = smart4_intr_mask,
38555+ .fifo_full = smart4_fifo_full,
38556+ .intr_pending = smart4_intr_pending,
38557+ .command_completed = smart4_completed,
38558 };
38559
38560 /*
38561@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
38562 }
38563
38564 static struct access_method smart2_access = {
38565- smart2_submit_command,
38566- smart2_intr_mask,
38567- smart2_fifo_full,
38568- smart2_intr_pending,
38569- smart2_completed,
38570+ .submit_command = smart2_submit_command,
38571+ .set_intr_mask = smart2_intr_mask,
38572+ .fifo_full = smart2_fifo_full,
38573+ .intr_pending = smart2_intr_pending,
38574+ .command_completed = smart2_completed,
38575 };
38576
38577 /*
38578@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
38579 }
38580
38581 static struct access_method smart2e_access = {
38582- smart2e_submit_command,
38583- smart2e_intr_mask,
38584- smart2e_fifo_full,
38585- smart2e_intr_pending,
38586- smart2e_completed,
38587+ .submit_command = smart2e_submit_command,
38588+ .set_intr_mask = smart2e_intr_mask,
38589+ .fifo_full = smart2e_fifo_full,
38590+ .intr_pending = smart2e_intr_pending,
38591+ .command_completed = smart2e_completed,
38592 };
38593
38594 /*
38595@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
38596 }
38597
38598 static struct access_method smart1_access = {
38599- smart1_submit_command,
38600- smart1_intr_mask,
38601- smart1_fifo_full,
38602- smart1_intr_pending,
38603- smart1_completed,
38604+ .submit_command = smart1_submit_command,
38605+ .set_intr_mask = smart1_intr_mask,
38606+ .fifo_full = smart1_fifo_full,
38607+ .intr_pending = smart1_intr_pending,
38608+ .command_completed = smart1_completed,
38609 };
38610diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
38611index 55c135b..9f8d60c 100644
38612--- a/drivers/bluetooth/btwilink.c
38613+++ b/drivers/bluetooth/btwilink.c
38614@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
38615
38616 static int bt_ti_probe(struct platform_device *pdev)
38617 {
38618- static struct ti_st *hst;
38619+ struct ti_st *hst;
38620 struct hci_dev *hdev;
38621 int err;
38622
38623diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
38624index 5d28a45..a538f90 100644
38625--- a/drivers/cdrom/cdrom.c
38626+++ b/drivers/cdrom/cdrom.c
38627@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
38628 ENSURE(reset, CDC_RESET);
38629 ENSURE(generic_packet, CDC_GENERIC_PACKET);
38630 cdi->mc_flags = 0;
38631- cdo->n_minors = 0;
38632 cdi->options = CDO_USE_FFLAGS;
38633
38634 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
38635@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
38636 else
38637 cdi->cdda_method = CDDA_OLD;
38638
38639- if (!cdo->generic_packet)
38640- cdo->generic_packet = cdrom_dummy_generic_packet;
38641+ if (!cdo->generic_packet) {
38642+ pax_open_kernel();
38643+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
38644+ pax_close_kernel();
38645+ }
38646
38647 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
38648 mutex_lock(&cdrom_mutex);
38649@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
38650 if (cdi->exit)
38651 cdi->exit(cdi);
38652
38653- cdi->ops->n_minors--;
38654 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
38655 }
38656
38657@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
38658 */
38659 nr = nframes;
38660 do {
38661- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38662+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38663 if (cgc.buffer)
38664 break;
38665
38666@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
38667 struct cdrom_device_info *cdi;
38668 int ret;
38669
38670- ret = scnprintf(info + *pos, max_size - *pos, header);
38671+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
38672 if (!ret)
38673 return 1;
38674
38675diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
38676index 584bc31..e64a12c 100644
38677--- a/drivers/cdrom/gdrom.c
38678+++ b/drivers/cdrom/gdrom.c
38679@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
38680 .audio_ioctl = gdrom_audio_ioctl,
38681 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
38682 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
38683- .n_minors = 1,
38684 };
38685
38686 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
38687diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
38688index efefd12..4f1d494 100644
38689--- a/drivers/char/Kconfig
38690+++ b/drivers/char/Kconfig
38691@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
38692
38693 config DEVKMEM
38694 bool "/dev/kmem virtual device support"
38695- default y
38696+ default n
38697+ depends on !GRKERNSEC_KMEM
38698 help
38699 Say Y here if you want to support the /dev/kmem device. The
38700 /dev/kmem device is rarely used, but can be used for certain
38701@@ -577,6 +578,7 @@ config DEVPORT
38702 bool
38703 depends on !M68K
38704 depends on ISA || PCI
38705+ depends on !GRKERNSEC_KMEM
38706 default y
38707
38708 source "drivers/s390/char/Kconfig"
38709diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
38710index a48e05b..6bac831 100644
38711--- a/drivers/char/agp/compat_ioctl.c
38712+++ b/drivers/char/agp/compat_ioctl.c
38713@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
38714 return -ENOMEM;
38715 }
38716
38717- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
38718+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
38719 sizeof(*usegment) * ureserve.seg_count)) {
38720 kfree(usegment);
38721 kfree(ksegment);
38722diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
38723index 09f17eb..8531d2f 100644
38724--- a/drivers/char/agp/frontend.c
38725+++ b/drivers/char/agp/frontend.c
38726@@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38727 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
38728 return -EFAULT;
38729
38730- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
38731+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
38732 return -EFAULT;
38733
38734 client = agp_find_client_by_pid(reserve.pid);
38735@@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38736 if (segment == NULL)
38737 return -ENOMEM;
38738
38739- if (copy_from_user(segment, (void __user *) reserve.seg_list,
38740+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
38741 sizeof(struct agp_segment) * reserve.seg_count)) {
38742 kfree(segment);
38743 return -EFAULT;
38744diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
38745index 4f94375..413694e 100644
38746--- a/drivers/char/genrtc.c
38747+++ b/drivers/char/genrtc.c
38748@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
38749 switch (cmd) {
38750
38751 case RTC_PLL_GET:
38752+ memset(&pll, 0, sizeof(pll));
38753 if (get_rtc_pll(&pll))
38754 return -EINVAL;
38755 else
38756diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
38757index d5d4cd8..22d561d 100644
38758--- a/drivers/char/hpet.c
38759+++ b/drivers/char/hpet.c
38760@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
38761 }
38762
38763 static int
38764-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
38765+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
38766 struct hpet_info *info)
38767 {
38768 struct hpet_timer __iomem *timer;
38769diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
38770index 6b65fa4..8ebbc99 100644
38771--- a/drivers/char/ipmi/ipmi_msghandler.c
38772+++ b/drivers/char/ipmi/ipmi_msghandler.c
38773@@ -436,7 +436,7 @@ struct ipmi_smi {
38774 struct proc_dir_entry *proc_dir;
38775 char proc_dir_name[10];
38776
38777- atomic_t stats[IPMI_NUM_STATS];
38778+ atomic_unchecked_t stats[IPMI_NUM_STATS];
38779
38780 /*
38781 * run_to_completion duplicate of smb_info, smi_info
38782@@ -468,9 +468,9 @@ static LIST_HEAD(smi_watchers);
38783 static DEFINE_MUTEX(smi_watchers_mutex);
38784
38785 #define ipmi_inc_stat(intf, stat) \
38786- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
38787+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
38788 #define ipmi_get_stat(intf, stat) \
38789- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
38790+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
38791
38792 static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI",
38793 "ACPI", "SMBIOS", "PCI",
38794@@ -2837,7 +2837,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
38795 INIT_LIST_HEAD(&intf->cmd_rcvrs);
38796 init_waitqueue_head(&intf->waitq);
38797 for (i = 0; i < IPMI_NUM_STATS; i++)
38798- atomic_set(&intf->stats[i], 0);
38799+ atomic_set_unchecked(&intf->stats[i], 0);
38800
38801 intf->proc_dir = NULL;
38802
38803diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
38804index 967b73a..946e94c 100644
38805--- a/drivers/char/ipmi/ipmi_si_intf.c
38806+++ b/drivers/char/ipmi/ipmi_si_intf.c
38807@@ -284,7 +284,7 @@ struct smi_info {
38808 unsigned char slave_addr;
38809
38810 /* Counters and things for the proc filesystem. */
38811- atomic_t stats[SI_NUM_STATS];
38812+ atomic_unchecked_t stats[SI_NUM_STATS];
38813
38814 struct task_struct *thread;
38815
38816@@ -293,9 +293,9 @@ struct smi_info {
38817 };
38818
38819 #define smi_inc_stat(smi, stat) \
38820- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
38821+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
38822 #define smi_get_stat(smi, stat) \
38823- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
38824+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
38825
38826 #define SI_MAX_PARMS 4
38827
38828@@ -3412,7 +3412,7 @@ static int try_smi_init(struct smi_info *new_smi)
38829 atomic_set(&new_smi->req_events, 0);
38830 new_smi->run_to_completion = false;
38831 for (i = 0; i < SI_NUM_STATS; i++)
38832- atomic_set(&new_smi->stats[i], 0);
38833+ atomic_set_unchecked(&new_smi->stats[i], 0);
38834
38835 new_smi->interrupt_disabled = true;
38836 atomic_set(&new_smi->need_watch, 0);
38837diff --git a/drivers/char/mem.c b/drivers/char/mem.c
38838index 4c58333..d5cca27 100644
38839--- a/drivers/char/mem.c
38840+++ b/drivers/char/mem.c
38841@@ -18,6 +18,7 @@
38842 #include <linux/raw.h>
38843 #include <linux/tty.h>
38844 #include <linux/capability.h>
38845+#include <linux/security.h>
38846 #include <linux/ptrace.h>
38847 #include <linux/device.h>
38848 #include <linux/highmem.h>
38849@@ -36,6 +37,10 @@
38850
38851 #define DEVPORT_MINOR 4
38852
38853+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38854+extern const struct file_operations grsec_fops;
38855+#endif
38856+
38857 static inline unsigned long size_inside_page(unsigned long start,
38858 unsigned long size)
38859 {
38860@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38861
38862 while (cursor < to) {
38863 if (!devmem_is_allowed(pfn)) {
38864+#ifdef CONFIG_GRKERNSEC_KMEM
38865+ gr_handle_mem_readwrite(from, to);
38866+#else
38867 printk(KERN_INFO
38868 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
38869 current->comm, from, to);
38870+#endif
38871 return 0;
38872 }
38873 cursor += PAGE_SIZE;
38874@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38875 }
38876 return 1;
38877 }
38878+#elif defined(CONFIG_GRKERNSEC_KMEM)
38879+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38880+{
38881+ return 0;
38882+}
38883 #else
38884 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38885 {
38886@@ -124,7 +138,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38887 #endif
38888
38889 while (count > 0) {
38890- unsigned long remaining;
38891+ unsigned long remaining = 0;
38892+ char *temp;
38893
38894 sz = size_inside_page(p, count);
38895
38896@@ -140,7 +155,24 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38897 if (!ptr)
38898 return -EFAULT;
38899
38900- remaining = copy_to_user(buf, ptr, sz);
38901+#ifdef CONFIG_PAX_USERCOPY
38902+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38903+ if (!temp) {
38904+ unxlate_dev_mem_ptr(p, ptr);
38905+ return -ENOMEM;
38906+ }
38907+ remaining = probe_kernel_read(temp, ptr, sz);
38908+#else
38909+ temp = ptr;
38910+#endif
38911+
38912+ if (!remaining)
38913+ remaining = copy_to_user(buf, temp, sz);
38914+
38915+#ifdef CONFIG_PAX_USERCOPY
38916+ kfree(temp);
38917+#endif
38918+
38919 unxlate_dev_mem_ptr(p, ptr);
38920 if (remaining)
38921 return -EFAULT;
38922@@ -372,9 +404,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38923 size_t count, loff_t *ppos)
38924 {
38925 unsigned long p = *ppos;
38926- ssize_t low_count, read, sz;
38927+ ssize_t low_count, read, sz, err = 0;
38928 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
38929- int err = 0;
38930
38931 read = 0;
38932 if (p < (unsigned long) high_memory) {
38933@@ -396,6 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38934 }
38935 #endif
38936 while (low_count > 0) {
38937+ char *temp;
38938+
38939 sz = size_inside_page(p, low_count);
38940
38941 /*
38942@@ -405,7 +438,23 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38943 */
38944 kbuf = xlate_dev_kmem_ptr((void *)p);
38945
38946- if (copy_to_user(buf, kbuf, sz))
38947+#ifdef CONFIG_PAX_USERCOPY
38948+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38949+ if (!temp)
38950+ return -ENOMEM;
38951+ err = probe_kernel_read(temp, kbuf, sz);
38952+#else
38953+ temp = kbuf;
38954+#endif
38955+
38956+ if (!err)
38957+ err = copy_to_user(buf, temp, sz);
38958+
38959+#ifdef CONFIG_PAX_USERCOPY
38960+ kfree(temp);
38961+#endif
38962+
38963+ if (err)
38964 return -EFAULT;
38965 buf += sz;
38966 p += sz;
38967@@ -800,6 +849,9 @@ static const struct memdev {
38968 #ifdef CONFIG_PRINTK
38969 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
38970 #endif
38971+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38972+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
38973+#endif
38974 };
38975
38976 static int memory_open(struct inode *inode, struct file *filp)
38977@@ -871,7 +923,7 @@ static int __init chr_dev_init(void)
38978 continue;
38979
38980 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
38981- NULL, devlist[minor].name);
38982+ NULL, "%s", devlist[minor].name);
38983 }
38984
38985 return tty_init();
38986diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
38987index 9df78e2..01ba9ae 100644
38988--- a/drivers/char/nvram.c
38989+++ b/drivers/char/nvram.c
38990@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
38991
38992 spin_unlock_irq(&rtc_lock);
38993
38994- if (copy_to_user(buf, contents, tmp - contents))
38995+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
38996 return -EFAULT;
38997
38998 *ppos = i;
38999diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
39000index 0ea9986..e7b07e4 100644
39001--- a/drivers/char/pcmcia/synclink_cs.c
39002+++ b/drivers/char/pcmcia/synclink_cs.c
39003@@ -2345,7 +2345,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39004
39005 if (debug_level >= DEBUG_LEVEL_INFO)
39006 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
39007- __FILE__, __LINE__, info->device_name, port->count);
39008+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
39009
39010 if (tty_port_close_start(port, tty, filp) == 0)
39011 goto cleanup;
39012@@ -2363,7 +2363,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39013 cleanup:
39014 if (debug_level >= DEBUG_LEVEL_INFO)
39015 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
39016- tty->driver->name, port->count);
39017+ tty->driver->name, atomic_read(&port->count));
39018 }
39019
39020 /* Wait until the transmitter is empty.
39021@@ -2505,7 +2505,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39022
39023 if (debug_level >= DEBUG_LEVEL_INFO)
39024 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
39025- __FILE__, __LINE__, tty->driver->name, port->count);
39026+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
39027
39028 /* If port is closing, signal caller to try again */
39029 if (port->flags & ASYNC_CLOSING){
39030@@ -2525,11 +2525,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39031 goto cleanup;
39032 }
39033 spin_lock(&port->lock);
39034- port->count++;
39035+ atomic_inc(&port->count);
39036 spin_unlock(&port->lock);
39037 spin_unlock_irqrestore(&info->netlock, flags);
39038
39039- if (port->count == 1) {
39040+ if (atomic_read(&port->count) == 1) {
39041 /* 1st open on this device, init hardware */
39042 retval = startup(info, tty);
39043 if (retval < 0)
39044@@ -3918,7 +3918,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
39045 unsigned short new_crctype;
39046
39047 /* return error if TTY interface open */
39048- if (info->port.count)
39049+ if (atomic_read(&info->port.count))
39050 return -EBUSY;
39051
39052 switch (encoding)
39053@@ -4022,7 +4022,7 @@ static int hdlcdev_open(struct net_device *dev)
39054
39055 /* arbitrate between network and tty opens */
39056 spin_lock_irqsave(&info->netlock, flags);
39057- if (info->port.count != 0 || info->netcount != 0) {
39058+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
39059 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
39060 spin_unlock_irqrestore(&info->netlock, flags);
39061 return -EBUSY;
39062@@ -4112,7 +4112,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39063 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
39064
39065 /* return error if TTY interface open */
39066- if (info->port.count)
39067+ if (atomic_read(&info->port.count))
39068 return -EBUSY;
39069
39070 if (cmd != SIOCWANDEV)
39071diff --git a/drivers/char/random.c b/drivers/char/random.c
39072index 9cd6968..6416f00 100644
39073--- a/drivers/char/random.c
39074+++ b/drivers/char/random.c
39075@@ -289,9 +289,6 @@
39076 /*
39077 * To allow fractional bits to be tracked, the entropy_count field is
39078 * denominated in units of 1/8th bits.
39079- *
39080- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
39081- * credit_entropy_bits() needs to be 64 bits wide.
39082 */
39083 #define ENTROPY_SHIFT 3
39084 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
39085@@ -439,9 +436,9 @@ struct entropy_store {
39086 };
39087
39088 static void push_to_pool(struct work_struct *work);
39089-static __u32 input_pool_data[INPUT_POOL_WORDS];
39090-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
39091-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
39092+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
39093+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39094+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39095
39096 static struct entropy_store input_pool = {
39097 .poolinfo = &poolinfo_table[0],
39098@@ -635,7 +632,7 @@ retry:
39099 /* The +2 corresponds to the /4 in the denominator */
39100
39101 do {
39102- unsigned int anfrac = min(pnfrac, pool_size/2);
39103+ u64 anfrac = min(pnfrac, pool_size/2);
39104 unsigned int add =
39105 ((pool_size - entropy_count)*anfrac*3) >> s;
39106
39107@@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
39108
39109 extract_buf(r, tmp);
39110 i = min_t(int, nbytes, EXTRACT_SIZE);
39111- if (copy_to_user(buf, tmp, i)) {
39112+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
39113 ret = -EFAULT;
39114 break;
39115 }
39116@@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
39117 static int proc_do_uuid(struct ctl_table *table, int write,
39118 void __user *buffer, size_t *lenp, loff_t *ppos)
39119 {
39120- struct ctl_table fake_table;
39121+ ctl_table_no_const fake_table;
39122 unsigned char buf[64], tmp_uuid[16], *uuid;
39123
39124 uuid = table->data;
39125@@ -1620,7 +1617,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
39126 static int proc_do_entropy(struct ctl_table *table, int write,
39127 void __user *buffer, size_t *lenp, loff_t *ppos)
39128 {
39129- struct ctl_table fake_table;
39130+ ctl_table_no_const fake_table;
39131 int entropy_count;
39132
39133 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
39134diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
39135index e496dae..b793e7d 100644
39136--- a/drivers/char/sonypi.c
39137+++ b/drivers/char/sonypi.c
39138@@ -54,6 +54,7 @@
39139
39140 #include <asm/uaccess.h>
39141 #include <asm/io.h>
39142+#include <asm/local.h>
39143
39144 #include <linux/sonypi.h>
39145
39146@@ -490,7 +491,7 @@ static struct sonypi_device {
39147 spinlock_t fifo_lock;
39148 wait_queue_head_t fifo_proc_list;
39149 struct fasync_struct *fifo_async;
39150- int open_count;
39151+ local_t open_count;
39152 int model;
39153 struct input_dev *input_jog_dev;
39154 struct input_dev *input_key_dev;
39155@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
39156 static int sonypi_misc_release(struct inode *inode, struct file *file)
39157 {
39158 mutex_lock(&sonypi_device.lock);
39159- sonypi_device.open_count--;
39160+ local_dec(&sonypi_device.open_count);
39161 mutex_unlock(&sonypi_device.lock);
39162 return 0;
39163 }
39164@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
39165 {
39166 mutex_lock(&sonypi_device.lock);
39167 /* Flush input queue on first open */
39168- if (!sonypi_device.open_count)
39169+ if (!local_read(&sonypi_device.open_count))
39170 kfifo_reset(&sonypi_device.fifo);
39171- sonypi_device.open_count++;
39172+ local_inc(&sonypi_device.open_count);
39173 mutex_unlock(&sonypi_device.lock);
39174
39175 return 0;
39176diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
39177index 565a947..dcdc06e 100644
39178--- a/drivers/char/tpm/tpm_acpi.c
39179+++ b/drivers/char/tpm/tpm_acpi.c
39180@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
39181 virt = acpi_os_map_iomem(start, len);
39182 if (!virt) {
39183 kfree(log->bios_event_log);
39184+ log->bios_event_log = NULL;
39185 printk("%s: ERROR - Unable to map memory\n", __func__);
39186 return -EIO;
39187 }
39188
39189- memcpy_fromio(log->bios_event_log, virt, len);
39190+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
39191
39192 acpi_os_unmap_iomem(virt, len);
39193 return 0;
39194diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
39195index 3a56a13..f8cbd25 100644
39196--- a/drivers/char/tpm/tpm_eventlog.c
39197+++ b/drivers/char/tpm/tpm_eventlog.c
39198@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
39199 event = addr;
39200
39201 if ((event->event_type == 0 && event->event_size == 0) ||
39202- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
39203+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
39204 return NULL;
39205
39206 return addr;
39207@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
39208 return NULL;
39209
39210 if ((event->event_type == 0 && event->event_size == 0) ||
39211- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
39212+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
39213 return NULL;
39214
39215 (*pos)++;
39216@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
39217 int i;
39218
39219 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
39220- seq_putc(m, data[i]);
39221+ if (!seq_putc(m, data[i]))
39222+ return -EFAULT;
39223
39224 return 0;
39225 }
39226diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
39227index c3aac4c..88de09f9 100644
39228--- a/drivers/char/virtio_console.c
39229+++ b/drivers/char/virtio_console.c
39230@@ -685,7 +685,7 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
39231 if (to_user) {
39232 ssize_t ret;
39233
39234- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
39235+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
39236 if (ret)
39237 return -EFAULT;
39238 } else {
39239@@ -789,7 +789,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
39240 if (!port_has_data(port) && !port->host_connected)
39241 return 0;
39242
39243- return fill_readbuf(port, ubuf, count, true);
39244+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
39245 }
39246
39247 static int wait_port_writable(struct port *port, bool nonblock)
39248diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
39249index 4386697..754ceca 100644
39250--- a/drivers/clk/clk-composite.c
39251+++ b/drivers/clk/clk-composite.c
39252@@ -192,7 +192,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
39253 struct clk *clk;
39254 struct clk_init_data init;
39255 struct clk_composite *composite;
39256- struct clk_ops *clk_composite_ops;
39257+ clk_ops_no_const *clk_composite_ops;
39258
39259 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
39260 if (!composite) {
39261diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
39262index dd3a78c..386d49c 100644
39263--- a/drivers/clk/socfpga/clk-gate.c
39264+++ b/drivers/clk/socfpga/clk-gate.c
39265@@ -22,6 +22,7 @@
39266 #include <linux/mfd/syscon.h>
39267 #include <linux/of.h>
39268 #include <linux/regmap.h>
39269+#include <asm/pgtable.h>
39270
39271 #include "clk.h"
39272
39273@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
39274 return 0;
39275 }
39276
39277-static struct clk_ops gateclk_ops = {
39278+static clk_ops_no_const gateclk_ops __read_only = {
39279 .prepare = socfpga_clk_prepare,
39280 .recalc_rate = socfpga_clk_recalc_rate,
39281 .get_parent = socfpga_clk_get_parent,
39282@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
39283 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
39284 socfpga_clk->hw.bit_idx = clk_gate[1];
39285
39286- gateclk_ops.enable = clk_gate_ops.enable;
39287- gateclk_ops.disable = clk_gate_ops.disable;
39288+ pax_open_kernel();
39289+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
39290+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
39291+ pax_close_kernel();
39292 }
39293
39294 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
39295diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
39296index de6da95..c98278b 100644
39297--- a/drivers/clk/socfpga/clk-pll.c
39298+++ b/drivers/clk/socfpga/clk-pll.c
39299@@ -21,6 +21,7 @@
39300 #include <linux/io.h>
39301 #include <linux/of.h>
39302 #include <linux/of_address.h>
39303+#include <asm/pgtable.h>
39304
39305 #include "clk.h"
39306
39307@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
39308 CLK_MGR_PLL_CLK_SRC_MASK;
39309 }
39310
39311-static struct clk_ops clk_pll_ops = {
39312+static clk_ops_no_const clk_pll_ops __read_only = {
39313 .recalc_rate = clk_pll_recalc_rate,
39314 .get_parent = clk_pll_get_parent,
39315 };
39316@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
39317 pll_clk->hw.hw.init = &init;
39318
39319 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
39320- clk_pll_ops.enable = clk_gate_ops.enable;
39321- clk_pll_ops.disable = clk_gate_ops.disable;
39322+ pax_open_kernel();
39323+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
39324+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
39325+ pax_close_kernel();
39326
39327 clk = clk_register(NULL, &pll_clk->hw.hw);
39328 if (WARN_ON(IS_ERR(clk))) {
39329diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
39330index b0c18ed..1713a80 100644
39331--- a/drivers/cpufreq/acpi-cpufreq.c
39332+++ b/drivers/cpufreq/acpi-cpufreq.c
39333@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39334 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
39335 per_cpu(acfreq_data, cpu) = data;
39336
39337- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
39338- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39339+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
39340+ pax_open_kernel();
39341+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39342+ pax_close_kernel();
39343+ }
39344
39345 result = acpi_processor_register_performance(data->acpi_data, cpu);
39346 if (result)
39347@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39348 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
39349 break;
39350 case ACPI_ADR_SPACE_FIXED_HARDWARE:
39351- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39352+ pax_open_kernel();
39353+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39354+ pax_close_kernel();
39355 break;
39356 default:
39357 break;
39358@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
39359 if (!msrs)
39360 return;
39361
39362- acpi_cpufreq_driver.boost_supported = true;
39363- acpi_cpufreq_driver.boost_enabled = boost_state(0);
39364+ pax_open_kernel();
39365+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
39366+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
39367+ pax_close_kernel();
39368
39369 cpu_notifier_register_begin();
39370
39371diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
39372index fde97d6..3631eca 100644
39373--- a/drivers/cpufreq/cpufreq-dt.c
39374+++ b/drivers/cpufreq/cpufreq-dt.c
39375@@ -393,7 +393,9 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
39376 if (!IS_ERR(cpu_reg))
39377 regulator_put(cpu_reg);
39378
39379- dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39380+ pax_open_kernel();
39381+ *(void **)&dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39382+ pax_close_kernel();
39383
39384 ret = cpufreq_register_driver(&dt_cpufreq_driver);
39385 if (ret)
39386diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
39387index 24736bb..aae33f4 100644
39388--- a/drivers/cpufreq/cpufreq.c
39389+++ b/drivers/cpufreq/cpufreq.c
39390@@ -2138,7 +2138,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
39391 }
39392
39393 mutex_lock(&cpufreq_governor_mutex);
39394- list_del(&governor->governor_list);
39395+ pax_list_del(&governor->governor_list);
39396 mutex_unlock(&cpufreq_governor_mutex);
39397 return;
39398 }
39399@@ -2354,7 +2354,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
39400 return NOTIFY_OK;
39401 }
39402
39403-static struct notifier_block __refdata cpufreq_cpu_notifier = {
39404+static struct notifier_block cpufreq_cpu_notifier = {
39405 .notifier_call = cpufreq_cpu_callback,
39406 };
39407
39408@@ -2394,13 +2394,17 @@ int cpufreq_boost_trigger_state(int state)
39409 return 0;
39410
39411 write_lock_irqsave(&cpufreq_driver_lock, flags);
39412- cpufreq_driver->boost_enabled = state;
39413+ pax_open_kernel();
39414+ *(bool *)&cpufreq_driver->boost_enabled = state;
39415+ pax_close_kernel();
39416 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39417
39418 ret = cpufreq_driver->set_boost(state);
39419 if (ret) {
39420 write_lock_irqsave(&cpufreq_driver_lock, flags);
39421- cpufreq_driver->boost_enabled = !state;
39422+ pax_open_kernel();
39423+ *(bool *)&cpufreq_driver->boost_enabled = !state;
39424+ pax_close_kernel();
39425 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39426
39427 pr_err("%s: Cannot %s BOOST\n",
39428@@ -2457,8 +2461,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39429
39430 pr_debug("trying to register driver %s\n", driver_data->name);
39431
39432- if (driver_data->setpolicy)
39433- driver_data->flags |= CPUFREQ_CONST_LOOPS;
39434+ if (driver_data->setpolicy) {
39435+ pax_open_kernel();
39436+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
39437+ pax_close_kernel();
39438+ }
39439
39440 write_lock_irqsave(&cpufreq_driver_lock, flags);
39441 if (cpufreq_driver) {
39442@@ -2473,8 +2480,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39443 * Check if driver provides function to enable boost -
39444 * if not, use cpufreq_boost_set_sw as default
39445 */
39446- if (!cpufreq_driver->set_boost)
39447- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39448+ if (!cpufreq_driver->set_boost) {
39449+ pax_open_kernel();
39450+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39451+ pax_close_kernel();
39452+ }
39453
39454 ret = cpufreq_sysfs_create_file(&boost.attr);
39455 if (ret) {
39456diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
39457index 1b44496..b80ff5e 100644
39458--- a/drivers/cpufreq/cpufreq_governor.c
39459+++ b/drivers/cpufreq/cpufreq_governor.c
39460@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39461 struct dbs_data *dbs_data;
39462 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
39463 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
39464- struct od_ops *od_ops = NULL;
39465+ const struct od_ops *od_ops = NULL;
39466 struct od_dbs_tuners *od_tuners = NULL;
39467 struct cs_dbs_tuners *cs_tuners = NULL;
39468 struct cpu_dbs_common_info *cpu_cdbs;
39469@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39470
39471 if ((cdata->governor == GOV_CONSERVATIVE) &&
39472 (!policy->governor->initialized)) {
39473- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39474+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39475
39476 cpufreq_register_notifier(cs_ops->notifier_block,
39477 CPUFREQ_TRANSITION_NOTIFIER);
39478@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39479
39480 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
39481 (policy->governor->initialized == 1)) {
39482- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39483+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39484
39485 cpufreq_unregister_notifier(cs_ops->notifier_block,
39486 CPUFREQ_TRANSITION_NOTIFIER);
39487diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
39488index cc401d1..8197340 100644
39489--- a/drivers/cpufreq/cpufreq_governor.h
39490+++ b/drivers/cpufreq/cpufreq_governor.h
39491@@ -212,7 +212,7 @@ struct common_dbs_data {
39492 void (*exit)(struct dbs_data *dbs_data);
39493
39494 /* Governor specific ops, see below */
39495- void *gov_ops;
39496+ const void *gov_ops;
39497 };
39498
39499 /* Governor Per policy data */
39500@@ -232,7 +232,7 @@ struct od_ops {
39501 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
39502 unsigned int freq_next, unsigned int relation);
39503 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
39504-};
39505+} __no_const;
39506
39507 struct cs_ops {
39508 struct notifier_block *notifier_block;
39509diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
39510index ad3f38f..8f086cd 100644
39511--- a/drivers/cpufreq/cpufreq_ondemand.c
39512+++ b/drivers/cpufreq/cpufreq_ondemand.c
39513@@ -524,7 +524,7 @@ static void od_exit(struct dbs_data *dbs_data)
39514
39515 define_get_cpu_dbs_routines(od_cpu_dbs_info);
39516
39517-static struct od_ops od_ops = {
39518+static struct od_ops od_ops __read_only = {
39519 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
39520 .powersave_bias_target = generic_powersave_bias_target,
39521 .freq_increase = dbs_freq_increase,
39522@@ -579,14 +579,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
39523 (struct cpufreq_policy *, unsigned int, unsigned int),
39524 unsigned int powersave_bias)
39525 {
39526- od_ops.powersave_bias_target = f;
39527+ pax_open_kernel();
39528+ *(void **)&od_ops.powersave_bias_target = f;
39529+ pax_close_kernel();
39530 od_set_powersave_bias(powersave_bias);
39531 }
39532 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
39533
39534 void od_unregister_powersave_bias_handler(void)
39535 {
39536- od_ops.powersave_bias_target = generic_powersave_bias_target;
39537+ pax_open_kernel();
39538+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
39539+ pax_close_kernel();
39540 od_set_powersave_bias(0);
39541 }
39542 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
39543diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
39544index 742eefb..e2fcfc8 100644
39545--- a/drivers/cpufreq/intel_pstate.c
39546+++ b/drivers/cpufreq/intel_pstate.c
39547@@ -133,10 +133,10 @@ struct pstate_funcs {
39548 struct cpu_defaults {
39549 struct pstate_adjust_policy pid_policy;
39550 struct pstate_funcs funcs;
39551-};
39552+} __do_const;
39553
39554 static struct pstate_adjust_policy pid_params;
39555-static struct pstate_funcs pstate_funcs;
39556+static struct pstate_funcs *pstate_funcs;
39557 static int hwp_active;
39558
39559 struct perf_limits {
39560@@ -653,18 +653,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
39561
39562 cpu->pstate.current_pstate = pstate;
39563
39564- pstate_funcs.set(cpu, pstate);
39565+ pstate_funcs->set(cpu, pstate);
39566 }
39567
39568 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
39569 {
39570- cpu->pstate.min_pstate = pstate_funcs.get_min();
39571- cpu->pstate.max_pstate = pstate_funcs.get_max();
39572- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
39573- cpu->pstate.scaling = pstate_funcs.get_scaling();
39574+ cpu->pstate.min_pstate = pstate_funcs->get_min();
39575+ cpu->pstate.max_pstate = pstate_funcs->get_max();
39576+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
39577+ cpu->pstate.scaling = pstate_funcs->get_scaling();
39578
39579- if (pstate_funcs.get_vid)
39580- pstate_funcs.get_vid(cpu);
39581+ if (pstate_funcs->get_vid)
39582+ pstate_funcs->get_vid(cpu);
39583 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
39584 }
39585
39586@@ -988,9 +988,9 @@ static int intel_pstate_msrs_not_valid(void)
39587 rdmsrl(MSR_IA32_APERF, aperf);
39588 rdmsrl(MSR_IA32_MPERF, mperf);
39589
39590- if (!pstate_funcs.get_max() ||
39591- !pstate_funcs.get_min() ||
39592- !pstate_funcs.get_turbo())
39593+ if (!pstate_funcs->get_max() ||
39594+ !pstate_funcs->get_min() ||
39595+ !pstate_funcs->get_turbo())
39596 return -ENODEV;
39597
39598 rdmsrl(MSR_IA32_APERF, tmp);
39599@@ -1004,7 +1004,7 @@ static int intel_pstate_msrs_not_valid(void)
39600 return 0;
39601 }
39602
39603-static void copy_pid_params(struct pstate_adjust_policy *policy)
39604+static void copy_pid_params(const struct pstate_adjust_policy *policy)
39605 {
39606 pid_params.sample_rate_ms = policy->sample_rate_ms;
39607 pid_params.p_gain_pct = policy->p_gain_pct;
39608@@ -1016,12 +1016,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
39609
39610 static void copy_cpu_funcs(struct pstate_funcs *funcs)
39611 {
39612- pstate_funcs.get_max = funcs->get_max;
39613- pstate_funcs.get_min = funcs->get_min;
39614- pstate_funcs.get_turbo = funcs->get_turbo;
39615- pstate_funcs.get_scaling = funcs->get_scaling;
39616- pstate_funcs.set = funcs->set;
39617- pstate_funcs.get_vid = funcs->get_vid;
39618+ pstate_funcs = funcs;
39619 }
39620
39621 #if IS_ENABLED(CONFIG_ACPI)
39622diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
39623index 529cfd9..0e28fff 100644
39624--- a/drivers/cpufreq/p4-clockmod.c
39625+++ b/drivers/cpufreq/p4-clockmod.c
39626@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39627 case 0x0F: /* Core Duo */
39628 case 0x16: /* Celeron Core */
39629 case 0x1C: /* Atom */
39630- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39631+ pax_open_kernel();
39632+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39633+ pax_close_kernel();
39634 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
39635 case 0x0D: /* Pentium M (Dothan) */
39636- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39637+ pax_open_kernel();
39638+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39639+ pax_close_kernel();
39640 /* fall through */
39641 case 0x09: /* Pentium M (Banias) */
39642 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
39643@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39644
39645 /* on P-4s, the TSC runs with constant frequency independent whether
39646 * throttling is active or not. */
39647- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39648+ pax_open_kernel();
39649+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39650+ pax_close_kernel();
39651
39652 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
39653 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
39654diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
39655index 9bb42ba..b01b4a2 100644
39656--- a/drivers/cpufreq/sparc-us3-cpufreq.c
39657+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
39658@@ -18,14 +18,12 @@
39659 #include <asm/head.h>
39660 #include <asm/timer.h>
39661
39662-static struct cpufreq_driver *cpufreq_us3_driver;
39663-
39664 struct us3_freq_percpu_info {
39665 struct cpufreq_frequency_table table[4];
39666 };
39667
39668 /* Indexed by cpu number. */
39669-static struct us3_freq_percpu_info *us3_freq_table;
39670+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
39671
39672 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
39673 * in the Safari config register.
39674@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
39675
39676 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
39677 {
39678- if (cpufreq_us3_driver)
39679- us3_freq_target(policy, 0);
39680+ us3_freq_target(policy, 0);
39681
39682 return 0;
39683 }
39684
39685+static int __init us3_freq_init(void);
39686+static void __exit us3_freq_exit(void);
39687+
39688+static struct cpufreq_driver cpufreq_us3_driver = {
39689+ .init = us3_freq_cpu_init,
39690+ .verify = cpufreq_generic_frequency_table_verify,
39691+ .target_index = us3_freq_target,
39692+ .get = us3_freq_get,
39693+ .exit = us3_freq_cpu_exit,
39694+ .name = "UltraSPARC-III",
39695+
39696+};
39697+
39698 static int __init us3_freq_init(void)
39699 {
39700 unsigned long manuf, impl, ver;
39701- int ret;
39702
39703 if (tlb_type != cheetah && tlb_type != cheetah_plus)
39704 return -ENODEV;
39705@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
39706 (impl == CHEETAH_IMPL ||
39707 impl == CHEETAH_PLUS_IMPL ||
39708 impl == JAGUAR_IMPL ||
39709- impl == PANTHER_IMPL)) {
39710- struct cpufreq_driver *driver;
39711-
39712- ret = -ENOMEM;
39713- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
39714- if (!driver)
39715- goto err_out;
39716-
39717- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
39718- GFP_KERNEL);
39719- if (!us3_freq_table)
39720- goto err_out;
39721-
39722- driver->init = us3_freq_cpu_init;
39723- driver->verify = cpufreq_generic_frequency_table_verify;
39724- driver->target_index = us3_freq_target;
39725- driver->get = us3_freq_get;
39726- driver->exit = us3_freq_cpu_exit;
39727- strcpy(driver->name, "UltraSPARC-III");
39728-
39729- cpufreq_us3_driver = driver;
39730- ret = cpufreq_register_driver(driver);
39731- if (ret)
39732- goto err_out;
39733-
39734- return 0;
39735-
39736-err_out:
39737- if (driver) {
39738- kfree(driver);
39739- cpufreq_us3_driver = NULL;
39740- }
39741- kfree(us3_freq_table);
39742- us3_freq_table = NULL;
39743- return ret;
39744- }
39745+ impl == PANTHER_IMPL))
39746+ return cpufreq_register_driver(&cpufreq_us3_driver);
39747
39748 return -ENODEV;
39749 }
39750
39751 static void __exit us3_freq_exit(void)
39752 {
39753- if (cpufreq_us3_driver) {
39754- cpufreq_unregister_driver(cpufreq_us3_driver);
39755- kfree(cpufreq_us3_driver);
39756- cpufreq_us3_driver = NULL;
39757- kfree(us3_freq_table);
39758- us3_freq_table = NULL;
39759- }
39760+ cpufreq_unregister_driver(&cpufreq_us3_driver);
39761 }
39762
39763 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
39764diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
39765index 7d4a315..21bb886 100644
39766--- a/drivers/cpufreq/speedstep-centrino.c
39767+++ b/drivers/cpufreq/speedstep-centrino.c
39768@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
39769 !cpu_has(cpu, X86_FEATURE_EST))
39770 return -ENODEV;
39771
39772- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
39773- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39774+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
39775+ pax_open_kernel();
39776+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39777+ pax_close_kernel();
39778+ }
39779
39780 if (policy->cpu != 0)
39781 return -ENODEV;
39782diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
39783index 2697e87..c32476c 100644
39784--- a/drivers/cpuidle/driver.c
39785+++ b/drivers/cpuidle/driver.c
39786@@ -194,7 +194,7 @@ static int poll_idle(struct cpuidle_device *dev,
39787
39788 static void poll_idle_init(struct cpuidle_driver *drv)
39789 {
39790- struct cpuidle_state *state = &drv->states[0];
39791+ cpuidle_state_no_const *state = &drv->states[0];
39792
39793 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
39794 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
39795diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
39796index fb9f511..213e6cc 100644
39797--- a/drivers/cpuidle/governor.c
39798+++ b/drivers/cpuidle/governor.c
39799@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
39800 mutex_lock(&cpuidle_lock);
39801 if (__cpuidle_find_governor(gov->name) == NULL) {
39802 ret = 0;
39803- list_add_tail(&gov->governor_list, &cpuidle_governors);
39804+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
39805 if (!cpuidle_curr_governor ||
39806 cpuidle_curr_governor->rating < gov->rating)
39807 cpuidle_switch_governor(gov);
39808diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
39809index 832a2c3..1794080 100644
39810--- a/drivers/cpuidle/sysfs.c
39811+++ b/drivers/cpuidle/sysfs.c
39812@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
39813 NULL
39814 };
39815
39816-static struct attribute_group cpuidle_attr_group = {
39817+static attribute_group_no_const cpuidle_attr_group = {
39818 .attrs = cpuidle_default_attrs,
39819 .name = "cpuidle",
39820 };
39821diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
39822index 8d2a772..33826c9 100644
39823--- a/drivers/crypto/hifn_795x.c
39824+++ b/drivers/crypto/hifn_795x.c
39825@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
39826 MODULE_PARM_DESC(hifn_pll_ref,
39827 "PLL reference clock (pci[freq] or ext[freq], default ext)");
39828
39829-static atomic_t hifn_dev_number;
39830+static atomic_unchecked_t hifn_dev_number;
39831
39832 #define ACRYPTO_OP_DECRYPT 0
39833 #define ACRYPTO_OP_ENCRYPT 1
39834@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
39835 goto err_out_disable_pci_device;
39836
39837 snprintf(name, sizeof(name), "hifn%d",
39838- atomic_inc_return(&hifn_dev_number)-1);
39839+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
39840
39841 err = pci_request_regions(pdev, name);
39842 if (err)
39843diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
39844index 30b538d8..1610d75 100644
39845--- a/drivers/devfreq/devfreq.c
39846+++ b/drivers/devfreq/devfreq.c
39847@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
39848 goto err_out;
39849 }
39850
39851- list_add(&governor->node, &devfreq_governor_list);
39852+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
39853
39854 list_for_each_entry(devfreq, &devfreq_list, node) {
39855 int ret = 0;
39856@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
39857 }
39858 }
39859
39860- list_del(&governor->node);
39861+ pax_list_del((struct list_head *)&governor->node);
39862 err_out:
39863 mutex_unlock(&devfreq_list_lock);
39864
39865diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
39866index 3a2adb1..b3be9a3 100644
39867--- a/drivers/dma/sh/shdma-base.c
39868+++ b/drivers/dma/sh/shdma-base.c
39869@@ -228,8 +228,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
39870 schan->slave_id = -EINVAL;
39871 }
39872
39873- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
39874- sdev->desc_size, GFP_KERNEL);
39875+ schan->desc = kcalloc(sdev->desc_size,
39876+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
39877 if (!schan->desc) {
39878 ret = -ENOMEM;
39879 goto edescalloc;
39880diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
39881index aec8a84..7b45a1f 100644
39882--- a/drivers/dma/sh/shdmac.c
39883+++ b/drivers/dma/sh/shdmac.c
39884@@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
39885 return ret;
39886 }
39887
39888-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
39889+static struct notifier_block sh_dmae_nmi_notifier = {
39890 .notifier_call = sh_dmae_nmi_handler,
39891
39892 /* Run before NMI debug handler and KGDB */
39893diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
39894index 592af5f..bb1d583 100644
39895--- a/drivers/edac/edac_device.c
39896+++ b/drivers/edac/edac_device.c
39897@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
39898 */
39899 int edac_device_alloc_index(void)
39900 {
39901- static atomic_t device_indexes = ATOMIC_INIT(0);
39902+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
39903
39904- return atomic_inc_return(&device_indexes) - 1;
39905+ return atomic_inc_return_unchecked(&device_indexes) - 1;
39906 }
39907 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
39908
39909diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
39910index 670d282..6675f4d 100644
39911--- a/drivers/edac/edac_mc_sysfs.c
39912+++ b/drivers/edac/edac_mc_sysfs.c
39913@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
39914 struct dev_ch_attribute {
39915 struct device_attribute attr;
39916 int channel;
39917-};
39918+} __do_const;
39919
39920 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
39921 struct dev_ch_attribute dev_attr_legacy_##_name = \
39922@@ -1011,14 +1011,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
39923 }
39924
39925 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
39926+ pax_open_kernel();
39927 if (mci->get_sdram_scrub_rate) {
39928- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39929- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39930+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39931+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39932 }
39933 if (mci->set_sdram_scrub_rate) {
39934- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39935- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39936+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39937+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39938 }
39939+ pax_close_kernel();
39940 err = device_create_file(&mci->dev,
39941 &dev_attr_sdram_scrub_rate);
39942 if (err) {
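The edac hunk shows the standard constify workaround used all over this patch: dev_ch_attribute is now __do_const, so its instances live in read-only data, and the one-time setup writes through explicit casts inside a pax_open_kernel()/pax_close_kernel() window. A userspace analogue of that window using mprotect(); the helper name and the 4 KiB page size are assumptions for illustration, and error handling is omitted:

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>

#define PAGE 4096UL

/* patch a read-only object the way pax_open_kernel() lets the kernel do */
static void patch_rodata(void *dst, const void *src, size_t len)
{
	uintptr_t start = (uintptr_t)dst & ~(PAGE - 1);
	size_t span = ((uintptr_t)dst + len + PAGE - 1) / PAGE * PAGE - start;

	mprotect((void *)start, span, PROT_READ | PROT_WRITE);	/* open  */
	memcpy(dst, src, len);
	mprotect((void *)start, span, PROT_READ);		/* close */
}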
39943diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
39944index 2cf44b4d..6dd2dc7 100644
39945--- a/drivers/edac/edac_pci.c
39946+++ b/drivers/edac/edac_pci.c
39947@@ -29,7 +29,7 @@
39948
39949 static DEFINE_MUTEX(edac_pci_ctls_mutex);
39950 static LIST_HEAD(edac_pci_list);
39951-static atomic_t pci_indexes = ATOMIC_INIT(0);
39952+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
39953
39954 /*
39955 * edac_pci_alloc_ctl_info
39956@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
39957 */
39958 int edac_pci_alloc_index(void)
39959 {
39960- return atomic_inc_return(&pci_indexes) - 1;
39961+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
39962 }
39963 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
39964
39965diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
39966index 24d877f..4e30133 100644
39967--- a/drivers/edac/edac_pci_sysfs.c
39968+++ b/drivers/edac/edac_pci_sysfs.c
39969@@ -23,8 +23,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
39970 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
39971 static int edac_pci_poll_msec = 1000; /* one second workq period */
39972
39973-static atomic_t pci_parity_count = ATOMIC_INIT(0);
39974-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
39975+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
39976+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
39977
39978 static struct kobject *edac_pci_top_main_kobj;
39979 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
39980@@ -232,7 +232,7 @@ struct edac_pci_dev_attribute {
39981 void *value;
39982 ssize_t(*show) (void *, char *);
39983 ssize_t(*store) (void *, const char *, size_t);
39984-};
39985+} __do_const;
39986
39987 /* Set of show/store abstract level functions for PCI Parity object */
39988 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
39989@@ -576,7 +576,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39990 edac_printk(KERN_CRIT, EDAC_PCI,
39991 "Signaled System Error on %s\n",
39992 pci_name(dev));
39993- atomic_inc(&pci_nonparity_count);
39994+ atomic_inc_unchecked(&pci_nonparity_count);
39995 }
39996
39997 if (status & (PCI_STATUS_PARITY)) {
39998@@ -584,7 +584,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39999 "Master Data Parity Error on %s\n",
40000 pci_name(dev));
40001
40002- atomic_inc(&pci_parity_count);
40003+ atomic_inc_unchecked(&pci_parity_count);
40004 }
40005
40006 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40007@@ -592,7 +592,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40008 "Detected Parity Error on %s\n",
40009 pci_name(dev));
40010
40011- atomic_inc(&pci_parity_count);
40012+ atomic_inc_unchecked(&pci_parity_count);
40013 }
40014 }
40015
40016@@ -615,7 +615,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40017 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
40018 "Signaled System Error on %s\n",
40019 pci_name(dev));
40020- atomic_inc(&pci_nonparity_count);
40021+ atomic_inc_unchecked(&pci_nonparity_count);
40022 }
40023
40024 if (status & (PCI_STATUS_PARITY)) {
40025@@ -623,7 +623,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40026 "Master Data Parity Error on "
40027 "%s\n", pci_name(dev));
40028
40029- atomic_inc(&pci_parity_count);
40030+ atomic_inc_unchecked(&pci_parity_count);
40031 }
40032
40033 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40034@@ -631,7 +631,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40035 "Detected Parity Error on %s\n",
40036 pci_name(dev));
40037
40038- atomic_inc(&pci_parity_count);
40039+ atomic_inc_unchecked(&pci_parity_count);
40040 }
40041 }
40042 }
40043@@ -669,7 +669,7 @@ void edac_pci_do_parity_check(void)
40044 if (!check_pci_errors)
40045 return;
40046
40047- before_count = atomic_read(&pci_parity_count);
40048+ before_count = atomic_read_unchecked(&pci_parity_count);
40049
40050 /* scan all PCI devices looking for a Parity Error on devices and
40051 * bridges.
40052@@ -681,7 +681,7 @@ void edac_pci_do_parity_check(void)
40053 /* Only if operator has selected panic on PCI Error */
40054 if (edac_pci_get_panic_on_pe()) {
40055 /* If the count is different 'after' from 'before' */
40056- if (before_count != atomic_read(&pci_parity_count))
40057+ if (before_count != atomic_read_unchecked(&pci_parity_count))
40058 panic("EDAC: PCI Parity Error");
40059 }
40060 }
40061diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
40062index c2359a1..8bd119d 100644
40063--- a/drivers/edac/mce_amd.h
40064+++ b/drivers/edac/mce_amd.h
40065@@ -74,7 +74,7 @@ struct amd_decoder_ops {
40066 bool (*mc0_mce)(u16, u8);
40067 bool (*mc1_mce)(u16, u8);
40068 bool (*mc2_mce)(u16, u8);
40069-};
40070+} __no_const;
40071
40072 void amd_report_gart_errors(bool);
40073 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
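__no_const is the opt-out companion to __do_const: the constify plugin makes ops-style structures (all function pointers) const by default, and amd_decoder_ops must stay writable because its hooks are selected at runtime per CPU family. The typedef pattern a few hunks below (fw_card_driver_no_const in core.h) serves the same purpose for a single writable instance of an otherwise constified type. A sketch of the markers, assuming the definitions grsecurity adds to its compiler headers; without the plugin they expand to nothing:

#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))	/* keep this type writable */
#define __do_const __attribute__((do_const))	/* force instances const   */
#else
#define __no_const
#define __do_const
#endif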
40074diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
40075index 57ea7f4..af06b76 100644
40076--- a/drivers/firewire/core-card.c
40077+++ b/drivers/firewire/core-card.c
40078@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
40079 const struct fw_card_driver *driver,
40080 struct device *device)
40081 {
40082- static atomic_t index = ATOMIC_INIT(-1);
40083+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
40084
40085- card->index = atomic_inc_return(&index);
40086+ card->index = atomic_inc_return_unchecked(&index);
40087 card->driver = driver;
40088 card->device = device;
40089 card->current_tlabel = 0;
40090@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
40091
40092 void fw_core_remove_card(struct fw_card *card)
40093 {
40094- struct fw_card_driver dummy_driver = dummy_driver_template;
40095+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
40096
40097 card->driver->update_phy_reg(card, 4,
40098 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
40099diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
40100index f9e3aee..269dbdb 100644
40101--- a/drivers/firewire/core-device.c
40102+++ b/drivers/firewire/core-device.c
40103@@ -256,7 +256,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
40104 struct config_rom_attribute {
40105 struct device_attribute attr;
40106 u32 key;
40107-};
40108+} __do_const;
40109
40110 static ssize_t show_immediate(struct device *dev,
40111 struct device_attribute *dattr, char *buf)
40112diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
40113index eb6935c..3cc2bfa 100644
40114--- a/drivers/firewire/core-transaction.c
40115+++ b/drivers/firewire/core-transaction.c
40116@@ -38,6 +38,7 @@
40117 #include <linux/timer.h>
40118 #include <linux/types.h>
40119 #include <linux/workqueue.h>
40120+#include <linux/sched.h>
40121
40122 #include <asm/byteorder.h>
40123
40124diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
40125index e1480ff6..1a429bd 100644
40126--- a/drivers/firewire/core.h
40127+++ b/drivers/firewire/core.h
40128@@ -111,6 +111,7 @@ struct fw_card_driver {
40129
40130 int (*stop_iso)(struct fw_iso_context *ctx);
40131 };
40132+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
40133
40134 void fw_card_initialize(struct fw_card *card,
40135 const struct fw_card_driver *driver, struct device *device);
40136diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
40137index aff9018..fc87ded 100644
40138--- a/drivers/firewire/ohci.c
40139+++ b/drivers/firewire/ohci.c
40140@@ -2054,10 +2054,12 @@ static void bus_reset_work(struct work_struct *work)
40141 be32_to_cpu(ohci->next_header));
40142 }
40143
40144+#ifndef CONFIG_GRKERNSEC
40145 if (param_remote_dma) {
40146 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
40147 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
40148 }
40149+#endif
40150
40151 spin_unlock_irq(&ohci->lock);
40152
40153@@ -2589,8 +2591,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
40154 unsigned long flags;
40155 int n, ret = 0;
40156
40157+#ifndef CONFIG_GRKERNSEC
40158 if (param_remote_dma)
40159 return 0;
40160+#endif
40161
40162 /*
40163 * FIXME: Make sure this bitmask is cleared when we clear the busReset
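Both ohci.c hunks compile the remote-DMA escape hatch out whenever CONFIG_GRKERNSEC is set. Upstream, loading the driver with the remote_dma parameter (default off) opens the physical-request filters so any node on the FireWire bus can DMA into host memory, which is handy for kernel debugging and fatal for security; grsecurity refuses to honour it at build time. The gate, factored into a sketch helper for clarity (the helper itself is hypothetical, the patch open-codes the #ifdef):

static inline bool remote_dma_allowed(void)
{
#ifdef CONFIG_GRKERNSEC
	return false;			/* hard-disabled at build time */
#else
	return param_remote_dma;	/* upstream opt-in, default off */
#endif
}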
40164diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
40165index 94a58a0..f5eba42 100644
40166--- a/drivers/firmware/dmi-id.c
40167+++ b/drivers/firmware/dmi-id.c
40168@@ -16,7 +16,7 @@
40169 struct dmi_device_attribute{
40170 struct device_attribute dev_attr;
40171 int field;
40172-};
40173+} __do_const;
40174 #define to_dmi_dev_attr(_dev_attr) \
40175 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
40176
40177diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
40178index 2eebd28b..4261350 100644
40179--- a/drivers/firmware/dmi_scan.c
40180+++ b/drivers/firmware/dmi_scan.c
40181@@ -893,7 +893,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
40182 if (buf == NULL)
40183 return -1;
40184
40185- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
40186+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
40187
40188 dmi_unmap(buf);
40189 return 0;
40190diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
40191index 4fd9961..52d60ce 100644
40192--- a/drivers/firmware/efi/cper.c
40193+++ b/drivers/firmware/efi/cper.c
40194@@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
40195 */
40196 u64 cper_next_record_id(void)
40197 {
40198- static atomic64_t seq;
40199+ static atomic64_unchecked_t seq;
40200
40201- if (!atomic64_read(&seq))
40202- atomic64_set(&seq, ((u64)get_seconds()) << 32);
40203+ if (!atomic64_read_unchecked(&seq))
40204+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
40205
40206- return atomic64_inc_return(&seq);
40207+ return atomic64_inc_return_unchecked(&seq);
40208 }
40209 EXPORT_SYMBOL_GPL(cper_next_record_id);
40210
40211diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
40212index 9035c1b..aff45f8 100644
40213--- a/drivers/firmware/efi/efi.c
40214+++ b/drivers/firmware/efi/efi.c
40215@@ -151,14 +151,16 @@ static struct attribute_group efi_subsys_attr_group = {
40216 };
40217
40218 static struct efivars generic_efivars;
40219-static struct efivar_operations generic_ops;
40220+static efivar_operations_no_const generic_ops __read_only;
40221
40222 static int generic_ops_register(void)
40223 {
40224- generic_ops.get_variable = efi.get_variable;
40225- generic_ops.set_variable = efi.set_variable;
40226- generic_ops.get_next_variable = efi.get_next_variable;
40227- generic_ops.query_variable_store = efi_query_variable_store;
40228+ pax_open_kernel();
40229+ *(void **)&generic_ops.get_variable = efi.get_variable;
40230+ *(void **)&generic_ops.set_variable = efi.set_variable;
40231+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
40232+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
40233+ pax_close_kernel();
40234
40235 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
40236 }
40237diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
40238index f256ecd..387dcb1 100644
40239--- a/drivers/firmware/efi/efivars.c
40240+++ b/drivers/firmware/efi/efivars.c
40241@@ -589,7 +589,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
40242 static int
40243 create_efivars_bin_attributes(void)
40244 {
40245- struct bin_attribute *attr;
40246+ bin_attribute_no_const *attr;
40247 int error;
40248
40249 /* new_var */
40250diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
40251index 2f569aa..c95f4fb 100644
40252--- a/drivers/firmware/google/memconsole.c
40253+++ b/drivers/firmware/google/memconsole.c
40254@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
40255 if (!found_memconsole())
40256 return -ENODEV;
40257
40258- memconsole_bin_attr.size = memconsole_length;
40259+ pax_open_kernel();
40260+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
40261+ pax_close_kernel();
40262+
40263 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
40264 }
40265
40266diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
40267index 3cfcfc6..09d6f117 100644
40268--- a/drivers/gpio/gpio-em.c
40269+++ b/drivers/gpio/gpio-em.c
40270@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
40271 struct em_gio_priv *p;
40272 struct resource *io[2], *irq[2];
40273 struct gpio_chip *gpio_chip;
40274- struct irq_chip *irq_chip;
40275+ irq_chip_no_const *irq_chip;
40276 const char *name = dev_name(&pdev->dev);
40277 int ret;
40278
40279diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
40280index 7818cd1..1be40e5 100644
40281--- a/drivers/gpio/gpio-ich.c
40282+++ b/drivers/gpio/gpio-ich.c
40283@@ -94,7 +94,7 @@ struct ichx_desc {
40284 * this option allows driver caching written output values
40285 */
40286 bool use_outlvl_cache;
40287-};
40288+} __do_const;
40289
40290 static struct {
40291 spinlock_t lock;
40292diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
40293index f476ae2..05e1bdd 100644
40294--- a/drivers/gpio/gpio-omap.c
40295+++ b/drivers/gpio/gpio-omap.c
40296@@ -1188,7 +1188,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
40297 const struct omap_gpio_platform_data *pdata;
40298 struct resource *res;
40299 struct gpio_bank *bank;
40300- struct irq_chip *irqc;
40301+ irq_chip_no_const *irqc;
40302 int ret;
40303
40304 match = of_match_device(of_match_ptr(omap_gpio_match), dev);
40305diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
40306index 584484e..e26ebd6 100644
40307--- a/drivers/gpio/gpio-rcar.c
40308+++ b/drivers/gpio/gpio-rcar.c
40309@@ -366,7 +366,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
40310 struct gpio_rcar_priv *p;
40311 struct resource *io, *irq;
40312 struct gpio_chip *gpio_chip;
40313- struct irq_chip *irq_chip;
40314+ irq_chip_no_const *irq_chip;
40315 struct device *dev = &pdev->dev;
40316 const char *name = dev_name(dev);
40317 int ret;
40318diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
40319index c1caa45..f0f97d2 100644
40320--- a/drivers/gpio/gpio-vr41xx.c
40321+++ b/drivers/gpio/gpio-vr41xx.c
40322@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
40323 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
40324 maskl, pendl, maskh, pendh);
40325
40326- atomic_inc(&irq_err_count);
40327+ atomic_inc_unchecked(&irq_err_count);
40328
40329 return -EINVAL;
40330 }
40331diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
40332index 568aa2b..d1204d8 100644
40333--- a/drivers/gpio/gpiolib.c
40334+++ b/drivers/gpio/gpiolib.c
40335@@ -554,8 +554,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
40336 }
40337
40338 if (gpiochip->irqchip) {
40339- gpiochip->irqchip->irq_request_resources = NULL;
40340- gpiochip->irqchip->irq_release_resources = NULL;
40341+ pax_open_kernel();
40342+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
40343+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
40344+ pax_close_kernel();
40345 gpiochip->irqchip = NULL;
40346 }
40347 }
40348@@ -621,8 +623,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
40349 gpiochip->irqchip = NULL;
40350 return -EINVAL;
40351 }
40352- irqchip->irq_request_resources = gpiochip_irq_reqres;
40353- irqchip->irq_release_resources = gpiochip_irq_relres;
40354+
40355+ pax_open_kernel();
40356+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
40357+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
40358+ pax_close_kernel();
40359
40360 /*
40361 * Prepare the mapping since the irqchip shall be orthogonal to
40362diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
40363index 27a37e5..b6c6c71 100644
40364--- a/drivers/gpu/drm/drm_crtc.c
40365+++ b/drivers/gpu/drm/drm_crtc.c
40366@@ -3953,7 +3953,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
40367 goto done;
40368 }
40369
40370- if (copy_to_user(&enum_ptr[copied].name,
40371+ if (copy_to_user(enum_ptr[copied].name,
40372 &prop_enum->name, DRM_PROP_NAME_LEN)) {
40373 ret = -EFAULT;
40374 goto done;
40375diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
40376index 4f41377..ee33f40 100644
40377--- a/drivers/gpu/drm/drm_drv.c
40378+++ b/drivers/gpu/drm/drm_drv.c
40379@@ -444,7 +444,7 @@ void drm_unplug_dev(struct drm_device *dev)
40380
40381 drm_device_set_unplugged(dev);
40382
40383- if (dev->open_count == 0) {
40384+ if (local_read(&dev->open_count) == 0) {
40385 drm_put_dev(dev);
40386 }
40387 mutex_unlock(&drm_global_mutex);
40388diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
40389index 0b9514b..6acd174 100644
40390--- a/drivers/gpu/drm/drm_fops.c
40391+++ b/drivers/gpu/drm/drm_fops.c
40392@@ -89,7 +89,7 @@ int drm_open(struct inode *inode, struct file *filp)
40393 return PTR_ERR(minor);
40394
40395 dev = minor->dev;
40396- if (!dev->open_count++)
40397+ if (local_inc_return(&dev->open_count) == 1)
40398 need_setup = 1;
40399
40400 /* share address_space across all char-devs of a single device */
40401@@ -106,7 +106,7 @@ int drm_open(struct inode *inode, struct file *filp)
40402 return 0;
40403
40404 err_undo:
40405- dev->open_count--;
40406+ local_dec(&dev->open_count);
40407 drm_minor_release(minor);
40408 return retcode;
40409 }
40410@@ -376,7 +376,7 @@ int drm_release(struct inode *inode, struct file *filp)
40411
40412 mutex_lock(&drm_global_mutex);
40413
40414- DRM_DEBUG("open_count = %d\n", dev->open_count);
40415+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
40416
40417 mutex_lock(&dev->struct_mutex);
40418 list_del(&file_priv->lhead);
40419@@ -389,10 +389,10 @@ int drm_release(struct inode *inode, struct file *filp)
40420 * Begin inline drm_release
40421 */
40422
40423- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
40424+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
40425 task_pid_nr(current),
40426 (long)old_encode_dev(file_priv->minor->kdev->devt),
40427- dev->open_count);
40428+ local_read(&dev->open_count));
40429
40430 /* Release any auth tokens that might point to this file_priv,
40431 (do that under the drm_global_mutex) */
40432@@ -465,7 +465,7 @@ int drm_release(struct inode *inode, struct file *filp)
40433 * End inline drm_release
40434 */
40435
40436- if (!--dev->open_count) {
40437+ if (local_dec_and_test(&dev->open_count)) {
40438 retcode = drm_lastclose(dev);
40439 if (drm_device_is_unplugged(dev))
40440 drm_put_dev(dev);
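The drm_fops/drm_drv hunks convert dev->open_count from a plain int under drm_global_mutex into a local_t (the matching field change sits in the DRM headers elsewhere in this patch), so open/close accounting no longer leans on the mutex for atomicity; local_read() returns long, hence the %d to %ld format fixes. The idiom as a kernel-style sketch, with first_open()/last_close() standing in for the driver's real setup and teardown paths:

#include <asm/local.h>

static local_t open_count = LOCAL_INIT(0);

/* was: if (!dev->open_count++) ... */
if (local_inc_return(&open_count) == 1)
	first_open();

/* was: if (!--dev->open_count) ... */
if (local_dec_and_test(&open_count))
	last_close();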
40441diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
40442index 3d2e91c..d31c4c9 100644
40443--- a/drivers/gpu/drm/drm_global.c
40444+++ b/drivers/gpu/drm/drm_global.c
40445@@ -36,7 +36,7 @@
40446 struct drm_global_item {
40447 struct mutex mutex;
40448 void *object;
40449- int refcount;
40450+ atomic_t refcount;
40451 };
40452
40453 static struct drm_global_item glob[DRM_GLOBAL_NUM];
40454@@ -49,7 +49,7 @@ void drm_global_init(void)
40455 struct drm_global_item *item = &glob[i];
40456 mutex_init(&item->mutex);
40457 item->object = NULL;
40458- item->refcount = 0;
40459+ atomic_set(&item->refcount, 0);
40460 }
40461 }
40462
40463@@ -59,7 +59,7 @@ void drm_global_release(void)
40464 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
40465 struct drm_global_item *item = &glob[i];
40466 BUG_ON(item->object != NULL);
40467- BUG_ON(item->refcount != 0);
40468+ BUG_ON(atomic_read(&item->refcount) != 0);
40469 }
40470 }
40471
40472@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40473 struct drm_global_item *item = &glob[ref->global_type];
40474
40475 mutex_lock(&item->mutex);
40476- if (item->refcount == 0) {
40477+ if (atomic_read(&item->refcount) == 0) {
40478 item->object = kzalloc(ref->size, GFP_KERNEL);
40479 if (unlikely(item->object == NULL)) {
40480 ret = -ENOMEM;
40481@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40482 goto out_err;
40483
40484 }
40485- ++item->refcount;
40486+ atomic_inc(&item->refcount);
40487 ref->object = item->object;
40488 mutex_unlock(&item->mutex);
40489 return 0;
40490@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
40491 struct drm_global_item *item = &glob[ref->global_type];
40492
40493 mutex_lock(&item->mutex);
40494- BUG_ON(item->refcount == 0);
40495+ BUG_ON(atomic_read(&item->refcount) == 0);
40496 BUG_ON(ref->object != item->object);
40497- if (--item->refcount == 0) {
40498+ if (atomic_dec_and_test(&item->refcount)) {
40499 ref->release(ref);
40500 item->object = NULL;
40501 }
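drm_global's refcount was already serialized by item->mutex, so the atomic_t conversion is not about races: it routes the counter through the PAX_REFCOUNT instrumentation, which saturates instead of wrapping and thereby closes refcount-overflow use-after-free bugs. Roughly what the instrumented increment does, as an illustration only (the real check is inline assembly):

#include <limits.h>

static inline void atomic_inc_refcount_sketch(int *v)
{
	int n = __sync_add_and_fetch(v, 1);
	if (n == INT_MIN) {			/* wrapped past INT_MAX */
		__sync_sub_and_fetch(v, 1);	/* pin at the maximum   */
		/* kernel: report the overflow and kill the offender */
	}
}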
40502diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
40503index 51efebd..2b70935 100644
40504--- a/drivers/gpu/drm/drm_info.c
40505+++ b/drivers/gpu/drm/drm_info.c
40506@@ -76,10 +76,13 @@ int drm_vm_info(struct seq_file *m, void *data)
40507 struct drm_local_map *map;
40508 struct drm_map_list *r_list;
40509
40510- /* Hardcoded from _DRM_FRAME_BUFFER,
40511- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
40512- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
40513- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
40514+ static const char * const types[] = {
40515+ [_DRM_FRAME_BUFFER] = "FB",
40516+ [_DRM_REGISTERS] = "REG",
40517+ [_DRM_SHM] = "SHM",
40518+ [_DRM_AGP] = "AGP",
40519+ [_DRM_SCATTER_GATHER] = "SG",
40520+ [_DRM_CONSISTENT] = "PCI"};
40521 const char *type;
40522 int i;
40523
40524@@ -90,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
40525 map = r_list->map;
40526 if (!map)
40527 continue;
40528- if (map->type < 0 || map->type > 5)
40529+ if (map->type >= ARRAY_SIZE(types))
40530 type = "??";
40531 else
40532 type = types[map->type];
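Two things improve in the drm_info hunk: designated initializers tie each string to its _DRM_* enumerator, so the table cannot drift if the enum is reordered, and the magic bound "> 5" becomes ARRAY_SIZE(types), with the "< 0" half dropped because the enumerators are non-negative. The general shape, self-contained:

enum map_type { MAP_FB, MAP_REG, MAP_SHM };

static const char * const type_names[] = {
	[MAP_FB]  = "FB",	/* bound to the enumerator, not the position */
	[MAP_REG] = "REG",
	[MAP_SHM] = "SHM",
};

const char *name_of(enum map_type t)
{
	/* bound check via the array size; NULL guards holes in the table */
	if (t >= sizeof(type_names) / sizeof(type_names[0]) || !type_names[t])
		return "??";
	return type_names[t];
}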
40533diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
40534index 2f4c4343..dd12cd2 100644
40535--- a/drivers/gpu/drm/drm_ioc32.c
40536+++ b/drivers/gpu/drm/drm_ioc32.c
40537@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
40538 request = compat_alloc_user_space(nbytes);
40539 if (!access_ok(VERIFY_WRITE, request, nbytes))
40540 return -EFAULT;
40541- list = (struct drm_buf_desc *) (request + 1);
40542+ list = (struct drm_buf_desc __user *) (request + 1);
40543
40544 if (__put_user(count, &request->count)
40545 || __put_user(list, &request->list))
40546@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
40547 request = compat_alloc_user_space(nbytes);
40548 if (!access_ok(VERIFY_WRITE, request, nbytes))
40549 return -EFAULT;
40550- list = (struct drm_buf_pub *) (request + 1);
40551+ list = (struct drm_buf_pub __user *) (request + 1);
40552
40553 if (__put_user(count, &request->count)
40554 || __put_user(list, &request->list))
40555@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
40556 return 0;
40557 }
40558
40559-drm_ioctl_compat_t *drm_compat_ioctls[] = {
40560+drm_ioctl_compat_t drm_compat_ioctls[] = {
40561 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
40562 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
40563 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
40564@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
40565 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40566 {
40567 unsigned int nr = DRM_IOCTL_NR(cmd);
40568- drm_ioctl_compat_t *fn;
40569 int ret;
40570
40571 /* Assume that ioctls without an explicit compat routine will just
40572@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40573 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
40574 return drm_ioctl(filp, cmd, arg);
40575
40576- fn = drm_compat_ioctls[nr];
40577-
40578- if (fn != NULL)
40579- ret = (*fn) (filp, cmd, arg);
40580+ if (drm_compat_ioctls[nr] != NULL)
40581+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
40582 else
40583 ret = drm_ioctl(filp, cmd, arg);
40584
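drm_ioctl_compat_t loses a "*" here because elsewhere in this patch the typedef itself flips from a function type to a function-pointer type (an inference from the matching header hunks). The tables keep the same layout, but as arrays of a pointer typedef they become eligible for constification, and dropping the local "fn" temporaries means nothing ever holds a writable copy of a table slot. The difference in miniature, with hypothetical names:

typedef int handler_fn(int);		/* function type: upstream style   */
typedef int (*handler_t)(int);		/* pointer type: this patch's style */

static int h0(int x) { return x + 1; }

static handler_fn *table_old[] = { h0 };	/* array of pointers        */
static const handler_t table_new[] = { h0 };	/* same layout, const-able  */

int dispatch(unsigned int nr, int arg)
{
	if (nr >= sizeof(table_new) / sizeof(table_new[0]) || !table_new[nr])
		return -1;
	return table_new[nr](arg);	/* table_old kept only for contrast */
}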
40585diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
40586index 00587a1..57a65ca 100644
40587--- a/drivers/gpu/drm/drm_ioctl.c
40588+++ b/drivers/gpu/drm/drm_ioctl.c
40589@@ -642,7 +642,7 @@ long drm_ioctl(struct file *filp,
40590 struct drm_file *file_priv = filp->private_data;
40591 struct drm_device *dev;
40592 const struct drm_ioctl_desc *ioctl = NULL;
40593- drm_ioctl_t *func;
40594+ drm_ioctl_no_const_t func;
40595 unsigned int nr = DRM_IOCTL_NR(cmd);
40596 int retcode = -EINVAL;
40597 char stack_kdata[128];
40598diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
40599index 93ec5dc..82acbaf 100644
40600--- a/drivers/gpu/drm/i810/i810_drv.h
40601+++ b/drivers/gpu/drm/i810/i810_drv.h
40602@@ -110,8 +110,8 @@ typedef struct drm_i810_private {
40603 int page_flipping;
40604
40605 wait_queue_head_t irq_queue;
40606- atomic_t irq_received;
40607- atomic_t irq_emitted;
40608+ atomic_unchecked_t irq_received;
40609+ atomic_unchecked_t irq_emitted;
40610
40611 int front_offset;
40612 } drm_i810_private_t;
40613diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
40614index ecee3bc..ad5ae67 100644
40615--- a/drivers/gpu/drm/i915/i915_dma.c
40616+++ b/drivers/gpu/drm/i915/i915_dma.c
40617@@ -356,7 +356,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
40618 * locking inversion with the driver load path. And the access here is
40619 * completely racy anyway. So don't bother with locking for now.
40620 */
40621- return dev->open_count == 0;
40622+ return local_read(&dev->open_count) == 0;
40623 }
40624
40625 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
40626diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40627index 1173831..7dfb389 100644
40628--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40629+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40630@@ -863,12 +863,12 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
40631 static int
40632 validate_exec_list(struct drm_device *dev,
40633 struct drm_i915_gem_exec_object2 *exec,
40634- int count)
40635+ unsigned int count)
40636 {
40637 unsigned relocs_total = 0;
40638 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
40639 unsigned invalid_flags;
40640- int i;
40641+ unsigned int i;
40642
40643 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
40644 if (USES_FULL_PPGTT(dev))
40645diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
40646index 176de63..1ef9ac7 100644
40647--- a/drivers/gpu/drm/i915/i915_ioc32.c
40648+++ b/drivers/gpu/drm/i915/i915_ioc32.c
40649@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
40650 (unsigned long)request);
40651 }
40652
40653-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40654+static drm_ioctl_compat_t i915_compat_ioctls[] = {
40655 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
40656 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
40657 [DRM_I915_GETPARAM] = compat_i915_getparam,
40658@@ -201,18 +201,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40659 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40660 {
40661 unsigned int nr = DRM_IOCTL_NR(cmd);
40662- drm_ioctl_compat_t *fn = NULL;
40663 int ret;
40664
40665 if (nr < DRM_COMMAND_BASE)
40666 return drm_compat_ioctl(filp, cmd, arg);
40667
40668- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
40669- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40670-
40671- if (fn != NULL)
40672+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls)) {
40673+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40674 ret = (*fn) (filp, cmd, arg);
40675- else
40676+ } else
40677 ret = drm_ioctl(filp, cmd, arg);
40678
40679 return ret;
40680diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
40681index 791b00e..4d10235 100644
40682--- a/drivers/gpu/drm/i915/intel_display.c
40683+++ b/drivers/gpu/drm/i915/intel_display.c
40684@@ -12939,13 +12939,13 @@ struct intel_quirk {
40685 int subsystem_vendor;
40686 int subsystem_device;
40687 void (*hook)(struct drm_device *dev);
40688-};
40689+} __do_const;
40690
40691 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
40692 struct intel_dmi_quirk {
40693 void (*hook)(struct drm_device *dev);
40694 const struct dmi_system_id (*dmi_id_list)[];
40695-};
40696+} __do_const;
40697
40698 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40699 {
40700@@ -12953,18 +12953,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40701 return 1;
40702 }
40703
40704-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40705+static const struct dmi_system_id intel_dmi_quirks_table[] = {
40706 {
40707- .dmi_id_list = &(const struct dmi_system_id[]) {
40708- {
40709- .callback = intel_dmi_reverse_brightness,
40710- .ident = "NCR Corporation",
40711- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40712- DMI_MATCH(DMI_PRODUCT_NAME, ""),
40713- },
40714- },
40715- { } /* terminating entry */
40716+ .callback = intel_dmi_reverse_brightness,
40717+ .ident = "NCR Corporation",
40718+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40719+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
40720 },
40721+ },
40722+ { } /* terminating entry */
40723+};
40724+
40725+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40726+ {
40727+ .dmi_id_list = &intel_dmi_quirks_table,
40728 .hook = quirk_invert_brightness,
40729 },
40730 };
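The intel_display rewrite hoists the DMI match list out of an anonymous compound literal into the named, file-scope intel_dmi_quirks_table. The rows are identical; the likely point (an inference, the patch gives no rationale) is that a named static const array is a plain .rodata object, letting intel_dmi_quirk itself carry __do_const without the plugin tripping over the embedded compound literal. Shape of the transformation, with the match rows elided:

/* before: pointer into an unnamed compound-literal array
 *   .dmi_id_list = &(const struct dmi_system_id[]) { { ... }, { } }  */

/* after: the same rows as a named object, referenced by address */
static const struct dmi_system_id quirks_table[] = {
	{ .callback = intel_dmi_reverse_brightness,
	  .ident = "NCR Corporation",
	  /* .matches as in the hunk above */ },
	{ }				/* terminating entry */
};

static const struct intel_dmi_quirk quirks[] = {
	{ .dmi_id_list = &quirks_table, .hook = quirk_invert_brightness },
};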
40731diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
40732index b250130..98df2a4 100644
40733--- a/drivers/gpu/drm/imx/imx-drm-core.c
40734+++ b/drivers/gpu/drm/imx/imx-drm-core.c
40735@@ -356,7 +356,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
40736 if (imxdrm->pipes >= MAX_CRTC)
40737 return -EINVAL;
40738
40739- if (imxdrm->drm->open_count)
40740+ if (local_read(&imxdrm->drm->open_count))
40741 return -EBUSY;
40742
40743 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
40744diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
40745index b4a20149..219ab78 100644
40746--- a/drivers/gpu/drm/mga/mga_drv.h
40747+++ b/drivers/gpu/drm/mga/mga_drv.h
40748@@ -122,9 +122,9 @@ typedef struct drm_mga_private {
40749 u32 clear_cmd;
40750 u32 maccess;
40751
40752- atomic_t vbl_received; /**< Number of vblanks received. */
40753+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
40754 wait_queue_head_t fence_queue;
40755- atomic_t last_fence_retired;
40756+ atomic_unchecked_t last_fence_retired;
40757 u32 next_fence_to_post;
40758
40759 unsigned int fb_cpp;
40760diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
40761index 729bfd5..ead8823 100644
40762--- a/drivers/gpu/drm/mga/mga_ioc32.c
40763+++ b/drivers/gpu/drm/mga/mga_ioc32.c
40764@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
40765 return 0;
40766 }
40767
40768-drm_ioctl_compat_t *mga_compat_ioctls[] = {
40769+drm_ioctl_compat_t mga_compat_ioctls[] = {
40770 [DRM_MGA_INIT] = compat_mga_init,
40771 [DRM_MGA_GETPARAM] = compat_mga_getparam,
40772 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
40773@@ -208,18 +208,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
40774 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40775 {
40776 unsigned int nr = DRM_IOCTL_NR(cmd);
40777- drm_ioctl_compat_t *fn = NULL;
40778 int ret;
40779
40780 if (nr < DRM_COMMAND_BASE)
40781 return drm_compat_ioctl(filp, cmd, arg);
40782
40783- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
40784- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40785-
40786- if (fn != NULL)
40787+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls)) {
40788+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40789 ret = (*fn) (filp, cmd, arg);
40790- else
40791+ } else
40792 ret = drm_ioctl(filp, cmd, arg);
40793
40794 return ret;
40795diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
40796index 1b071b8..de8601a 100644
40797--- a/drivers/gpu/drm/mga/mga_irq.c
40798+++ b/drivers/gpu/drm/mga/mga_irq.c
40799@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
40800 if (crtc != 0)
40801 return 0;
40802
40803- return atomic_read(&dev_priv->vbl_received);
40804+ return atomic_read_unchecked(&dev_priv->vbl_received);
40805 }
40806
40807
40808@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
40809 /* VBLANK interrupt */
40810 if (status & MGA_VLINEPEN) {
40811 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
40812- atomic_inc(&dev_priv->vbl_received);
40813+ atomic_inc_unchecked(&dev_priv->vbl_received);
40814 drm_handle_vblank(dev, 0);
40815 handled = 1;
40816 }
40817@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
40818 if ((prim_start & ~0x03) != (prim_end & ~0x03))
40819 MGA_WRITE(MGA_PRIMEND, prim_end);
40820
40821- atomic_inc(&dev_priv->last_fence_retired);
40822+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
40823 wake_up(&dev_priv->fence_queue);
40824 handled = 1;
40825 }
40826@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
40827 * using fences.
40828 */
40829 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
40830- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
40831+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
40832 - *sequence) <= (1 << 23)));
40833
40834 *sequence = cur_fence;
40835diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
40836index 7df6acc..84bbe52 100644
40837--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
40838+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
40839@@ -963,7 +963,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
40840 struct bit_table {
40841 const char id;
40842 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
40843-};
40844+} __no_const;
40845
40846 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
40847
40848diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
40849index 8ae36f2..1147a30 100644
40850--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
40851+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
40852@@ -121,7 +121,6 @@ struct nouveau_drm {
40853 struct drm_global_reference mem_global_ref;
40854 struct ttm_bo_global_ref bo_global_ref;
40855 struct ttm_bo_device bdev;
40856- atomic_t validate_sequence;
40857 int (*move)(struct nouveau_channel *,
40858 struct ttm_buffer_object *,
40859 struct ttm_mem_reg *, struct ttm_mem_reg *);
40860diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40861index 462679a..88e32a7 100644
40862--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40863+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40864@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
40865 unsigned long arg)
40866 {
40867 unsigned int nr = DRM_IOCTL_NR(cmd);
40868- drm_ioctl_compat_t *fn = NULL;
40869+ drm_ioctl_compat_t fn = NULL;
40870 int ret;
40871
40872 if (nr < DRM_COMMAND_BASE)
40873diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
40874index 3d1cfcb..0542700 100644
40875--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
40876+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
40877@@ -127,11 +127,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40878 }
40879
40880 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
40881- nouveau_vram_manager_init,
40882- nouveau_vram_manager_fini,
40883- nouveau_vram_manager_new,
40884- nouveau_vram_manager_del,
40885- nouveau_vram_manager_debug
40886+ .init = nouveau_vram_manager_init,
40887+ .takedown = nouveau_vram_manager_fini,
40888+ .get_node = nouveau_vram_manager_new,
40889+ .put_node = nouveau_vram_manager_del,
40890+ .debug = nouveau_vram_manager_debug
40891 };
40892
40893 static int
40894@@ -195,11 +195,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40895 }
40896
40897 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
40898- nouveau_gart_manager_init,
40899- nouveau_gart_manager_fini,
40900- nouveau_gart_manager_new,
40901- nouveau_gart_manager_del,
40902- nouveau_gart_manager_debug
40903+ .init = nouveau_gart_manager_init,
40904+ .takedown = nouveau_gart_manager_fini,
40905+ .get_node = nouveau_gart_manager_new,
40906+ .put_node = nouveau_gart_manager_del,
40907+ .debug = nouveau_gart_manager_debug
40908 };
40909
40910 /*XXX*/
40911@@ -268,11 +268,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40912 }
40913
40914 const struct ttm_mem_type_manager_func nv04_gart_manager = {
40915- nv04_gart_manager_init,
40916- nv04_gart_manager_fini,
40917- nv04_gart_manager_new,
40918- nv04_gart_manager_del,
40919- nv04_gart_manager_debug
40920+ .init = nv04_gart_manager_init,
40921+ .takedown = nv04_gart_manager_fini,
40922+ .get_node = nv04_gart_manager_new,
40923+ .put_node = nv04_gart_manager_del,
40924+ .debug = nv04_gart_manager_debug
40925 };
40926
40927 int
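The three nouveau tables above switch from positional to designated initializers. Behaviour is unchanged; the designated form binds each function to its ttm_mem_type_manager_func field by name, so it survives field reordering and doubles as documentation. The contrast in a self-contained sketch:

struct ops {
	int  (*init)(void);
	void (*takedown)(void);
};

static int  my_init(void)     { return 0; }
static void my_takedown(void) { }

/* positional: silently miswires if the struct's field order changes */
static const struct ops a = { my_init, my_takedown };

/* designated: order-independent and self-describing */
static const struct ops b = { .takedown = my_takedown, .init = my_init };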
40928diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
40929index c7592ec..dd45ebc 100644
40930--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
40931+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
40932@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
40933 * locking inversion with the driver load path. And the access here is
40934 * completely racy anyway. So don't bother with locking for now.
40935 */
40936- return dev->open_count == 0;
40937+ return local_read(&dev->open_count) == 0;
40938 }
40939
40940 static const struct vga_switcheroo_client_ops
40941diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
40942index 9782364..89bd954 100644
40943--- a/drivers/gpu/drm/qxl/qxl_cmd.c
40944+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
40945@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
40946 int ret;
40947
40948 mutex_lock(&qdev->async_io_mutex);
40949- irq_num = atomic_read(&qdev->irq_received_io_cmd);
40950+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
40951 if (qdev->last_sent_io_cmd > irq_num) {
40952 if (intr)
40953 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
40954- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40955+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40956 else
40957 ret = wait_event_timeout(qdev->io_cmd_event,
40958- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40959+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40960 /* 0 is timeout, just bail the "hw" has gone away */
40961 if (ret <= 0)
40962 goto out;
40963- irq_num = atomic_read(&qdev->irq_received_io_cmd);
40964+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
40965 }
40966 outb(val, addr);
40967 qdev->last_sent_io_cmd = irq_num + 1;
40968 if (intr)
40969 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
40970- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40971+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40972 else
40973 ret = wait_event_timeout(qdev->io_cmd_event,
40974- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40975+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40976 out:
40977 if (ret > 0)
40978 ret = 0;
40979diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
40980index 6911b8c..89d6867 100644
40981--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
40982+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
40983@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
40984 struct drm_info_node *node = (struct drm_info_node *) m->private;
40985 struct qxl_device *qdev = node->minor->dev->dev_private;
40986
40987- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
40988- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
40989- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
40990- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
40991+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
40992+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
40993+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
40994+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
40995 seq_printf(m, "%d\n", qdev->irq_received_error);
40996 return 0;
40997 }
40998diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
40999index 7c6cafe..460f542 100644
41000--- a/drivers/gpu/drm/qxl/qxl_drv.h
41001+++ b/drivers/gpu/drm/qxl/qxl_drv.h
41002@@ -290,10 +290,10 @@ struct qxl_device {
41003 unsigned int last_sent_io_cmd;
41004
41005 /* interrupt handling */
41006- atomic_t irq_received;
41007- atomic_t irq_received_display;
41008- atomic_t irq_received_cursor;
41009- atomic_t irq_received_io_cmd;
41010+ atomic_unchecked_t irq_received;
41011+ atomic_unchecked_t irq_received_display;
41012+ atomic_unchecked_t irq_received_cursor;
41013+ atomic_unchecked_t irq_received_io_cmd;
41014 unsigned irq_received_error;
41015 wait_queue_head_t display_event;
41016 wait_queue_head_t cursor_event;
41017diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
41018index b110883..dd06418 100644
41019--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
41020+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
41021@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41022
41023 /* TODO copy slow path code from i915 */
41024 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
41025- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
41026+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
41027
41028 {
41029 struct qxl_drawable *draw = fb_cmd;
41030@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41031 struct drm_qxl_reloc reloc;
41032
41033 if (copy_from_user(&reloc,
41034- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
41035+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
41036 sizeof(reloc))) {
41037 ret = -EFAULT;
41038 goto out_free_bos;
41039@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
41040
41041 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
41042
41043- struct drm_qxl_command *commands =
41044- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
41045+ struct drm_qxl_command __user *commands =
41046+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
41047
41048- if (copy_from_user(&user_cmd, &commands[cmd_num],
41049+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
41050 sizeof(user_cmd)))
41051 return -EFAULT;
41052
41053diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
41054index 0bf1e20..42a7310 100644
41055--- a/drivers/gpu/drm/qxl/qxl_irq.c
41056+++ b/drivers/gpu/drm/qxl/qxl_irq.c
41057@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
41058 if (!pending)
41059 return IRQ_NONE;
41060
41061- atomic_inc(&qdev->irq_received);
41062+ atomic_inc_unchecked(&qdev->irq_received);
41063
41064 if (pending & QXL_INTERRUPT_DISPLAY) {
41065- atomic_inc(&qdev->irq_received_display);
41066+ atomic_inc_unchecked(&qdev->irq_received_display);
41067 wake_up_all(&qdev->display_event);
41068 qxl_queue_garbage_collect(qdev, false);
41069 }
41070 if (pending & QXL_INTERRUPT_CURSOR) {
41071- atomic_inc(&qdev->irq_received_cursor);
41072+ atomic_inc_unchecked(&qdev->irq_received_cursor);
41073 wake_up_all(&qdev->cursor_event);
41074 }
41075 if (pending & QXL_INTERRUPT_IO_CMD) {
41076- atomic_inc(&qdev->irq_received_io_cmd);
41077+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
41078 wake_up_all(&qdev->io_cmd_event);
41079 }
41080 if (pending & QXL_INTERRUPT_ERROR) {
41081@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
41082 init_waitqueue_head(&qdev->io_cmd_event);
41083 INIT_WORK(&qdev->client_monitors_config_work,
41084 qxl_client_monitors_config_work_func);
41085- atomic_set(&qdev->irq_received, 0);
41086- atomic_set(&qdev->irq_received_display, 0);
41087- atomic_set(&qdev->irq_received_cursor, 0);
41088- atomic_set(&qdev->irq_received_io_cmd, 0);
41089+ atomic_set_unchecked(&qdev->irq_received, 0);
41090+ atomic_set_unchecked(&qdev->irq_received_display, 0);
41091+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
41092+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
41093 qdev->irq_received_error = 0;
41094 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
41095 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
41096diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
41097index 0cbc4c9..0e46686 100644
41098--- a/drivers/gpu/drm/qxl/qxl_ttm.c
41099+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
41100@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
41101 }
41102 }
41103
41104-static struct vm_operations_struct qxl_ttm_vm_ops;
41105+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
41106 static const struct vm_operations_struct *ttm_vm_ops;
41107
41108 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41109@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
41110 return r;
41111 if (unlikely(ttm_vm_ops == NULL)) {
41112 ttm_vm_ops = vma->vm_ops;
41113+ pax_open_kernel();
41114 qxl_ttm_vm_ops = *ttm_vm_ops;
41115 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
41116+ pax_close_kernel();
41117 }
41118 vma->vm_ops = &qxl_ttm_vm_ops;
41119 return 0;
41120@@ -464,25 +466,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
41121 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
41122 {
41123 #if defined(CONFIG_DEBUG_FS)
41124- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
41125- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
41126- unsigned i;
41127+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
41128+ {
41129+ .name = "qxl_mem_mm",
41130+ .show = &qxl_mm_dump_table,
41131+ },
41132+ {
41133+ .name = "qxl_surf_mm",
41134+ .show = &qxl_mm_dump_table,
41135+ }
41136+ };
41137
41138- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
41139- if (i == 0)
41140- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
41141- else
41142- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
41143- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
41144- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
41145- qxl_mem_types_list[i].driver_features = 0;
41146- if (i == 0)
41147- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41148- else
41149- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41150+ pax_open_kernel();
41151+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41152+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41153+ pax_close_kernel();
41154
41155- }
41156- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
41157+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
41158 #else
41159 return 0;
41160 #endif
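qxl_ttm consolidates two patterns from earlier hunks: the debugfs table is now built at compile time, with only the two device-local .data pointers patched through a write window, and qxl_ttm_vm_ops becomes a write-once template that copies TTM's vm_ops and overrides just .fault under pax_open_kernel(). The template idiom, assuming the *_no_const alias types this patch introduces; my_fault stands in for the driver's wrapper:

static vm_operations_struct_no_const my_vm_ops __read_only;
static const struct vm_operations_struct *ttm_vm_ops;

/* one-time setup on first mmap() */
if (unlikely(ttm_vm_ops == NULL)) {
	ttm_vm_ops = vma->vm_ops;
	pax_open_kernel();
	my_vm_ops = *ttm_vm_ops;	/* inherit every TTM handler ...    */
	my_vm_ops.fault = &my_fault;	/* ... then hook only the fault path */
	pax_close_kernel();
}
vma->vm_ops = &my_vm_ops;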
41161diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
41162index 2c45ac9..5d740f8 100644
41163--- a/drivers/gpu/drm/r128/r128_cce.c
41164+++ b/drivers/gpu/drm/r128/r128_cce.c
41165@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
41166
41167 /* GH: Simple idle check.
41168 */
41169- atomic_set(&dev_priv->idle_count, 0);
41170+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41171
41172 /* We don't support anything other than bus-mastering ring mode,
41173 * but the ring can be in either AGP or PCI space for the ring
41174diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
41175index 723e5d6..102dbaf 100644
41176--- a/drivers/gpu/drm/r128/r128_drv.h
41177+++ b/drivers/gpu/drm/r128/r128_drv.h
41178@@ -93,14 +93,14 @@ typedef struct drm_r128_private {
41179 int is_pci;
41180 unsigned long cce_buffers_offset;
41181
41182- atomic_t idle_count;
41183+ atomic_unchecked_t idle_count;
41184
41185 int page_flipping;
41186 int current_page;
41187 u32 crtc_offset;
41188 u32 crtc_offset_cntl;
41189
41190- atomic_t vbl_received;
41191+ atomic_unchecked_t vbl_received;
41192
41193 u32 color_fmt;
41194 unsigned int front_offset;
41195diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
41196index 663f38c..c689495 100644
41197--- a/drivers/gpu/drm/r128/r128_ioc32.c
41198+++ b/drivers/gpu/drm/r128/r128_ioc32.c
41199@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
41200 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
41201 }
41202
41203-drm_ioctl_compat_t *r128_compat_ioctls[] = {
41204+drm_ioctl_compat_t r128_compat_ioctls[] = {
41205 [DRM_R128_INIT] = compat_r128_init,
41206 [DRM_R128_DEPTH] = compat_r128_depth,
41207 [DRM_R128_STIPPLE] = compat_r128_stipple,
41208@@ -197,18 +197,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
41209 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41210 {
41211 unsigned int nr = DRM_IOCTL_NR(cmd);
41212- drm_ioctl_compat_t *fn = NULL;
41213 int ret;
41214
41215 if (nr < DRM_COMMAND_BASE)
41216 return drm_compat_ioctl(filp, cmd, arg);
41217
41218- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
41219- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41220-
41221- if (fn != NULL)
41222+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls)) {
41223+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41224 ret = (*fn) (filp, cmd, arg);
41225- else
41226+ } else
41227 ret = drm_ioctl(filp, cmd, arg);
41228
41229 return ret;
41230diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
41231index c2ae496..30b5993 100644
41232--- a/drivers/gpu/drm/r128/r128_irq.c
41233+++ b/drivers/gpu/drm/r128/r128_irq.c
41234@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
41235 if (crtc != 0)
41236 return 0;
41237
41238- return atomic_read(&dev_priv->vbl_received);
41239+ return atomic_read_unchecked(&dev_priv->vbl_received);
41240 }
41241
41242 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41243@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41244 /* VBLANK interrupt */
41245 if (status & R128_CRTC_VBLANK_INT) {
41246 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
41247- atomic_inc(&dev_priv->vbl_received);
41248+ atomic_inc_unchecked(&dev_priv->vbl_received);
41249 drm_handle_vblank(dev, 0);
41250 return IRQ_HANDLED;
41251 }
41252diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
41253index 8fd2d9f..18c9660 100644
41254--- a/drivers/gpu/drm/r128/r128_state.c
41255+++ b/drivers/gpu/drm/r128/r128_state.c
41256@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
41257
41258 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
41259 {
41260- if (atomic_read(&dev_priv->idle_count) == 0)
41261+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
41262 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
41263 else
41264- atomic_set(&dev_priv->idle_count, 0);
41265+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41266 }
41267
41268 #endif
41269diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
41270index b928c17..e5d9400 100644
41271--- a/drivers/gpu/drm/radeon/mkregtable.c
41272+++ b/drivers/gpu/drm/radeon/mkregtable.c
41273@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
41274 regex_t mask_rex;
41275 regmatch_t match[4];
41276 char buf[1024];
41277- size_t end;
41278+ long end;
41279 int len;
41280 int done = 0;
41281 int r;
41282 unsigned o;
41283 struct offset *offset;
41284 char last_reg_s[10];
41285- int last_reg;
41286+ unsigned long last_reg;
41287
41288 if (regcomp
41289 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
41290diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
41291index bd7519f..e1c2cd95 100644
41292--- a/drivers/gpu/drm/radeon/radeon_device.c
41293+++ b/drivers/gpu/drm/radeon/radeon_device.c
41294@@ -1247,7 +1247,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
41295 * locking inversion with the driver load path. And the access here is
41296 * completely racy anyway. So don't bother with locking for now.
41297 */
41298- return dev->open_count == 0;
41299+ return local_read(&dev->open_count) == 0;
41300 }
41301
41302 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
41303diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
41304index 46bd393..6ae4719 100644
41305--- a/drivers/gpu/drm/radeon/radeon_drv.h
41306+++ b/drivers/gpu/drm/radeon/radeon_drv.h
41307@@ -264,7 +264,7 @@ typedef struct drm_radeon_private {
41308
41309 /* SW interrupt */
41310 wait_queue_head_t swi_queue;
41311- atomic_t swi_emitted;
41312+ atomic_unchecked_t swi_emitted;
41313 int vblank_crtc;
41314 uint32_t irq_enable_reg;
41315 uint32_t r500_disp_irq_reg;
41316diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
41317index 0b98ea1..0881827 100644
41318--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
41319+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
41320@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41321 request = compat_alloc_user_space(sizeof(*request));
41322 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
41323 || __put_user(req32.param, &request->param)
41324- || __put_user((void __user *)(unsigned long)req32.value,
41325+ || __put_user((unsigned long)req32.value,
41326 &request->value))
41327 return -EFAULT;
41328
41329@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41330 #define compat_radeon_cp_setparam NULL
41331 #endif /* X86_64 || IA64 */
41332
41333-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41334+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
41335 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
41336 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
41337 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
41338@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41339 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41340 {
41341 unsigned int nr = DRM_IOCTL_NR(cmd);
41342- drm_ioctl_compat_t *fn = NULL;
41343 int ret;
41344
41345 if (nr < DRM_COMMAND_BASE)
41346 return drm_compat_ioctl(filp, cmd, arg);
41347
41348- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
41349- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41350-
41351- if (fn != NULL)
41352+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls)) {
41353+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41354 ret = (*fn) (filp, cmd, arg);
41355- else
41356+ } else
41357 ret = drm_ioctl(filp, cmd, arg);
41358
41359 return ret;
41360diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
41361index 244b19b..c19226d 100644
41362--- a/drivers/gpu/drm/radeon/radeon_irq.c
41363+++ b/drivers/gpu/drm/radeon/radeon_irq.c
41364@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
41365 unsigned int ret;
41366 RING_LOCALS;
41367
41368- atomic_inc(&dev_priv->swi_emitted);
41369- ret = atomic_read(&dev_priv->swi_emitted);
41370+ atomic_inc_unchecked(&dev_priv->swi_emitted);
41371+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
41372
41373 BEGIN_RING(4);
41374 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
41375@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
41376 drm_radeon_private_t *dev_priv =
41377 (drm_radeon_private_t *) dev->dev_private;
41378
41379- atomic_set(&dev_priv->swi_emitted, 0);
41380+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
41381 init_waitqueue_head(&dev_priv->swi_queue);
41382
41383 dev->max_vblank_count = 0x001fffff;
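Annotation: the swi_emitted hunks above are the recurring PaX REFCOUNT pattern in this patch: atomic_t gains overflow detection, and counters whose wraparound is harmless (here a software-interrupt sequence number, not an object refcount) opt out via atomic_unchecked_t and the *_unchecked accessors. A hedged userspace sketch of the distinction, assuming GCC/Clang for __builtin_add_overflow:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* "Checked" increment: trap on overflow, the way a hardened refcount
 * must, because a wrapped refcount leads to use-after-free. */
static int checked_inc(int *v)
{
    int out;
    if (__builtin_add_overflow(*v, 1, &out))
        abort();                 /* the kernel reports and saturates */
    return *v = out;
}

/* "Unchecked" increment: wraparound is acceptable for a plain
 * sequence counter such as swi_emitted. */
static unsigned unchecked_inc(unsigned *v)
{
    return ++*v;                 /* unsigned wrap is well-defined */
}

int main(void)
{
    int ref = 1;
    unsigned seq = UINT_MAX;
    printf("ref=%d seq=%u\n", checked_inc(&ref), unchecked_inc(&seq));
    return 0;
}

The same conversion repeats below for via_drv.h, vmwgfx, hid-core, hv_balloon and the InfiniBand counters; the rationale is identical each time.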
41384diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
41385index 15aee72..cda326e 100644
41386--- a/drivers/gpu/drm/radeon/radeon_state.c
41387+++ b/drivers/gpu/drm/radeon/radeon_state.c
41388@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
41389 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
41390 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
41391
41392- if (copy_from_user(&depth_boxes, clear->depth_boxes,
41393+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
41394 sarea_priv->nbox * sizeof(depth_boxes[0])))
41395 return -EFAULT;
41396
41397@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
41398 {
41399 drm_radeon_private_t *dev_priv = dev->dev_private;
41400 drm_radeon_getparam_t *param = data;
41401- int value;
41402+ int value = 0;
41403
41404 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
41405
41406diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
41407index b292aca..4e338b5 100644
41408--- a/drivers/gpu/drm/radeon/radeon_ttm.c
41409+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
41410@@ -963,7 +963,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
41411 man->size = size >> PAGE_SHIFT;
41412 }
41413
41414-static struct vm_operations_struct radeon_ttm_vm_ops;
41415+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
41416 static const struct vm_operations_struct *ttm_vm_ops = NULL;
41417
41418 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41419@@ -1004,8 +1004,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
41420 }
41421 if (unlikely(ttm_vm_ops == NULL)) {
41422 ttm_vm_ops = vma->vm_ops;
41423+ pax_open_kernel();
41424 radeon_ttm_vm_ops = *ttm_vm_ops;
41425 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
41426+ pax_close_kernel();
41427 }
41428 vma->vm_ops = &radeon_ttm_vm_ops;
41429 return 0;
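Annotation: radeon_ttm_vm_ops becomes a __read_only object above, so its one-time .fault patching must be bracketed by pax_open_kernel()/pax_close_kernel(), which temporarily lift write protection on hardened kernel data. A userspace analogue with mprotect(), assuming POSIX (illustrative only; the kernel mechanism toggles CR0.WP or page-table bits, not mprotect, and error checks are omitted for brevity):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);

    /* Data that normally stays read-only, like the vm_ops copy. */
    char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    strcpy(p, "fault handler v1");
    mprotect(p, pagesz, PROT_READ);              /* seal it */

    mprotect(p, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
    strcpy(p, "fault handler v2");               /* one-time patch */
    mprotect(p, pagesz, PROT_READ);              /* pax_close_kernel() */

    printf("%s\n", p);
    munmap(p, pagesz);
    return 0;
}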
41430diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
41431index 978993f..e36e50e 100644
41432--- a/drivers/gpu/drm/tegra/dc.c
41433+++ b/drivers/gpu/drm/tegra/dc.c
41434@@ -1416,7 +1416,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
41435 }
41436
41437 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
41438- dc->debugfs_files[i].data = dc;
41439+ *(void **)&dc->debugfs_files[i].data = dc;
41440
41441 err = drm_debugfs_create_files(dc->debugfs_files,
41442 ARRAY_SIZE(debugfs_files),
41443diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
41444index 33f67fd..55ee9761 100644
41445--- a/drivers/gpu/drm/tegra/dsi.c
41446+++ b/drivers/gpu/drm/tegra/dsi.c
41447@@ -39,7 +39,7 @@ struct tegra_dsi {
41448 struct clk *clk_lp;
41449 struct clk *clk;
41450
41451- struct drm_info_list *debugfs_files;
41452+ drm_info_list_no_const *debugfs_files;
41453 struct drm_minor *minor;
41454 struct dentry *debugfs;
41455
41456diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
41457index ffe2654..03c7b1c 100644
41458--- a/drivers/gpu/drm/tegra/hdmi.c
41459+++ b/drivers/gpu/drm/tegra/hdmi.c
41460@@ -60,7 +60,7 @@ struct tegra_hdmi {
41461 bool stereo;
41462 bool dvi;
41463
41464- struct drm_info_list *debugfs_files;
41465+ drm_info_list_no_const *debugfs_files;
41466 struct drm_minor *minor;
41467 struct dentry *debugfs;
41468 };
41469diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41470index aa0bd054..aea6a01 100644
41471--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
41472+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41473@@ -148,10 +148,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
41474 }
41475
41476 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
41477- ttm_bo_man_init,
41478- ttm_bo_man_takedown,
41479- ttm_bo_man_get_node,
41480- ttm_bo_man_put_node,
41481- ttm_bo_man_debug
41482+ .init = ttm_bo_man_init,
41483+ .takedown = ttm_bo_man_takedown,
41484+ .get_node = ttm_bo_man_get_node,
41485+ .put_node = ttm_bo_man_put_node,
41486+ .debug = ttm_bo_man_debug
41487 };
41488 EXPORT_SYMBOL(ttm_bo_manager_func);
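Annotation: the ttm_bo_manager_func hunk converts positional struct initializers to designated ones. The content is unchanged; the point is robustness: a designated initializer binds by field name, so the table stays correct if struct ttm_mem_type_manager_func ever gains or reorders members, and constification tooling can verify it field by field. Minimal sketch:

#include <stdio.h>

struct ops {
    int  (*init)(void);
    void (*takedown)(void);
};

static int  my_init(void)     { return 0; }
static void my_takedown(void) { }

/* Designated initializers bind by name, so this table survives
 * reordering or insertion of 'struct ops' members. */
static const struct ops my_ops = {
    .init     = my_init,
    .takedown = my_takedown,
};

int main(void)
{
    printf("init -> %d\n", my_ops.init());
    my_ops.takedown();
    return 0;
}

The vmwgfx_gmrid_manager.c hunk further down applies the same conversion to vmw_gmrid_manager_func.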
41489diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
41490index a1803fb..c53f6b0 100644
41491--- a/drivers/gpu/drm/ttm/ttm_memory.c
41492+++ b/drivers/gpu/drm/ttm/ttm_memory.c
41493@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
41494 zone->glob = glob;
41495 glob->zone_kernel = zone;
41496 ret = kobject_init_and_add(
41497- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41498+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41499 if (unlikely(ret != 0)) {
41500 kobject_put(&zone->kobj);
41501 return ret;
41502@@ -348,7 +348,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
41503 zone->glob = glob;
41504 glob->zone_dma32 = zone;
41505 ret = kobject_init_and_add(
41506- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41507+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41508 if (unlikely(ret != 0)) {
41509 kobject_put(&zone->kobj);
41510 return ret;
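Annotation: kobject_init_and_add() takes a printf-style format string as its last fixed argument. Passing zone->name directly would interpret any '%' sequence in the name as a conversion; routing it through "%s" is the standard format-string hardening. Userspace sketch of the bug class:

#include <stdio.h>

int main(void)
{
    const char *name = "dma32 %n zone";   /* hostile or accidental '%' */

    /* printf(name) would treat "%n" as a conversion specifier, which
     * is exactly what the hunk above rules out. The safe form pins
     * the format: */
    printf("%s\n", name);
    return 0;
}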
41511diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41512index 025c429..314062f 100644
41513--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
41514+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41515@@ -54,7 +54,7 @@
41516
41517 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41518 #define SMALL_ALLOCATION 16
41519-#define FREE_ALL_PAGES (~0U)
41520+#define FREE_ALL_PAGES (~0UL)
41521 /* times are in msecs */
41522 #define PAGE_FREE_INTERVAL 1000
41523
41524@@ -299,15 +299,14 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
41525 * @free_all: If set to true will free all pages in pool
41526 * @use_static: Safe to use static buffer
41527 **/
41528-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
41529+static unsigned long ttm_page_pool_free(struct ttm_page_pool *pool, unsigned long nr_free,
41530 bool use_static)
41531 {
41532 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41533 unsigned long irq_flags;
41534 struct page *p;
41535 struct page **pages_to_free;
41536- unsigned freed_pages = 0,
41537- npages_to_free = nr_free;
41538+ unsigned long freed_pages = 0, npages_to_free = nr_free;
41539
41540 if (NUM_PAGES_TO_ALLOC < nr_free)
41541 npages_to_free = NUM_PAGES_TO_ALLOC;
41542@@ -371,7 +370,8 @@ restart:
41543 __list_del(&p->lru, &pool->list);
41544
41545 ttm_pool_update_free_locked(pool, freed_pages);
41546- nr_free -= freed_pages;
41547+ if (likely(nr_free != FREE_ALL_PAGES))
41548+ nr_free -= freed_pages;
41549 }
41550
41551 spin_unlock_irqrestore(&pool->lock, irq_flags);
41552@@ -399,7 +399,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41553 unsigned i;
41554 unsigned pool_offset;
41555 struct ttm_page_pool *pool;
41556- int shrink_pages = sc->nr_to_scan;
41557+ unsigned long shrink_pages = sc->nr_to_scan;
41558 unsigned long freed = 0;
41559
41560 if (!mutex_trylock(&lock))
41561@@ -407,7 +407,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41562 pool_offset = ++start_pool % NUM_POOLS;
41563 /* select start pool in round robin fashion */
41564 for (i = 0; i < NUM_POOLS; ++i) {
41565- unsigned nr_free = shrink_pages;
41566+ unsigned long nr_free = shrink_pages;
41567 if (shrink_pages == 0)
41568 break;
41569 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
41570@@ -673,7 +673,7 @@ out:
41571 }
41572
41573 /* Put all pages in pages list to correct pool to wait for reuse */
41574-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
41575+static void ttm_put_pages(struct page **pages, unsigned long npages, int flags,
41576 enum ttm_caching_state cstate)
41577 {
41578 unsigned long irq_flags;
41579@@ -728,7 +728,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
41580 struct list_head plist;
41581 struct page *p = NULL;
41582 gfp_t gfp_flags = GFP_USER;
41583- unsigned count;
41584+ unsigned long count;
41585 int r;
41586
41587 /* set zero flag for page allocation if required */
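Annotation: FREE_ALL_PAGES is a sentinel meaning "free everything", and two fixes travel together in the ttm_page_alloc.c hunks above. First, the sentinel and every nr_free/shrink_pages variable are widened to unsigned long so comparisons against it cannot truncate. Second, the running subtraction is skipped while the sentinel is in effect, because decrementing ~0UL would silently turn "free all" into a large but finite count. A sketch of the guarded loop:

#include <stdio.h>

#define FREE_ALL_PAGES (~0UL)

static unsigned long pool_free(unsigned long nr_free)
{
    unsigned long freed_total = 0;

    while (nr_free) {
        unsigned long freed = 16;            /* one batch released */
        freed_total += freed;
        if (freed_total >= 64)               /* pretend pool is empty */
            break;
        /* The fix: never decrement the sentinel, or "free all"
         * quietly degrades into "free (~0UL - k) pages". */
        if (nr_free != FREE_ALL_PAGES)
            nr_free -= freed;
    }
    return freed_total;
}

int main(void)
{
    printf("freed %lu pages\n", pool_free(FREE_ALL_PAGES));
    return 0;
}

The ttm_page_alloc_dma.c section that follows receives the identical treatment for the DMA page pools.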
41588diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41589index 01e1d27..aaa018a 100644
41590--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41591+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41592@@ -56,7 +56,7 @@
41593
41594 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41595 #define SMALL_ALLOCATION 4
41596-#define FREE_ALL_PAGES (~0U)
41597+#define FREE_ALL_PAGES (~0UL)
41598 /* times are in msecs */
41599 #define IS_UNDEFINED (0)
41600 #define IS_WC (1<<1)
41601@@ -413,7 +413,7 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
41602 * @nr_free: If set to true will free all pages in pool
41603 * @use_static: Safe to use static buffer
41604 **/
41605-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
41606+static unsigned long ttm_dma_page_pool_free(struct dma_pool *pool, unsigned long nr_free,
41607 bool use_static)
41608 {
41609 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41610@@ -421,8 +421,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
41611 struct dma_page *dma_p, *tmp;
41612 struct page **pages_to_free;
41613 struct list_head d_pages;
41614- unsigned freed_pages = 0,
41615- npages_to_free = nr_free;
41616+ unsigned long freed_pages = 0, npages_to_free = nr_free;
41617
41618 if (NUM_PAGES_TO_ALLOC < nr_free)
41619 npages_to_free = NUM_PAGES_TO_ALLOC;
41620@@ -499,7 +498,8 @@ restart:
41621 /* remove range of pages from the pool */
41622 if (freed_pages) {
41623 ttm_pool_update_free_locked(pool, freed_pages);
41624- nr_free -= freed_pages;
41625+ if (likely(nr_free != FREE_ALL_PAGES))
41626+ nr_free -= freed_pages;
41627 }
41628
41629 spin_unlock_irqrestore(&pool->lock, irq_flags);
41630@@ -936,7 +936,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
41631 struct dma_page *d_page, *next;
41632 enum pool_type type;
41633 bool is_cached = false;
41634- unsigned count = 0, i, npages = 0;
41635+ unsigned long count = 0, i, npages = 0;
41636 unsigned long irq_flags;
41637
41638 type = ttm_to_type(ttm->page_flags, ttm->caching_state);
41639@@ -1012,7 +1012,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41640 static unsigned start_pool;
41641 unsigned idx = 0;
41642 unsigned pool_offset;
41643- unsigned shrink_pages = sc->nr_to_scan;
41644+ unsigned long shrink_pages = sc->nr_to_scan;
41645 struct device_pools *p;
41646 unsigned long freed = 0;
41647
41648@@ -1025,7 +1025,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41649 goto out;
41650 pool_offset = ++start_pool % _manager->npools;
41651 list_for_each_entry(p, &_manager->pools, pools) {
41652- unsigned nr_free;
41653+ unsigned long nr_free;
41654
41655 if (!p->dev)
41656 continue;
41657@@ -1039,7 +1039,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41658 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
41659 freed += nr_free - shrink_pages;
41660
41661- pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
41662+ pr_debug("%s: (%s:%d) Asked to shrink %lu, have %lu more to go\n",
41663 p->pool->dev_name, p->pool->name, current->pid,
41664 nr_free, shrink_pages);
41665 }
41666diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
41667index 8cbcb45..a4d9cf7 100644
41668--- a/drivers/gpu/drm/udl/udl_fb.c
41669+++ b/drivers/gpu/drm/udl/udl_fb.c
41670@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
41671 fb_deferred_io_cleanup(info);
41672 kfree(info->fbdefio);
41673 info->fbdefio = NULL;
41674- info->fbops->fb_mmap = udl_fb_mmap;
41675 }
41676
41677 pr_warn("released /dev/fb%d user=%d count=%d\n",
41678diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
41679index ef8c500..01030c8 100644
41680--- a/drivers/gpu/drm/via/via_drv.h
41681+++ b/drivers/gpu/drm/via/via_drv.h
41682@@ -53,7 +53,7 @@ typedef struct drm_via_ring_buffer {
41683 typedef uint32_t maskarray_t[5];
41684
41685 typedef struct drm_via_irq {
41686- atomic_t irq_received;
41687+ atomic_unchecked_t irq_received;
41688 uint32_t pending_mask;
41689 uint32_t enable_mask;
41690 wait_queue_head_t irq_queue;
41691@@ -77,7 +77,7 @@ typedef struct drm_via_private {
41692 struct timeval last_vblank;
41693 int last_vblank_valid;
41694 unsigned usec_per_vblank;
41695- atomic_t vbl_received;
41696+ atomic_unchecked_t vbl_received;
41697 drm_via_state_t hc_state;
41698 char pci_buf[VIA_PCI_BUF_SIZE];
41699 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
41700diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
41701index 1319433..a993b0c 100644
41702--- a/drivers/gpu/drm/via/via_irq.c
41703+++ b/drivers/gpu/drm/via/via_irq.c
41704@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
41705 if (crtc != 0)
41706 return 0;
41707
41708- return atomic_read(&dev_priv->vbl_received);
41709+ return atomic_read_unchecked(&dev_priv->vbl_received);
41710 }
41711
41712 irqreturn_t via_driver_irq_handler(int irq, void *arg)
41713@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41714
41715 status = VIA_READ(VIA_REG_INTERRUPT);
41716 if (status & VIA_IRQ_VBLANK_PENDING) {
41717- atomic_inc(&dev_priv->vbl_received);
41718- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
41719+ atomic_inc_unchecked(&dev_priv->vbl_received);
41720+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
41721 do_gettimeofday(&cur_vblank);
41722 if (dev_priv->last_vblank_valid) {
41723 dev_priv->usec_per_vblank =
41724@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41725 dev_priv->last_vblank = cur_vblank;
41726 dev_priv->last_vblank_valid = 1;
41727 }
41728- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
41729+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
41730 DRM_DEBUG("US per vblank is: %u\n",
41731 dev_priv->usec_per_vblank);
41732 }
41733@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41734
41735 for (i = 0; i < dev_priv->num_irqs; ++i) {
41736 if (status & cur_irq->pending_mask) {
41737- atomic_inc(&cur_irq->irq_received);
41738+ atomic_inc_unchecked(&cur_irq->irq_received);
41739 wake_up(&cur_irq->irq_queue);
41740 handled = 1;
41741 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
41742@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
41743 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
41744 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
41745 masks[irq][4]));
41746- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
41747+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
41748 } else {
41749 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
41750 (((cur_irq_sequence =
41751- atomic_read(&cur_irq->irq_received)) -
41752+ atomic_read_unchecked(&cur_irq->irq_received)) -
41753 *sequence) <= (1 << 23)));
41754 }
41755 *sequence = cur_irq_sequence;
41756@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
41757 }
41758
41759 for (i = 0; i < dev_priv->num_irqs; ++i) {
41760- atomic_set(&cur_irq->irq_received, 0);
41761+ atomic_set_unchecked(&cur_irq->irq_received, 0);
41762 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
41763 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
41764 init_waitqueue_head(&cur_irq->irq_queue);
41765@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
41766 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
41767 case VIA_IRQ_RELATIVE:
41768 irqwait->request.sequence +=
41769- atomic_read(&cur_irq->irq_received);
41770+ atomic_read_unchecked(&cur_irq->irq_received);
41771 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
41772 case VIA_IRQ_ABSOLUTE:
41773 break;
41774diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41775index d26a6da..5fa41ed 100644
41776--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41777+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41778@@ -447,7 +447,7 @@ struct vmw_private {
41779 * Fencing and IRQs.
41780 */
41781
41782- atomic_t marker_seq;
41783+ atomic_unchecked_t marker_seq;
41784 wait_queue_head_t fence_queue;
41785 wait_queue_head_t fifo_queue;
41786 spinlock_t waiter_lock;
41787diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41788index 39f2b03..d1b0a64 100644
41789--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41790+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41791@@ -152,7 +152,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
41792 (unsigned int) min,
41793 (unsigned int) fifo->capabilities);
41794
41795- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41796+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41797 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
41798 vmw_marker_queue_init(&fifo->marker_queue);
41799 return vmw_fifo_send_fence(dev_priv, &dummy);
41800@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
41801 if (reserveable)
41802 iowrite32(bytes, fifo_mem +
41803 SVGA_FIFO_RESERVED);
41804- return fifo_mem + (next_cmd >> 2);
41805+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
41806 } else {
41807 need_bounce = true;
41808 }
41809@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41810
41811 fm = vmw_fifo_reserve(dev_priv, bytes);
41812 if (unlikely(fm == NULL)) {
41813- *seqno = atomic_read(&dev_priv->marker_seq);
41814+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41815 ret = -ENOMEM;
41816 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
41817 false, 3*HZ);
41818@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41819 }
41820
41821 do {
41822- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
41823+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
41824 } while (*seqno == 0);
41825
41826 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
41827diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41828index 170b61b..fec7348 100644
41829--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41830+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41831@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
41832 }
41833
41834 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
41835- vmw_gmrid_man_init,
41836- vmw_gmrid_man_takedown,
41837- vmw_gmrid_man_get_node,
41838- vmw_gmrid_man_put_node,
41839- vmw_gmrid_man_debug
41840+ .init = vmw_gmrid_man_init,
41841+ .takedown = vmw_gmrid_man_takedown,
41842+ .get_node = vmw_gmrid_man_get_node,
41843+ .put_node = vmw_gmrid_man_put_node,
41844+ .debug = vmw_gmrid_man_debug
41845 };
41846diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41847index 69c8ce2..cacb0ab 100644
41848--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41849+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41850@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
41851 int ret;
41852
41853 num_clips = arg->num_clips;
41854- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41855+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41856
41857 if (unlikely(num_clips == 0))
41858 return 0;
41859@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
41860 int ret;
41861
41862 num_clips = arg->num_clips;
41863- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41864+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41865
41866 if (unlikely(num_clips == 0))
41867 return 0;
41868diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41869index 9fe9827..0aa2fc0 100644
41870--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41871+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41872@@ -102,7 +102,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
41873 * emitted. Then the fence is stale and signaled.
41874 */
41875
41876- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
41877+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
41878 > VMW_FENCE_WRAP);
41879
41880 return ret;
41881@@ -133,7 +133,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
41882
41883 if (fifo_idle)
41884 down_read(&fifo_state->rwsem);
41885- signal_seq = atomic_read(&dev_priv->marker_seq);
41886+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
41887 ret = 0;
41888
41889 for (;;) {
41890diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41891index efd1ffd..0ae13ca 100644
41892--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41893+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41894@@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
41895 while (!vmw_lag_lt(queue, us)) {
41896 spin_lock(&queue->lock);
41897 if (list_empty(&queue->head))
41898- seqno = atomic_read(&dev_priv->marker_seq);
41899+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41900 else {
41901 marker = list_first_entry(&queue->head,
41902 struct vmw_marker, head);
41903diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
41904index 37ac7b5..d52a5c9 100644
41905--- a/drivers/gpu/vga/vga_switcheroo.c
41906+++ b/drivers/gpu/vga/vga_switcheroo.c
41907@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
41908
41909 /* this version is for the case where the power switch is separate
41910 to the device being powered down. */
41911-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
41912+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
41913 {
41914 /* copy over all the bus versions */
41915 if (dev->bus && dev->bus->pm) {
41916@@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
41917 return ret;
41918 }
41919
41920-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
41921+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
41922 {
41923 /* copy over all the bus versions */
41924 if (dev->bus && dev->bus->pm) {
41925diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
41926index 8b63879..a5a5e72 100644
41927--- a/drivers/hid/hid-core.c
41928+++ b/drivers/hid/hid-core.c
41929@@ -2508,7 +2508,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
41930
41931 int hid_add_device(struct hid_device *hdev)
41932 {
41933- static atomic_t id = ATOMIC_INIT(0);
41934+ static atomic_unchecked_t id = ATOMIC_INIT(0);
41935 int ret;
41936
41937 if (WARN_ON(hdev->status & HID_STAT_ADDED))
41938@@ -2551,7 +2551,7 @@ int hid_add_device(struct hid_device *hdev)
41939 /* XXX hack, any other cleaner solution after the driver core
41940 * is converted to allow more than 20 bytes as the device name? */
41941 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
41942- hdev->vendor, hdev->product, atomic_inc_return(&id));
41943+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
41944
41945 hid_debug_register(hdev, dev_name(&hdev->dev));
41946 ret = device_add(&hdev->dev);
41947diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
41948index 5bc6d80..e47b55a 100644
41949--- a/drivers/hid/hid-logitech-dj.c
41950+++ b/drivers/hid/hid-logitech-dj.c
41951@@ -853,6 +853,12 @@ static int logi_dj_dj_event(struct hid_device *hdev,
41952 * case we forward it to the correct hid device (via hid_input_report()
41953 * ) and return 1 so hid-core does not anything else with it.
41954 */
41955+ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
41956+ (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
41957+ dev_err(&hdev->dev, "%s: invalid device index:%d\n",
41958+ __func__, dj_report->device_index);
41959+ return false;
41960+ }
41961
41962 if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
41963 (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
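Annotation: the hid-logitech-dj hunk inserts a device_index bounds check ahead of the one already visible in the trailing context (the patch appears to carry its own copy alongside the upstream fix; whichever lands first, the intent is the same). device_index arrives from the USB receiver and is later used as an array subscript, so it must be validated as untrusted input before any use. Sketch of the pattern:

#include <stdio.h>

#define DJ_DEVICE_INDEX_MIN 1
#define DJ_DEVICE_INDEX_MAX 6

static const char *paired[DJ_DEVICE_INDEX_MAX + 1] = {
    [1] = "keyboard", [2] = "mouse",
};

/* Reject a device-supplied index before using it as an array
 * subscript. */
static const char *lookup(unsigned idx)
{
    if (idx < DJ_DEVICE_INDEX_MIN || idx > DJ_DEVICE_INDEX_MAX) {
        fprintf(stderr, "invalid device index:%u\n", idx);
        return NULL;
    }
    return paired[idx];
}

int main(void)
{
    printf("%s\n", lookup(2));           /* mouse */
    printf("%p\n", (void *)lookup(200)); /* rejected: (nil) */
    return 0;
}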
41964diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
41965index c13fb5b..55a3802 100644
41966--- a/drivers/hid/hid-wiimote-debug.c
41967+++ b/drivers/hid/hid-wiimote-debug.c
41968@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
41969 else if (size == 0)
41970 return -EIO;
41971
41972- if (copy_to_user(u, buf, size))
41973+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
41974 return -EFAULT;
41975
41976 *off += size;
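Annotation: the wiimote debugfs read copies size bytes out of an on-stack buffer; the added size > sizeof(buf) guard makes a kernel-stack disclosure impossible even if the earlier length computation is ever wrong. Sketch of clamping a copy to the source buffer, with memcpy standing in for copy_to_user():

#include <stdio.h>
#include <string.h>

/* Stand-in for the debugfs read: never copy more than the stack
 * buffer actually holds, whatever the computed 'size' says. */
static long safe_read(char *dst, size_t size)
{
    char buf[8] = "eeprom!";

    if (size > sizeof(buf))
        return -1;              /* -EFAULT in the kernel version */
    memcpy(dst, buf, size);     /* stands in for copy_to_user() */
    return (long)size;
}

int main(void)
{
    char out[16] = {0};
    printf("%ld\n", safe_read(out, 7));   /* ok */
    printf("%ld\n", safe_read(out, 100)); /* rejected */
    return 0;
}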
41977diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
41978index 433f72a..2926005 100644
41979--- a/drivers/hv/channel.c
41980+++ b/drivers/hv/channel.c
41981@@ -366,8 +366,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
41982 unsigned long flags;
41983 int ret = 0;
41984
41985- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
41986- atomic_inc(&vmbus_connection.next_gpadl_handle);
41987+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
41988+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
41989
41990 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
41991 if (ret)
41992diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
41993index 3e4235c..877d0e5 100644
41994--- a/drivers/hv/hv.c
41995+++ b/drivers/hv/hv.c
41996@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
41997 u64 output_address = (output) ? virt_to_phys(output) : 0;
41998 u32 output_address_hi = output_address >> 32;
41999 u32 output_address_lo = output_address & 0xFFFFFFFF;
42000- void *hypercall_page = hv_context.hypercall_page;
42001+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
42002
42003 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
42004 "=a"(hv_status_lo) : "d" (control_hi),
42005@@ -156,7 +156,7 @@ int hv_init(void)
42006 /* See if the hypercall page is already set */
42007 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
42008
42009- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
42010+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
42011
42012 if (!virtaddr)
42013 goto cleanup;
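Annotation: two related hardening changes in hv.c. The Hyper-V hypercall page is allocated PAGE_KERNEL_RX instead of PAGE_KERNEL_EXEC, so it is never mapped writable and executable at once (W^X). The indirect call then goes through ktva_ktla(), a PaX/KERNEXEC helper that translates between the kernel's two aliases of such memory; its exact semantics are grsecurity-internal, so take that reading as hedged. A userspace sketch of the W^X discipline, assuming x86-64 Linux (error checks omitted):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    /* x86-64 machine code: mov eax, 42; ret */
    unsigned char code[] = { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };

    /* W^X: write the page while it is writable... */
    void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    memcpy(p, code, sizeof(code));

    /* ...then drop write before ever executing it (the RX mapping). */
    mprotect(p, 4096, PROT_READ | PROT_EXEC);

    int (*fn)(void) = (int (*)(void))p;
    printf("%d\n", fn());
    munmap(p, 4096);
    return 0;
}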
42014diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
42015index b958ded..b2452bb 100644
42016--- a/drivers/hv/hv_balloon.c
42017+++ b/drivers/hv/hv_balloon.c
42018@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
42019
42020 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
42021 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
42022-static atomic_t trans_id = ATOMIC_INIT(0);
42023+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
42024
42025 static int dm_ring_size = (5 * PAGE_SIZE);
42026
42027@@ -893,7 +893,7 @@ static void hot_add_req(struct work_struct *dummy)
42028 pr_info("Memory hot add failed\n");
42029
42030 dm->state = DM_INITIALIZED;
42031- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42032+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42033 vmbus_sendpacket(dm->dev->channel, &resp,
42034 sizeof(struct dm_hot_add_response),
42035 (unsigned long)NULL,
42036@@ -973,7 +973,7 @@ static void post_status(struct hv_dynmem_device *dm)
42037 memset(&status, 0, sizeof(struct dm_status));
42038 status.hdr.type = DM_STATUS_REPORT;
42039 status.hdr.size = sizeof(struct dm_status);
42040- status.hdr.trans_id = atomic_inc_return(&trans_id);
42041+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42042
42043 /*
42044 * The host expects the guest to report free memory.
42045@@ -993,7 +993,7 @@ static void post_status(struct hv_dynmem_device *dm)
42046 * send the status. This can happen if we were interrupted
42047 * after we picked our transaction ID.
42048 */
42049- if (status.hdr.trans_id != atomic_read(&trans_id))
42050+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
42051 return;
42052
42053 /*
42054@@ -1133,7 +1133,7 @@ static void balloon_up(struct work_struct *dummy)
42055 */
42056
42057 do {
42058- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
42059+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42060 ret = vmbus_sendpacket(dm_device.dev->channel,
42061 bl_resp,
42062 bl_resp->hdr.size,
42063@@ -1179,7 +1179,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
42064
42065 memset(&resp, 0, sizeof(struct dm_unballoon_response));
42066 resp.hdr.type = DM_UNBALLOON_RESPONSE;
42067- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42068+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42069 resp.hdr.size = sizeof(struct dm_unballoon_response);
42070
42071 vmbus_sendpacket(dm_device.dev->channel, &resp,
42072@@ -1243,7 +1243,7 @@ static void version_resp(struct hv_dynmem_device *dm,
42073 memset(&version_req, 0, sizeof(struct dm_version_request));
42074 version_req.hdr.type = DM_VERSION_REQUEST;
42075 version_req.hdr.size = sizeof(struct dm_version_request);
42076- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42077+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42078 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
42079 version_req.is_last_attempt = 1;
42080
42081@@ -1413,7 +1413,7 @@ static int balloon_probe(struct hv_device *dev,
42082 memset(&version_req, 0, sizeof(struct dm_version_request));
42083 version_req.hdr.type = DM_VERSION_REQUEST;
42084 version_req.hdr.size = sizeof(struct dm_version_request);
42085- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42086+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42087 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
42088 version_req.is_last_attempt = 0;
42089
42090@@ -1444,7 +1444,7 @@ static int balloon_probe(struct hv_device *dev,
42091 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
42092 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
42093 cap_msg.hdr.size = sizeof(struct dm_capabilities);
42094- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
42095+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42096
42097 cap_msg.caps.cap_bits.balloon = 1;
42098 cap_msg.caps.cap_bits.hot_add = 1;
42099diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
42100index c386d8d..d6004c4 100644
42101--- a/drivers/hv/hyperv_vmbus.h
42102+++ b/drivers/hv/hyperv_vmbus.h
42103@@ -611,7 +611,7 @@ enum vmbus_connect_state {
42104 struct vmbus_connection {
42105 enum vmbus_connect_state conn_state;
42106
42107- atomic_t next_gpadl_handle;
42108+ atomic_unchecked_t next_gpadl_handle;
42109
42110 /*
42111 * Represents channel interrupts. Each bit position represents a
42112diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
42113index 4d6b269..2e23b86 100644
42114--- a/drivers/hv/vmbus_drv.c
42115+++ b/drivers/hv/vmbus_drv.c
42116@@ -807,10 +807,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
42117 {
42118 int ret = 0;
42119
42120- static atomic_t device_num = ATOMIC_INIT(0);
42121+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
42122
42123 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
42124- atomic_inc_return(&device_num));
42125+ atomic_inc_return_unchecked(&device_num));
42126
42127 child_device_obj->device.bus = &hv_bus;
42128 child_device_obj->device.parent = &hv_acpi_dev->dev;
42129diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
42130index 579bdf9..75118b5 100644
42131--- a/drivers/hwmon/acpi_power_meter.c
42132+++ b/drivers/hwmon/acpi_power_meter.c
42133@@ -116,7 +116,7 @@ struct sensor_template {
42134 struct device_attribute *devattr,
42135 const char *buf, size_t count);
42136 int index;
42137-};
42138+} __do_const;
42139
42140 /* Averaging interval */
42141 static int update_avg_interval(struct acpi_power_meter_resource *resource)
42142@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
42143 struct sensor_template *attrs)
42144 {
42145 struct device *dev = &resource->acpi_dev->dev;
42146- struct sensor_device_attribute *sensors =
42147+ sensor_device_attribute_no_const *sensors =
42148 &resource->sensors[resource->num_sensors];
42149 int res = 0;
42150
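Annotation: the hwmon hunks from here on repeat one constification pattern. Structures holding function pointers are forced const at build time by grsecurity's constify gcc plugin (__do_const opts a struct in), and the few objects that genuinely must be filled in at probe time switch to *_no_const typedefs so they stay writable. A plain-C approximation of the split, with hypothetical names:

#include <stdio.h>
#include <sys/types.h>

struct sensor_ops {
    ssize_t (*show)(char *buf);
};

/* The common case: a function-pointer table never modified after
 * build, so it can live in .rodata. */
static ssize_t temp_show(char *buf) { return sprintf(buf, "42000\n"); }
static const struct sensor_ops temp_ops = { .show = temp_show };

/* The exception the *_no_const typedefs exist for: an attribute
 * assembled at runtime, so it must remain writable. */
typedef struct sensor_ops sensor_ops_no_const;

int main(void)
{
    char buf[16];
    sensor_ops_no_const dyn = { 0 };
    dyn.show = temp_show;            /* runtime initialization */

    temp_ops.show(buf);
    dyn.show(buf);
    printf("%s", buf);
    return 0;
}

The same trade-off explains device_attribute_no_const, sensor_device_attribute_no_const, attribute_group_no_const and i2c_algorithm_no_const in the applesmc, asus_atk0110, nct6683/nct6775, pmbus, iio and i2c hunks below.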
42151diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
42152index 0af63da..05a183a 100644
42153--- a/drivers/hwmon/applesmc.c
42154+++ b/drivers/hwmon/applesmc.c
42155@@ -1105,7 +1105,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
42156 {
42157 struct applesmc_node_group *grp;
42158 struct applesmc_dev_attr *node;
42159- struct attribute *attr;
42160+ attribute_no_const *attr;
42161 int ret, i;
42162
42163 for (grp = groups; grp->format; grp++) {
42164diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
42165index cccef87..06ce8ec 100644
42166--- a/drivers/hwmon/asus_atk0110.c
42167+++ b/drivers/hwmon/asus_atk0110.c
42168@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
42169 struct atk_sensor_data {
42170 struct list_head list;
42171 struct atk_data *data;
42172- struct device_attribute label_attr;
42173- struct device_attribute input_attr;
42174- struct device_attribute limit1_attr;
42175- struct device_attribute limit2_attr;
42176+ device_attribute_no_const label_attr;
42177+ device_attribute_no_const input_attr;
42178+ device_attribute_no_const limit1_attr;
42179+ device_attribute_no_const limit2_attr;
42180 char label_attr_name[ATTR_NAME_SIZE];
42181 char input_attr_name[ATTR_NAME_SIZE];
42182 char limit1_attr_name[ATTR_NAME_SIZE];
42183@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
42184 static struct device_attribute atk_name_attr =
42185 __ATTR(name, 0444, atk_name_show, NULL);
42186
42187-static void atk_init_attribute(struct device_attribute *attr, char *name,
42188+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
42189 sysfs_show_func show)
42190 {
42191 sysfs_attr_init(&attr->attr);
42192diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
42193index 5b7fec8..05c957a 100644
42194--- a/drivers/hwmon/coretemp.c
42195+++ b/drivers/hwmon/coretemp.c
42196@@ -783,7 +783,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
42197 return NOTIFY_OK;
42198 }
42199
42200-static struct notifier_block coretemp_cpu_notifier __refdata = {
42201+static struct notifier_block coretemp_cpu_notifier = {
42202 .notifier_call = coretemp_cpu_callback,
42203 };
42204
42205diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
42206index 7a8a6fb..015c1fd 100644
42207--- a/drivers/hwmon/ibmaem.c
42208+++ b/drivers/hwmon/ibmaem.c
42209@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
42210 struct aem_rw_sensor_template *rw)
42211 {
42212 struct device *dev = &data->pdev->dev;
42213- struct sensor_device_attribute *sensors = data->sensors;
42214+ sensor_device_attribute_no_const *sensors = data->sensors;
42215 int err;
42216
42217 /* Set up read-only sensors */
42218diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
42219index 17ae2eb..21b71dd 100644
42220--- a/drivers/hwmon/iio_hwmon.c
42221+++ b/drivers/hwmon/iio_hwmon.c
42222@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
42223 {
42224 struct device *dev = &pdev->dev;
42225 struct iio_hwmon_state *st;
42226- struct sensor_device_attribute *a;
42227+ sensor_device_attribute_no_const *a;
42228 int ret, i;
42229 int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
42230 enum iio_chan_type type;
42231diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
42232index f3830db..9f4d6d5 100644
42233--- a/drivers/hwmon/nct6683.c
42234+++ b/drivers/hwmon/nct6683.c
42235@@ -397,11 +397,11 @@ static struct attribute_group *
42236 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42237 int repeat)
42238 {
42239- struct sensor_device_attribute_2 *a2;
42240- struct sensor_device_attribute *a;
42241+ sensor_device_attribute_2_no_const *a2;
42242+ sensor_device_attribute_no_const *a;
42243 struct sensor_device_template **t;
42244 struct sensor_device_attr_u *su;
42245- struct attribute_group *group;
42246+ attribute_group_no_const *group;
42247 struct attribute **attrs;
42248 int i, j, count;
42249
42250diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
42251index 1be4117..88ae1e1 100644
42252--- a/drivers/hwmon/nct6775.c
42253+++ b/drivers/hwmon/nct6775.c
42254@@ -952,10 +952,10 @@ static struct attribute_group *
42255 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42256 int repeat)
42257 {
42258- struct attribute_group *group;
42259+ attribute_group_no_const *group;
42260 struct sensor_device_attr_u *su;
42261- struct sensor_device_attribute *a;
42262- struct sensor_device_attribute_2 *a2;
42263+ sensor_device_attribute_no_const *a;
42264+ sensor_device_attribute_2_no_const *a2;
42265 struct attribute **attrs;
42266 struct sensor_device_template **t;
42267 int i, count;
42268diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
42269index f2e47c7..45d7941 100644
42270--- a/drivers/hwmon/pmbus/pmbus_core.c
42271+++ b/drivers/hwmon/pmbus/pmbus_core.c
42272@@ -816,7 +816,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
42273 return 0;
42274 }
42275
42276-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42277+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
42278 const char *name,
42279 umode_t mode,
42280 ssize_t (*show)(struct device *dev,
42281@@ -833,7 +833,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42282 dev_attr->store = store;
42283 }
42284
42285-static void pmbus_attr_init(struct sensor_device_attribute *a,
42286+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
42287 const char *name,
42288 umode_t mode,
42289 ssize_t (*show)(struct device *dev,
42290@@ -855,7 +855,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
42291 u16 reg, u8 mask)
42292 {
42293 struct pmbus_boolean *boolean;
42294- struct sensor_device_attribute *a;
42295+ sensor_device_attribute_no_const *a;
42296
42297 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
42298 if (!boolean)
42299@@ -880,7 +880,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
42300 bool update, bool readonly)
42301 {
42302 struct pmbus_sensor *sensor;
42303- struct device_attribute *a;
42304+ device_attribute_no_const *a;
42305
42306 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
42307 if (!sensor)
42308@@ -911,7 +911,7 @@ static int pmbus_add_label(struct pmbus_data *data,
42309 const char *lstring, int index)
42310 {
42311 struct pmbus_label *label;
42312- struct device_attribute *a;
42313+ device_attribute_no_const *a;
42314
42315 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
42316 if (!label)
42317diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
42318index d4f0935..7420593 100644
42319--- a/drivers/hwmon/sht15.c
42320+++ b/drivers/hwmon/sht15.c
42321@@ -169,7 +169,7 @@ struct sht15_data {
42322 int supply_uv;
42323 bool supply_uv_valid;
42324 struct work_struct update_supply_work;
42325- atomic_t interrupt_handled;
42326+ atomic_unchecked_t interrupt_handled;
42327 };
42328
42329 /**
42330@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
42331 ret = gpio_direction_input(data->pdata->gpio_data);
42332 if (ret)
42333 return ret;
42334- atomic_set(&data->interrupt_handled, 0);
42335+ atomic_set_unchecked(&data->interrupt_handled, 0);
42336
42337 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42338 if (gpio_get_value(data->pdata->gpio_data) == 0) {
42339 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
42340 /* Only relevant if the interrupt hasn't occurred. */
42341- if (!atomic_read(&data->interrupt_handled))
42342+ if (!atomic_read_unchecked(&data->interrupt_handled))
42343 schedule_work(&data->read_work);
42344 }
42345 ret = wait_event_timeout(data->wait_queue,
42346@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
42347
42348 /* First disable the interrupt */
42349 disable_irq_nosync(irq);
42350- atomic_inc(&data->interrupt_handled);
42351+ atomic_inc_unchecked(&data->interrupt_handled);
42352 /* Then schedule a reading work struct */
42353 if (data->state != SHT15_READING_NOTHING)
42354 schedule_work(&data->read_work);
42355@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
42356 * If not, then start the interrupt again - care here as could
42357 * have gone low in meantime so verify it hasn't!
42358 */
42359- atomic_set(&data->interrupt_handled, 0);
42360+ atomic_set_unchecked(&data->interrupt_handled, 0);
42361 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42362 /* If still not occurred or another handler was scheduled */
42363 if (gpio_get_value(data->pdata->gpio_data)
42364- || atomic_read(&data->interrupt_handled))
42365+ || atomic_read_unchecked(&data->interrupt_handled))
42366 return;
42367 }
42368
42369diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
42370index ac91c07..8e69663 100644
42371--- a/drivers/hwmon/via-cputemp.c
42372+++ b/drivers/hwmon/via-cputemp.c
42373@@ -295,7 +295,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
42374 return NOTIFY_OK;
42375 }
42376
42377-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
42378+static struct notifier_block via_cputemp_cpu_notifier = {
42379 .notifier_call = via_cputemp_cpu_callback,
42380 };
42381
42382diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
42383index 65e3240..e6c511d 100644
42384--- a/drivers/i2c/busses/i2c-amd756-s4882.c
42385+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
42386@@ -39,7 +39,7 @@
42387 extern struct i2c_adapter amd756_smbus;
42388
42389 static struct i2c_adapter *s4882_adapter;
42390-static struct i2c_algorithm *s4882_algo;
42391+static i2c_algorithm_no_const *s4882_algo;
42392
42393 /* Wrapper access functions for multiplexed SMBus */
42394 static DEFINE_MUTEX(amd756_lock);
42395diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
42396index b19a310..d6eece0 100644
42397--- a/drivers/i2c/busses/i2c-diolan-u2c.c
42398+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
42399@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
42400 /* usb layer */
42401
42402 /* Send command to device, and get response. */
42403-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42404+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42405 {
42406 int ret = 0;
42407 int actual;
42408diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
42409index 88eda09..cf40434 100644
42410--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
42411+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
42412@@ -37,7 +37,7 @@
42413 extern struct i2c_adapter *nforce2_smbus;
42414
42415 static struct i2c_adapter *s4985_adapter;
42416-static struct i2c_algorithm *s4985_algo;
42417+static i2c_algorithm_no_const *s4985_algo;
42418
42419 /* Wrapper access functions for multiplexed SMBus */
42420 static DEFINE_MUTEX(nforce2_lock);
42421diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
42422index 71c7a39..71dd3e0 100644
42423--- a/drivers/i2c/i2c-dev.c
42424+++ b/drivers/i2c/i2c-dev.c
42425@@ -272,7 +272,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
42426 break;
42427 }
42428
42429- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
42430+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
42431 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
42432 if (IS_ERR(rdwr_pa[i].buf)) {
42433 res = PTR_ERR(rdwr_pa[i].buf);
42434diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
42435index 0b510ba..4fbb5085 100644
42436--- a/drivers/ide/ide-cd.c
42437+++ b/drivers/ide/ide-cd.c
42438@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
42439 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
42440 if ((unsigned long)buf & alignment
42441 || blk_rq_bytes(rq) & q->dma_pad_mask
42442- || object_is_on_stack(buf))
42443+ || object_starts_on_stack(buf))
42444 drive->dma = 0;
42445 }
42446 }
42447diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
42448index f009d05..d95b613 100644
42449--- a/drivers/iio/industrialio-core.c
42450+++ b/drivers/iio/industrialio-core.c
42451@@ -555,7 +555,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
42452 }
42453
42454 static
42455-int __iio_device_attr_init(struct device_attribute *dev_attr,
42456+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
42457 const char *postfix,
42458 struct iio_chan_spec const *chan,
42459 ssize_t (*readfunc)(struct device *dev,
42460diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
42461index e28a494..f7c2671 100644
42462--- a/drivers/infiniband/core/cm.c
42463+++ b/drivers/infiniband/core/cm.c
42464@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
42465
42466 struct cm_counter_group {
42467 struct kobject obj;
42468- atomic_long_t counter[CM_ATTR_COUNT];
42469+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
42470 };
42471
42472 struct cm_counter_attribute {
42473@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
42474 struct ib_mad_send_buf *msg = NULL;
42475 int ret;
42476
42477- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42478+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42479 counter[CM_REQ_COUNTER]);
42480
42481 /* Quick state check to discard duplicate REQs. */
42482@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
42483 if (!cm_id_priv)
42484 return;
42485
42486- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42487+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42488 counter[CM_REP_COUNTER]);
42489 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
42490 if (ret)
42491@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
42492 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
42493 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
42494 spin_unlock_irq(&cm_id_priv->lock);
42495- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42496+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42497 counter[CM_RTU_COUNTER]);
42498 goto out;
42499 }
42500@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
42501 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
42502 dreq_msg->local_comm_id);
42503 if (!cm_id_priv) {
42504- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42505+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42506 counter[CM_DREQ_COUNTER]);
42507 cm_issue_drep(work->port, work->mad_recv_wc);
42508 return -EINVAL;
42509@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
42510 case IB_CM_MRA_REP_RCVD:
42511 break;
42512 case IB_CM_TIMEWAIT:
42513- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42514+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42515 counter[CM_DREQ_COUNTER]);
42516 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42517 goto unlock;
42518@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
42519 cm_free_msg(msg);
42520 goto deref;
42521 case IB_CM_DREQ_RCVD:
42522- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42523+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42524 counter[CM_DREQ_COUNTER]);
42525 goto unlock;
42526 default:
42527@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
42528 ib_modify_mad(cm_id_priv->av.port->mad_agent,
42529 cm_id_priv->msg, timeout)) {
42530 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
42531- atomic_long_inc(&work->port->
42532+ atomic_long_inc_unchecked(&work->port->
42533 counter_group[CM_RECV_DUPLICATES].
42534 counter[CM_MRA_COUNTER]);
42535 goto out;
42536@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
42537 break;
42538 case IB_CM_MRA_REQ_RCVD:
42539 case IB_CM_MRA_REP_RCVD:
42540- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42541+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42542 counter[CM_MRA_COUNTER]);
42543 /* fall through */
42544 default:
42545@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
42546 case IB_CM_LAP_IDLE:
42547 break;
42548 case IB_CM_MRA_LAP_SENT:
42549- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42550+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42551 counter[CM_LAP_COUNTER]);
42552 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42553 goto unlock;
42554@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
42555 cm_free_msg(msg);
42556 goto deref;
42557 case IB_CM_LAP_RCVD:
42558- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42559+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42560 counter[CM_LAP_COUNTER]);
42561 goto unlock;
42562 default:
42563@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
42564 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
42565 if (cur_cm_id_priv) {
42566 spin_unlock_irq(&cm.lock);
42567- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42568+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42569 counter[CM_SIDR_REQ_COUNTER]);
42570 goto out; /* Duplicate message. */
42571 }
42572@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
42573 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
42574 msg->retries = 1;
42575
42576- atomic_long_add(1 + msg->retries,
42577+ atomic_long_add_unchecked(1 + msg->retries,
42578 &port->counter_group[CM_XMIT].counter[attr_index]);
42579 if (msg->retries)
42580- atomic_long_add(msg->retries,
42581+ atomic_long_add_unchecked(msg->retries,
42582 &port->counter_group[CM_XMIT_RETRIES].
42583 counter[attr_index]);
42584
42585@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
42586 }
42587
42588 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
42589- atomic_long_inc(&port->counter_group[CM_RECV].
42590+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
42591 counter[attr_id - CM_ATTR_ID_OFFSET]);
42592
42593 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
42594@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
42595 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
42596
42597 return sprintf(buf, "%ld\n",
42598- atomic_long_read(&group->counter[cm_attr->index]));
42599+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
42600 }
42601
42602 static const struct sysfs_ops cm_counter_ops = {
42603diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
42604index 9f5ad7c..588cd84 100644
42605--- a/drivers/infiniband/core/fmr_pool.c
42606+++ b/drivers/infiniband/core/fmr_pool.c
42607@@ -98,8 +98,8 @@ struct ib_fmr_pool {
42608
42609 struct task_struct *thread;
42610
42611- atomic_t req_ser;
42612- atomic_t flush_ser;
42613+ atomic_unchecked_t req_ser;
42614+ atomic_unchecked_t flush_ser;
42615
42616 wait_queue_head_t force_wait;
42617 };
42618@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42619 struct ib_fmr_pool *pool = pool_ptr;
42620
42621 do {
42622- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
42623+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
42624 ib_fmr_batch_release(pool);
42625
42626- atomic_inc(&pool->flush_ser);
42627+ atomic_inc_unchecked(&pool->flush_ser);
42628 wake_up_interruptible(&pool->force_wait);
42629
42630 if (pool->flush_function)
42631@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42632 }
42633
42634 set_current_state(TASK_INTERRUPTIBLE);
42635- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
42636+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
42637 !kthread_should_stop())
42638 schedule();
42639 __set_current_state(TASK_RUNNING);
42640@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
42641 pool->dirty_watermark = params->dirty_watermark;
42642 pool->dirty_len = 0;
42643 spin_lock_init(&pool->pool_lock);
42644- atomic_set(&pool->req_ser, 0);
42645- atomic_set(&pool->flush_ser, 0);
42646+ atomic_set_unchecked(&pool->req_ser, 0);
42647+ atomic_set_unchecked(&pool->flush_ser, 0);
42648 init_waitqueue_head(&pool->force_wait);
42649
42650 pool->thread = kthread_run(ib_fmr_cleanup_thread,
42651@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
42652 }
42653 spin_unlock_irq(&pool->pool_lock);
42654
42655- serial = atomic_inc_return(&pool->req_ser);
42656+ serial = atomic_inc_return_unchecked(&pool->req_ser);
42657 wake_up_process(pool->thread);
42658
42659 if (wait_event_interruptible(pool->force_wait,
42660- atomic_read(&pool->flush_ser) - serial >= 0))
42661+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
42662 return -EINTR;
42663
42664 return 0;
42665@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
42666 } else {
42667 list_add_tail(&fmr->list, &pool->dirty_list);
42668 if (++pool->dirty_len >= pool->dirty_watermark) {
42669- atomic_inc(&pool->req_ser);
42670+ atomic_inc_unchecked(&pool->req_ser);
42671 wake_up_process(pool->thread);
42672 }
42673 }
42674diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
42675index 6c52e72..6303e3f 100644
42676--- a/drivers/infiniband/core/uverbs_cmd.c
42677+++ b/drivers/infiniband/core/uverbs_cmd.c
42678@@ -945,6 +945,9 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
42679 if (copy_from_user(&cmd, buf, sizeof cmd))
42680 return -EFAULT;
42681
42682+ if (!access_ok_noprefault(VERIFY_READ, cmd.start, cmd.length))
42683+ return -EFAULT;
42684+
42685 INIT_UDATA(&udata, buf + sizeof cmd,
42686 (unsigned long) cmd.response + sizeof resp,
42687 in_len - sizeof cmd, out_len - sizeof resp);
42688diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
42689index cb43c22..2e12dd7 100644
42690--- a/drivers/infiniband/hw/cxgb4/mem.c
42691+++ b/drivers/infiniband/hw/cxgb4/mem.c
42692@@ -256,7 +256,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42693 int err;
42694 struct fw_ri_tpte tpt;
42695 u32 stag_idx;
42696- static atomic_t key;
42697+ static atomic_unchecked_t key;
42698
42699 if (c4iw_fatal_error(rdev))
42700 return -EIO;
42701@@ -277,7 +277,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42702 if (rdev->stats.stag.cur > rdev->stats.stag.max)
42703 rdev->stats.stag.max = rdev->stats.stag.cur;
42704 mutex_unlock(&rdev->stats.lock);
42705- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
42706+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
42707 }
42708 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
42709 __func__, stag_state, type, pdid, stag_idx);
42710diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
42711index 79b3dbc..96e5fcc 100644
42712--- a/drivers/infiniband/hw/ipath/ipath_rc.c
42713+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
42714@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42715 struct ib_atomic_eth *ateth;
42716 struct ipath_ack_entry *e;
42717 u64 vaddr;
42718- atomic64_t *maddr;
42719+ atomic64_unchecked_t *maddr;
42720 u64 sdata;
42721 u32 rkey;
42722 u8 next;
42723@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42724 IB_ACCESS_REMOTE_ATOMIC)))
42725 goto nack_acc_unlck;
42726 /* Perform atomic OP and save result. */
42727- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42728+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42729 sdata = be64_to_cpu(ateth->swap_data);
42730 e = &qp->s_ack_queue[qp->r_head_ack_queue];
42731 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
42732- (u64) atomic64_add_return(sdata, maddr) - sdata :
42733+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42734 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42735 be64_to_cpu(ateth->compare_data),
42736 sdata);
42737diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
42738index 1f95bba..9530f87 100644
42739--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
42740+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
42741@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
42742 unsigned long flags;
42743 struct ib_wc wc;
42744 u64 sdata;
42745- atomic64_t *maddr;
42746+ atomic64_unchecked_t *maddr;
42747 enum ib_wc_status send_status;
42748
42749 /*
42750@@ -382,11 +382,11 @@ again:
42751 IB_ACCESS_REMOTE_ATOMIC)))
42752 goto acc_err;
42753 /* Perform atomic OP and save result. */
42754- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42755+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42756 sdata = wqe->wr.wr.atomic.compare_add;
42757 *(u64 *) sqp->s_sge.sge.vaddr =
42758 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
42759- (u64) atomic64_add_return(sdata, maddr) - sdata :
42760+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42761 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42762 sdata, wqe->wr.wr.atomic.swap);
42763 goto send_comp;
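
[In both ipath hunks the target of a remote fetch-and-add is recast to the unchecked 64-bit atomic type: the memory being updated belongs to the remote application, and an InfiniBand FETCH_ADD is ordinary modulo-2^64 arithmetic, so overflow there is legitimate data rather than a kernel refcount bug. A small C11 model of the wrapping fetch-and-add; the kernel atomic64_* APIs are not reproduced:]

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    _Atomic uint64_t mem = UINT64_MAX;  /* remote buffer word, one below wrap */

    /* FETCH_ADD returns the prior value; the stored value wraps mod 2^64,
     * which is exactly what the verbs consumer expects. */
    uint64_t old = atomic_fetch_add(&mem, 2);

    printf("old=%llu new=%llu\n",
           (unsigned long long)old,
           (unsigned long long)atomic_load(&mem)); /* new == 1 after the wrap */
    return 0;
}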
42764diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
42765index 729382c..2f82b8d 100644
42766--- a/drivers/infiniband/hw/mlx4/mad.c
42767+++ b/drivers/infiniband/hw/mlx4/mad.c
42768@@ -106,7 +106,7 @@ __be64 mlx4_ib_gen_node_guid(void)
42769
42770 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
42771 {
42772- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
42773+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
42774 cpu_to_be64(0xff00000000000000LL);
42775 }
42776
42777diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
42778index ed327e6..ca1739e0 100644
42779--- a/drivers/infiniband/hw/mlx4/mcg.c
42780+++ b/drivers/infiniband/hw/mlx4/mcg.c
42781@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
42782 {
42783 char name[20];
42784
42785- atomic_set(&ctx->tid, 0);
42786+ atomic_set_unchecked(&ctx->tid, 0);
42787 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
42788 ctx->mcg_wq = create_singlethread_workqueue(name);
42789 if (!ctx->mcg_wq)
42790diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42791index 6eb743f..a7b0f6d 100644
42792--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
42793+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42794@@ -426,7 +426,7 @@ struct mlx4_ib_demux_ctx {
42795 struct list_head mcg_mgid0_list;
42796 struct workqueue_struct *mcg_wq;
42797 struct mlx4_ib_demux_pv_ctx **tun;
42798- atomic_t tid;
42799+ atomic_unchecked_t tid;
42800 int flushing; /* flushing the work queue */
42801 };
42802
42803diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
42804index 9d3e5c1..6f166df 100644
42805--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
42806+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
42807@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
42808 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
42809 }
42810
42811-int mthca_QUERY_FW(struct mthca_dev *dev)
42812+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
42813 {
42814 struct mthca_mailbox *mailbox;
42815 u32 *outbox;
42816@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42817 CMD_TIME_CLASS_B);
42818 }
42819
42820-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42821+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42822 int num_mtt)
42823 {
42824 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
42825@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
42826 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
42827 }
42828
42829-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42830+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42831 int eq_num)
42832 {
42833 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
42834@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
42835 CMD_TIME_CLASS_B);
42836 }
42837
42838-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42839+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42840 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
42841 void *in_mad, void *response_mad)
42842 {
42843diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
42844index ded76c1..0cf0a08 100644
42845--- a/drivers/infiniband/hw/mthca/mthca_main.c
42846+++ b/drivers/infiniband/hw/mthca/mthca_main.c
42847@@ -692,7 +692,7 @@ err_close:
42848 return err;
42849 }
42850
42851-static int mthca_setup_hca(struct mthca_dev *dev)
42852+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
42853 {
42854 int err;
42855
42856diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
42857index ed9a989..6aa5dc2 100644
42858--- a/drivers/infiniband/hw/mthca/mthca_mr.c
42859+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
42860@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
42861 * through the bitmaps)
42862 */
42863
42864-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42865+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42866 {
42867 int o;
42868 int m;
42869@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
42870 return key;
42871 }
42872
42873-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42874+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42875 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
42876 {
42877 struct mthca_mailbox *mailbox;
42878@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
42879 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
42880 }
42881
42882-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42883+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42884 u64 *buffer_list, int buffer_size_shift,
42885 int list_len, u64 iova, u64 total_size,
42886 u32 access, struct mthca_mr *mr)
42887diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
42888index 415f8e1..e34214e 100644
42889--- a/drivers/infiniband/hw/mthca/mthca_provider.c
42890+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
42891@@ -764,7 +764,7 @@ unlock:
42892 return 0;
42893 }
42894
42895-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
42896+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
42897 {
42898 struct mthca_dev *dev = to_mdev(ibcq->device);
42899 struct mthca_cq *cq = to_mcq(ibcq);
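
[The mthca hunks attach __intentional_overflow(-1) to functions whose arithmetic the size_overflow GCC plugin would otherwise instrument; the annotation tells the plugin that overflow in the marked computation (-1 conventionally naming the return value) is deliberate and must not be flagged. Outside a plugin-enabled build the marker has to compile away to nothing. A hedged sketch of that macro pattern; it mirrors how such plugin markers are usually wired up, not the exact kernel compiler.h text:]

/* With the plugin active the attribute is understood; on a stock
 * compiler the macro expands to nothing so the code still builds. */
#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif

/* Example: a buddy-style size computation where "1 << order" may
 * legitimately wrap on bogus input and is range-checked by the caller. */
static int __intentional_overflow(-1) order_to_size(int order)
{
    return 1 << order;
}

int main(void)
{
    return order_to_size(3) == 8 ? 0 : 1;
}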
42900diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
42901index 3b2a6dc..bce26ff 100644
42902--- a/drivers/infiniband/hw/nes/nes.c
42903+++ b/drivers/infiniband/hw/nes/nes.c
42904@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
42905 LIST_HEAD(nes_adapter_list);
42906 static LIST_HEAD(nes_dev_list);
42907
42908-atomic_t qps_destroyed;
42909+atomic_unchecked_t qps_destroyed;
42910
42911 static unsigned int ee_flsh_adapter;
42912 static unsigned int sysfs_nonidx_addr;
42913@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
42914 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
42915 struct nes_adapter *nesadapter = nesdev->nesadapter;
42916
42917- atomic_inc(&qps_destroyed);
42918+ atomic_inc_unchecked(&qps_destroyed);
42919
42920 /* Free the control structures */
42921
42922diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
42923index bd9d132..70d84f4 100644
42924--- a/drivers/infiniband/hw/nes/nes.h
42925+++ b/drivers/infiniband/hw/nes/nes.h
42926@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
42927 extern unsigned int wqm_quanta;
42928 extern struct list_head nes_adapter_list;
42929
42930-extern atomic_t cm_connects;
42931-extern atomic_t cm_accepts;
42932-extern atomic_t cm_disconnects;
42933-extern atomic_t cm_closes;
42934-extern atomic_t cm_connecteds;
42935-extern atomic_t cm_connect_reqs;
42936-extern atomic_t cm_rejects;
42937-extern atomic_t mod_qp_timouts;
42938-extern atomic_t qps_created;
42939-extern atomic_t qps_destroyed;
42940-extern atomic_t sw_qps_destroyed;
42941+extern atomic_unchecked_t cm_connects;
42942+extern atomic_unchecked_t cm_accepts;
42943+extern atomic_unchecked_t cm_disconnects;
42944+extern atomic_unchecked_t cm_closes;
42945+extern atomic_unchecked_t cm_connecteds;
42946+extern atomic_unchecked_t cm_connect_reqs;
42947+extern atomic_unchecked_t cm_rejects;
42948+extern atomic_unchecked_t mod_qp_timouts;
42949+extern atomic_unchecked_t qps_created;
42950+extern atomic_unchecked_t qps_destroyed;
42951+extern atomic_unchecked_t sw_qps_destroyed;
42952 extern u32 mh_detected;
42953 extern u32 mh_pauses_sent;
42954 extern u32 cm_packets_sent;
42955@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
42956 extern u32 cm_packets_received;
42957 extern u32 cm_packets_dropped;
42958 extern u32 cm_packets_retrans;
42959-extern atomic_t cm_listens_created;
42960-extern atomic_t cm_listens_destroyed;
42961+extern atomic_unchecked_t cm_listens_created;
42962+extern atomic_unchecked_t cm_listens_destroyed;
42963 extern u32 cm_backlog_drops;
42964-extern atomic_t cm_loopbacks;
42965-extern atomic_t cm_nodes_created;
42966-extern atomic_t cm_nodes_destroyed;
42967-extern atomic_t cm_accel_dropped_pkts;
42968-extern atomic_t cm_resets_recvd;
42969-extern atomic_t pau_qps_created;
42970-extern atomic_t pau_qps_destroyed;
42971+extern atomic_unchecked_t cm_loopbacks;
42972+extern atomic_unchecked_t cm_nodes_created;
42973+extern atomic_unchecked_t cm_nodes_destroyed;
42974+extern atomic_unchecked_t cm_accel_dropped_pkts;
42975+extern atomic_unchecked_t cm_resets_recvd;
42976+extern atomic_unchecked_t pau_qps_created;
42977+extern atomic_unchecked_t pau_qps_destroyed;
42978
42979 extern u32 int_mod_timer_init;
42980 extern u32 int_mod_cq_depth_256;
42981diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
42982index 6f09a72..cf4399d 100644
42983--- a/drivers/infiniband/hw/nes/nes_cm.c
42984+++ b/drivers/infiniband/hw/nes/nes_cm.c
42985@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
42986 u32 cm_packets_retrans;
42987 u32 cm_packets_created;
42988 u32 cm_packets_received;
42989-atomic_t cm_listens_created;
42990-atomic_t cm_listens_destroyed;
42991+atomic_unchecked_t cm_listens_created;
42992+atomic_unchecked_t cm_listens_destroyed;
42993 u32 cm_backlog_drops;
42994-atomic_t cm_loopbacks;
42995-atomic_t cm_nodes_created;
42996-atomic_t cm_nodes_destroyed;
42997-atomic_t cm_accel_dropped_pkts;
42998-atomic_t cm_resets_recvd;
42999+atomic_unchecked_t cm_loopbacks;
43000+atomic_unchecked_t cm_nodes_created;
43001+atomic_unchecked_t cm_nodes_destroyed;
43002+atomic_unchecked_t cm_accel_dropped_pkts;
43003+atomic_unchecked_t cm_resets_recvd;
43004
43005 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
43006 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
43007@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
43008 /* instance of function pointers for client API */
43009 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
43010 static struct nes_cm_ops nes_cm_api = {
43011- mini_cm_accelerated,
43012- mini_cm_listen,
43013- mini_cm_del_listen,
43014- mini_cm_connect,
43015- mini_cm_close,
43016- mini_cm_accept,
43017- mini_cm_reject,
43018- mini_cm_recv_pkt,
43019- mini_cm_dealloc_core,
43020- mini_cm_get,
43021- mini_cm_set
43022+ .accelerated = mini_cm_accelerated,
43023+ .listen = mini_cm_listen,
43024+ .stop_listener = mini_cm_del_listen,
43025+ .connect = mini_cm_connect,
43026+ .close = mini_cm_close,
43027+ .accept = mini_cm_accept,
43028+ .reject = mini_cm_reject,
43029+ .recv_pkt = mini_cm_recv_pkt,
43030+ .destroy_cm_core = mini_cm_dealloc_core,
43031+ .get = mini_cm_get,
43032+ .set = mini_cm_set
43033 };
43034
43035 static struct nes_cm_core *g_cm_core;
43036
43037-atomic_t cm_connects;
43038-atomic_t cm_accepts;
43039-atomic_t cm_disconnects;
43040-atomic_t cm_closes;
43041-atomic_t cm_connecteds;
43042-atomic_t cm_connect_reqs;
43043-atomic_t cm_rejects;
43044+atomic_unchecked_t cm_connects;
43045+atomic_unchecked_t cm_accepts;
43046+atomic_unchecked_t cm_disconnects;
43047+atomic_unchecked_t cm_closes;
43048+atomic_unchecked_t cm_connecteds;
43049+atomic_unchecked_t cm_connect_reqs;
43050+atomic_unchecked_t cm_rejects;
43051
43052 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
43053 {
43054@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
43055 kfree(listener);
43056 listener = NULL;
43057 ret = 0;
43058- atomic_inc(&cm_listens_destroyed);
43059+ atomic_inc_unchecked(&cm_listens_destroyed);
43060 } else {
43061 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
43062 }
43063@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
43064 cm_node->rem_mac);
43065
43066 add_hte_node(cm_core, cm_node);
43067- atomic_inc(&cm_nodes_created);
43068+ atomic_inc_unchecked(&cm_nodes_created);
43069
43070 return cm_node;
43071 }
43072@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
43073 }
43074
43075 atomic_dec(&cm_core->node_cnt);
43076- atomic_inc(&cm_nodes_destroyed);
43077+ atomic_inc_unchecked(&cm_nodes_destroyed);
43078 nesqp = cm_node->nesqp;
43079 if (nesqp) {
43080 nesqp->cm_node = NULL;
43081@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
43082
43083 static void drop_packet(struct sk_buff *skb)
43084 {
43085- atomic_inc(&cm_accel_dropped_pkts);
43086+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43087 dev_kfree_skb_any(skb);
43088 }
43089
43090@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
43091 {
43092
43093 int reset = 0; /* whether to send reset in case of err.. */
43094- atomic_inc(&cm_resets_recvd);
43095+ atomic_inc_unchecked(&cm_resets_recvd);
43096 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
43097 " refcnt=%d\n", cm_node, cm_node->state,
43098 atomic_read(&cm_node->ref_count));
43099@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
43100 rem_ref_cm_node(cm_node->cm_core, cm_node);
43101 return NULL;
43102 }
43103- atomic_inc(&cm_loopbacks);
43104+ atomic_inc_unchecked(&cm_loopbacks);
43105 loopbackremotenode->loopbackpartner = cm_node;
43106 loopbackremotenode->tcp_cntxt.rcv_wscale =
43107 NES_CM_DEFAULT_RCV_WND_SCALE;
43108@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
43109 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
43110 else {
43111 rem_ref_cm_node(cm_core, cm_node);
43112- atomic_inc(&cm_accel_dropped_pkts);
43113+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43114 dev_kfree_skb_any(skb);
43115 }
43116 break;
43117@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43118
43119 if ((cm_id) && (cm_id->event_handler)) {
43120 if (issue_disconn) {
43121- atomic_inc(&cm_disconnects);
43122+ atomic_inc_unchecked(&cm_disconnects);
43123 cm_event.event = IW_CM_EVENT_DISCONNECT;
43124 cm_event.status = disconn_status;
43125 cm_event.local_addr = cm_id->local_addr;
43126@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43127 }
43128
43129 if (issue_close) {
43130- atomic_inc(&cm_closes);
43131+ atomic_inc_unchecked(&cm_closes);
43132 nes_disconnect(nesqp, 1);
43133
43134 cm_id->provider_data = nesqp;
43135@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43136
43137 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
43138 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
43139- atomic_inc(&cm_accepts);
43140+ atomic_inc_unchecked(&cm_accepts);
43141
43142 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
43143 netdev_refcnt_read(nesvnic->netdev));
43144@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
43145 struct nes_cm_core *cm_core;
43146 u8 *start_buff;
43147
43148- atomic_inc(&cm_rejects);
43149+ atomic_inc_unchecked(&cm_rejects);
43150 cm_node = (struct nes_cm_node *)cm_id->provider_data;
43151 loopback = cm_node->loopbackpartner;
43152 cm_core = cm_node->cm_core;
43153@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43154 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
43155 ntohs(laddr->sin_port));
43156
43157- atomic_inc(&cm_connects);
43158+ atomic_inc_unchecked(&cm_connects);
43159 nesqp->active_conn = 1;
43160
43161 /* cache the cm_id in the qp */
43162@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
43163 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
43164 return err;
43165 }
43166- atomic_inc(&cm_listens_created);
43167+ atomic_inc_unchecked(&cm_listens_created);
43168 }
43169
43170 cm_id->add_ref(cm_id);
43171@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
43172
43173 if (nesqp->destroyed)
43174 return;
43175- atomic_inc(&cm_connecteds);
43176+ atomic_inc_unchecked(&cm_connecteds);
43177 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
43178 " local port 0x%04X. jiffies = %lu.\n",
43179 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
43180@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
43181
43182 cm_id->add_ref(cm_id);
43183 ret = cm_id->event_handler(cm_id, &cm_event);
43184- atomic_inc(&cm_closes);
43185+ atomic_inc_unchecked(&cm_closes);
43186 cm_event.event = IW_CM_EVENT_CLOSE;
43187 cm_event.status = 0;
43188 cm_event.provider_data = cm_id->provider_data;
43189@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
43190 return;
43191 cm_id = cm_node->cm_id;
43192
43193- atomic_inc(&cm_connect_reqs);
43194+ atomic_inc_unchecked(&cm_connect_reqs);
43195 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43196 cm_node, cm_id, jiffies);
43197
43198@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
43199 return;
43200 cm_id = cm_node->cm_id;
43201
43202- atomic_inc(&cm_connect_reqs);
43203+ atomic_inc_unchecked(&cm_connect_reqs);
43204 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43205 cm_node, cm_id, jiffies);
43206
43207diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
43208index 4166452..fc952c3 100644
43209--- a/drivers/infiniband/hw/nes/nes_mgt.c
43210+++ b/drivers/infiniband/hw/nes/nes_mgt.c
43211@@ -40,8 +40,8 @@
43212 #include "nes.h"
43213 #include "nes_mgt.h"
43214
43215-atomic_t pau_qps_created;
43216-atomic_t pau_qps_destroyed;
43217+atomic_unchecked_t pau_qps_created;
43218+atomic_unchecked_t pau_qps_destroyed;
43219
43220 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
43221 {
43222@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
43223 {
43224 struct sk_buff *skb;
43225 unsigned long flags;
43226- atomic_inc(&pau_qps_destroyed);
43227+ atomic_inc_unchecked(&pau_qps_destroyed);
43228
43229 /* Free packets that have not yet been forwarded */
43230 /* Lock is acquired by skb_dequeue when removing the skb */
43231@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
43232 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
43233 skb_queue_head_init(&nesqp->pau_list);
43234 spin_lock_init(&nesqp->pau_lock);
43235- atomic_inc(&pau_qps_created);
43236+ atomic_inc_unchecked(&pau_qps_created);
43237 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
43238 }
43239
43240diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
43241index 49eb511..a774366 100644
43242--- a/drivers/infiniband/hw/nes/nes_nic.c
43243+++ b/drivers/infiniband/hw/nes/nes_nic.c
43244@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
43245 target_stat_values[++index] = mh_detected;
43246 target_stat_values[++index] = mh_pauses_sent;
43247 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
43248- target_stat_values[++index] = atomic_read(&cm_connects);
43249- target_stat_values[++index] = atomic_read(&cm_accepts);
43250- target_stat_values[++index] = atomic_read(&cm_disconnects);
43251- target_stat_values[++index] = atomic_read(&cm_connecteds);
43252- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
43253- target_stat_values[++index] = atomic_read(&cm_rejects);
43254- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
43255- target_stat_values[++index] = atomic_read(&qps_created);
43256- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
43257- target_stat_values[++index] = atomic_read(&qps_destroyed);
43258- target_stat_values[++index] = atomic_read(&cm_closes);
43259+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
43260+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
43261+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
43262+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
43263+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
43264+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
43265+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
43266+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
43267+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
43268+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
43269+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
43270 target_stat_values[++index] = cm_packets_sent;
43271 target_stat_values[++index] = cm_packets_bounced;
43272 target_stat_values[++index] = cm_packets_created;
43273 target_stat_values[++index] = cm_packets_received;
43274 target_stat_values[++index] = cm_packets_dropped;
43275 target_stat_values[++index] = cm_packets_retrans;
43276- target_stat_values[++index] = atomic_read(&cm_listens_created);
43277- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
43278+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
43279+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
43280 target_stat_values[++index] = cm_backlog_drops;
43281- target_stat_values[++index] = atomic_read(&cm_loopbacks);
43282- target_stat_values[++index] = atomic_read(&cm_nodes_created);
43283- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
43284- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
43285- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
43286+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
43287+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
43288+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
43289+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
43290+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
43291 target_stat_values[++index] = nesadapter->free_4kpbl;
43292 target_stat_values[++index] = nesadapter->free_256pbl;
43293 target_stat_values[++index] = int_mod_timer_init;
43294 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
43295 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
43296 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
43297- target_stat_values[++index] = atomic_read(&pau_qps_created);
43298- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
43299+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
43300+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
43301 }
43302
43303 /**
43304diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
43305index c0d0296..3185f57 100644
43306--- a/drivers/infiniband/hw/nes/nes_verbs.c
43307+++ b/drivers/infiniband/hw/nes/nes_verbs.c
43308@@ -46,9 +46,9 @@
43309
43310 #include <rdma/ib_umem.h>
43311
43312-atomic_t mod_qp_timouts;
43313-atomic_t qps_created;
43314-atomic_t sw_qps_destroyed;
43315+atomic_unchecked_t mod_qp_timouts;
43316+atomic_unchecked_t qps_created;
43317+atomic_unchecked_t sw_qps_destroyed;
43318
43319 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
43320
43321@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
43322 if (init_attr->create_flags)
43323 return ERR_PTR(-EINVAL);
43324
43325- atomic_inc(&qps_created);
43326+ atomic_inc_unchecked(&qps_created);
43327 switch (init_attr->qp_type) {
43328 case IB_QPT_RC:
43329 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
43330@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
43331 struct iw_cm_event cm_event;
43332 int ret = 0;
43333
43334- atomic_inc(&sw_qps_destroyed);
43335+ atomic_inc_unchecked(&sw_qps_destroyed);
43336 nesqp->destroyed = 1;
43337
43338 /* Blow away the connection if it exists. */
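
[The whole nes block converts its connection-manager and QP statistics counters to atomic_unchecked_t. These are monotonically increasing event tallies exported through the ethtool stats shown above; if one wraps, nothing breaks, so they must be exempt from the REFCOUNT protection that traps on atomic_t overflow. A userspace model of the two overflow policies, assuming GCC's __builtin_add_overflow; the kernel implements the checked variant in per-arch assembly, and this model ignores the concurrency aspect of the real atomics:]

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* "checked" increment: trap (here: abort) if the signed counter would
 * overflow, which is the behavior PAX_REFCOUNT gives plain atomic_t. */
static int32_t inc_checked(int32_t *v)
{
    int32_t out;
    if (__builtin_add_overflow(*v, 1, &out)) {
        fprintf(stderr, "refcount overflow detected\n");
        abort();
    }
    return *v = out;
}

/* "unchecked" increment: plain two's-complement wrap, fine for stats. */
static int32_t inc_unchecked(int32_t *v)
{
    return *v = (int32_t)((uint32_t)*v + 1);
}

int main(void)
{
    int32_t stat = INT32_MAX;
    printf("stat wraps to %d\n", inc_unchecked(&stat)); /* harmless */

    int32_t ref = INT32_MAX;
    inc_checked(&ref); /* aborts: a real refcount must never get here */
    return 0;
}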
43339diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
43340index b218254..1d1aa3c 100644
43341--- a/drivers/infiniband/hw/qib/qib.h
43342+++ b/drivers/infiniband/hw/qib/qib.h
43343@@ -52,6 +52,7 @@
43344 #include <linux/kref.h>
43345 #include <linux/sched.h>
43346 #include <linux/kthread.h>
43347+#include <linux/slab.h>
43348
43349 #include "qib_common.h"
43350 #include "qib_verbs.h"
43351diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43352index cdc7df4..a2fdfdb 100644
43353--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43354+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43355@@ -156,7 +156,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
43356 nla_total_size(2); /* IFLA_IPOIB_UMCAST */
43357 }
43358
43359-static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
43360+static struct rtnl_link_ops ipoib_link_ops = {
43361 .kind = "ipoib",
43362 .maxtype = IFLA_IPOIB_MAX,
43363 .policy = ipoib_policy,
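
[Dropping __read_mostly from ipoib_link_ops looks backwards until you know that the constify plugin in this patch forces structures consisting of function pointers, rtnl_link_ops among them, into read-only memory, and an explicit section attribute would clash with that placement. The underlying hardening idea can be shown in plain C: a const-qualified ops table lands in .rodata, so its function pointers cannot be overwritten at runtime:]

#include <stdio.h>

struct link_ops {
    const char *kind;
    void (*setup)(void);
};

static void my_setup(void) { puts("setup"); }

/* const places this in a read-only section; any attempt to overwrite
 * ops.setup at runtime faults instead of hijacking a function pointer. */
static const struct link_ops ops = {
    .kind  = "demo",
    .setup = my_setup,
};

int main(void)
{
    ops.setup();
    /* ops.setup = NULL;  -- rejected at compile time; the const is the point */
    return 0;
}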
43364diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
43365index e853a21..56fc5a8 100644
43366--- a/drivers/input/gameport/gameport.c
43367+++ b/drivers/input/gameport/gameport.c
43368@@ -527,14 +527,14 @@ EXPORT_SYMBOL(gameport_set_phys);
43369 */
43370 static void gameport_init_port(struct gameport *gameport)
43371 {
43372- static atomic_t gameport_no = ATOMIC_INIT(-1);
43373+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(-1);
43374
43375 __module_get(THIS_MODULE);
43376
43377 mutex_init(&gameport->drv_mutex);
43378 device_initialize(&gameport->dev);
43379 dev_set_name(&gameport->dev, "gameport%lu",
43380- (unsigned long)atomic_inc_return(&gameport_no));
43381+ (unsigned long)atomic_inc_return_unchecked(&gameport_no));
43382 gameport->dev.bus = &gameport_bus;
43383 gameport->dev.release = gameport_release_port;
43384 if (gameport->parent)
43385diff --git a/drivers/input/input.c b/drivers/input/input.c
43386index 213e3a1..4fea837 100644
43387--- a/drivers/input/input.c
43388+++ b/drivers/input/input.c
43389@@ -1775,7 +1775,7 @@ EXPORT_SYMBOL_GPL(input_class);
43390 */
43391 struct input_dev *input_allocate_device(void)
43392 {
43393- static atomic_t input_no = ATOMIC_INIT(-1);
43394+ static atomic_unchecked_t input_no = ATOMIC_INIT(-1);
43395 struct input_dev *dev;
43396
43397 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
43398@@ -1790,7 +1790,7 @@ struct input_dev *input_allocate_device(void)
43399 INIT_LIST_HEAD(&dev->node);
43400
43401 dev_set_name(&dev->dev, "input%lu",
43402- (unsigned long)atomic_inc_return(&input_no));
43403+ (unsigned long)atomic_inc_return_unchecked(&input_no));
43404
43405 __module_get(THIS_MODULE);
43406 }
43407diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
43408index 4a95b22..874c182 100644
43409--- a/drivers/input/joystick/sidewinder.c
43410+++ b/drivers/input/joystick/sidewinder.c
43411@@ -30,6 +30,7 @@
43412 #include <linux/kernel.h>
43413 #include <linux/module.h>
43414 #include <linux/slab.h>
43415+#include <linux/sched.h>
43416 #include <linux/input.h>
43417 #include <linux/gameport.h>
43418 #include <linux/jiffies.h>
43419diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
43420index 3aa2f3f..53c00ea 100644
43421--- a/drivers/input/joystick/xpad.c
43422+++ b/drivers/input/joystick/xpad.c
43423@@ -886,7 +886,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
43424
43425 static int xpad_led_probe(struct usb_xpad *xpad)
43426 {
43427- static atomic_t led_seq = ATOMIC_INIT(-1);
43428+ static atomic_unchecked_t led_seq = ATOMIC_INIT(-1);
43429 unsigned long led_no;
43430 struct xpad_led *led;
43431 struct led_classdev *led_cdev;
43432@@ -899,7 +899,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
43433 if (!led)
43434 return -ENOMEM;
43435
43436- led_no = atomic_inc_return(&led_seq);
43437+ led_no = atomic_inc_return_unchecked(&led_seq);
43438
43439 snprintf(led->name, sizeof(led->name), "xpad%lu", led_no);
43440 led->xpad = xpad;
43441diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
43442index ac1fa5f..5f7502c 100644
43443--- a/drivers/input/misc/ims-pcu.c
43444+++ b/drivers/input/misc/ims-pcu.c
43445@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
43446
43447 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43448 {
43449- static atomic_t device_no = ATOMIC_INIT(-1);
43450+ static atomic_unchecked_t device_no = ATOMIC_INIT(-1);
43451
43452 const struct ims_pcu_device_info *info;
43453 int error;
43454@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43455 }
43456
43457 /* Device appears to be operable, complete initialization */
43458- pcu->device_no = atomic_inc_return(&device_no);
43459+ pcu->device_no = atomic_inc_return_unchecked(&device_no);
43460
43461 /*
43462 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
43463diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
43464index f4cf664..3204fda 100644
43465--- a/drivers/input/mouse/psmouse.h
43466+++ b/drivers/input/mouse/psmouse.h
43467@@ -117,7 +117,7 @@ struct psmouse_attribute {
43468 ssize_t (*set)(struct psmouse *psmouse, void *data,
43469 const char *buf, size_t count);
43470 bool protect;
43471-};
43472+} __do_const;
43473 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
43474
43475 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
43476diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
43477index b604564..3f14ae4 100644
43478--- a/drivers/input/mousedev.c
43479+++ b/drivers/input/mousedev.c
43480@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
43481
43482 spin_unlock_irq(&client->packet_lock);
43483
43484- if (copy_to_user(buffer, data, count))
43485+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
43486 return -EFAULT;
43487
43488 return count;
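
[The mousedev_read hunk refuses a read larger than the on-stack packet buffer before calling copy_to_user(), closing a kernel stack infoleak if count ever exceeds sizeof(data). The generic shape of the fix, modeled in userspace with memcpy standing in for copy_to_user() and an assumed buffer size:]

#include <sys/types.h>
#include <string.h>
#include <errno.h>

#define PACKET_MAX 32 /* assumed stand-in for the driver's fixed buffer */

/* Copy at most the bytes we actually own; a caller-supplied count that
 * exceeds the local buffer is an error, never a bigger copy. */
static ssize_t read_packet(void *user_buf, size_t count)
{
    unsigned char data[PACKET_MAX] = { 0 };

    if (count > sizeof(data))
        return -EFAULT;            /* the patch's choice: reject, don't clamp */
    memcpy(user_buf, data, count); /* copy_to_user() in the kernel */
    return (ssize_t)count;
}

int main(void)
{
    char b[8];
    return read_packet(b, sizeof(b)) == 8 ? 0 : 1;
}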
43489diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
43490index a05a517..323a2fd 100644
43491--- a/drivers/input/serio/serio.c
43492+++ b/drivers/input/serio/serio.c
43493@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
43494 */
43495 static void serio_init_port(struct serio *serio)
43496 {
43497- static atomic_t serio_no = ATOMIC_INIT(-1);
43498+ static atomic_unchecked_t serio_no = ATOMIC_INIT(-1);
43499
43500 __module_get(THIS_MODULE);
43501
43502@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
43503 mutex_init(&serio->drv_mutex);
43504 device_initialize(&serio->dev);
43505 dev_set_name(&serio->dev, "serio%lu",
43506- (unsigned long)atomic_inc_return(&serio_no));
43507+ (unsigned long)atomic_inc_return_unchecked(&serio_no));
43508 serio->dev.bus = &serio_bus;
43509 serio->dev.release = serio_release_port;
43510 serio->dev.groups = serio_device_attr_groups;
43511diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
43512index 71ef5d6..93380a9 100644
43513--- a/drivers/input/serio/serio_raw.c
43514+++ b/drivers/input/serio/serio_raw.c
43515@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
43516
43517 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43518 {
43519- static atomic_t serio_raw_no = ATOMIC_INIT(-1);
43520+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(-1);
43521 struct serio_raw *serio_raw;
43522 int err;
43523
43524@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43525 }
43526
43527 snprintf(serio_raw->name, sizeof(serio_raw->name),
43528- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no));
43529+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no));
43530 kref_init(&serio_raw->kref);
43531 INIT_LIST_HEAD(&serio_raw->client_list);
43532 init_waitqueue_head(&serio_raw->wait);
43533diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
43534index 9802485..2e9941d 100644
43535--- a/drivers/iommu/amd_iommu.c
43536+++ b/drivers/iommu/amd_iommu.c
43537@@ -823,11 +823,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
43538
43539 static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
43540 {
43541+ phys_addr_t physaddr;
43542 WARN_ON(address & 0x7ULL);
43543
43544 memset(cmd, 0, sizeof(*cmd));
43545- cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
43546- cmd->data[1] = upper_32_bits(__pa(address));
43547+
43548+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
43549+ if (object_starts_on_stack((void *)address)) {
43550+ void *adjbuf = (void *)address - current->stack + current->lowmem_stack;
43551+ physaddr = __pa((u64)adjbuf);
43552+ } else
43553+#endif
43554+ physaddr = __pa(address);
43555+
43556+ cmd->data[0] = lower_32_bits(physaddr) | CMD_COMPL_WAIT_STORE_MASK;
43557+ cmd->data[1] = upper_32_bits(physaddr);
43558 cmd->data[2] = 1;
43559 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
43560 }
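
[build_completion_wait() feeds __pa() a pointer that may point into the current stack. With GRKERNSEC_KSTACKOVERFLOW the task stack lives in vmalloc space, where __pa() is invalid, so the hunk first rebases the pointer into the task's lowmem alias of the same stack (current->lowmem_stack, visible in the hunk). The rebasing itself is ordinary pointer arithmetic; a hedged sketch with two plain buffers standing in for the vmalloc stack and its lowmem mirror:]

#include <stdio.h>

/* Translate an address inside one mapping of a region to the same
 * offset inside another mapping of that region. */
static void *rebase(void *addr, void *from_base, void *to_base)
{
    return (char *)to_base + ((char *)addr - (char *)from_base);
}

int main(void)
{
    char vmalloc_stack[256];        /* stand-in: mapping __pa() cannot use */
    char lowmem_stack[256];         /* stand-in: mapping __pa() can use */

    char *obj = vmalloc_stack + 64; /* "on-stack" completion-wait buffer */
    char *alias = rebase(obj, vmalloc_stack, lowmem_stack);

    printf("offset preserved: %d\n", (int)(alias - lowmem_stack)); /* 64 */
    return 0;
}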
43561diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
43562index 6cd47b7..264d14a 100644
43563--- a/drivers/iommu/arm-smmu.c
43564+++ b/drivers/iommu/arm-smmu.c
43565@@ -968,7 +968,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
43566 cfg->irptndx = cfg->cbndx;
43567 }
43568
43569- ACCESS_ONCE(smmu_domain->smmu) = smmu;
43570+ ACCESS_ONCE_RW(smmu_domain->smmu) = smmu;
43571 arm_smmu_init_context_bank(smmu_domain);
43572 spin_unlock_irqrestore(&smmu_domain->lock, flags);
43573
43574diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
43575index f7718d7..3ef740b 100644
43576--- a/drivers/iommu/iommu.c
43577+++ b/drivers/iommu/iommu.c
43578@@ -802,7 +802,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
43579 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
43580 {
43581 int err;
43582- struct notifier_block *nb;
43583+ notifier_block_no_const *nb;
43584 struct iommu_callback_data cb = {
43585 .ops = ops,
43586 };
43587diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
43588index 89c4846..1de796f 100644
43589--- a/drivers/iommu/irq_remapping.c
43590+++ b/drivers/iommu/irq_remapping.c
43591@@ -353,7 +353,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
43592 void panic_if_irq_remap(const char *msg)
43593 {
43594 if (irq_remapping_enabled)
43595- panic(msg);
43596+ panic("%s", msg);
43597 }
43598
43599 static void ir_ack_apic_edge(struct irq_data *data)
43600@@ -374,10 +374,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
43601
43602 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
43603 {
43604- chip->irq_print_chip = ir_print_prefix;
43605- chip->irq_ack = ir_ack_apic_edge;
43606- chip->irq_eoi = ir_ack_apic_level;
43607- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43608+ pax_open_kernel();
43609+ *(void **)&chip->irq_print_chip = ir_print_prefix;
43610+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
43611+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
43612+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43613+ pax_close_kernel();
43614 }
43615
43616 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
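
[irq_remap_modify_chip_defaults() pokes function pointers into an irq_chip that KERNEXEC has made read-only, so the writes are bracketed with pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection (on x86 by toggling CR0.WP). mprotect() gives a faithful userspace analogue of the "unlock, patch, relock" discipline:]

#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    /* A page-aligned "structure" we will treat as read-only data. */
    unsigned char *chip = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (chip == MAP_FAILED)
        return 1;
    chip[0] = 1;
    mprotect(chip, pagesz, PROT_READ);              /* KERNEXEC: now immutable */

    mprotect(chip, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel()  */
    chip[0] = 2;                                    /* patch the ops slot */
    mprotect(chip, pagesz, PROT_READ);              /* pax_close_kernel() */

    printf("patched value: %d\n", chip[0]);
    munmap(chip, pagesz);
    return 0;
}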
43617diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
43618index d617ee5..df8be8b 100644
43619--- a/drivers/irqchip/irq-gic.c
43620+++ b/drivers/irqchip/irq-gic.c
43621@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
43622 * Supported arch specific GIC irq extension.
43623 * Default make them NULL.
43624 */
43625-struct irq_chip gic_arch_extn = {
43626+irq_chip_no_const gic_arch_extn = {
43627 .irq_eoi = NULL,
43628 .irq_mask = NULL,
43629 .irq_unmask = NULL,
43630@@ -311,7 +311,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
43631 chained_irq_exit(chip, desc);
43632 }
43633
43634-static struct irq_chip gic_chip = {
43635+static irq_chip_no_const gic_chip __read_only = {
43636 .name = "GIC",
43637 .irq_mask = gic_mask_irq,
43638 .irq_unmask = gic_unmask_irq,
43639diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
43640index 078cac5..fb0f846 100644
43641--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
43642+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
43643@@ -353,7 +353,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
43644 struct intc_irqpin_iomem *i;
43645 struct resource *io[INTC_IRQPIN_REG_NR];
43646 struct resource *irq;
43647- struct irq_chip *irq_chip;
43648+ irq_chip_no_const *irq_chip;
43649 void (*enable_fn)(struct irq_data *d);
43650 void (*disable_fn)(struct irq_data *d);
43651 const char *name = dev_name(dev);
43652diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
43653index 384e6ed..7a771b2 100644
43654--- a/drivers/irqchip/irq-renesas-irqc.c
43655+++ b/drivers/irqchip/irq-renesas-irqc.c
43656@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
43657 struct irqc_priv *p;
43658 struct resource *io;
43659 struct resource *irq;
43660- struct irq_chip *irq_chip;
43661+ irq_chip_no_const *irq_chip;
43662 const char *name = dev_name(&pdev->dev);
43663 int ret;
43664 int k;
43665diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
43666index 6a2df32..dc962f1 100644
43667--- a/drivers/isdn/capi/capi.c
43668+++ b/drivers/isdn/capi/capi.c
43669@@ -81,8 +81,8 @@ struct capiminor {
43670
43671 struct capi20_appl *ap;
43672 u32 ncci;
43673- atomic_t datahandle;
43674- atomic_t msgid;
43675+ atomic_unchecked_t datahandle;
43676+ atomic_unchecked_t msgid;
43677
43678 struct tty_port port;
43679 int ttyinstop;
43680@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
43681 capimsg_setu16(s, 2, mp->ap->applid);
43682 capimsg_setu8 (s, 4, CAPI_DATA_B3);
43683 capimsg_setu8 (s, 5, CAPI_RESP);
43684- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
43685+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
43686 capimsg_setu32(s, 8, mp->ncci);
43687 capimsg_setu16(s, 12, datahandle);
43688 }
43689@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
43690 mp->outbytes -= len;
43691 spin_unlock_bh(&mp->outlock);
43692
43693- datahandle = atomic_inc_return(&mp->datahandle);
43694+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
43695 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
43696 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43697 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43698 capimsg_setu16(skb->data, 2, mp->ap->applid);
43699 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
43700 capimsg_setu8 (skb->data, 5, CAPI_REQ);
43701- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
43702+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
43703 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
43704 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
43705 capimsg_setu16(skb->data, 16, len); /* Data length */
43706diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
43707index aecec6d..11e13c5 100644
43708--- a/drivers/isdn/gigaset/bas-gigaset.c
43709+++ b/drivers/isdn/gigaset/bas-gigaset.c
43710@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
43711
43712
43713 static const struct gigaset_ops gigops = {
43714- gigaset_write_cmd,
43715- gigaset_write_room,
43716- gigaset_chars_in_buffer,
43717- gigaset_brkchars,
43718- gigaset_init_bchannel,
43719- gigaset_close_bchannel,
43720- gigaset_initbcshw,
43721- gigaset_freebcshw,
43722- gigaset_reinitbcshw,
43723- gigaset_initcshw,
43724- gigaset_freecshw,
43725- gigaset_set_modem_ctrl,
43726- gigaset_baud_rate,
43727- gigaset_set_line_ctrl,
43728- gigaset_isoc_send_skb,
43729- gigaset_isoc_input,
43730+ .write_cmd = gigaset_write_cmd,
43731+ .write_room = gigaset_write_room,
43732+ .chars_in_buffer = gigaset_chars_in_buffer,
43733+ .brkchars = gigaset_brkchars,
43734+ .init_bchannel = gigaset_init_bchannel,
43735+ .close_bchannel = gigaset_close_bchannel,
43736+ .initbcshw = gigaset_initbcshw,
43737+ .freebcshw = gigaset_freebcshw,
43738+ .reinitbcshw = gigaset_reinitbcshw,
43739+ .initcshw = gigaset_initcshw,
43740+ .freecshw = gigaset_freecshw,
43741+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43742+ .baud_rate = gigaset_baud_rate,
43743+ .set_line_ctrl = gigaset_set_line_ctrl,
43744+ .send_skb = gigaset_isoc_send_skb,
43745+ .handle_input = gigaset_isoc_input,
43746 };
43747
43748 /* bas_gigaset_init
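
[All three gigaset ops tables in this patch are rewritten from positional to designated initializers. Beyond readability, positional initialization of a function-pointer table silently misassigns every later member if a field is ever added or reordered in the struct; the designated form pins each callback to its slot by name and lets the constify plugin reason about the structure. The difference in miniature:]

#include <stdio.h>

struct ops {
    int (*open)(void);
    int (*close)(void);
    int (*reset)(void); /* imagine this member was inserted later */
};

static int do_open(void)  { return 1; }
static int do_close(void) { return 2; }

/* Positional: had "reset" been inserted before "close" in the struct,
 * do_close would silently land in the wrong slot. */
static const struct ops positional = { do_open, do_close };

/* Designated: each callback is bound to its field by name. */
static const struct ops designated = {
    .open  = do_open,
    .close = do_close,
};

int main(void)
{
    printf("%d %d\n", positional.open(), designated.close());
    return 0;
}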
43749diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
43750index 600c79b..3752bab 100644
43751--- a/drivers/isdn/gigaset/interface.c
43752+++ b/drivers/isdn/gigaset/interface.c
43753@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
43754 }
43755 tty->driver_data = cs;
43756
43757- ++cs->port.count;
43758+ atomic_inc(&cs->port.count);
43759
43760- if (cs->port.count == 1) {
43761+ if (atomic_read(&cs->port.count) == 1) {
43762 tty_port_tty_set(&cs->port, tty);
43763 cs->port.low_latency = 1;
43764 }
43765@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
43766
43767 if (!cs->connected)
43768 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
43769- else if (!cs->port.count)
43770+ else if (!atomic_read(&cs->port.count))
43771 dev_warn(cs->dev, "%s: device not opened\n", __func__);
43772- else if (!--cs->port.count)
43773+ else if (!atomic_dec_return(&cs->port.count))
43774 tty_port_tty_set(&cs->port, NULL);
43775
43776 mutex_unlock(&cs->mutex);
43777diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
43778index 8c91fd5..14f13ce 100644
43779--- a/drivers/isdn/gigaset/ser-gigaset.c
43780+++ b/drivers/isdn/gigaset/ser-gigaset.c
43781@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
43782 }
43783
43784 static const struct gigaset_ops ops = {
43785- gigaset_write_cmd,
43786- gigaset_write_room,
43787- gigaset_chars_in_buffer,
43788- gigaset_brkchars,
43789- gigaset_init_bchannel,
43790- gigaset_close_bchannel,
43791- gigaset_initbcshw,
43792- gigaset_freebcshw,
43793- gigaset_reinitbcshw,
43794- gigaset_initcshw,
43795- gigaset_freecshw,
43796- gigaset_set_modem_ctrl,
43797- gigaset_baud_rate,
43798- gigaset_set_line_ctrl,
43799- gigaset_m10x_send_skb, /* asyncdata.c */
43800- gigaset_m10x_input, /* asyncdata.c */
43801+ .write_cmd = gigaset_write_cmd,
43802+ .write_room = gigaset_write_room,
43803+ .chars_in_buffer = gigaset_chars_in_buffer,
43804+ .brkchars = gigaset_brkchars,
43805+ .init_bchannel = gigaset_init_bchannel,
43806+ .close_bchannel = gigaset_close_bchannel,
43807+ .initbcshw = gigaset_initbcshw,
43808+ .freebcshw = gigaset_freebcshw,
43809+ .reinitbcshw = gigaset_reinitbcshw,
43810+ .initcshw = gigaset_initcshw,
43811+ .freecshw = gigaset_freecshw,
43812+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43813+ .baud_rate = gigaset_baud_rate,
43814+ .set_line_ctrl = gigaset_set_line_ctrl,
43815+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
43816+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
43817 };
43818
43819
43820diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
43821index 5f306e2..5342f88 100644
43822--- a/drivers/isdn/gigaset/usb-gigaset.c
43823+++ b/drivers/isdn/gigaset/usb-gigaset.c
43824@@ -543,7 +543,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
43825 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
43826 memcpy(cs->hw.usb->bchars, buf, 6);
43827 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
43828- 0, 0, &buf, 6, 2000);
43829+ 0, 0, buf, 6, 2000);
43830 }
43831
43832 static void gigaset_freebcshw(struct bc_state *bcs)
43833@@ -862,22 +862,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
43834 }
43835
43836 static const struct gigaset_ops ops = {
43837- gigaset_write_cmd,
43838- gigaset_write_room,
43839- gigaset_chars_in_buffer,
43840- gigaset_brkchars,
43841- gigaset_init_bchannel,
43842- gigaset_close_bchannel,
43843- gigaset_initbcshw,
43844- gigaset_freebcshw,
43845- gigaset_reinitbcshw,
43846- gigaset_initcshw,
43847- gigaset_freecshw,
43848- gigaset_set_modem_ctrl,
43849- gigaset_baud_rate,
43850- gigaset_set_line_ctrl,
43851- gigaset_m10x_send_skb,
43852- gigaset_m10x_input,
43853+ .write_cmd = gigaset_write_cmd,
43854+ .write_room = gigaset_write_room,
43855+ .chars_in_buffer = gigaset_chars_in_buffer,
43856+ .brkchars = gigaset_brkchars,
43857+ .init_bchannel = gigaset_init_bchannel,
43858+ .close_bchannel = gigaset_close_bchannel,
43859+ .initbcshw = gigaset_initbcshw,
43860+ .freebcshw = gigaset_freebcshw,
43861+ .reinitbcshw = gigaset_reinitbcshw,
43862+ .initcshw = gigaset_initcshw,
43863+ .freecshw = gigaset_freecshw,
43864+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43865+ .baud_rate = gigaset_baud_rate,
43866+ .set_line_ctrl = gigaset_set_line_ctrl,
43867+ .send_skb = gigaset_m10x_send_skb,
43868+ .handle_input = gigaset_m10x_input,
43869 };
43870
43871 /*
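
[The one-character usb-gigaset change above (&buf to buf) fixes a genuine bug rather than a hardening tweak: buf is declared as an array parameter, which in C is really a pointer, so &buf is the address of that pointer variable on the stack, and usb_control_msg() was being handed the wrong six bytes. The array-parameter trap, demonstrated:]

#include <stdio.h>

/* "const unsigned char buf[6]" in a parameter list decays to
 * "const unsigned char *buf": an ordinary pointer variable. */
static void show(const unsigned char buf[6])
{
    /* buf  == the caller's data.
     * &buf == the address of this local pointer, NOT the data. */
    printf("data ptr   %p\n", (const void *)buf);
    printf("ptr's addr %p\n", (const void *)&buf); /* different! */
}

int main(void)
{
    unsigned char bchars[6] = { 1, 2, 3, 4, 5, 6 };
    show(bchars);
    return 0;
}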
43872diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
43873index 4d9b195..455075c 100644
43874--- a/drivers/isdn/hardware/avm/b1.c
43875+++ b/drivers/isdn/hardware/avm/b1.c
43876@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
43877 }
43878 if (left) {
43879 if (t4file->user) {
43880- if (copy_from_user(buf, dp, left))
43881+ if (left > sizeof buf || copy_from_user(buf, dp, left))
43882 return -EFAULT;
43883 } else {
43884 memcpy(buf, dp, left);
43885@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
43886 }
43887 if (left) {
43888 if (config->user) {
43889- if (copy_from_user(buf, dp, left))
43890+ if (left > sizeof buf || copy_from_user(buf, dp, left))
43891 return -EFAULT;
43892 } else {
43893 memcpy(buf, dp, left);
43894diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
43895index 9b856e1..fa03c92 100644
43896--- a/drivers/isdn/i4l/isdn_common.c
43897+++ b/drivers/isdn/i4l/isdn_common.c
43898@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
43899 } else
43900 return -EINVAL;
43901 case IIOCDBGVAR:
43902+ if (!capable(CAP_SYS_RAWIO))
43903+ return -EPERM;
43904 if (arg) {
43905 if (copy_to_user(argp, &dev, sizeof(ulong)))
43906 return -EFAULT;
43907diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
43908index 91d5730..336523e 100644
43909--- a/drivers/isdn/i4l/isdn_concap.c
43910+++ b/drivers/isdn/i4l/isdn_concap.c
43911@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
43912 }
43913
43914 struct concap_device_ops isdn_concap_reliable_dl_dops = {
43915- &isdn_concap_dl_data_req,
43916- &isdn_concap_dl_connect_req,
43917- &isdn_concap_dl_disconn_req
43918+ .data_req = &isdn_concap_dl_data_req,
43919+ .connect_req = &isdn_concap_dl_connect_req,
43920+ .disconn_req = &isdn_concap_dl_disconn_req
43921 };
43922
43923 /* The following should better go into a dedicated source file such that
43924diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
43925index bc91261..2ef7e36 100644
43926--- a/drivers/isdn/i4l/isdn_tty.c
43927+++ b/drivers/isdn/i4l/isdn_tty.c
43928@@ -1503,9 +1503,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
43929
43930 #ifdef ISDN_DEBUG_MODEM_OPEN
43931 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
43932- port->count);
43933+ atomic_read(&port->count));
43934 #endif
43935- port->count++;
43936+ atomic_inc(&port->count);
43937 port->tty = tty;
43938 /*
43939 * Start up serial port
43940@@ -1549,7 +1549,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
43941 #endif
43942 return;
43943 }
43944- if ((tty->count == 1) && (port->count != 1)) {
43945+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
43946 /*
43947 * Uh, oh. tty->count is 1, which means that the tty
43948 * structure will be freed. Info->count should always
43949@@ -1558,15 +1558,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
43950 * serial port won't be shutdown.
43951 */
43952 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
43953- "info->count is %d\n", port->count);
43954- port->count = 1;
43955+ "info->count is %d\n", atomic_read(&port->count));
43956+ atomic_set(&port->count, 1);
43957 }
43958- if (--port->count < 0) {
43959+ if (atomic_dec_return(&port->count) < 0) {
43960 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
43961- info->line, port->count);
43962- port->count = 0;
43963+ info->line, atomic_read(&port->count));
43964+ atomic_set(&port->count, 0);
43965 }
43966- if (port->count) {
43967+ if (atomic_read(&port->count)) {
43968 #ifdef ISDN_DEBUG_MODEM_OPEN
43969 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
43970 #endif
43971@@ -1620,7 +1620,7 @@ isdn_tty_hangup(struct tty_struct *tty)
43972 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
43973 return;
43974 isdn_tty_shutdown(info);
43975- port->count = 0;
43976+ atomic_set(&port->count, 0);
43977 port->flags &= ~ASYNC_NORMAL_ACTIVE;
43978 port->tty = NULL;
43979 wake_up_interruptible(&port->open_wait);
43980@@ -1965,7 +1965,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
43981 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
43982 modem_info *info = &dev->mdm.info[i];
43983
43984- if (info->port.count == 0)
43985+ if (atomic_read(&info->port.count) == 0)
43986 continue;
43987 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
43988 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
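
[The isdn_tty hunks, like the gigaset interface.c hunks earlier, convert tty_port.count bookkeeping to atomic operations so that concurrent open/close/hangup paths cannot lose an update and leak or prematurely tear down the port. The open/close pattern in C11 atomics, including the atomic_dec_return-style "did I drop the last reference" test and the driver's bad-count recovery path:]

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int port_count;

static void port_open(void)
{
    if (atomic_fetch_add(&port_count, 1) + 1 == 1)
        puts("first open: start up the port");
}

static void port_close(void)
{
    int remaining = atomic_fetch_sub(&port_count, 1) - 1; /* dec_return */
    if (remaining < 0) {
        puts("bad count: resetting to 0"); /* the driver's recovery path */
        atomic_store(&port_count, 0);
    } else if (remaining == 0) {
        puts("last close: shut the port down");
    }
}

int main(void)
{
    port_open();
    port_open();
    port_close();
    port_close();
    return 0;
}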
43989diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
43990index e2d4e58..40cd045 100644
43991--- a/drivers/isdn/i4l/isdn_x25iface.c
43992+++ b/drivers/isdn/i4l/isdn_x25iface.c
43993@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
43994
43995
43996 static struct concap_proto_ops ix25_pops = {
43997- &isdn_x25iface_proto_new,
43998- &isdn_x25iface_proto_del,
43999- &isdn_x25iface_proto_restart,
44000- &isdn_x25iface_proto_close,
44001- &isdn_x25iface_xmit,
44002- &isdn_x25iface_receive,
44003- &isdn_x25iface_connect_ind,
44004- &isdn_x25iface_disconn_ind
44005+ .proto_new = &isdn_x25iface_proto_new,
44006+ .proto_del = &isdn_x25iface_proto_del,
44007+ .restart = &isdn_x25iface_proto_restart,
44008+ .close = &isdn_x25iface_proto_close,
44009+ .encap_and_xmit = &isdn_x25iface_xmit,
44010+ .data_ind = &isdn_x25iface_receive,
44011+ .connect_ind = &isdn_x25iface_connect_ind,
44012+ .disconn_ind = &isdn_x25iface_disconn_ind
44013 };
44014
44015 /* error message helper function */
44016diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
44017index 6a7447c..b4987ea 100644
44018--- a/drivers/isdn/icn/icn.c
44019+++ b/drivers/isdn/icn/icn.c
44020@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
44021 if (count > len)
44022 count = len;
44023 if (user) {
44024- if (copy_from_user(msg, buf, count))
44025+ if (count > sizeof msg || copy_from_user(msg, buf, count))
44026 return -EFAULT;
44027 } else
44028 memcpy(msg, buf, count);
44029@@ -1609,7 +1609,7 @@ icn_setup(char *line)
44030 if (ints[0] > 1)
44031 membase = (unsigned long)ints[2];
44032 if (str && *str) {
44033- strcpy(sid, str);
44034+ strlcpy(sid, str, sizeof(sid));
44035 icn_id = sid;
44036 if ((p = strchr(sid, ','))) {
44037 *p++ = 0;
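
[icn_setup() used to copy a module-parameter string into the fixed sid[] buffer with strcpy(); the hunk switches to strlcpy() so an oversized parameter is truncated instead of overflowing the array. strlcpy is not universally available in userspace, so the sketch below carries a minimal local definition with the same contract: bounded, always NUL-terminated, returning the untruncated source length so callers can detect truncation:]

#include <stdio.h>
#include <string.h>

/* Minimal strlcpy with the BSD/kernel contract, for portability. */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
    size_t len = strlen(src);
    if (size) {
        size_t n = len >= size ? size - 1 : len;
        memcpy(dst, src, n);
        dst[n] = '\0';
    }
    return len; /* >= size means the copy was truncated */
}

int main(void)
{
    char sid[8];
    size_t need = my_strlcpy(sid, "much-too-long-module-parameter", sizeof(sid));
    printf("sid=\"%s\"%s\n", sid, need >= sizeof(sid) ? " (truncated)" : "");
    return 0;
}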
44038diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
44039index 87f7dff..7300125 100644
44040--- a/drivers/isdn/mISDN/dsp_cmx.c
44041+++ b/drivers/isdn/mISDN/dsp_cmx.c
44042@@ -1625,7 +1625,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
44043 static u16 dsp_count; /* last sample count */
44044 static int dsp_count_valid; /* if we have last sample count */
44045
44046-void
44047+void __intentional_overflow(-1)
44048 dsp_cmx_send(void *arg)
44049 {
44050 struct dsp_conf *conf;
44051diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
44052index 0f9ed1e..2715d6f 100644
44053--- a/drivers/leds/leds-clevo-mail.c
44054+++ b/drivers/leds/leds-clevo-mail.c
44055@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
44056 * detected as working, but in reality it is not) as low as
44057 * possible.
44058 */
44059-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
44060+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
44061 {
44062 .callback = clevo_mail_led_dmi_callback,
44063 .ident = "Clevo D410J",
44064diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
44065index 046cb70..6b20d39 100644
44066--- a/drivers/leds/leds-ss4200.c
44067+++ b/drivers/leds/leds-ss4200.c
44068@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
44069 * detected as working, but in reality it is not) as low as
44070 * possible.
44071 */
44072-static struct dmi_system_id nas_led_whitelist[] __initdata = {
44073+static struct dmi_system_id nas_led_whitelist[] __initconst = {
44074 {
44075 .callback = ss4200_led_dmi_callback,
44076 .ident = "Intel SS4200-E",
44077diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
44078index 6590558..a74c5dd 100644
44079--- a/drivers/lguest/core.c
44080+++ b/drivers/lguest/core.c
44081@@ -96,9 +96,17 @@ static __init int map_switcher(void)
44082 * The end address needs +1 because __get_vm_area allocates an
44083 * extra guard page, so we need space for that.
44084 */
44085+
44086+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
44087+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
44088+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
44089+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
44090+#else
44091 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
44092 VM_ALLOC, switcher_addr, switcher_addr
44093 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
44094+#endif
44095+
44096 if (!switcher_vma) {
44097 err = -ENOMEM;
44098 printk("lguest: could not map switcher pages high\n");
44099@@ -121,7 +129,7 @@ static __init int map_switcher(void)
44100 * Now the Switcher is mapped at the right address, we can't fail!
44101 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
44102 */
44103- memcpy(switcher_vma->addr, start_switcher_text,
44104+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
44105 end_switcher_text - start_switcher_text);
44106
44107 printk(KERN_INFO "lguest: mapped switcher at %p\n",
44108diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
44109index e8b55c3..3514c37 100644
44110--- a/drivers/lguest/page_tables.c
44111+++ b/drivers/lguest/page_tables.c
44112@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
44113 /*:*/
44114
44115 #ifdef CONFIG_X86_PAE
44116-static void release_pmd(pmd_t *spmd)
44117+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
44118 {
44119 /* If the entry's not present, there's nothing to release. */
44120 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
44121diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
44122index 922a1ac..9dd0c2a 100644
44123--- a/drivers/lguest/x86/core.c
44124+++ b/drivers/lguest/x86/core.c
44125@@ -59,7 +59,7 @@ static struct {
44126 /* Offset from where switcher.S was compiled to where we've copied it */
44127 static unsigned long switcher_offset(void)
44128 {
44129- return switcher_addr - (unsigned long)start_switcher_text;
44130+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
44131 }
44132
44133 /* This cpu's struct lguest_pages (after the Switcher text page) */
44134@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
44135 * These copies are pretty cheap, so we do them unconditionally: */
44136 /* Save the current Host top-level page directory.
44137 */
44138+
44139+#ifdef CONFIG_PAX_PER_CPU_PGD
44140+ pages->state.host_cr3 = read_cr3();
44141+#else
44142 pages->state.host_cr3 = __pa(current->mm->pgd);
44143+#endif
44144+
44145 /*
44146 * Set up the Guest's page tables to see this CPU's pages (and no
44147 * other CPU's pages).
44148@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
44149 * compiled-in switcher code and the high-mapped copy we just made.
44150 */
44151 for (i = 0; i < IDT_ENTRIES; i++)
44152- default_idt_entries[i] += switcher_offset();
44153+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
44154
44155 /*
44156 * Set up the Switcher's per-cpu areas.
44157@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
44158 * it will be undisturbed when we switch. To change %cs and jump we
44159 * need this structure to feed to Intel's "lcall" instruction.
44160 */
44161- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
44162+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
44163 lguest_entry.segment = LGUEST_CS;
44164
44165 /*
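
The lguest hunks above wrap text-symbol addresses in ktla_ktva() because, under KERNEXEC on i386, kernel code executes through a code segment with a non-zero base: the address a text symbol evaluates to and the virtual address at which its bytes can be read differ by a fixed displacement. A userspace model of such a conversion (the delta value, and even its sign, depend on the real segment setup and are assumptions here):

#include <stdint.h>
#include <stdio.h>

/* ktla_ktva: "kernel text linear address" to "kernel text virtual
 * address" -- just a constant shift, modelled with a made-up delta. */
#define KTEXT_DELTA 0x10000000UL
#define ktla_ktva(addr) ((uintptr_t)(addr) + KTEXT_DELTA)
#define ktva_ktla(addr) ((uintptr_t)(addr) - KTEXT_DELTA)

int main(void)
{
    uintptr_t linear = 0xc0400000UL;   /* illustrative address */
    uintptr_t virt = ktla_ktva(linear);

    printf("%#lx -> %#lx\n", (unsigned long)linear, (unsigned long)virt);
    return (ktva_ktla(virt) == linear) ? 0 : 1;
}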
44166diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
44167index 40634b0..4f5855e 100644
44168--- a/drivers/lguest/x86/switcher_32.S
44169+++ b/drivers/lguest/x86/switcher_32.S
44170@@ -87,6 +87,7 @@
44171 #include <asm/page.h>
44172 #include <asm/segment.h>
44173 #include <asm/lguest.h>
44174+#include <asm/processor-flags.h>
44175
44176 // We mark the start of the code to copy
44177 // It's placed in .text tho it's never run here
44178@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
44179 // Changes type when we load it: damn Intel!
44180 // For after we switch over our page tables
44181 // That entry will be read-only: we'd crash.
44182+
44183+#ifdef CONFIG_PAX_KERNEXEC
44184+ mov %cr0, %edx
44185+ xor $X86_CR0_WP, %edx
44186+ mov %edx, %cr0
44187+#endif
44188+
44189 movl $(GDT_ENTRY_TSS*8), %edx
44190 ltr %dx
44191
44192@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
44193 // Let's clear it again for our return.
44194 // The GDT descriptor of the Host
44195 // Points to the table after two "size" bytes
44196- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
44197+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
44198 // Clear "used" from type field (byte 5, bit 2)
44199- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
44200+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
44201+
44202+#ifdef CONFIG_PAX_KERNEXEC
44203+ mov %cr0, %eax
44204+ xor $X86_CR0_WP, %eax
44205+ mov %eax, %cr0
44206+#endif
44207
44208 // Once our page table's switched, the Guest is live!
44209 // The Host fades as we run this final step.
44210@@ -295,13 +309,12 @@ deliver_to_host:
44211 // I consulted gcc, and it gave
44212 // These instructions, which I gladly credit:
44213 leal (%edx,%ebx,8), %eax
44214- movzwl (%eax),%edx
44215- movl 4(%eax), %eax
44216- xorw %ax, %ax
44217- orl %eax, %edx
44218+ movl 4(%eax), %edx
44219+ movw (%eax), %dx
44220 // Now the address of the handler's in %edx
44221 // We call it now: its "iret" drops us home.
44222- jmp *%edx
44223+ ljmp $__KERNEL_CS, $1f
44224+1: jmp *%edx
44225
44226 // Every interrupt can come to us here
44227 // But we must truly tell each apart.
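
In the deliver_to_host change above, the handler address is rebuilt from an i386 interrupt gate with one 32-bit load plus a 16-bit load into the same register, replacing the movzwl/xorw/orl sequence. The equivalent in C, given the gate layout (field names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* i386 interrupt gate: the 32-bit handler offset is split across the
 * first and last 16-bit words of the 8-byte descriptor. */
struct idt_gate {
    uint16_t offset_lo;
    uint16_t selector;
    uint8_t  zero;
    uint8_t  type_attr;
    uint16_t offset_hi;
} __attribute__((packed));

static uint32_t gate_offset(const struct idt_gate *g)
{
    /* movl 4(%eax),%edx loads the dword whose upper half is offset_hi;
     * movw (%eax),%dx then overwrites the junk low 16 bits with offset_lo. */
    return ((uint32_t)g->offset_hi << 16) | g->offset_lo;
}

int main(void)
{
    struct idt_gate g = { .offset_lo = 0x5678, .offset_hi = 0x1234 };
    printf("%#x\n", (unsigned)gate_offset(&g)); /* 0x12345678 */
    return 0;
}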
44228diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
44229index a08e3ee..df8ade2 100644
44230--- a/drivers/md/bcache/closure.h
44231+++ b/drivers/md/bcache/closure.h
44232@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
44233 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
44234 struct workqueue_struct *wq)
44235 {
44236- BUG_ON(object_is_on_stack(cl));
44237+ BUG_ON(object_starts_on_stack(cl));
44238 closure_set_ip(cl);
44239 cl->fn = fn;
44240 cl->wq = wq;
44241diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
44242index 1695ee5..89f18ab 100644
44243--- a/drivers/md/bitmap.c
44244+++ b/drivers/md/bitmap.c
44245@@ -1784,7 +1784,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
44246 chunk_kb ? "KB" : "B");
44247 if (bitmap->storage.file) {
44248 seq_printf(seq, ", file: ");
44249- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
44250+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
44251 }
44252
44253 seq_printf(seq, "\n");
44254diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
44255index 73f791b..8c5d3ac 100644
44256--- a/drivers/md/dm-ioctl.c
44257+++ b/drivers/md/dm-ioctl.c
44258@@ -1772,7 +1772,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
44259 cmd == DM_LIST_VERSIONS_CMD)
44260 return 0;
44261
44262- if ((cmd == DM_DEV_CREATE_CMD)) {
44263+ if (cmd == DM_DEV_CREATE_CMD) {
44264 if (!*param->name) {
44265 DMWARN("name not supplied when creating device");
44266 return -EINVAL;
44267diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
44268index 089d627..ef7352e 100644
44269--- a/drivers/md/dm-raid1.c
44270+++ b/drivers/md/dm-raid1.c
44271@@ -40,7 +40,7 @@ enum dm_raid1_error {
44272
44273 struct mirror {
44274 struct mirror_set *ms;
44275- atomic_t error_count;
44276+ atomic_unchecked_t error_count;
44277 unsigned long error_type;
44278 struct dm_dev *dev;
44279 sector_t offset;
44280@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
44281 struct mirror *m;
44282
44283 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
44284- if (!atomic_read(&m->error_count))
44285+ if (!atomic_read_unchecked(&m->error_count))
44286 return m;
44287
44288 return NULL;
44289@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
44290 * simple way to tell if a device has encountered
44291 * errors.
44292 */
44293- atomic_inc(&m->error_count);
44294+ atomic_inc_unchecked(&m->error_count);
44295
44296 if (test_and_set_bit(error_type, &m->error_type))
44297 return;
44298@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
44299 struct mirror *m = get_default_mirror(ms);
44300
44301 do {
44302- if (likely(!atomic_read(&m->error_count)))
44303+ if (likely(!atomic_read_unchecked(&m->error_count)))
44304 return m;
44305
44306 if (m-- == ms->mirror)
44307@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
44308 {
44309 struct mirror *default_mirror = get_default_mirror(m->ms);
44310
44311- return !atomic_read(&default_mirror->error_count);
44312+ return !atomic_read_unchecked(&default_mirror->error_count);
44313 }
44314
44315 static int mirror_available(struct mirror_set *ms, struct bio *bio)
44316@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
44317 */
44318 if (likely(region_in_sync(ms, region, 1)))
44319 m = choose_mirror(ms, bio->bi_iter.bi_sector);
44320- else if (m && atomic_read(&m->error_count))
44321+ else if (m && atomic_read_unchecked(&m->error_count))
44322 m = NULL;
44323
44324 if (likely(m))
44325@@ -936,7 +936,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
44326 }
44327
44328 ms->mirror[mirror].ms = ms;
44329- atomic_set(&(ms->mirror[mirror].error_count), 0);
44330+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
44331 ms->mirror[mirror].error_type = 0;
44332 ms->mirror[mirror].offset = offset;
44333
44334@@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
44335 */
44336 static char device_status_char(struct mirror *m)
44337 {
44338- if (!atomic_read(&(m->error_count)))
44339+ if (!atomic_read_unchecked(&(m->error_count)))
44340 return 'A';
44341
44342 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
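
The dm-raid1.c conversions above follow a pattern that recurs throughout this patch: under PaX's REFCOUNT hardening, plain atomic_t arithmetic traps on signed overflow to defeat refcount-overflow exploits, so counters that are mere statistics and may legitimately wrap, such as error_count, are moved to atomic_unchecked_t. A userspace model of the two behaviours (the real kernel versions are arch-specific asm; this is only a sketch):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

typedef atomic_int atomic_checked_t;    /* models atomic_t under REFCOUNT */
typedef atomic_int atomic_unchecked_t;  /* models atomic_unchecked_t */

/* Checked increment: detect the wraparound and refuse it, instead of
 * letting a refcount roll over to a small value. */
static int atomic_inc_checked(atomic_checked_t *v)
{
    int old = atomic_load(v);

    do {
        if (old == INT_MAX)
            return -1;          /* the kernel would trap/saturate here */
    } while (!atomic_compare_exchange_weak(v, &old, old + 1));
    return 0;
}

/* Unchecked increment: free to wrap, fine for statistics. */
static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
    atomic_fetch_add(v, 1);
}

int main(void)
{
    atomic_checked_t ref = INT_MAX;
    atomic_unchecked_t stat = INT_MAX;

    printf("checked: %d\n", atomic_inc_checked(&ref));  /* -1: refused */
    atomic_inc_unchecked(&stat);                        /* wraps silently */
    printf("unchecked: %d\n", atomic_load(&stat));
    return 0;
}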
44343diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
44344index f478a4c..4b8e5ef 100644
44345--- a/drivers/md/dm-stats.c
44346+++ b/drivers/md/dm-stats.c
44347@@ -382,7 +382,7 @@ do_sync_free:
44348 synchronize_rcu_expedited();
44349 dm_stat_free(&s->rcu_head);
44350 } else {
44351- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
44352+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
44353 call_rcu(&s->rcu_head, dm_stat_free);
44354 }
44355 return 0;
44356@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
44357 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
44358 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
44359 ));
44360- ACCESS_ONCE(last->last_sector) = end_sector;
44361- ACCESS_ONCE(last->last_rw) = bi_rw;
44362+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
44363+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
44364 }
44365
44366 rcu_read_lock();
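
ACCESS_ONCE_RW exists because this patch hardens ACCESS_ONCE with a const-qualified cast, which is fine for loads but rejects stores such as the dm-stats assignments above. A sketch of the macro pair (the exact header text may differ):

#include <stdio.h>

/* Read-only flavour: const volatile cast, good for loads only. */
#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
/* Writable flavour: volatile cast without const, required for stores. */
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

static int flag;

int main(void)
{
    ACCESS_ONCE_RW(flag) = 1;          /* store: must use the RW form */
    printf("%d\n", ACCESS_ONCE(flag)); /* load: const form is fine */
    return 0;
}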
44367diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
44368index f8b37d4..5c5cafd 100644
44369--- a/drivers/md/dm-stripe.c
44370+++ b/drivers/md/dm-stripe.c
44371@@ -21,7 +21,7 @@ struct stripe {
44372 struct dm_dev *dev;
44373 sector_t physical_start;
44374
44375- atomic_t error_count;
44376+ atomic_unchecked_t error_count;
44377 };
44378
44379 struct stripe_c {
44380@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
44381 kfree(sc);
44382 return r;
44383 }
44384- atomic_set(&(sc->stripe[i].error_count), 0);
44385+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
44386 }
44387
44388 ti->private = sc;
44389@@ -332,7 +332,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
44390 DMEMIT("%d ", sc->stripes);
44391 for (i = 0; i < sc->stripes; i++) {
44392 DMEMIT("%s ", sc->stripe[i].dev->name);
44393- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
44394+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
44395 'D' : 'A';
44396 }
44397 buffer[i] = '\0';
44398@@ -377,8 +377,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
44399 */
44400 for (i = 0; i < sc->stripes; i++)
44401 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
44402- atomic_inc(&(sc->stripe[i].error_count));
44403- if (atomic_read(&(sc->stripe[i].error_count)) <
44404+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
44405+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
44406 DM_IO_ERROR_THRESHOLD)
44407 schedule_work(&sc->trigger_event);
44408 }
44409diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
44410index 3afae9e..4e1c954 100644
44411--- a/drivers/md/dm-table.c
44412+++ b/drivers/md/dm-table.c
44413@@ -303,7 +303,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
44414 if (!dev_size)
44415 return 0;
44416
44417- if ((start >= dev_size) || (start + len > dev_size)) {
44418+ if ((start >= dev_size) || (len > dev_size - start)) {
44419 DMWARN("%s: %s too small for target: "
44420 "start=%llu, len=%llu, dev_size=%llu",
44421 dm_device_name(ti->table->md), bdevname(bdev, b),
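
The dm-table.c change is an overflow fix, not a style fix: with unsigned sector arithmetic, start + len can wrap past zero and slip under dev_size, while len > dev_size - start cannot wrap once start < dev_size has been established. A compact demonstration:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Old form: start + len may wrap, falsely accepting a huge len. */
static int invalid_old(sector_t start, sector_t len, sector_t dev_size)
{
    return (start >= dev_size) || (start + len > dev_size);
}

/* New form: once start < dev_size is known, dev_size - start cannot
 * underflow, so the comparison is wrap-free. */
static int invalid_new(sector_t start, sector_t len, sector_t dev_size)
{
    return (start >= dev_size) || (len > dev_size - start);
}

int main(void)
{
    sector_t dev = 1000, start = 500, len = UINT64_MAX - 100;

    printf("old: %d (wrapped, bogus accept)\n", invalid_old(start, len, dev)); /* 0 */
    printf("new: %d (correct reject)\n", invalid_new(start, len, dev));        /* 1 */
    return 0;
}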
44422diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
44423index 43adbb8..7b34305 100644
44424--- a/drivers/md/dm-thin-metadata.c
44425+++ b/drivers/md/dm-thin-metadata.c
44426@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44427 {
44428 pmd->info.tm = pmd->tm;
44429 pmd->info.levels = 2;
44430- pmd->info.value_type.context = pmd->data_sm;
44431+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44432 pmd->info.value_type.size = sizeof(__le64);
44433 pmd->info.value_type.inc = data_block_inc;
44434 pmd->info.value_type.dec = data_block_dec;
44435@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44436
44437 pmd->bl_info.tm = pmd->tm;
44438 pmd->bl_info.levels = 1;
44439- pmd->bl_info.value_type.context = pmd->data_sm;
44440+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44441 pmd->bl_info.value_type.size = sizeof(__le64);
44442 pmd->bl_info.value_type.inc = data_block_inc;
44443 pmd->bl_info.value_type.dec = data_block_dec;
44444diff --git a/drivers/md/dm.c b/drivers/md/dm.c
44445index b71c600..d0b85b3 100644
44446--- a/drivers/md/dm.c
44447+++ b/drivers/md/dm.c
44448@@ -185,9 +185,9 @@ struct mapped_device {
44449 /*
44450 * Event handling.
44451 */
44452- atomic_t event_nr;
44453+ atomic_unchecked_t event_nr;
44454 wait_queue_head_t eventq;
44455- atomic_t uevent_seq;
44456+ atomic_unchecked_t uevent_seq;
44457 struct list_head uevent_list;
44458 spinlock_t uevent_lock; /* Protect access to uevent_list */
44459
44460@@ -2070,8 +2070,8 @@ static struct mapped_device *alloc_dev(int minor)
44461 spin_lock_init(&md->deferred_lock);
44462 atomic_set(&md->holders, 1);
44463 atomic_set(&md->open_count, 0);
44464- atomic_set(&md->event_nr, 0);
44465- atomic_set(&md->uevent_seq, 0);
44466+ atomic_set_unchecked(&md->event_nr, 0);
44467+ atomic_set_unchecked(&md->uevent_seq, 0);
44468 INIT_LIST_HEAD(&md->uevent_list);
44469 INIT_LIST_HEAD(&md->table_devices);
44470 spin_lock_init(&md->uevent_lock);
44471@@ -2227,7 +2227,7 @@ static void event_callback(void *context)
44472
44473 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
44474
44475- atomic_inc(&md->event_nr);
44476+ atomic_inc_unchecked(&md->event_nr);
44477 wake_up(&md->eventq);
44478 }
44479
44480@@ -3055,18 +3055,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
44481
44482 uint32_t dm_next_uevent_seq(struct mapped_device *md)
44483 {
44484- return atomic_add_return(1, &md->uevent_seq);
44485+ return atomic_add_return_unchecked(1, &md->uevent_seq);
44486 }
44487
44488 uint32_t dm_get_event_nr(struct mapped_device *md)
44489 {
44490- return atomic_read(&md->event_nr);
44491+ return atomic_read_unchecked(&md->event_nr);
44492 }
44493
44494 int dm_wait_event(struct mapped_device *md, int event_nr)
44495 {
44496 return wait_event_interruptible(md->eventq,
44497- (event_nr != atomic_read(&md->event_nr)));
44498+ (event_nr != atomic_read_unchecked(&md->event_nr)));
44499 }
44500
44501 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
44502diff --git a/drivers/md/md.c b/drivers/md/md.c
44503index 709755f..5bc3fa4 100644
44504--- a/drivers/md/md.c
44505+++ b/drivers/md/md.c
44506@@ -190,10 +190,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
44507 * start build, activate spare
44508 */
44509 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
44510-static atomic_t md_event_count;
44511+static atomic_unchecked_t md_event_count;
44512 void md_new_event(struct mddev *mddev)
44513 {
44514- atomic_inc(&md_event_count);
44515+ atomic_inc_unchecked(&md_event_count);
44516 wake_up(&md_event_waiters);
44517 }
44518 EXPORT_SYMBOL_GPL(md_new_event);
44519@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
44520 */
44521 static void md_new_event_inintr(struct mddev *mddev)
44522 {
44523- atomic_inc(&md_event_count);
44524+ atomic_inc_unchecked(&md_event_count);
44525 wake_up(&md_event_waiters);
44526 }
44527
44528@@ -1422,7 +1422,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
44529 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
44530 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
44531 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
44532- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
44533+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
44534
44535 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
44536 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
44537@@ -1673,7 +1673,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
44538 else
44539 sb->resync_offset = cpu_to_le64(0);
44540
44541- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
44542+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
44543
44544 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
44545 sb->size = cpu_to_le64(mddev->dev_sectors);
44546@@ -2543,7 +2543,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
44547 static ssize_t
44548 errors_show(struct md_rdev *rdev, char *page)
44549 {
44550- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
44551+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
44552 }
44553
44554 static ssize_t
44555@@ -2552,7 +2552,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
44556 char *e;
44557 unsigned long n = simple_strtoul(buf, &e, 10);
44558 if (*buf && (*e == 0 || *e == '\n')) {
44559- atomic_set(&rdev->corrected_errors, n);
44560+ atomic_set_unchecked(&rdev->corrected_errors, n);
44561 return len;
44562 }
44563 return -EINVAL;
44564@@ -2997,8 +2997,8 @@ int md_rdev_init(struct md_rdev *rdev)
44565 rdev->sb_loaded = 0;
44566 rdev->bb_page = NULL;
44567 atomic_set(&rdev->nr_pending, 0);
44568- atomic_set(&rdev->read_errors, 0);
44569- atomic_set(&rdev->corrected_errors, 0);
44570+ atomic_set_unchecked(&rdev->read_errors, 0);
44571+ atomic_set_unchecked(&rdev->corrected_errors, 0);
44572
44573 INIT_LIST_HEAD(&rdev->same_set);
44574 init_waitqueue_head(&rdev->blocked_wait);
44575@@ -6865,7 +6865,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
44576
44577 spin_unlock(&pers_lock);
44578 seq_printf(seq, "\n");
44579- seq->poll_event = atomic_read(&md_event_count);
44580+ seq->poll_event = atomic_read_unchecked(&md_event_count);
44581 return 0;
44582 }
44583 if (v == (void*)2) {
44584@@ -6968,7 +6968,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
44585 return error;
44586
44587 seq = file->private_data;
44588- seq->poll_event = atomic_read(&md_event_count);
44589+ seq->poll_event = atomic_read_unchecked(&md_event_count);
44590 return error;
44591 }
44592
44593@@ -6985,7 +6985,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
44594 /* always allow read */
44595 mask = POLLIN | POLLRDNORM;
44596
44597- if (seq->poll_event != atomic_read(&md_event_count))
44598+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
44599 mask |= POLLERR | POLLPRI;
44600 return mask;
44601 }
44602@@ -7032,7 +7032,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
44603 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
44604 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
44605 (int)part_stat_read(&disk->part0, sectors[1]) -
44606- atomic_read(&disk->sync_io);
44607+ atomic_read_unchecked(&disk->sync_io);
44608 /* sync IO will cause sync_io to increase before the disk_stats
44609 * as sync_io is counted when a request starts, and
44610 * disk_stats is counted when it completes.
44611diff --git a/drivers/md/md.h b/drivers/md/md.h
44612index 03cec5b..0a658c1 100644
44613--- a/drivers/md/md.h
44614+++ b/drivers/md/md.h
44615@@ -94,13 +94,13 @@ struct md_rdev {
44616 * only maintained for arrays that
44617 * support hot removal
44618 */
44619- atomic_t read_errors; /* number of consecutive read errors that
44620+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
44621 * we have tried to ignore.
44622 */
44623 struct timespec last_read_error; /* monotonic time since our
44624 * last read error
44625 */
44626- atomic_t corrected_errors; /* number of corrected read errors,
44627+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
44628 * for reporting to userspace and storing
44629 * in superblock.
44630 */
44631@@ -448,7 +448,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
44632
44633 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
44634 {
44635- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44636+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44637 }
44638
44639 struct md_personality
44640diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
44641index e8a9042..35bd145 100644
44642--- a/drivers/md/persistent-data/dm-space-map-metadata.c
44643+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
44644@@ -683,7 +683,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
44645 * Flick into a mode where all blocks get allocated in the new area.
44646 */
44647 smm->begin = old_len;
44648- memcpy(sm, &bootstrap_ops, sizeof(*sm));
44649+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
44650
44651 /*
44652 * Extend.
44653@@ -714,7 +714,7 @@ out:
44654 /*
44655 * Switch back to normal behaviour.
44656 */
44657- memcpy(sm, &ops, sizeof(*sm));
44658+ memcpy((void *)sm, &ops, sizeof(*sm));
44659 return r;
44660 }
44661
44662diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
44663index 3e6d115..ffecdeb 100644
44664--- a/drivers/md/persistent-data/dm-space-map.h
44665+++ b/drivers/md/persistent-data/dm-space-map.h
44666@@ -71,6 +71,7 @@ struct dm_space_map {
44667 dm_sm_threshold_fn fn,
44668 void *context);
44669 };
44670+typedef struct dm_space_map __no_const dm_space_map_no_const;
44671
44672 /*----------------------------------------------------------------*/
44673
44674diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
44675index 2f2f38f..f6a8ebe 100644
44676--- a/drivers/md/raid1.c
44677+++ b/drivers/md/raid1.c
44678@@ -1932,7 +1932,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
44679 if (r1_sync_page_io(rdev, sect, s,
44680 bio->bi_io_vec[idx].bv_page,
44681 READ) != 0)
44682- atomic_add(s, &rdev->corrected_errors);
44683+ atomic_add_unchecked(s, &rdev->corrected_errors);
44684 }
44685 sectors -= s;
44686 sect += s;
44687@@ -2165,7 +2165,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
44688 !test_bit(Faulty, &rdev->flags)) {
44689 if (r1_sync_page_io(rdev, sect, s,
44690 conf->tmppage, READ)) {
44691- atomic_add(s, &rdev->corrected_errors);
44692+ atomic_add_unchecked(s, &rdev->corrected_errors);
44693 printk(KERN_INFO
44694 "md/raid1:%s: read error corrected "
44695 "(%d sectors at %llu on %s)\n",
44696diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
44697index 32e282f..5cec803 100644
44698--- a/drivers/md/raid10.c
44699+++ b/drivers/md/raid10.c
44700@@ -1944,7 +1944,7 @@ static void end_sync_read(struct bio *bio, int error)
44701 /* The write handler will notice the lack of
44702 * R10BIO_Uptodate and record any errors etc
44703 */
44704- atomic_add(r10_bio->sectors,
44705+ atomic_add_unchecked(r10_bio->sectors,
44706 &conf->mirrors[d].rdev->corrected_errors);
44707
44708 /* for reconstruct, we always reschedule after a read.
44709@@ -2301,7 +2301,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44710 {
44711 struct timespec cur_time_mon;
44712 unsigned long hours_since_last;
44713- unsigned int read_errors = atomic_read(&rdev->read_errors);
44714+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
44715
44716 ktime_get_ts(&cur_time_mon);
44717
44718@@ -2323,9 +2323,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44719 * overflowing the shift of read_errors by hours_since_last.
44720 */
44721 if (hours_since_last >= 8 * sizeof(read_errors))
44722- atomic_set(&rdev->read_errors, 0);
44723+ atomic_set_unchecked(&rdev->read_errors, 0);
44724 else
44725- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
44726+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
44727 }
44728
44729 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
44730@@ -2379,8 +2379,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44731 return;
44732
44733 check_decay_read_errors(mddev, rdev);
44734- atomic_inc(&rdev->read_errors);
44735- if (atomic_read(&rdev->read_errors) > max_read_errors) {
44736+ atomic_inc_unchecked(&rdev->read_errors);
44737+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
44738 char b[BDEVNAME_SIZE];
44739 bdevname(rdev->bdev, b);
44740
44741@@ -2388,7 +2388,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44742 "md/raid10:%s: %s: Raid device exceeded "
44743 "read_error threshold [cur %d:max %d]\n",
44744 mdname(mddev), b,
44745- atomic_read(&rdev->read_errors), max_read_errors);
44746+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
44747 printk(KERN_NOTICE
44748 "md/raid10:%s: %s: Failing raid device\n",
44749 mdname(mddev), b);
44750@@ -2543,7 +2543,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44751 sect +
44752 choose_data_offset(r10_bio, rdev)),
44753 bdevname(rdev->bdev, b));
44754- atomic_add(s, &rdev->corrected_errors);
44755+ atomic_add_unchecked(s, &rdev->corrected_errors);
44756 }
44757
44758 rdev_dec_pending(rdev, mddev);
44759diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
44760index 8577cc7..5779d5b 100644
44761--- a/drivers/md/raid5.c
44762+++ b/drivers/md/raid5.c
44763@@ -950,23 +950,23 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
44764 struct bio_vec bvl;
44765 struct bvec_iter iter;
44766 struct page *bio_page;
44767- int page_offset;
44768+ s64 page_offset;
44769 struct async_submit_ctl submit;
44770 enum async_tx_flags flags = 0;
44771
44772 if (bio->bi_iter.bi_sector >= sector)
44773- page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
44774+ page_offset = (s64)(bio->bi_iter.bi_sector - sector) * 512;
44775 else
44776- page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
44777+ page_offset = (s64)(sector - bio->bi_iter.bi_sector) * -512;
44778
44779 if (frombio)
44780 flags |= ASYNC_TX_FENCE;
44781 init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
44782
44783 bio_for_each_segment(bvl, bio, iter) {
44784- int len = bvl.bv_len;
44785- int clen;
44786- int b_offset = 0;
44787+ s64 len = bvl.bv_len;
44788+ s64 clen;
44789+ s64 b_offset = 0;
44790
44791 if (page_offset < 0) {
44792 b_offset = -page_offset;
44793@@ -1730,6 +1730,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
44794 return 1;
44795 }
44796
44797+#ifdef CONFIG_GRKERNSEC_HIDESYM
44798+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
44799+#endif
44800+
44801 static int grow_stripes(struct r5conf *conf, int num)
44802 {
44803 struct kmem_cache *sc;
44804@@ -1741,7 +1745,11 @@ static int grow_stripes(struct r5conf *conf, int num)
44805 "raid%d-%s", conf->level, mdname(conf->mddev));
44806 else
44807 sprintf(conf->cache_name[0],
44808+#ifdef CONFIG_GRKERNSEC_HIDESYM
44809+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
44810+#else
44811 "raid%d-%p", conf->level, conf->mddev);
44812+#endif
44813 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
44814
44815 conf->active_name = 0;
44816@@ -2017,21 +2025,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
44817 mdname(conf->mddev), STRIPE_SECTORS,
44818 (unsigned long long)s,
44819 bdevname(rdev->bdev, b));
44820- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
44821+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
44822 clear_bit(R5_ReadError, &sh->dev[i].flags);
44823 clear_bit(R5_ReWrite, &sh->dev[i].flags);
44824 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
44825 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
44826
44827- if (atomic_read(&rdev->read_errors))
44828- atomic_set(&rdev->read_errors, 0);
44829+ if (atomic_read_unchecked(&rdev->read_errors))
44830+ atomic_set_unchecked(&rdev->read_errors, 0);
44831 } else {
44832 const char *bdn = bdevname(rdev->bdev, b);
44833 int retry = 0;
44834 int set_bad = 0;
44835
44836 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
44837- atomic_inc(&rdev->read_errors);
44838+ atomic_inc_unchecked(&rdev->read_errors);
44839 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
44840 printk_ratelimited(
44841 KERN_WARNING
44842@@ -2059,7 +2067,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
44843 mdname(conf->mddev),
44844 (unsigned long long)s,
44845 bdn);
44846- } else if (atomic_read(&rdev->read_errors)
44847+ } else if (atomic_read_unchecked(&rdev->read_errors)
44848 > conf->max_nr_stripes)
44849 printk(KERN_WARNING
44850 "md/raid:%s: Too many read errors, failing device %s.\n",
44851diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
44852index 983db75..ef9248c 100644
44853--- a/drivers/media/dvb-core/dvbdev.c
44854+++ b/drivers/media/dvb-core/dvbdev.c
44855@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
44856 const struct dvb_device *template, void *priv, int type)
44857 {
44858 struct dvb_device *dvbdev;
44859- struct file_operations *dvbdevfops;
44860+ file_operations_no_const *dvbdevfops;
44861 struct device *clsdev;
44862 int minor;
44863 int id;
44864diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
44865index 6ad22b6..6e90e2a 100644
44866--- a/drivers/media/dvb-frontends/af9033.h
44867+++ b/drivers/media/dvb-frontends/af9033.h
44868@@ -96,6 +96,6 @@ struct af9033_ops {
44869 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
44870 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
44871 int onoff);
44872-};
44873+} __no_const;
44874
44875 #endif /* AF9033_H */
44876diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
44877index 9b6c3bb..baeb5c7 100644
44878--- a/drivers/media/dvb-frontends/dib3000.h
44879+++ b/drivers/media/dvb-frontends/dib3000.h
44880@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
44881 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
44882 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
44883 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
44884-};
44885+} __no_const;
44886
44887 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
44888 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
44889diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
44890index 1fea0e9..321ce8f 100644
44891--- a/drivers/media/dvb-frontends/dib7000p.h
44892+++ b/drivers/media/dvb-frontends/dib7000p.h
44893@@ -64,7 +64,7 @@ struct dib7000p_ops {
44894 int (*get_adc_power)(struct dvb_frontend *fe);
44895 int (*slave_reset)(struct dvb_frontend *fe);
44896 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
44897-};
44898+} __no_const;
44899
44900 #if IS_ENABLED(CONFIG_DVB_DIB7000P)
44901 void *dib7000p_attach(struct dib7000p_ops *ops);
44902diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
44903index 84cc103..5780c54 100644
44904--- a/drivers/media/dvb-frontends/dib8000.h
44905+++ b/drivers/media/dvb-frontends/dib8000.h
44906@@ -61,7 +61,7 @@ struct dib8000_ops {
44907 int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff);
44908 int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
44909 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
44910-};
44911+} __no_const;
44912
44913 #if IS_ENABLED(CONFIG_DVB_DIB8000)
44914 void *dib8000_attach(struct dib8000_ops *ops);
44915diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
44916index 860c98fc..497fa25 100644
44917--- a/drivers/media/pci/cx88/cx88-video.c
44918+++ b/drivers/media/pci/cx88/cx88-video.c
44919@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
44920
44921 /* ------------------------------------------------------------------ */
44922
44923-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44924-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44925-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44926+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44927+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44928+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44929
44930 module_param_array(video_nr, int, NULL, 0444);
44931 module_param_array(vbi_nr, int, NULL, 0444);
44932diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
44933index 802642d..5534900 100644
44934--- a/drivers/media/pci/ivtv/ivtv-driver.c
44935+++ b/drivers/media/pci/ivtv/ivtv-driver.c
44936@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
44937 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
44938
44939 /* ivtv instance counter */
44940-static atomic_t ivtv_instance = ATOMIC_INIT(0);
44941+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
44942
44943 /* Parameter declarations */
44944 static int cardtype[IVTV_MAX_CARDS];
44945diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
44946index 8cbe6b4..ea3601c 100644
44947--- a/drivers/media/pci/solo6x10/solo6x10-core.c
44948+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
44949@@ -424,7 +424,7 @@ static void solo_device_release(struct device *dev)
44950
44951 static int solo_sysfs_init(struct solo_dev *solo_dev)
44952 {
44953- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
44954+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
44955 struct device *dev = &solo_dev->dev;
44956 const char *driver;
44957 int i;
44958diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
44959index c7141f2..5301fec 100644
44960--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
44961+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
44962@@ -351,7 +351,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
44963
44964 int solo_g723_init(struct solo_dev *solo_dev)
44965 {
44966- static struct snd_device_ops ops = { NULL };
44967+ static struct snd_device_ops ops = { };
44968 struct snd_card *card;
44969 struct snd_kcontrol_new kctl;
44970 char name[32];
44971diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
44972index 8c84846..27b4f83 100644
44973--- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
44974+++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
44975@@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
44976
44977 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
44978 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
44979- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
44980+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
44981 if (p2m_id < 0)
44982 p2m_id = -p2m_id;
44983 }
44984diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
44985index bd8edfa..e82ed85 100644
44986--- a/drivers/media/pci/solo6x10/solo6x10.h
44987+++ b/drivers/media/pci/solo6x10/solo6x10.h
44988@@ -220,7 +220,7 @@ struct solo_dev {
44989
44990 /* P2M DMA Engine */
44991 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
44992- atomic_t p2m_count;
44993+ atomic_unchecked_t p2m_count;
44994 int p2m_jiffies;
44995 unsigned int p2m_timeouts;
44996
44997diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
44998index c135165..dc69499 100644
44999--- a/drivers/media/pci/tw68/tw68-core.c
45000+++ b/drivers/media/pci/tw68/tw68-core.c
45001@@ -60,7 +60,7 @@ static unsigned int card[] = {[0 ... (TW68_MAXBOARDS - 1)] = UNSET };
45002 module_param_array(card, int, NULL, 0444);
45003 MODULE_PARM_DESC(card, "card type");
45004
45005-static atomic_t tw68_instance = ATOMIC_INIT(0);
45006+static atomic_unchecked_t tw68_instance = ATOMIC_INIT(0);
45007
45008 /* ------------------------------------------------------------------ */
45009
45010diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
45011index ba2d8f9..1566684 100644
45012--- a/drivers/media/platform/omap/omap_vout.c
45013+++ b/drivers/media/platform/omap/omap_vout.c
45014@@ -63,7 +63,6 @@ enum omap_vout_channels {
45015 OMAP_VIDEO2,
45016 };
45017
45018-static struct videobuf_queue_ops video_vbq_ops;
45019 /* Variables configurable through module params*/
45020 static u32 video1_numbuffers = 3;
45021 static u32 video2_numbuffers = 3;
45022@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
45023 {
45024 struct videobuf_queue *q;
45025 struct omap_vout_device *vout = NULL;
45026+ static struct videobuf_queue_ops video_vbq_ops = {
45027+ .buf_setup = omap_vout_buffer_setup,
45028+ .buf_prepare = omap_vout_buffer_prepare,
45029+ .buf_release = omap_vout_buffer_release,
45030+ .buf_queue = omap_vout_buffer_queue,
45031+ };
45032
45033 vout = video_drvdata(file);
45034 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
45035@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
45036 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
45037
45038 q = &vout->vbq;
45039- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
45040- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
45041- video_vbq_ops.buf_release = omap_vout_buffer_release;
45042- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
45043 spin_lock_init(&vout->vbq_lock);
45044
45045 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
45046diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
45047index fb2acc5..a2fcbdc4 100644
45048--- a/drivers/media/platform/s5p-tv/mixer.h
45049+++ b/drivers/media/platform/s5p-tv/mixer.h
45050@@ -156,7 +156,7 @@ struct mxr_layer {
45051 /** layer index (unique identifier) */
45052 int idx;
45053 /** callbacks for layer methods */
45054- struct mxr_layer_ops ops;
45055+ struct mxr_layer_ops *ops;
45056 /** format array */
45057 const struct mxr_format **fmt_array;
45058 /** size of format array */
45059diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
45060index 74344c7..a39e70e 100644
45061--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
45062+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
45063@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
45064 {
45065 struct mxr_layer *layer;
45066 int ret;
45067- struct mxr_layer_ops ops = {
45068+ static struct mxr_layer_ops ops = {
45069 .release = mxr_graph_layer_release,
45070 .buffer_set = mxr_graph_buffer_set,
45071 .stream_set = mxr_graph_stream_set,
45072diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
45073index b713403..53cb5ad 100644
45074--- a/drivers/media/platform/s5p-tv/mixer_reg.c
45075+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
45076@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
45077 layer->update_buf = next;
45078 }
45079
45080- layer->ops.buffer_set(layer, layer->update_buf);
45081+ layer->ops->buffer_set(layer, layer->update_buf);
45082
45083 if (done && done != layer->shadow_buf)
45084 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
45085diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
45086index b4d2696..91df48e 100644
45087--- a/drivers/media/platform/s5p-tv/mixer_video.c
45088+++ b/drivers/media/platform/s5p-tv/mixer_video.c
45089@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
45090 layer->geo.src.height = layer->geo.src.full_height;
45091
45092 mxr_geometry_dump(mdev, &layer->geo);
45093- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45094+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45095 mxr_geometry_dump(mdev, &layer->geo);
45096 }
45097
45098@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
45099 layer->geo.dst.full_width = mbus_fmt.width;
45100 layer->geo.dst.full_height = mbus_fmt.height;
45101 layer->geo.dst.field = mbus_fmt.field;
45102- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45103+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45104
45105 mxr_geometry_dump(mdev, &layer->geo);
45106 }
45107@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
45108 /* set source size to highest accepted value */
45109 geo->src.full_width = max(geo->dst.full_width, pix->width);
45110 geo->src.full_height = max(geo->dst.full_height, pix->height);
45111- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45112+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45113 mxr_geometry_dump(mdev, &layer->geo);
45114 /* set cropping to total visible screen */
45115 geo->src.width = pix->width;
45116@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
45117 geo->src.x_offset = 0;
45118 geo->src.y_offset = 0;
45119 /* assure consistency of geometry */
45120- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
45121+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
45122 mxr_geometry_dump(mdev, &layer->geo);
45123 /* set full size to lowest possible value */
45124 geo->src.full_width = 0;
45125 geo->src.full_height = 0;
45126- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45127+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45128 mxr_geometry_dump(mdev, &layer->geo);
45129
45130 /* returning results */
45131@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
45132 target->width = s->r.width;
45133 target->height = s->r.height;
45134
45135- layer->ops.fix_geometry(layer, stage, s->flags);
45136+ layer->ops->fix_geometry(layer, stage, s->flags);
45137
45138 /* retrieve update selection rectangle */
45139 res.left = target->x_offset;
45140@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
45141 mxr_output_get(mdev);
45142
45143 mxr_layer_update_output(layer);
45144- layer->ops.format_set(layer);
45145+ layer->ops->format_set(layer);
45146 /* enabling layer in hardware */
45147 spin_lock_irqsave(&layer->enq_slock, flags);
45148 layer->state = MXR_LAYER_STREAMING;
45149 spin_unlock_irqrestore(&layer->enq_slock, flags);
45150
45151- layer->ops.stream_set(layer, MXR_ENABLE);
45152+ layer->ops->stream_set(layer, MXR_ENABLE);
45153 mxr_streamer_get(mdev);
45154
45155 return 0;
45156@@ -1030,7 +1030,7 @@ static void stop_streaming(struct vb2_queue *vq)
45157 spin_unlock_irqrestore(&layer->enq_slock, flags);
45158
45159 /* disabling layer in hardware */
45160- layer->ops.stream_set(layer, MXR_DISABLE);
45161+ layer->ops->stream_set(layer, MXR_DISABLE);
45162 /* remove one streamer */
45163 mxr_streamer_put(mdev);
45164 /* allow changes in output configuration */
45165@@ -1068,8 +1068,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
45166
45167 void mxr_layer_release(struct mxr_layer *layer)
45168 {
45169- if (layer->ops.release)
45170- layer->ops.release(layer);
45171+ if (layer->ops->release)
45172+ layer->ops->release(layer);
45173 }
45174
45175 void mxr_base_layer_release(struct mxr_layer *layer)
45176@@ -1095,7 +1095,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
45177
45178 layer->mdev = mdev;
45179 layer->idx = idx;
45180- layer->ops = *ops;
45181+ layer->ops = ops;
45182
45183 spin_lock_init(&layer->enq_slock);
45184 INIT_LIST_HEAD(&layer->enq_list);
45185diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45186index c9388c4..ce71ece 100644
45187--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45188+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45189@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
45190 {
45191 struct mxr_layer *layer;
45192 int ret;
45193- struct mxr_layer_ops ops = {
45194+ static struct mxr_layer_ops ops = {
45195 .release = mxr_vp_layer_release,
45196 .buffer_set = mxr_vp_buffer_set,
45197 .stream_set = mxr_vp_stream_set,
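
All of the s5p-tv hunks serve one goal: keep function-pointer tables out of writable memory. The per-layer embedded copy (struct mxr_layer_ops ops) becomes a pointer to a single static table, every ops.fn() call becomes ops->fn(), and the stack-local ops initializers become static so they can land in read-only data (the actual const qualification is left to the constify plugin). The shape of the transformation (names are illustrative):

#include <stdio.h>

struct layer;

struct layer_ops {
    void (*stream_set)(struct layer *l, int enable);
};

/* One static (ideally const, read-only) table per layer type,
 * instead of a writable copy embedded in every object. */
static const struct layer_ops graph_ops = {
    .stream_set = NULL,   /* the driver wires mxr_graph_stream_set etc. */
};

struct layer {
    const struct layer_ops *ops;   /* was: struct layer_ops ops; */
};

int main(void)
{
    struct layer a = { .ops = &graph_ops };
    struct layer b = { .ops = &graph_ops };   /* same shared table */

    if (a.ops->stream_set)                    /* was: a.ops.stream_set */
        a.ops->stream_set(&a, 1);
    return (a.ops == b.ops) ? 0 : 1;          /* one table, many objects */
}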
45198diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
45199index 82affae..42833ec 100644
45200--- a/drivers/media/radio/radio-cadet.c
45201+++ b/drivers/media/radio/radio-cadet.c
45202@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45203 unsigned char readbuf[RDS_BUFFER];
45204 int i = 0;
45205
45206+ if (count > RDS_BUFFER)
45207+ return -EFAULT;
45208 mutex_lock(&dev->lock);
45209 if (dev->rdsstat == 0)
45210 cadet_start_rds(dev);
45211@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45212 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
45213 mutex_unlock(&dev->lock);
45214
45215- if (i && copy_to_user(data, readbuf, i))
45216- return -EFAULT;
45217+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
45218+ i = -EFAULT;
45219+
45220 return i;
45221 }
45222
45223diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
45224index 5236035..c622c74 100644
45225--- a/drivers/media/radio/radio-maxiradio.c
45226+++ b/drivers/media/radio/radio-maxiradio.c
45227@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
45228 /* TEA5757 pin mappings */
45229 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
45230
45231-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
45232+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
45233
45234 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
45235 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
45236diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
45237index 050b3bb..79f62b9 100644
45238--- a/drivers/media/radio/radio-shark.c
45239+++ b/drivers/media/radio/radio-shark.c
45240@@ -79,7 +79,7 @@ struct shark_device {
45241 u32 last_val;
45242 };
45243
45244-static atomic_t shark_instance = ATOMIC_INIT(0);
45245+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45246
45247 static void shark_write_val(struct snd_tea575x *tea, u32 val)
45248 {
45249diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
45250index 8654e0d..0608a64 100644
45251--- a/drivers/media/radio/radio-shark2.c
45252+++ b/drivers/media/radio/radio-shark2.c
45253@@ -74,7 +74,7 @@ struct shark_device {
45254 u8 *transfer_buffer;
45255 };
45256
45257-static atomic_t shark_instance = ATOMIC_INIT(0);
45258+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45259
45260 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
45261 {
45262diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
45263index dccf586..d5db411 100644
45264--- a/drivers/media/radio/radio-si476x.c
45265+++ b/drivers/media/radio/radio-si476x.c
45266@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
45267 struct si476x_radio *radio;
45268 struct v4l2_ctrl *ctrl;
45269
45270- static atomic_t instance = ATOMIC_INIT(0);
45271+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
45272
45273 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
45274 if (!radio)
45275diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
45276index 704397f..4d05977 100644
45277--- a/drivers/media/radio/wl128x/fmdrv_common.c
45278+++ b/drivers/media/radio/wl128x/fmdrv_common.c
45279@@ -71,7 +71,7 @@ module_param(default_rds_buf, uint, 0444);
45280 MODULE_PARM_DESC(rds_buf, "RDS buffer entries");
45281
45282 /* Radio Nr */
45283-static u32 radio_nr = -1;
45284+static int radio_nr = -1;
45285 module_param(radio_nr, int, 0444);
45286 MODULE_PARM_DESC(radio_nr, "Radio Nr");
45287
45288diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45289index 9fd1527..8927230 100644
45290--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
45291+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45292@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
45293
45294 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
45295 {
45296- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
45297- char result[64];
45298- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
45299- sizeof(result), 0);
45300+ char *buf;
45301+ char *result;
45302+ int retval;
45303+
45304+ buf = kmalloc(2, GFP_KERNEL);
45305+ if (buf == NULL)
45306+ return -ENOMEM;
45307+ result = kmalloc(64, GFP_KERNEL);
45308+ if (result == NULL) {
45309+ kfree(buf);
45310+ return -ENOMEM;
45311+ }
45312+
45313+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
45314+ buf[1] = enable ? 1 : 0;
45315+
45316+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
45317+
45318+ kfree(buf);
45319+ kfree(result);
45320+ return retval;
45321 }
45322
45323 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
45324 {
45325- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
45326- char state[3];
45327- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
45328+ char *buf;
45329+ char *state;
45330+ int retval;
45331+
45332+ buf = kmalloc(2, GFP_KERNEL);
45333+ if (buf == NULL)
45334+ return -ENOMEM;
45335+ state = kmalloc(3, GFP_KERNEL);
45336+ if (state == NULL) {
45337+ kfree(buf);
45338+ return -ENOMEM;
45339+ }
45340+
45341+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
45342+ buf[1] = enable ? 0 : 1;
45343+
45344+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
45345+
45346+ kfree(buf);
45347+ kfree(state);
45348+ return retval;
45349 }
45350
45351 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45352 {
45353- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
45354- char state[3];
45355+ char *query;
45356+ char *state;
45357 int ret;
45358+ query = kmalloc(1, GFP_KERNEL);
45359+ if (query == NULL)
45360+ return -ENOMEM;
45361+ state = kmalloc(3, GFP_KERNEL);
45362+ if (state == NULL) {
45363+ kfree(query);
45364+ return -ENOMEM;
45365+ }
45366+
45367+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
45368
45369 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
45370
45371- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
45372- sizeof(state), 0);
45373+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
45374 if (ret < 0) {
45375 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
45376 "state info\n");
45377@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45378
45379 /* Copy this pointer as we are gonna need it in the release phase */
45380 cinergyt2_usb_device = adap->dev;
45381-
45382+ kfree(query);
45383+ kfree(state);
45384 return 0;
45385 }
45386
45387@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
45388 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45389 {
45390 struct cinergyt2_state *st = d->priv;
45391- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
45392+ u8 *key, *cmd;
45393 int i;
45394
45395+ cmd = kmalloc(1, GFP_KERNEL);
45396+ if (cmd == NULL)
45397+ return -ENOMEM;
45398+ key = kzalloc(5, GFP_KERNEL);
45399+ if (key == NULL) {
45400+ kfree(cmd);
45401+ return -ENOMEM;
45402+ }
45403+
45404+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
45405+
45406 *state = REMOTE_NO_KEY_PRESSED;
45407
45408- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
45409+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
45410 if (key[4] == 0xff) {
45411 /* key repeat */
45412 st->rc_counter++;
45413@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45414 *event = d->last_event;
45415 deb_rc("repeat key, event %x\n",
45416 *event);
45417- return 0;
45418+ goto out;
45419 }
45420 }
45421 deb_rc("repeated key (non repeatable)\n");
45422 }
45423- return 0;
45424+ goto out;
45425 }
45426
45427 /* hack to pass checksum on the custom field */
45428@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45429
45430 deb_rc("key: %*ph\n", 5, key);
45431 }
45432+out:
45433+ kfree(cmd);
45434+ kfree(key);
45435 return 0;
45436 }
45437
45438diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45439index c890fe4..f9b2ae6 100644
45440--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45441+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45442@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
45443 fe_status_t *status)
45444 {
45445 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45446- struct dvbt_get_status_msg result;
45447- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45448+ struct dvbt_get_status_msg *result;
45449+ u8 *cmd;
45450 int ret;
45451
45452- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
45453- sizeof(result), 0);
45454+ cmd = kmalloc(1, GFP_KERNEL);
45455+ if (cmd == NULL)
45456+ return -ENOMEM;
45457+ result = kmalloc(sizeof(*result), GFP_KERNEL);
45458+ if (result == NULL) {
45459+ kfree(cmd);
45460+ return -ENOMEM;
45461+ }
45462+
45463+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45464+
45465+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
45466+ sizeof(*result), 0);
45467 if (ret < 0)
45468- return ret;
45469+ goto out;
45470
45471 *status = 0;
45472
45473- if (0xffff - le16_to_cpu(result.gain) > 30)
45474+ if (0xffff - le16_to_cpu(result->gain) > 30)
45475 *status |= FE_HAS_SIGNAL;
45476- if (result.lock_bits & (1 << 6))
45477+ if (result->lock_bits & (1 << 6))
45478 *status |= FE_HAS_LOCK;
45479- if (result.lock_bits & (1 << 5))
45480+ if (result->lock_bits & (1 << 5))
45481 *status |= FE_HAS_SYNC;
45482- if (result.lock_bits & (1 << 4))
45483+ if (result->lock_bits & (1 << 4))
45484 *status |= FE_HAS_CARRIER;
45485- if (result.lock_bits & (1 << 1))
45486+ if (result->lock_bits & (1 << 1))
45487 *status |= FE_HAS_VITERBI;
45488
45489 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
45490 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
45491 *status &= ~FE_HAS_LOCK;
45492
45493- return 0;
45494+out:
45495+ kfree(cmd);
45496+ kfree(result);
45497+ return ret;
45498 }
45499
45500 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
45501 {
45502 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45503- struct dvbt_get_status_msg status;
45504- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45505+ struct dvbt_get_status_msg *status;
45506+ char *cmd;
45507 int ret;
45508
45509- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45510- sizeof(status), 0);
45511+ cmd = kmalloc(1, GFP_KERNEL);
45512+ if (cmd == NULL)
45513+ return -ENOMEM;
45514+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45515+ if (status == NULL) {
45516+ kfree(cmd);
45517+ return -ENOMEM;
45518+ }
45519+
45520+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45521+
45522+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45523+ sizeof(*status), 0);
45524 if (ret < 0)
45525- return ret;
45526+ goto out;
45527
45528- *ber = le32_to_cpu(status.viterbi_error_rate);
45529+ *ber = le32_to_cpu(status->viterbi_error_rate);
45530+out:
45531+ kfree(cmd);
45532+ kfree(status);
44533- return 0;
44533+ return ret;
45534 }
45535
45536 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
45537 {
45538 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45539- struct dvbt_get_status_msg status;
45540- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45541+ struct dvbt_get_status_msg *status;
45542+ u8 *cmd;
45543 int ret;
45544
45545- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
45546- sizeof(status), 0);
45547+ cmd = kmalloc(1, GFP_KERNEL);
45548+ if (cmd == NULL)
45549+ return -ENOMEM;
45550+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45551+ if (status == NULL) {
45552+ kfree(cmd);
45553+ return -ENOMEM;
45554+ }
45555+
45556+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45557+
45558+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
45559+ sizeof(*status), 0);
45560 if (ret < 0) {
45561 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
45562 ret);
45563- return ret;
45564+ goto out;
45565 }
45566- *unc = le32_to_cpu(status.uncorrected_block_count);
45567- return 0;
45568+ *unc = le32_to_cpu(status->uncorrected_block_count);
45569+
45570+out:
45571+ kfree(cmd);
45572+ kfree(status);
45573+ return ret;
45574 }
45575
45576 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
45577 u16 *strength)
45578 {
45579 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45580- struct dvbt_get_status_msg status;
45581- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45582+ struct dvbt_get_status_msg *status;
45583+ char *cmd;
45584 int ret;
45585
45586- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45587- sizeof(status), 0);
45588+ cmd = kmalloc(1, GFP_KERNEL);
45589+ if (cmd == NULL)
45590+ return -ENOMEM;
45591+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45592+ if (status == NULL) {
45593+ kfree(cmd);
45594+ return -ENOMEM;
45595+ }
45596+
45597+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45598+
45599+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45600+ sizeof(*status), 0);
45601 if (ret < 0) {
45602 err("cinergyt2_fe_read_signal_strength() Failed!"
45603 " (Error=%d)\n", ret);
45604- return ret;
45605+ goto out;
45606 }
45607- *strength = (0xffff - le16_to_cpu(status.gain));
45608+ *strength = (0xffff - le16_to_cpu(status->gain));
45609+
45610+out:
45611+ kfree(cmd);
45612+ kfree(status);
45613 return 0;
45614 }
45615
45616 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
45617 {
45618 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45619- struct dvbt_get_status_msg status;
45620- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45621+ struct dvbt_get_status_msg *status;
45622+ char *cmd;
45623 int ret;
45624
45625- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45626- sizeof(status), 0);
45627+ cmd = kmalloc(1, GFP_KERNEL);
45628+ if (cmd == NULL)
45629+ return -ENOMEM;
45630+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45631+ if (status == NULL) {
45632+ kfree(cmd);
45633+ return -ENOMEM;
45634+ }
45635+
45636+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45637+
45638+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45639+ sizeof(*status), 0);
45640 if (ret < 0) {
45641 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
45642- return ret;
45643+ goto out;
45644 }
45645- *snr = (status.snr << 8) | status.snr;
45646- return 0;
45647+ *snr = (status->snr << 8) | status->snr;
45648+
45649+out:
45650+ kfree(cmd);
45651+ kfree(status);
45652+ return ret;
45653 }
45654
45655 static int cinergyt2_fe_init(struct dvb_frontend *fe)
45656@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
45657 {
45658 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
45659 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45660- struct dvbt_set_parameters_msg param;
45661- char result[2];
45662+ struct dvbt_set_parameters_msg *param;
45663+ char *result;
45664 int err;
45665
45666- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
45667- param.tps = cpu_to_le16(compute_tps(fep));
45668- param.freq = cpu_to_le32(fep->frequency / 1000);
45669- param.flags = 0;
45670+ result = kmalloc(2, GFP_KERNEL);
45671+ if (result == NULL)
45672+ return -ENOMEM;
45673+ param = kmalloc(sizeof(*param), GFP_KERNEL);
45674+ if (param == NULL) {
45675+ kfree(result);
45676+ return -ENOMEM;
45677+ }
45678+
45679+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
45680+ param->tps = cpu_to_le16(compute_tps(fep));
45681+ param->freq = cpu_to_le32(fep->frequency / 1000);
45682+ param->flags = 0;
45683
45684 switch (fep->bandwidth_hz) {
45685 default:
45686 case 8000000:
45687- param.bandwidth = 8;
45688+ param->bandwidth = 8;
45689 break;
45690 case 7000000:
45691- param.bandwidth = 7;
45692+ param->bandwidth = 7;
45693 break;
45694 case 6000000:
45695- param.bandwidth = 6;
45696+ param->bandwidth = 6;
45697 break;
45698 }
45699
45700 err = dvb_usb_generic_rw(state->d,
45701- (char *)&param, sizeof(param),
45702- result, sizeof(result), 0);
45703+ (char *)param, sizeof(*param),
45704+ result, 2, 0);
45705 if (err < 0)
45706 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
45707
45708- return (err < 0) ? err : 0;
45709+ kfree(result);
45710+ kfree(param);
45711+ return err;
45712 }
45713
45714 static void cinergyt2_fe_release(struct dvb_frontend *fe)
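Five callbacks in cinergyT2-fe.c (read_status, read_ber, read_unc_blocks, read_signal_strength, read_snr) now repeat the identical allocate/issue/free boilerplate for CINERGYT2_EP1_GET_TUNER_STATUS. Purely as a hedged refactoring sketch, not something the patch itself does, the duplication could be folded into one helper:

#include <linux/slab.h>
#include "cinergyT2.h"	/* dvbt_get_status_msg, CINERGYT2_EP1_GET_TUNER_STATUS */

/* Hypothetical helper: one heap-safe implementation of the repeated
 * "read tuner status" exchange.  The caller must pass a kmalloc'd
 * result buffer, for the same DMA reason as the command byte. */
static int cinergyt2_fe_get_status_msg(struct cinergyt2_fe_state *state,
				       struct dvbt_get_status_msg *result)
{
	u8 *cmd;
	int ret;

	cmd = kmalloc(1, GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;
	cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;

	ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
				 sizeof(*result), 0);
	kfree(cmd);
	return ret;
}

Each caller would still kmalloc its dvbt_get_status_msg and read fields through the pointer, and a single helper would make it harder to mishandle the return code; note that the read_ber and read_signal_strength hunks above still return 0 even when the transfer failed.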
45715diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45716index 733a7ff..f8b52e3 100644
45717--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45718+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45719@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
45720
45721 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
45722 {
45723- struct hexline hx;
45724- u8 reset;
45725+ struct hexline *hx;
45726+ u8 *reset;
45727 int ret,pos=0;
45728
45729+ reset = kmalloc(1, GFP_KERNEL);
45730+ if (reset == NULL)
45731+ return -ENOMEM;
45732+
45733+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
45734+ if (hx == NULL) {
45735+ kfree(reset);
45736+ return -ENOMEM;
45737+ }
45738+
45739 /* stop the CPU */
45740- reset = 1;
45741- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
45742+ reset[0] = 1;
45743+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
45744 err("could not stop the USB controller CPU.");
45745
45746- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
45747- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
45748- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
45749+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
45750+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
45751+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
45752
45753- if (ret != hx.len) {
45754+ if (ret != hx->len) {
45755 err("error while transferring firmware "
45756 "(transferred size: %d, block size: %d)",
45757- ret,hx.len);
45758+ ret,hx->len);
45759 ret = -EINVAL;
45760 break;
45761 }
45762 }
45763 if (ret < 0) {
45764 err("firmware download failed at %d with %d",pos,ret);
45765+ kfree(reset);
45766+ kfree(hx);
45767 return ret;
45768 }
45769
45770 if (ret == 0) {
45771 /* restart the CPU */
45772- reset = 0;
45773- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
45774+ reset[0] = 0;
45775+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
45776 err("could not restart the USB controller CPU.");
45777 ret = -EINVAL;
45778 }
45779 } else
45780 ret = -EIO;
45781
45782+ kfree(reset);
45783+ kfree(hx);
45784+
45785 return ret;
45786 }
45787 EXPORT_SYMBOL(usb_cypress_load_firmware);
45788diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
45789index 1a3df10..57997a5 100644
45790--- a/drivers/media/usb/dvb-usb/dw2102.c
45791+++ b/drivers/media/usb/dvb-usb/dw2102.c
45792@@ -118,7 +118,7 @@ struct su3000_state {
45793
45794 struct s6x0_state {
45795 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
45796-};
45797+} __no_const;
45798
45799 /* debug */
45800 static int dvb_usb_dw2102_debug;
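__no_const here is an annotation for PaX's constify GCC plugin: structures consisting only of function pointers are normally forced read-only at compile time, and __no_const opts a type back out when an instance really is written at runtime, as dw2102 does when it saves and replaces old_set_voltage on a live frontend. A rough sketch of the mechanism, with the plugin attribute spelled out by hand (the real definition lives in a grsecurity tree's compiler headers and may differ):

#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))
#else
#define __no_const
#endif

/* All-function-pointer structs get constified automatically... */
struct frobber_ops {
	int (*frob)(int);
};			/* plugin treats instances as read-only */

/* ...unless the type is annotated, because someone must patch it: */
struct hook_state {
	int (*old_handler)(int);
} __no_const;		/* instances stay writable for live hooking */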
45801diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
45802index 5801ae7..83f71fa 100644
45803--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
45804+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
45805@@ -87,8 +87,11 @@ struct technisat_usb2_state {
45806 static int technisat_usb2_i2c_access(struct usb_device *udev,
45807 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
45808 {
45809- u8 b[64];
45810- int ret, actual_length;
45811+ u8 *b = kmalloc(64, GFP_KERNEL);
45812+ int ret, actual_length, error = 0;
45813+
45814+ if (b == NULL)
45815+ return -ENOMEM;
45816
45817 deb_i2c("i2c-access: %02x, tx: ", device_addr);
45818 debug_dump(tx, txlen, deb_i2c);
45819@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45820
45821 if (ret < 0) {
45822 err("i2c-error: out failed %02x = %d", device_addr, ret);
45823- return -ENODEV;
45824+ error = -ENODEV;
45825+ goto out;
45826 }
45827
45828 ret = usb_bulk_msg(udev,
45829@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45830 b, 64, &actual_length, 1000);
45831 if (ret < 0) {
45832 err("i2c-error: in failed %02x = %d", device_addr, ret);
45833- return -ENODEV;
45834+ error = -ENODEV;
45835+ goto out;
45836 }
45837
45838 if (b[0] != I2C_STATUS_OK) {
45839@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45840 /* handle tuner-i2c-nak */
45841 if (!(b[0] == I2C_STATUS_NAK &&
45842 device_addr == 0x60
45843- /* && device_is_technisat_usb2 */))
45844- return -ENODEV;
45845+ /* && device_is_technisat_usb2 */)) {
45846+ error = -ENODEV;
45847+ goto out;
45848+ }
45849 }
45850
45851 deb_i2c("status: %d, ", b[0]);
45852@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45853
45854 deb_i2c("\n");
45855
45856- return 0;
45857+out:
45858+ kfree(b);
45859+ return error;
45860 }
45861
45862 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
45863@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
45864 {
45865 int ret;
45866
45867- u8 led[8] = {
45868- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
45869- 0
45870- };
45871+ u8 *led = kzalloc(8, GFP_KERNEL);
45872+
45873+ if (led == NULL)
45874+ return -ENOMEM;
45875
45876 if (disable_led_control && state != TECH_LED_OFF)
45877 return 0;
45878
45879+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
45880+
45881 switch (state) {
45882 case TECH_LED_ON:
45883 led[1] = 0x82;
45884@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
45885 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
45886 USB_TYPE_VENDOR | USB_DIR_OUT,
45887 0, 0,
45888- led, sizeof(led), 500);
45889+ led, 8, 500);
45890
45891 mutex_unlock(&d->i2c_mutex);
45892+
45893+ kfree(led);
45894+
45895 return ret;
45896 }
45897
45898 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
45899 {
45900 int ret;
45901- u8 b = 0;
45902+ u8 *b = kzalloc(1, GFP_KERNEL);
45903+
45904+ if (b == NULL)
45905+ return -ENOMEM;
45906
45907 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
45908 return -EAGAIN;
45909@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
45910 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
45911 USB_TYPE_VENDOR | USB_DIR_OUT,
45912 (red << 8) | green, 0,
45913- &b, 1, 500);
45914+ b, 1, 500);
45915
45916 mutex_unlock(&d->i2c_mutex);
45917
45918+ kfree(b);
45919+
45920 return ret;
45921 }
45922
45923@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45924 struct dvb_usb_device_description **desc, int *cold)
45925 {
45926 int ret;
45927- u8 version[3];
45928+ u8 *version = kmalloc(3, GFP_KERNEL);
45929
45930 /* first select the interface */
45931 if (usb_set_interface(udev, 0, 1) != 0)
45932@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45933
45934 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
45935
45936+ if (version == NULL)
45937+ return 0;
45938+
45939 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
45940 GET_VERSION_INFO_VENDOR_REQUEST,
45941 USB_TYPE_VENDOR | USB_DIR_IN,
45942 0, 0,
45943- version, sizeof(version), 500);
45944+ version, 3, 500);
45945
45946 if (ret < 0)
45947 *cold = 1;
45948@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45949 *cold = 0;
45950 }
45951
45952+ kfree(version);
45953+
45954 return 0;
45955 }
45956
45957@@ -594,10 +618,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
45958
45959 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
45960 {
45961- u8 buf[62], *b;
45962+ u8 *buf, *b;
45963 int ret;
45964 struct ir_raw_event ev;
45965
45966+ buf = kmalloc(62, GFP_KERNEL);
45967+
45968+ if (buf == NULL)
45969+ return -ENOMEM;
45970+
45971 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
45972 buf[1] = 0x08;
45973 buf[2] = 0x8f;
45974@@ -620,16 +649,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
45975 GET_IR_DATA_VENDOR_REQUEST,
45976 USB_TYPE_VENDOR | USB_DIR_IN,
45977 0x8080, 0,
45978- buf, sizeof(buf), 500);
45979+ buf, 62, 500);
45980
45981 unlock:
45982 mutex_unlock(&d->i2c_mutex);
45983
45984- if (ret < 0)
45985+ if (ret < 0) {
45986+ kfree(buf);
45987 return ret;
45988+ }
45989
45990- if (ret == 1)
45991+ if (ret == 1) {
45992+ kfree(buf);
45993 return 0; /* no key pressed */
45994+ }
45995
45996 /* decoding */
45997 b = buf+1;
45998@@ -656,6 +689,8 @@ unlock:
45999
46000 ir_raw_event_handle(d->rc_dev);
46001
46002+ kfree(buf);
46003+
46004 return 1;
46005 }
46006
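The technisat conversions follow the same heap-buffer rule, and they also illustrate the cost of early returns once a function owns an allocation: in the hunks above, technisat_usb2_set_led() can return 0 on the disable_led_control path and technisat_usb2_set_led_timer() can return -EAGAIN on the interrupted-mutex path without freeing the buffer just allocated, and technisat_usb2_identify_state() reports success when its kmalloc fails. A leak-free shape for this kind of function, sketched with a hypothetical my_set_led():

#include <linux/slab.h>
#include "dvb-usb.h"

static int my_set_led(struct dvb_usb_device *d, bool skip)
{
	u8 *led;
	int ret = 0;

	led = kzalloc(8, GFP_KERNEL);
	if (led == NULL)
		return -ENOMEM;

	if (skip)
		goto out;	/* early exit still releases the buffer */

	if (mutex_lock_interruptible(&d->i2c_mutex) < 0) {
		ret = -EAGAIN;
		goto out;
	}
	/* ... usb_control_msg(..., led, 8, 500) ... */
	mutex_unlock(&d->i2c_mutex);
out:
	kfree(led);
	return ret;
}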
46007diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46008index af63543..0436f20 100644
46009--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46010+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46011@@ -429,7 +429,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
46012 * by passing a very big num_planes value */
46013 uplane = compat_alloc_user_space(num_planes *
46014 sizeof(struct v4l2_plane));
46015- kp->m.planes = (__force struct v4l2_plane *)uplane;
46016+ kp->m.planes = (__force_kernel struct v4l2_plane *)uplane;
46017
46018 while (--num_planes >= 0) {
46019 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
46020@@ -500,7 +500,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
46021 if (num_planes == 0)
46022 return 0;
46023
46024- uplane = (__force struct v4l2_plane __user *)kp->m.planes;
46025+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
46026 if (get_user(p, &up->m.planes))
46027 return -EFAULT;
46028 uplane32 = compat_ptr(p);
46029@@ -564,7 +564,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
46030 get_user(kp->flags, &up->flags) ||
46031 copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
46032 return -EFAULT;
46033- kp->base = (__force void *)compat_ptr(tmp);
46034+ kp->base = (__force_kernel void *)compat_ptr(tmp);
46035 return 0;
46036 }
46037
46038@@ -669,7 +669,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
46039 n * sizeof(struct v4l2_ext_control32)))
46040 return -EFAULT;
46041 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
46042- kp->controls = (__force struct v4l2_ext_control *)kcontrols;
46043+ kp->controls = (__force_kernel struct v4l2_ext_control *)kcontrols;
46044 while (--n >= 0) {
46045 u32 id;
46046
46047@@ -696,7 +696,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
46048 {
46049 struct v4l2_ext_control32 __user *ucontrols;
46050 struct v4l2_ext_control __user *kcontrols =
46051- (__force struct v4l2_ext_control __user *)kp->controls;
46052+ (struct v4l2_ext_control __force_user *)kp->controls;
46053 int n = kp->count;
46054 compat_caddr_t p;
46055
46056@@ -780,7 +780,7 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
46057 get_user(tmp, &up->edid) ||
46058 copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
46059 return -EFAULT;
46060- kp->edid = (__force u8 *)compat_ptr(tmp);
46061+ kp->edid = (__force_kernel u8 *)compat_ptr(tmp);
46062 return 0;
46063 }
46064
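__force_user and __force_kernel are grsecurity's refinement of sparse's bare __force: instead of merely silencing the address-space warning, the cast states which way the pointer crosses, so a kernel pointer parked in a __user-typed slot (as the v4l2 compat code does with compat_alloc_user_space() results) stays distinguishable from the reverse. An approximation of the annotations; the exact definitions live in a grsecurity tree's include/linux/compiler.h and may differ:

#ifdef __CHECKER__
# define __user         __attribute__((noderef, address_space(1)))
# define __kernel       __attribute__((address_space(0)))
# define __force        __attribute__((force))
# define __force_user   __force __user
# define __force_kernel __force __kernel
#else
# define __user
# define __kernel
# define __force
# define __force_user
# define __force_kernel
#endif

struct v4l2_plane;

/* A kernel-side struct field that temporarily stores a user pointer:
 * both directions of the cast are now visible to sparse. */
static void stash_and_recover(struct v4l2_plane __user *uplane,
			      struct v4l2_plane **slot)
{
	*slot  = (__force_kernel struct v4l2_plane *)uplane;	/* stash */
	uplane = (struct v4l2_plane __force_user *)*slot;	/* recover */
	(void)uplane;
}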
46065diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
46066index 015f92a..59e311e 100644
46067--- a/drivers/media/v4l2-core/v4l2-device.c
46068+++ b/drivers/media/v4l2-core/v4l2-device.c
46069@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
46070 EXPORT_SYMBOL_GPL(v4l2_device_put);
46071
46072 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
46073- atomic_t *instance)
46074+ atomic_unchecked_t *instance)
46075 {
46076- int num = atomic_inc_return(instance) - 1;
46077+ int num = atomic_inc_return_unchecked(instance) - 1;
46078 int len = strlen(basename);
46079
46080 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
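atomic_unchecked_t is the other recurring grsecurity type in this section. Under CONFIG_PAX_REFCOUNT, ordinary atomic_t increments are overflow-checked and trap so a wrapped reference count cannot become a use-after-free; counters that are IDs or statistics rather than refcounts, like this instance number, are switched to the unchecked type so wrapping stays harmless plain arithmetic. A sketch, assuming a grsecurity tree where these types and operations exist:

#include <linux/atomic.h>

static atomic_t refs = ATOMIC_INIT(1);			/* protected */
static atomic_unchecked_t instance = ATOMIC_INIT(0);	/* may wrap */

static int grab_ref_and_id(void)
{
	atomic_inc(&refs);	/* refcount: an overflow would trap */
	/* same "-1" trick as v4l2_device_set_name(): first call yields 0 */
	return atomic_inc_return_unchecked(&instance) - 1;
}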
46081diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
46082index faac2f4..e39dcd9 100644
46083--- a/drivers/media/v4l2-core/v4l2-ioctl.c
46084+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
46085@@ -2151,7 +2151,8 @@ struct v4l2_ioctl_info {
46086 struct file *file, void *fh, void *p);
46087 } u;
46088 void (*debug)(const void *arg, bool write_only);
46089-};
46090+} __do_const;
46091+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
46092
46093 /* This control needs a priority check */
46094 #define INFO_FL_PRIO (1 << 0)
46095@@ -2335,7 +2336,7 @@ static long __video_do_ioctl(struct file *file,
46096 struct video_device *vfd = video_devdata(file);
46097 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
46098 bool write_only = false;
46099- struct v4l2_ioctl_info default_info;
46100+ v4l2_ioctl_info_no_const default_info;
46101 const struct v4l2_ioctl_info *info;
46102 void *fh = file->private_data;
46103 struct v4l2_fh *vfh = NULL;
46104@@ -2422,7 +2423,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
46105 ret = -EINVAL;
46106 break;
46107 }
46108- *user_ptr = (void __user *)buf->m.planes;
46109+ *user_ptr = (void __force_user *)buf->m.planes;
46110 *kernel_ptr = (void **)&buf->m.planes;
46111 *array_size = sizeof(struct v4l2_plane) * buf->length;
46112 ret = 1;
46113@@ -2439,7 +2440,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
46114 ret = -EINVAL;
46115 break;
46116 }
46117- *user_ptr = (void __user *)edid->edid;
46118+ *user_ptr = (void __force_user *)edid->edid;
46119 *kernel_ptr = (void **)&edid->edid;
46120 *array_size = edid->blocks * 128;
46121 ret = 1;
46122@@ -2457,7 +2458,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
46123 ret = -EINVAL;
46124 break;
46125 }
46126- *user_ptr = (void __user *)ctrls->controls;
46127+ *user_ptr = (void __force_user *)ctrls->controls;
46128 *kernel_ptr = (void **)&ctrls->controls;
46129 *array_size = sizeof(struct v4l2_ext_control)
46130 * ctrls->count;
46131@@ -2558,7 +2559,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
46132 }
46133
46134 if (has_array_args) {
46135- *kernel_ptr = (void __force *)user_ptr;
46136+ *kernel_ptr = (void __force_kernel *)user_ptr;
46137 if (copy_to_user(user_ptr, mbuf, array_size))
46138 err = -EFAULT;
46139 goto out_array_args;
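__do_const is the inverse of __no_const: it asks the constify plugin to force a struct read-only even though it mixes data with its function pointers, and the companion typedef gives the one legitimately mutable use, the stack-local default_info whose fields are filled in per call, a writable alias of the same layout. In miniature:

#ifndef __do_const	/* constify plugin attributes; empty elsewhere */
#define __do_const
#define __no_const
#endif

/* Constified table type: instances land in read-only data */
struct my_ioctl_info {
	unsigned int ioctl;
	int (*func)(void *arg);
} __do_const;

/* Writable alias for the rare stack-local copy */
typedef struct my_ioctl_info __no_const my_ioctl_info_no_const;

static int stub(void *arg) { (void)arg; return 0; }

static const struct my_ioctl_info table[] = {
	{ 0x1234, stub },		/* read-only at runtime */
};

static void build_default(unsigned int cmd)
{
	my_ioctl_info_no_const info;	/* mutable local, same layout */

	info.ioctl = cmd;
	info.func  = stub;
	(void)info; (void)table;
}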
46140diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
46141index 24696f5..3637780 100644
46142--- a/drivers/memory/omap-gpmc.c
46143+++ b/drivers/memory/omap-gpmc.c
46144@@ -211,7 +211,6 @@ struct omap3_gpmc_regs {
46145 };
46146
46147 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
46148-static struct irq_chip gpmc_irq_chip;
46149 static int gpmc_irq_start;
46150
46151 static struct resource gpmc_mem_root;
46152@@ -939,6 +938,17 @@ static void gpmc_irq_noop(struct irq_data *data) { }
46153
46154 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
46155
46156+static struct irq_chip gpmc_irq_chip = {
46157+ .name = "gpmc",
46158+ .irq_startup = gpmc_irq_noop_ret,
46159+ .irq_enable = gpmc_irq_enable,
46160+ .irq_disable = gpmc_irq_disable,
46161+ .irq_shutdown = gpmc_irq_noop,
46162+ .irq_ack = gpmc_irq_noop,
46163+ .irq_mask = gpmc_irq_noop,
46164+ .irq_unmask = gpmc_irq_noop,
46165+};
46166+
46167 static int gpmc_setup_irq(void)
46168 {
46169 int i;
46170@@ -953,15 +963,6 @@ static int gpmc_setup_irq(void)
46171 return gpmc_irq_start;
46172 }
46173
46174- gpmc_irq_chip.name = "gpmc";
46175- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
46176- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
46177- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
46178- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
46179- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
46180- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
46181- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
46182-
46183 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
46184 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
46185
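The omap-gpmc hunk changes no behaviour; it changes where gpmc_irq_chip can live. Filling the object field-by-field inside gpmc_setup_irq() forces it into writable data, while a static initializer completes it at build time so nothing ever stores to it again and the constify machinery can treat it as read-only. Before and after, in miniature:

#include <linux/irq.h>

static void my_irq_noop(struct irq_data *data) { }

/* Before: empty object patched at setup time; must stay writable. */
static struct irq_chip chip_rw;
static void setup_rw(void)
{
	chip_rw.name    = "gpmc";
	chip_rw.irq_ack = my_irq_noop;
}

/* After: build-time initialiser; no runtime store is ever needed, so
 * the object is a candidate for .rodata under constification. */
static struct irq_chip chip_ro = {
	.name     = "gpmc",
	.irq_ack  = my_irq_noop,
	.irq_mask = my_irq_noop,
};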
46186diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
46187index 187f836..679544b 100644
46188--- a/drivers/message/fusion/mptbase.c
46189+++ b/drivers/message/fusion/mptbase.c
46190@@ -6746,8 +6746,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
46191 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
46192 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
46193
46194+#ifdef CONFIG_GRKERNSEC_HIDESYM
46195+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
46196+#else
46197 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
46198 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
46199+#endif
46200+
46201 /*
46202 * Rounding UP to nearest 4-kB boundary here...
46203 */
46204@@ -6760,7 +6765,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
46205 ioc->facts.GlobalCredits);
46206
46207 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
46208+#ifdef CONFIG_GRKERNSEC_HIDESYM
46209+ NULL, NULL);
46210+#else
46211 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
46212+#endif
46213 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
46214 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
46215 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
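CONFIG_GRKERNSEC_HIDESYM scrubs kernel addresses out of user-readable interfaces; rather than rewrite the format strings, the patch substitutes NULL for the pointer arguments under the ifdef, so /proc readers learn nothing about where the request frames sit. Mainline later grew an alternative that needs no ifdef at all, the %pK specifier gated by /proc/sys/kernel/kptr_restrict; a sketch of that form, with the seq_file signature assumed:

#include <linux/seq_file.h>

/* %pK prints zeroes to unprivileged readers when kptr_restrict is
 * set, so one format string covers both the hidden and visible case. */
static void show_frames(struct seq_file *m, void *frames,
			unsigned long frames_dma)
{
	seq_printf(m, " RequestFrames @ %pK (Dma @ %pK)\n",
		   frames, (void *)frames_dma);
}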
46216diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
46217index 5bdaae1..eced16f 100644
46218--- a/drivers/message/fusion/mptsas.c
46219+++ b/drivers/message/fusion/mptsas.c
46220@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
46221 return 0;
46222 }
46223
46224+static inline void
46225+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
46226+{
46227+ if (phy_info->port_details) {
46228+ phy_info->port_details->rphy = rphy;
46229+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
46230+ ioc->name, rphy));
46231+ }
46232+
46233+ if (rphy) {
46234+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
46235+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
46236+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
46237+ ioc->name, rphy, rphy->dev.release));
46238+ }
46239+}
46240+
46241 /* no mutex */
46242 static void
46243 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
46244@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
46245 return NULL;
46246 }
46247
46248-static inline void
46249-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
46250-{
46251- if (phy_info->port_details) {
46252- phy_info->port_details->rphy = rphy;
46253- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
46254- ioc->name, rphy));
46255- }
46256-
46257- if (rphy) {
46258- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
46259- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
46260- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
46261- ioc->name, rphy, rphy->dev.release));
46262- }
46263-}
46264-
46265 static inline struct sas_port *
46266 mptsas_get_port(struct mptsas_phyinfo *phy_info)
46267 {
46268diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
46269index b7d87cd..3fb36da 100644
46270--- a/drivers/message/i2o/i2o_proc.c
46271+++ b/drivers/message/i2o/i2o_proc.c
46272@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
46273 "Array Controller Device"
46274 };
46275
46276-static char *chtostr(char *tmp, u8 *chars, int n)
46277-{
46278- tmp[0] = 0;
46279- return strncat(tmp, (char *)chars, n);
46280-}
46281-
46282 static int i2o_report_query_status(struct seq_file *seq, int block_status,
46283 char *group)
46284 {
46285@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
46286 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
46287 {
46288 struct i2o_controller *c = (struct i2o_controller *)seq->private;
46289- static u32 work32[5];
46290- static u8 *work8 = (u8 *) work32;
46291- static u16 *work16 = (u16 *) work32;
46292+ u32 work32[5];
46293+ u8 *work8 = (u8 *) work32;
46294+ u16 *work16 = (u16 *) work32;
46295 int token;
46296 u32 hwcap;
46297
46298@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
46299 } *result;
46300
46301 i2o_exec_execute_ddm_table ddm_table;
46302- char tmp[28 + 1];
46303
46304 result = kmalloc(sizeof(*result), GFP_KERNEL);
46305 if (!result)
46306@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
46307
46308 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
46309 seq_printf(seq, "%-#8x", ddm_table.module_id);
46310- seq_printf(seq, "%-29s",
46311- chtostr(tmp, ddm_table.module_name_version, 28));
46312+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
46313 seq_printf(seq, "%9d ", ddm_table.data_size);
46314 seq_printf(seq, "%8d", ddm_table.code_size);
46315
46316@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
46317
46318 i2o_driver_result_table *result;
46319 i2o_driver_store_table *dst;
46320- char tmp[28 + 1];
46321
46322 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
46323 if (result == NULL)
46324@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
46325
46326 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
46327 seq_printf(seq, "%-#8x", dst->module_id);
46328- seq_printf(seq, "%-29s",
46329- chtostr(tmp, dst->module_name_version, 28));
46330- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
46331+ seq_printf(seq, "%-.28s", dst->module_name_version);
46332+ seq_printf(seq, "%-.8s", dst->date);
46333 seq_printf(seq, "%8d ", dst->module_size);
46334 seq_printf(seq, "%8d ", dst->mpb_size);
46335 seq_printf(seq, "0x%04x", dst->module_flags);
46336@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
46337 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
46338 {
46339 struct i2o_device *d = (struct i2o_device *)seq->private;
46340- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
46341+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
46342 // == (allow) 512d bytes (max)
46343- static u16 *work16 = (u16 *) work32;
46344+ u16 *work16 = (u16 *) work32;
46345 int token;
46346- char tmp[16 + 1];
46347
46348 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
46349
46350@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
46351 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
46352 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
46353 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
46354- seq_printf(seq, "Vendor info : %s\n",
46355- chtostr(tmp, (u8 *) (work32 + 2), 16));
46356- seq_printf(seq, "Product info : %s\n",
46357- chtostr(tmp, (u8 *) (work32 + 6), 16));
46358- seq_printf(seq, "Description : %s\n",
46359- chtostr(tmp, (u8 *) (work32 + 10), 16));
46360- seq_printf(seq, "Product rev. : %s\n",
46361- chtostr(tmp, (u8 *) (work32 + 14), 8));
46362+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
46363+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
46364+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
46365+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
46366
46367 seq_printf(seq, "Serial number : ");
46368 print_serial_number(seq, (u8 *) (work32 + 16),
46369@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
46370 u8 pad[256]; // allow up to 256 byte (max) serial number
46371 } result;
46372
46373- char tmp[24 + 1];
46374-
46375 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
46376
46377 if (token < 0) {
46378@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
46379 }
46380
46381 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
46382- seq_printf(seq, "Module name : %s\n",
46383- chtostr(tmp, result.module_name, 24));
46384- seq_printf(seq, "Module revision : %s\n",
46385- chtostr(tmp, result.module_rev, 8));
46386+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
46387+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
46388
46389 seq_printf(seq, "Serial number : ");
46390 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
46391@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46392 u8 instance_number[4];
46393 } result;
46394
46395- char tmp[64 + 1];
46396-
46397 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
46398
46399 if (token < 0) {
46400@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46401 return 0;
46402 }
46403
46404- seq_printf(seq, "Device name : %s\n",
46405- chtostr(tmp, result.device_name, 64));
46406- seq_printf(seq, "Service name : %s\n",
46407- chtostr(tmp, result.service_name, 64));
46408- seq_printf(seq, "Physical name : %s\n",
46409- chtostr(tmp, result.physical_location, 64));
46410- seq_printf(seq, "Instance number : %s\n",
46411- chtostr(tmp, result.instance_number, 4));
46412+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
46413+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
46414+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
46415+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
46416
46417 return 0;
46418 }
46419@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46420 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
46421 {
46422 struct i2o_device *d = (struct i2o_device *)seq->private;
46423- static u32 work32[12];
46424- static u16 *work16 = (u16 *) work32;
46425- static u8 *work8 = (u8 *) work32;
46426+ u32 work32[12];
46427+ u16 *work16 = (u16 *) work32;
46428+ u8 *work8 = (u8 *) work32;
46429 int token;
46430
46431 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
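Two independent fixes ride through i2o_proc.c. First, the chtostr() helper bounced not-necessarily-NUL-terminated firmware strings through a stack buffer via strncat(); printing with an explicit precision lets seq_printf() stop at either the terminator or the field width, with no copy and no overread. Second, dropping static from the work32/work16/work8 scratch variables removes real shared state: two concurrent readers of the same proc file were overwriting one buffer. The precision idiom, in plain userspace C:

#include <stdio.h>

int main(void)
{
	/* not NUL-terminated: exactly 4 meaningful bytes */
	char tag[4] = { 'I', '2', 'O', '!' };

	/* "%.4s" stops after 4 chars even without a terminator */
	printf("tag: %.4s\n", tag);	/* -> tag: I2O! */
	return 0;
}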
46432diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
46433index 92752fb..a7494f6 100644
46434--- a/drivers/message/i2o/iop.c
46435+++ b/drivers/message/i2o/iop.c
46436@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
46437
46438 spin_lock_irqsave(&c->context_list_lock, flags);
46439
46440- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
46441- atomic_inc(&c->context_list_counter);
46442+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
46443+ atomic_inc_unchecked(&c->context_list_counter);
46444
46445- entry->context = atomic_read(&c->context_list_counter);
46446+ entry->context = atomic_read_unchecked(&c->context_list_counter);
46447
46448 list_add(&entry->list, &c->context_list);
46449
46450@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
46451
46452 #if BITS_PER_LONG == 64
46453 spin_lock_init(&c->context_list_lock);
46454- atomic_set(&c->context_list_counter, 0);
46455+ atomic_set_unchecked(&c->context_list_counter, 0);
46456 INIT_LIST_HEAD(&c->context_list);
46457 #endif
46458
46459diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
46460index 9a8e185..27ff17d 100644
46461--- a/drivers/mfd/ab8500-debugfs.c
46462+++ b/drivers/mfd/ab8500-debugfs.c
46463@@ -100,7 +100,7 @@ static int irq_last;
46464 static u32 *irq_count;
46465 static int num_irqs;
46466
46467-static struct device_attribute **dev_attr;
46468+static device_attribute_no_const **dev_attr;
46469 static char **event_name;
46470
46471 static u8 avg_sample = SAMPLE_16;
46472diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
46473index c880c89..45a7c68 100644
46474--- a/drivers/mfd/max8925-i2c.c
46475+++ b/drivers/mfd/max8925-i2c.c
46476@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
46477 const struct i2c_device_id *id)
46478 {
46479 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
46480- static struct max8925_chip *chip;
46481+ struct max8925_chip *chip;
46482 struct device_node *node = client->dev.of_node;
46483
46484 if (node && !pdata) {
46485diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
46486index 7612d89..70549c2 100644
46487--- a/drivers/mfd/tps65910.c
46488+++ b/drivers/mfd/tps65910.c
46489@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
46490 struct tps65910_platform_data *pdata)
46491 {
46492 int ret = 0;
46493- static struct regmap_irq_chip *tps6591x_irqs_chip;
46494+ struct regmap_irq_chip *tps6591x_irqs_chip;
46495
46496 if (!irq) {
46497 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
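The max8925 and tps65910 hunks fix the same C pitfall: static on a function-local variable creates a single object shared by every call, so a static chip pointer in a probe routine means the second probed device silently clobbers state belonging to the first (the static scratch arrays removed from i2o_proc.c above, and the static buffer removed from cfi_cmdset_0020.c below, are the same bug class). A small userspace demonstration:

#include <stdio.h>

static const char *label_bad(int id)
{
	static char buf[16];		/* ONE buffer for every call */

	snprintf(buf, sizeof(buf), "dev%d", id);
	return buf;
}

int main(void)
{
	const char *a = label_bad(1);
	const char *b = label_bad(2);	/* overwrites the same buffer */

	printf("%s %s\n", a, b);	/* prints "dev2 dev2" */
	return 0;
}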
46498diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
46499index 1b772ef..01e77d33 100644
46500--- a/drivers/mfd/twl4030-irq.c
46501+++ b/drivers/mfd/twl4030-irq.c
46502@@ -34,6 +34,7 @@
46503 #include <linux/of.h>
46504 #include <linux/irqdomain.h>
46505 #include <linux/i2c/twl.h>
46506+#include <asm/pgtable.h>
46507
46508 #include "twl-core.h"
46509
46510@@ -729,10 +730,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
46511 * Install an irq handler for each of the SIH modules;
46512 * clone dummy irq_chip since PIH can't *do* anything
46513 */
46514- twl4030_irq_chip = dummy_irq_chip;
46515- twl4030_irq_chip.name = "twl4030";
46516+ pax_open_kernel();
46517+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
46518+ *(const char **)&twl4030_irq_chip.name = "twl4030";
46519
46520- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46521+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46522+ pax_close_kernel();
46523
46524 for (i = irq_base; i < irq_end; i++) {
46525 irq_set_chip_and_handler(i, &twl4030_irq_chip,
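pax_open_kernel()/pax_close_kernel() bracket the few writes that legitimately target data PaX keeps read-only: on x86 they temporarily lift write-protect enforcement (CR0.WP) for the current CPU, just long enough to patch a constified object, here cloning dummy_irq_chip into twl4030_irq_chip at init. The identical bracket reappears below for the bin_attr sizes, mmci_ops, omap_hsmmc_ops and the sdhci ops tables. The shape of the idiom, sketched so it also builds outside a grsecurity tree:

#ifndef pax_open_kernel		/* no-ops outside grsecurity trees */
#define pax_open_kernel()	do { } while (0)
#define pax_close_kernel()	do { } while (0)
#define __do_const
#endif

struct host_ops {
	int (*card_busy)(void *host);
} __do_const;				/* instance lands in r/o data */

static struct host_ops my_ops;

static int my_card_busy(void *host) { return 0; }

static void install_busy_hook(void)
{
	pax_open_kernel();	/* lift write protection (x86: CR0.WP) */
	*(void **)&my_ops.card_busy = my_card_busy;
	pax_close_kernel();	/* re-seal */
}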
46526diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
46527index 464419b..64bae8d 100644
46528--- a/drivers/misc/c2port/core.c
46529+++ b/drivers/misc/c2port/core.c
46530@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
46531 goto error_idr_alloc;
46532 c2dev->id = ret;
46533
46534- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46535+ pax_open_kernel();
46536+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46537+ pax_close_kernel();
46538
46539 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
46540 "c2port%d", c2dev->id);
46541diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
46542index 8385177..2f54635 100644
46543--- a/drivers/misc/eeprom/sunxi_sid.c
46544+++ b/drivers/misc/eeprom/sunxi_sid.c
46545@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
46546
46547 platform_set_drvdata(pdev, sid_data);
46548
46549- sid_bin_attr.size = sid_data->keysize;
46550+ pax_open_kernel();
46551+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
46552+ pax_close_kernel();
46553 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
46554 return -ENODEV;
46555
46556diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
46557index 36f5d52..32311c3 100644
46558--- a/drivers/misc/kgdbts.c
46559+++ b/drivers/misc/kgdbts.c
46560@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
46561 char before[BREAK_INSTR_SIZE];
46562 char after[BREAK_INSTR_SIZE];
46563
46564- probe_kernel_read(before, (char *)kgdbts_break_test,
46565+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
46566 BREAK_INSTR_SIZE);
46567 init_simple_test();
46568 ts.tst = plant_and_detach_test;
46569@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
46570 /* Activate test with initial breakpoint */
46571 if (!is_early)
46572 kgdb_breakpoint();
46573- probe_kernel_read(after, (char *)kgdbts_break_test,
46574+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
46575 BREAK_INSTR_SIZE);
46576 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
46577 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
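ktla_ktva() exists because PAX_KERNEXEC on i386 maps kernel text at a different address than the alias the data side reads through; kgdbts wants to inspect the instruction bytes at kgdbts_break_test, so the symbol address is translated before probe_kernel_read(). Elsewhere the macro reduces to the identity. A hedged sketch, assuming the kgdbts symbols from the file above:

#ifndef ktla_ktva
#define ktla_ktva(addr)	(addr)	/* identity when text isn't remapped */
#endif

static void snapshot_breakpoint(char *buf)
{
	/* read the instruction bytes through the readable alias */
	probe_kernel_read(buf, ktla_ktva((char *)kgdbts_break_test),
			  BREAK_INSTR_SIZE);
}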
46578diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
46579index 3ef4627..8d00486 100644
46580--- a/drivers/misc/lis3lv02d/lis3lv02d.c
46581+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
46582@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
46583 * the lid is closed. This leads to interrupts as soon as a little move
46584 * is done.
46585 */
46586- atomic_inc(&lis3->count);
46587+ atomic_inc_unchecked(&lis3->count);
46588
46589 wake_up_interruptible(&lis3->misc_wait);
46590 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
46591@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
46592 if (lis3->pm_dev)
46593 pm_runtime_get_sync(lis3->pm_dev);
46594
46595- atomic_set(&lis3->count, 0);
46596+ atomic_set_unchecked(&lis3->count, 0);
46597 return 0;
46598 }
46599
46600@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
46601 add_wait_queue(&lis3->misc_wait, &wait);
46602 while (true) {
46603 set_current_state(TASK_INTERRUPTIBLE);
46604- data = atomic_xchg(&lis3->count, 0);
46605+ data = atomic_xchg_unchecked(&lis3->count, 0);
46606 if (data)
46607 break;
46608
46609@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
46610 struct lis3lv02d, miscdev);
46611
46612 poll_wait(file, &lis3->misc_wait, wait);
46613- if (atomic_read(&lis3->count))
46614+ if (atomic_read_unchecked(&lis3->count))
46615 return POLLIN | POLLRDNORM;
46616 return 0;
46617 }
46618diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
46619index c439c82..1f20f57 100644
46620--- a/drivers/misc/lis3lv02d/lis3lv02d.h
46621+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
46622@@ -297,7 +297,7 @@ struct lis3lv02d {
46623 struct input_polled_dev *idev; /* input device */
46624 struct platform_device *pdev; /* platform device */
46625 struct regulator_bulk_data regulators[2];
46626- atomic_t count; /* interrupt count after last read */
46627+ atomic_unchecked_t count; /* interrupt count after last read */
46628 union axis_conversion ac; /* hw -> logical axis */
46629 int mapped_btns[3];
46630
46631diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
46632index 2f30bad..c4c13d0 100644
46633--- a/drivers/misc/sgi-gru/gruhandles.c
46634+++ b/drivers/misc/sgi-gru/gruhandles.c
46635@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
46636 unsigned long nsec;
46637
46638 nsec = CLKS2NSEC(clks);
46639- atomic_long_inc(&mcs_op_statistics[op].count);
46640- atomic_long_add(nsec, &mcs_op_statistics[op].total);
46641+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
46642+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
46643 if (mcs_op_statistics[op].max < nsec)
46644 mcs_op_statistics[op].max = nsec;
46645 }
46646diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
46647index 4f76359..cdfcb2e 100644
46648--- a/drivers/misc/sgi-gru/gruprocfs.c
46649+++ b/drivers/misc/sgi-gru/gruprocfs.c
46650@@ -32,9 +32,9 @@
46651
46652 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
46653
46654-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
46655+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
46656 {
46657- unsigned long val = atomic_long_read(v);
46658+ unsigned long val = atomic_long_read_unchecked(v);
46659
46660 seq_printf(s, "%16lu %s\n", val, id);
46661 }
46662@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
46663
46664 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
46665 for (op = 0; op < mcsop_last; op++) {
46666- count = atomic_long_read(&mcs_op_statistics[op].count);
46667- total = atomic_long_read(&mcs_op_statistics[op].total);
46668+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
46669+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
46670 max = mcs_op_statistics[op].max;
46671 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
46672 count ? total / count : 0, max);
46673diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
46674index 5c3ce24..4915ccb 100644
46675--- a/drivers/misc/sgi-gru/grutables.h
46676+++ b/drivers/misc/sgi-gru/grutables.h
46677@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
46678 * GRU statistics.
46679 */
46680 struct gru_stats_s {
46681- atomic_long_t vdata_alloc;
46682- atomic_long_t vdata_free;
46683- atomic_long_t gts_alloc;
46684- atomic_long_t gts_free;
46685- atomic_long_t gms_alloc;
46686- atomic_long_t gms_free;
46687- atomic_long_t gts_double_allocate;
46688- atomic_long_t assign_context;
46689- atomic_long_t assign_context_failed;
46690- atomic_long_t free_context;
46691- atomic_long_t load_user_context;
46692- atomic_long_t load_kernel_context;
46693- atomic_long_t lock_kernel_context;
46694- atomic_long_t unlock_kernel_context;
46695- atomic_long_t steal_user_context;
46696- atomic_long_t steal_kernel_context;
46697- atomic_long_t steal_context_failed;
46698- atomic_long_t nopfn;
46699- atomic_long_t asid_new;
46700- atomic_long_t asid_next;
46701- atomic_long_t asid_wrap;
46702- atomic_long_t asid_reuse;
46703- atomic_long_t intr;
46704- atomic_long_t intr_cbr;
46705- atomic_long_t intr_tfh;
46706- atomic_long_t intr_spurious;
46707- atomic_long_t intr_mm_lock_failed;
46708- atomic_long_t call_os;
46709- atomic_long_t call_os_wait_queue;
46710- atomic_long_t user_flush_tlb;
46711- atomic_long_t user_unload_context;
46712- atomic_long_t user_exception;
46713- atomic_long_t set_context_option;
46714- atomic_long_t check_context_retarget_intr;
46715- atomic_long_t check_context_unload;
46716- atomic_long_t tlb_dropin;
46717- atomic_long_t tlb_preload_page;
46718- atomic_long_t tlb_dropin_fail_no_asid;
46719- atomic_long_t tlb_dropin_fail_upm;
46720- atomic_long_t tlb_dropin_fail_invalid;
46721- atomic_long_t tlb_dropin_fail_range_active;
46722- atomic_long_t tlb_dropin_fail_idle;
46723- atomic_long_t tlb_dropin_fail_fmm;
46724- atomic_long_t tlb_dropin_fail_no_exception;
46725- atomic_long_t tfh_stale_on_fault;
46726- atomic_long_t mmu_invalidate_range;
46727- atomic_long_t mmu_invalidate_page;
46728- atomic_long_t flush_tlb;
46729- atomic_long_t flush_tlb_gru;
46730- atomic_long_t flush_tlb_gru_tgh;
46731- atomic_long_t flush_tlb_gru_zero_asid;
46732+ atomic_long_unchecked_t vdata_alloc;
46733+ atomic_long_unchecked_t vdata_free;
46734+ atomic_long_unchecked_t gts_alloc;
46735+ atomic_long_unchecked_t gts_free;
46736+ atomic_long_unchecked_t gms_alloc;
46737+ atomic_long_unchecked_t gms_free;
46738+ atomic_long_unchecked_t gts_double_allocate;
46739+ atomic_long_unchecked_t assign_context;
46740+ atomic_long_unchecked_t assign_context_failed;
46741+ atomic_long_unchecked_t free_context;
46742+ atomic_long_unchecked_t load_user_context;
46743+ atomic_long_unchecked_t load_kernel_context;
46744+ atomic_long_unchecked_t lock_kernel_context;
46745+ atomic_long_unchecked_t unlock_kernel_context;
46746+ atomic_long_unchecked_t steal_user_context;
46747+ atomic_long_unchecked_t steal_kernel_context;
46748+ atomic_long_unchecked_t steal_context_failed;
46749+ atomic_long_unchecked_t nopfn;
46750+ atomic_long_unchecked_t asid_new;
46751+ atomic_long_unchecked_t asid_next;
46752+ atomic_long_unchecked_t asid_wrap;
46753+ atomic_long_unchecked_t asid_reuse;
46754+ atomic_long_unchecked_t intr;
46755+ atomic_long_unchecked_t intr_cbr;
46756+ atomic_long_unchecked_t intr_tfh;
46757+ atomic_long_unchecked_t intr_spurious;
46758+ atomic_long_unchecked_t intr_mm_lock_failed;
46759+ atomic_long_unchecked_t call_os;
46760+ atomic_long_unchecked_t call_os_wait_queue;
46761+ atomic_long_unchecked_t user_flush_tlb;
46762+ atomic_long_unchecked_t user_unload_context;
46763+ atomic_long_unchecked_t user_exception;
46764+ atomic_long_unchecked_t set_context_option;
46765+ atomic_long_unchecked_t check_context_retarget_intr;
46766+ atomic_long_unchecked_t check_context_unload;
46767+ atomic_long_unchecked_t tlb_dropin;
46768+ atomic_long_unchecked_t tlb_preload_page;
46769+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
46770+ atomic_long_unchecked_t tlb_dropin_fail_upm;
46771+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
46772+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
46773+ atomic_long_unchecked_t tlb_dropin_fail_idle;
46774+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
46775+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
46776+ atomic_long_unchecked_t tfh_stale_on_fault;
46777+ atomic_long_unchecked_t mmu_invalidate_range;
46778+ atomic_long_unchecked_t mmu_invalidate_page;
46779+ atomic_long_unchecked_t flush_tlb;
46780+ atomic_long_unchecked_t flush_tlb_gru;
46781+ atomic_long_unchecked_t flush_tlb_gru_tgh;
46782+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
46783
46784- atomic_long_t copy_gpa;
46785- atomic_long_t read_gpa;
46786+ atomic_long_unchecked_t copy_gpa;
46787+ atomic_long_unchecked_t read_gpa;
46788
46789- atomic_long_t mesq_receive;
46790- atomic_long_t mesq_receive_none;
46791- atomic_long_t mesq_send;
46792- atomic_long_t mesq_send_failed;
46793- atomic_long_t mesq_noop;
46794- atomic_long_t mesq_send_unexpected_error;
46795- atomic_long_t mesq_send_lb_overflow;
46796- atomic_long_t mesq_send_qlimit_reached;
46797- atomic_long_t mesq_send_amo_nacked;
46798- atomic_long_t mesq_send_put_nacked;
46799- atomic_long_t mesq_page_overflow;
46800- atomic_long_t mesq_qf_locked;
46801- atomic_long_t mesq_qf_noop_not_full;
46802- atomic_long_t mesq_qf_switch_head_failed;
46803- atomic_long_t mesq_qf_unexpected_error;
46804- atomic_long_t mesq_noop_unexpected_error;
46805- atomic_long_t mesq_noop_lb_overflow;
46806- atomic_long_t mesq_noop_qlimit_reached;
46807- atomic_long_t mesq_noop_amo_nacked;
46808- atomic_long_t mesq_noop_put_nacked;
46809- atomic_long_t mesq_noop_page_overflow;
46810+ atomic_long_unchecked_t mesq_receive;
46811+ atomic_long_unchecked_t mesq_receive_none;
46812+ atomic_long_unchecked_t mesq_send;
46813+ atomic_long_unchecked_t mesq_send_failed;
46814+ atomic_long_unchecked_t mesq_noop;
46815+ atomic_long_unchecked_t mesq_send_unexpected_error;
46816+ atomic_long_unchecked_t mesq_send_lb_overflow;
46817+ atomic_long_unchecked_t mesq_send_qlimit_reached;
46818+ atomic_long_unchecked_t mesq_send_amo_nacked;
46819+ atomic_long_unchecked_t mesq_send_put_nacked;
46820+ atomic_long_unchecked_t mesq_page_overflow;
46821+ atomic_long_unchecked_t mesq_qf_locked;
46822+ atomic_long_unchecked_t mesq_qf_noop_not_full;
46823+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
46824+ atomic_long_unchecked_t mesq_qf_unexpected_error;
46825+ atomic_long_unchecked_t mesq_noop_unexpected_error;
46826+ atomic_long_unchecked_t mesq_noop_lb_overflow;
46827+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
46828+ atomic_long_unchecked_t mesq_noop_amo_nacked;
46829+ atomic_long_unchecked_t mesq_noop_put_nacked;
46830+ atomic_long_unchecked_t mesq_noop_page_overflow;
46831
46832 };
46833
46834@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
46835 tghop_invalidate, mcsop_last};
46836
46837 struct mcs_op_statistic {
46838- atomic_long_t count;
46839- atomic_long_t total;
46840+ atomic_long_unchecked_t count;
46841+ atomic_long_unchecked_t total;
46842 unsigned long max;
46843 };
46844
46845@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
46846
46847 #define STAT(id) do { \
46848 if (gru_options & OPT_STATS) \
46849- atomic_long_inc(&gru_stats.id); \
46850+ atomic_long_inc_unchecked(&gru_stats.id); \
46851 } while (0)
46852
46853 #ifdef CONFIG_SGI_GRU_DEBUG
46854diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
46855index c862cd4..0d176fe 100644
46856--- a/drivers/misc/sgi-xp/xp.h
46857+++ b/drivers/misc/sgi-xp/xp.h
46858@@ -288,7 +288,7 @@ struct xpc_interface {
46859 xpc_notify_func, void *);
46860 void (*received) (short, int, void *);
46861 enum xp_retval (*partid_to_nasids) (short, void *);
46862-};
46863+} __no_const;
46864
46865 extern struct xpc_interface xpc_interface;
46866
46867diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
46868index 01be66d..e3a0c7e 100644
46869--- a/drivers/misc/sgi-xp/xp_main.c
46870+++ b/drivers/misc/sgi-xp/xp_main.c
46871@@ -78,13 +78,13 @@ xpc_notloaded(void)
46872 }
46873
46874 struct xpc_interface xpc_interface = {
46875- (void (*)(int))xpc_notloaded,
46876- (void (*)(int))xpc_notloaded,
46877- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
46878- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
46879+ .connect = (void (*)(int))xpc_notloaded,
46880+ .disconnect = (void (*)(int))xpc_notloaded,
46881+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
46882+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
46883 void *))xpc_notloaded,
46884- (void (*)(short, int, void *))xpc_notloaded,
46885- (enum xp_retval(*)(short, void *))xpc_notloaded
46886+ .received = (void (*)(short, int, void *))xpc_notloaded,
46887+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
46888 };
46889 EXPORT_SYMBOL_GPL(xpc_interface);
46890
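The xpc_interface change swaps a positional initializer for designated initializers. The immediate benefit is plain C robustness: each stub is bound to its slot by name, so reordering or extending struct xpc_interface can no longer silently wire a cast to the wrong member; it also reads as groundwork for the same constify treatment applied to other ops tables in this patch, though that motivation is inferred rather than stated. Before and after:

struct ops {
	void (*connect)(int);
	void (*disconnect)(int);
};

static void notloaded(int x) { (void)x; }

/* positional: breaks silently if fields are ever reordered */
static struct ops a = { notloaded, notloaded };

/* designated: each function is tied to its slot by name */
static struct ops b = {
	.connect    = notloaded,
	.disconnect = notloaded,
};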
46891diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
46892index b94d5f7..7f494c5 100644
46893--- a/drivers/misc/sgi-xp/xpc.h
46894+++ b/drivers/misc/sgi-xp/xpc.h
46895@@ -835,6 +835,7 @@ struct xpc_arch_operations {
46896 void (*received_payload) (struct xpc_channel *, void *);
46897 void (*notify_senders_of_disconnect) (struct xpc_channel *);
46898 };
46899+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
46900
46901 /* struct xpc_partition act_state values (for XPC HB) */
46902
46903@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
46904 /* found in xpc_main.c */
46905 extern struct device *xpc_part;
46906 extern struct device *xpc_chan;
46907-extern struct xpc_arch_operations xpc_arch_ops;
46908+extern xpc_arch_operations_no_const xpc_arch_ops;
46909 extern int xpc_disengage_timelimit;
46910 extern int xpc_disengage_timedout;
46911 extern int xpc_activate_IRQ_rcvd;
46912diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
46913index 82dc574..8539ab2 100644
46914--- a/drivers/misc/sgi-xp/xpc_main.c
46915+++ b/drivers/misc/sgi-xp/xpc_main.c
46916@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
46917 .notifier_call = xpc_system_die,
46918 };
46919
46920-struct xpc_arch_operations xpc_arch_ops;
46921+xpc_arch_operations_no_const xpc_arch_ops;
46922
46923 /*
46924 * Timer function to enforce the timelimit on the partition disengage.
46925@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
46926
46927 if (((die_args->trapnr == X86_TRAP_MF) ||
46928 (die_args->trapnr == X86_TRAP_XF)) &&
46929- !user_mode_vm(die_args->regs))
46930+ !user_mode(die_args->regs))
46931 xpc_die_deactivate();
46932
46933 break;
46934diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
46935index 4409d79..d7766d0 100644
46936--- a/drivers/mmc/card/block.c
46937+++ b/drivers/mmc/card/block.c
46938@@ -577,7 +577,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
46939 if (idata->ic.postsleep_min_us)
46940 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
46941
46942- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
46943+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
46944 err = -EFAULT;
46945 goto cmd_rel_host;
46946 }
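The mmc_blk_ioctl_cmd change looks cosmetic but is a type correction: response is an array member, so ic_ptr->response already decays to a pointer to its first element, whereas &(ic_ptr->response) is a pointer to the whole array; the two carry the same address but different types and sizes, which grsecurity's stricter usercopy/size-overflow checking trips over. Demonstrated in userspace:

#include <stdio.h>

struct ioc {
	unsigned int response[4];
};

int main(void)
{
	struct ioc ic;
	unsigned int *p      = ic.response;	/* decayed element ptr */
	unsigned int (*q)[4] = &ic.response;	/* ptr to whole array */

	/* same address... */
	printf("%p %p\n", (void *)p, (void *)q);
	/* ...but sizeof(*p)==4 vs sizeof(*q)==16: the checkers care */
	printf("%zu %zu\n", sizeof(*p), sizeof(*q));
	return 0;
}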
46947diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
46948index 0d0f7a2..45b8d60 100644
46949--- a/drivers/mmc/host/dw_mmc.h
46950+++ b/drivers/mmc/host/dw_mmc.h
46951@@ -276,5 +276,5 @@ struct dw_mci_drv_data {
46952 int (*parse_dt)(struct dw_mci *host);
46953 int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
46954 struct dw_mci_tuning_data *tuning_data);
46955-};
46956+} __do_const;
46957 #endif /* _DW_MMC_H_ */
46958diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
46959index 8232e9a..7776006 100644
46960--- a/drivers/mmc/host/mmci.c
46961+++ b/drivers/mmc/host/mmci.c
46962@@ -1635,7 +1635,9 @@ static int mmci_probe(struct amba_device *dev,
46963 mmc->caps |= MMC_CAP_CMD23;
46964
46965 if (variant->busy_detect) {
46966- mmci_ops.card_busy = mmci_card_busy;
46967+ pax_open_kernel();
46968+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
46969+ pax_close_kernel();
46970 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
46971 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
46972 mmc->max_busy_timeout = 0;
46973diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
46974index 7c71dcd..74cb746 100644
46975--- a/drivers/mmc/host/omap_hsmmc.c
46976+++ b/drivers/mmc/host/omap_hsmmc.c
46977@@ -2120,7 +2120,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
46978
46979 if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
46980 dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
46981- omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
46982+ pax_open_kernel();
46983+ *(void **)&omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
46984+ pax_close_kernel();
46985 }
46986
46987 pm_runtime_enable(host->dev);
46988diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
46989index af1f7c0..00d368a 100644
46990--- a/drivers/mmc/host/sdhci-esdhc-imx.c
46991+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
46992@@ -989,9 +989,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
46993 host->mmc->caps |= MMC_CAP_1_8V_DDR;
46994 }
46995
46996- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
46997- sdhci_esdhc_ops.platform_execute_tuning =
46998+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
46999+ pax_open_kernel();
47000+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
47001 esdhc_executing_tuning;
47002+ pax_close_kernel();
47003+ }
47004
47005 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
47006 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
47007diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
47008index c45b893..fba0144 100644
47009--- a/drivers/mmc/host/sdhci-s3c.c
47010+++ b/drivers/mmc/host/sdhci-s3c.c
47011@@ -590,9 +590,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
47012 * we can use overriding functions instead of default.
47013 */
47014 if (sc->no_divider) {
47015- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
47016- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
47017- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
47018+ pax_open_kernel();
47019+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
47020+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
47021+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
47022+ pax_close_kernel();
47023 }
47024
47025 /* It supports additional host capabilities if needed */
47026diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
47027index 423666b..81ff5eb 100644
47028--- a/drivers/mtd/chips/cfi_cmdset_0020.c
47029+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
47030@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
47031 size_t totlen = 0, thislen;
47032 int ret = 0;
47033 size_t buflen = 0;
47034- static char *buffer;
47035+ char *buffer;
47036
47037 if (!ECCBUF_SIZE) {
47038 /* We should fall back to a general writev implementation.
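
Dropping `static` from the local buffer pointer above fixes a latent race: a function-local static is a single shared slot, so two concurrent writev calls would stomp on each other's buffer pointer, while an automatic variable is per-call. A minimal userspace sketch of the difference (names illustrative):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *writer(void *arg)
    {
            /* pre-patch shape: one pointer shared by every caller */
            /* static char *buffer; */
            /* post-patch shape: each call owns its pointer */
            char *buffer = malloc(64);
            snprintf(buffer, 64, "writer %ld", (long)(intptr_t)arg);
            puts(buffer);
            free(buffer);
            return NULL;
    }

    int main(void)
    {
            pthread_t t1, t2;
            pthread_create(&t1, NULL, writer, (void *)1);
            pthread_create(&t2, NULL, writer, (void *)2);
            pthread_join(t1, NULL);
            pthread_join(t2, NULL);
            return 0;
    }
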
47039diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
47040index b3b7ca1..5dd4634 100644
47041--- a/drivers/mtd/nand/denali.c
47042+++ b/drivers/mtd/nand/denali.c
47043@@ -24,6 +24,7 @@
47044 #include <linux/slab.h>
47045 #include <linux/mtd/mtd.h>
47046 #include <linux/module.h>
47047+#include <linux/slab.h>
47048
47049 #include "denali.h"
47050
47051diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
47052index 4f3851a..f477a23 100644
47053--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
47054+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
47055@@ -386,7 +386,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
47056
47057 /* first try to map the upper buffer directly */
47058 if (virt_addr_valid(this->upper_buf) &&
47059- !object_is_on_stack(this->upper_buf)) {
47060+ !object_starts_on_stack(this->upper_buf)) {
47061 sg_init_one(sgl, this->upper_buf, this->upper_len);
47062 ret = dma_map_sg(this->dev, sgl, 1, dr);
47063 if (ret == 0)
47064diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
47065index 51b9d6a..52af9a7 100644
47066--- a/drivers/mtd/nftlmount.c
47067+++ b/drivers/mtd/nftlmount.c
47068@@ -24,6 +24,7 @@
47069 #include <asm/errno.h>
47070 #include <linux/delay.h>
47071 #include <linux/slab.h>
47072+#include <linux/sched.h>
47073 #include <linux/mtd/mtd.h>
47074 #include <linux/mtd/nand.h>
47075 #include <linux/mtd/nftl.h>
47076diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
47077index c23184a..4115c41 100644
47078--- a/drivers/mtd/sm_ftl.c
47079+++ b/drivers/mtd/sm_ftl.c
47080@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
47081 #define SM_CIS_VENDOR_OFFSET 0x59
47082 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
47083 {
47084- struct attribute_group *attr_group;
47085+ attribute_group_no_const *attr_group;
47086 struct attribute **attributes;
47087 struct sm_sysfs_attribute *vendor_attribute;
47088 char *vendor;
47089diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
47090index 7b11243..b3278a3 100644
47091--- a/drivers/net/bonding/bond_netlink.c
47092+++ b/drivers/net/bonding/bond_netlink.c
47093@@ -585,7 +585,7 @@ nla_put_failure:
47094 return -EMSGSIZE;
47095 }
47096
47097-struct rtnl_link_ops bond_link_ops __read_mostly = {
47098+struct rtnl_link_ops bond_link_ops = {
47099 .kind = "bond",
47100 .priv_size = sizeof(struct bonding),
47101 .setup = bond_setup,
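
The recurring removals of __read_mostly from rtnl_link_ops and notifier_block definitions throughout this patch appear to follow from KERNEXEC write-protecting .data..read_mostly after init: rtnl_link_register() and the notifier registration calls still write into these objects (list linkage), so they have to stay in ordinary writable data. A sketch of the constraint, with an illustrative section name and demo types:

    #include <stdio.h>

    struct list_node { struct list_node *next, *prev; };

    struct demo_link_ops {
            struct list_node list;  /* written by the register call */
            const char *kind;
    };

    /* pre-patch shape: would end up in a section sealed after boot */
    /* static struct demo_link_ops ops
     *         __attribute__((section(".data..read_mostly"))); */

    /* post-patch shape: ordinary writable data */
    static struct demo_link_ops ops = { .kind = "demo" };

    static void demo_register(struct demo_link_ops *o, struct list_node *head)
    {
            /* this write is why the object cannot be read-only */
            o->list.next = head;
            o->list.prev = head;
    }

    int main(void)
    {
            struct list_node head = { &head, &head };
            demo_register(&ops, &head);
            printf("%s registered\n", ops.kind);
            return 0;
    }
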
47102diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
47103index b3b922a..80bba38 100644
47104--- a/drivers/net/caif/caif_hsi.c
47105+++ b/drivers/net/caif/caif_hsi.c
47106@@ -1444,7 +1444,7 @@ err:
47107 return -ENODEV;
47108 }
47109
47110-static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
47111+static struct rtnl_link_ops caif_hsi_link_ops = {
47112 .kind = "cfhsi",
47113 .priv_size = sizeof(struct cfhsi),
47114 .setup = cfhsi_setup,
47115diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
47116index 98d73aa..63ef9da 100644
47117--- a/drivers/net/can/Kconfig
47118+++ b/drivers/net/can/Kconfig
47119@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
47120
47121 config CAN_FLEXCAN
47122 tristate "Support for Freescale FLEXCAN based chips"
47123- depends on ARM || PPC
47124+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
47125 ---help---
47126 Say Y here if you want to support for Freescale FlexCAN.
47127
47128diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
47129index 62ca0e8..3bed607 100644
47130--- a/drivers/net/can/dev.c
47131+++ b/drivers/net/can/dev.c
47132@@ -958,7 +958,7 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
47133 return -EOPNOTSUPP;
47134 }
47135
47136-static struct rtnl_link_ops can_link_ops __read_mostly = {
47137+static struct rtnl_link_ops can_link_ops = {
47138 .kind = "can",
47139 .maxtype = IFLA_CAN_MAX,
47140 .policy = can_policy,
47141diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
47142index 674f367..ec3a31f 100644
47143--- a/drivers/net/can/vcan.c
47144+++ b/drivers/net/can/vcan.c
47145@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev)
47146 dev->destructor = free_netdev;
47147 }
47148
47149-static struct rtnl_link_ops vcan_link_ops __read_mostly = {
47150+static struct rtnl_link_ops vcan_link_ops = {
47151 .kind = "vcan",
47152 .setup = vcan_setup,
47153 };
47154diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
47155index 49adbf1..fff7ff8 100644
47156--- a/drivers/net/dummy.c
47157+++ b/drivers/net/dummy.c
47158@@ -164,7 +164,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
47159 return 0;
47160 }
47161
47162-static struct rtnl_link_ops dummy_link_ops __read_mostly = {
47163+static struct rtnl_link_ops dummy_link_ops = {
47164 .kind = DRV_NAME,
47165 .setup = dummy_setup,
47166 .validate = dummy_validate,
47167diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
47168index 0443654..4f0aa18 100644
47169--- a/drivers/net/ethernet/8390/ax88796.c
47170+++ b/drivers/net/ethernet/8390/ax88796.c
47171@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
47172 if (ax->plat->reg_offsets)
47173 ei_local->reg_offset = ax->plat->reg_offsets;
47174 else {
47175+ resource_size_t _mem_size = mem_size;
47176+ do_div(_mem_size, 0x18);
47177 ei_local->reg_offset = ax->reg_offsets;
47178 for (ret = 0; ret < 0x18; ret++)
47179- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
47180+ ax->reg_offsets[ret] = _mem_size * ret;
47181 }
47182
47183 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
47184diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
47185index 760c72c..a99728c 100644
47186--- a/drivers/net/ethernet/altera/altera_tse_main.c
47187+++ b/drivers/net/ethernet/altera/altera_tse_main.c
47188@@ -1217,7 +1217,7 @@ static int tse_shutdown(struct net_device *dev)
47189 return 0;
47190 }
47191
47192-static struct net_device_ops altera_tse_netdev_ops = {
47193+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
47194 .ndo_open = tse_open,
47195 .ndo_stop = tse_shutdown,
47196 .ndo_start_xmit = tse_start_xmit,
47197@@ -1454,11 +1454,13 @@ static int altera_tse_probe(struct platform_device *pdev)
47198 ndev->netdev_ops = &altera_tse_netdev_ops;
47199 altera_tse_set_ethtool_ops(ndev);
47200
47201+ pax_open_kernel();
47202 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
47203
47204 if (priv->hash_filter)
47205 altera_tse_netdev_ops.ndo_set_rx_mode =
47206 tse_set_rx_mode_hashfilter;
47207+ pax_close_kernel();
47208
47209 /* Scatter/gather IO is not supported,
47210 * so it is turned off
47211diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47212index 29a0927..5a348e24 100644
47213--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47214+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47215@@ -1122,14 +1122,14 @@ do { \
47216 * operations, everything works on mask values.
47217 */
47218 #define XMDIO_READ(_pdata, _mmd, _reg) \
47219- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
47220+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
47221 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
47222
47223 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
47224 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
47225
47226 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
47227- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
47228+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
47229 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
47230
47231 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
47232diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47233index 8a50b01..39c1ad0 100644
47234--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47235+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47236@@ -187,7 +187,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
47237
47238 memcpy(pdata->ets, ets, sizeof(*pdata->ets));
47239
47240- pdata->hw_if.config_dcb_tc(pdata);
47241+ pdata->hw_if->config_dcb_tc(pdata);
47242
47243 return 0;
47244 }
47245@@ -226,7 +226,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
47246
47247 memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
47248
47249- pdata->hw_if.config_dcb_pfc(pdata);
47250+ pdata->hw_if->config_dcb_pfc(pdata);
47251
47252 return 0;
47253 }
47254diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47255index a50891f..b26fe24 100644
47256--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47257+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47258@@ -347,7 +347,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
47259
47260 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
47261 {
47262- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47263+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47264 struct xgbe_channel *channel;
47265 struct xgbe_ring *ring;
47266 struct xgbe_ring_data *rdata;
47267@@ -388,7 +388,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
47268
47269 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
47270 {
47271- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47272+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47273 struct xgbe_channel *channel;
47274 struct xgbe_ring *ring;
47275 struct xgbe_ring_desc *rdesc;
47276@@ -624,7 +624,7 @@ err_out:
47277 static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
47278 {
47279 struct xgbe_prv_data *pdata = channel->pdata;
47280- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47281+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47282 struct xgbe_ring *ring = channel->rx_ring;
47283 struct xgbe_ring_data *rdata;
47284 int i;
47285@@ -650,17 +650,12 @@ static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
47286 DBGPR("<--xgbe_realloc_rx_buffer\n");
47287 }
47288
47289-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
47290-{
47291- DBGPR("-->xgbe_init_function_ptrs_desc\n");
47292-
47293- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
47294- desc_if->free_ring_resources = xgbe_free_ring_resources;
47295- desc_if->map_tx_skb = xgbe_map_tx_skb;
47296- desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
47297- desc_if->unmap_rdata = xgbe_unmap_rdata;
47298- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
47299- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
47300-
47301- DBGPR("<--xgbe_init_function_ptrs_desc\n");
47302-}
47303+const struct xgbe_desc_if default_xgbe_desc_if = {
47304+ .alloc_ring_resources = xgbe_alloc_ring_resources,
47305+ .free_ring_resources = xgbe_free_ring_resources,
47306+ .map_tx_skb = xgbe_map_tx_skb,
47307+ .realloc_rx_buffer = xgbe_realloc_rx_buffer,
47308+ .unmap_rdata = xgbe_unmap_rdata,
47309+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
47310+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
47311+};
47312diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47313index 4c66cd1..1a20aab 100644
47314--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47315+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47316@@ -2703,7 +2703,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
47317
47318 static int xgbe_init(struct xgbe_prv_data *pdata)
47319 {
47320- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47321+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47322 int ret;
47323
47324 DBGPR("-->xgbe_init\n");
47325@@ -2767,108 +2767,103 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
47326 return 0;
47327 }
47328
47329-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
47330-{
47331- DBGPR("-->xgbe_init_function_ptrs\n");
47332-
47333- hw_if->tx_complete = xgbe_tx_complete;
47334-
47335- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
47336- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
47337- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
47338- hw_if->set_mac_address = xgbe_set_mac_address;
47339-
47340- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
47341- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
47342-
47343- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
47344- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
47345- hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
47346- hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
47347- hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
47348-
47349- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
47350- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
47351-
47352- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
47353- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
47354- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
47355-
47356- hw_if->enable_tx = xgbe_enable_tx;
47357- hw_if->disable_tx = xgbe_disable_tx;
47358- hw_if->enable_rx = xgbe_enable_rx;
47359- hw_if->disable_rx = xgbe_disable_rx;
47360-
47361- hw_if->powerup_tx = xgbe_powerup_tx;
47362- hw_if->powerdown_tx = xgbe_powerdown_tx;
47363- hw_if->powerup_rx = xgbe_powerup_rx;
47364- hw_if->powerdown_rx = xgbe_powerdown_rx;
47365-
47366- hw_if->dev_xmit = xgbe_dev_xmit;
47367- hw_if->dev_read = xgbe_dev_read;
47368- hw_if->enable_int = xgbe_enable_int;
47369- hw_if->disable_int = xgbe_disable_int;
47370- hw_if->init = xgbe_init;
47371- hw_if->exit = xgbe_exit;
47372+const struct xgbe_hw_if default_xgbe_hw_if = {
47373+ .tx_complete = xgbe_tx_complete,
47374+
47375+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
47376+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
47377+ .add_mac_addresses = xgbe_add_mac_addresses,
47378+ .set_mac_address = xgbe_set_mac_address,
47379+
47380+ .enable_rx_csum = xgbe_enable_rx_csum,
47381+ .disable_rx_csum = xgbe_disable_rx_csum,
47382+
47383+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
47384+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
47385+ .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering,
47386+ .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering,
47387+ .update_vlan_hash_table = xgbe_update_vlan_hash_table,
47388+
47389+ .read_mmd_regs = xgbe_read_mmd_regs,
47390+ .write_mmd_regs = xgbe_write_mmd_regs,
47391+
47392+ .set_gmii_speed = xgbe_set_gmii_speed,
47393+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
47394+ .set_xgmii_speed = xgbe_set_xgmii_speed,
47395+
47396+ .enable_tx = xgbe_enable_tx,
47397+ .disable_tx = xgbe_disable_tx,
47398+ .enable_rx = xgbe_enable_rx,
47399+ .disable_rx = xgbe_disable_rx,
47400+
47401+ .powerup_tx = xgbe_powerup_tx,
47402+ .powerdown_tx = xgbe_powerdown_tx,
47403+ .powerup_rx = xgbe_powerup_rx,
47404+ .powerdown_rx = xgbe_powerdown_rx,
47405+
47406+ .dev_xmit = xgbe_dev_xmit,
47407+ .dev_read = xgbe_dev_read,
47408+ .enable_int = xgbe_enable_int,
47409+ .disable_int = xgbe_disable_int,
47410+ .init = xgbe_init,
47411+ .exit = xgbe_exit,
47412
47413 /* Descriptor related Sequences have to be initialized here */
47414- hw_if->tx_desc_init = xgbe_tx_desc_init;
47415- hw_if->rx_desc_init = xgbe_rx_desc_init;
47416- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
47417- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
47418- hw_if->is_last_desc = xgbe_is_last_desc;
47419- hw_if->is_context_desc = xgbe_is_context_desc;
47420- hw_if->tx_start_xmit = xgbe_tx_start_xmit;
47421+ .tx_desc_init = xgbe_tx_desc_init,
47422+ .rx_desc_init = xgbe_rx_desc_init,
47423+ .tx_desc_reset = xgbe_tx_desc_reset,
47424+ .rx_desc_reset = xgbe_rx_desc_reset,
47425+ .is_last_desc = xgbe_is_last_desc,
47426+ .is_context_desc = xgbe_is_context_desc,
47427+ .tx_start_xmit = xgbe_tx_start_xmit,
47428
47429 /* For FLOW ctrl */
47430- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
47431- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
47432+ .config_tx_flow_control = xgbe_config_tx_flow_control,
47433+ .config_rx_flow_control = xgbe_config_rx_flow_control,
47434
47435 /* For RX coalescing */
47436- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
47437- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
47438- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
47439- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
47440+ .config_rx_coalesce = xgbe_config_rx_coalesce,
47441+ .config_tx_coalesce = xgbe_config_tx_coalesce,
47442+ .usec_to_riwt = xgbe_usec_to_riwt,
47443+ .riwt_to_usec = xgbe_riwt_to_usec,
47444
47445 /* For RX and TX threshold config */
47446- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
47447- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
47448+ .config_rx_threshold = xgbe_config_rx_threshold,
47449+ .config_tx_threshold = xgbe_config_tx_threshold,
47450
47451 /* For RX and TX Store and Forward Mode config */
47452- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
47453- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
47454+ .config_rsf_mode = xgbe_config_rsf_mode,
47455+ .config_tsf_mode = xgbe_config_tsf_mode,
47456
47457 /* For TX DMA Operating on Second Frame config */
47458- hw_if->config_osp_mode = xgbe_config_osp_mode;
47459+ .config_osp_mode = xgbe_config_osp_mode,
47460
47461 /* For RX and TX PBL config */
47462- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
47463- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
47464- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
47465- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
47466- hw_if->config_pblx8 = xgbe_config_pblx8;
47467+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
47468+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
47469+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
47470+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
47471+ .config_pblx8 = xgbe_config_pblx8,
47472
47473 /* For MMC statistics support */
47474- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
47475- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
47476- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
47477+ .tx_mmc_int = xgbe_tx_mmc_int,
47478+ .rx_mmc_int = xgbe_rx_mmc_int,
47479+ .read_mmc_stats = xgbe_read_mmc_stats,
47480
47481 /* For PTP config */
47482- hw_if->config_tstamp = xgbe_config_tstamp;
47483- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
47484- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
47485- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
47486- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
47487+ .config_tstamp = xgbe_config_tstamp,
47488+ .update_tstamp_addend = xgbe_update_tstamp_addend,
47489+ .set_tstamp_time = xgbe_set_tstamp_time,
47490+ .get_tstamp_time = xgbe_get_tstamp_time,
47491+ .get_tx_tstamp = xgbe_get_tx_tstamp,
47492
47493 /* For Data Center Bridging config */
47494- hw_if->config_dcb_tc = xgbe_config_dcb_tc;
47495- hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
47496+ .config_dcb_tc = xgbe_config_dcb_tc,
47497+ .config_dcb_pfc = xgbe_config_dcb_pfc,
47498
47499 /* For Receive Side Scaling */
47500- hw_if->enable_rss = xgbe_enable_rss;
47501- hw_if->disable_rss = xgbe_disable_rss;
47502- hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
47503- hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
47504-
47505- DBGPR("<--xgbe_init_function_ptrs\n");
47506-}
47507+ .enable_rss = xgbe_enable_rss,
47508+ .disable_rss = xgbe_disable_rss,
47509+ .set_rss_hash_key = xgbe_set_rss_hash_key,
47510+ .set_rss_lookup_table = xgbe_set_rss_lookup_table,
47511+};
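
The rewrite above collapses a runtime init function that poked dozens of function pointers into a single const designated-initializer table, which the following hunks then reach through a `const struct xgbe_hw_if *` instead of an embedded struct. A condensed sketch of the before/after shape (names illustrative, not the driver's):

    #include <stdio.h>

    struct hw_if {
            int (*tx_complete)(int desc);
            int (*rx_poll)(int budget);
    };

    static int tx_complete_impl(int desc)  { return desc; }
    static int rx_poll_impl(int budget)    { return budget; }

    /* after: one read-only table, built at compile time */
    static const struct hw_if default_hw_if = {
            .tx_complete = tx_complete_impl,
            .rx_poll     = rx_poll_impl,
    };

    struct prv_data {
            const struct hw_if *hw_if;  /* was: struct hw_if hw_if; */
    };

    int main(void)
    {
            struct prv_data pdata = { .hw_if = &default_hw_if };
            /* call sites change from pdata.hw_if.f() to pdata.hw_if->f() */
            return pdata.hw_if->rx_poll(0) + pdata.hw_if->tx_complete(0);
    }
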
47512diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47513index e5ffb2c..e56d30b 100644
47514--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47515+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47516@@ -239,7 +239,7 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
47517 * support, tell it now
47518 */
47519 if (ring->tx.xmit_more)
47520- pdata->hw_if.tx_start_xmit(channel, ring);
47521+ pdata->hw_if->tx_start_xmit(channel, ring);
47522
47523 return NETDEV_TX_BUSY;
47524 }
47525@@ -267,7 +267,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
47526
47527 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
47528 {
47529- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47530+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47531 struct xgbe_channel *channel;
47532 enum xgbe_int int_id;
47533 unsigned int i;
47534@@ -289,7 +289,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
47535
47536 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
47537 {
47538- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47539+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47540 struct xgbe_channel *channel;
47541 enum xgbe_int int_id;
47542 unsigned int i;
47543@@ -312,7 +312,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
47544 static irqreturn_t xgbe_isr(int irq, void *data)
47545 {
47546 struct xgbe_prv_data *pdata = data;
47547- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47548+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47549 struct xgbe_channel *channel;
47550 unsigned int dma_isr, dma_ch_isr;
47551 unsigned int mac_isr, mac_tssr;
47552@@ -611,7 +611,7 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
47553
47554 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
47555 {
47556- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47557+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47558
47559 DBGPR("-->xgbe_init_tx_coalesce\n");
47560
47561@@ -625,7 +625,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
47562
47563 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
47564 {
47565- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47566+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47567
47568 DBGPR("-->xgbe_init_rx_coalesce\n");
47569
47570@@ -639,7 +639,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
47571
47572 static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
47573 {
47574- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47575+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47576 struct xgbe_channel *channel;
47577 struct xgbe_ring *ring;
47578 struct xgbe_ring_data *rdata;
47579@@ -664,7 +664,7 @@ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
47580
47581 static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
47582 {
47583- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47584+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47585 struct xgbe_channel *channel;
47586 struct xgbe_ring *ring;
47587 struct xgbe_ring_data *rdata;
47588@@ -690,7 +690,7 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
47589 static void xgbe_adjust_link(struct net_device *netdev)
47590 {
47591 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47592- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47593+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47594 struct phy_device *phydev = pdata->phydev;
47595 int new_state = 0;
47596
47597@@ -798,7 +798,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
47598 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
47599 {
47600 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47601- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47602+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47603 unsigned long flags;
47604
47605 DBGPR("-->xgbe_powerdown\n");
47606@@ -836,7 +836,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
47607 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
47608 {
47609 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47610- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47611+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47612 unsigned long flags;
47613
47614 DBGPR("-->xgbe_powerup\n");
47615@@ -873,7 +873,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
47616
47617 static int xgbe_start(struct xgbe_prv_data *pdata)
47618 {
47619- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47620+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47621 struct net_device *netdev = pdata->netdev;
47622
47623 DBGPR("-->xgbe_start\n");
47624@@ -899,7 +899,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
47625
47626 static void xgbe_stop(struct xgbe_prv_data *pdata)
47627 {
47628- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47629+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47630 struct xgbe_channel *channel;
47631 struct net_device *netdev = pdata->netdev;
47632 struct netdev_queue *txq;
47633@@ -932,7 +932,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
47634 static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
47635 {
47636 struct xgbe_channel *channel;
47637- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47638+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47639 unsigned int i;
47640
47641 DBGPR("-->xgbe_restart_dev\n");
47642@@ -1135,7 +1135,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
47643 return -ERANGE;
47644 }
47645
47646- pdata->hw_if.config_tstamp(pdata, mac_tscr);
47647+ pdata->hw_if->config_tstamp(pdata, mac_tscr);
47648
47649 memcpy(&pdata->tstamp_config, &config, sizeof(config));
47650
47651@@ -1284,8 +1284,8 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
47652 static int xgbe_open(struct net_device *netdev)
47653 {
47654 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47655- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47656- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47657+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47658+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47659 struct xgbe_channel *channel = NULL;
47660 unsigned int i = 0;
47661 int ret;
47662@@ -1400,8 +1400,8 @@ err_phy_init:
47663 static int xgbe_close(struct net_device *netdev)
47664 {
47665 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47666- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47667- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47668+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47669+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47670 struct xgbe_channel *channel;
47671 unsigned int i;
47672
47673@@ -1442,8 +1442,8 @@ static int xgbe_close(struct net_device *netdev)
47674 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
47675 {
47676 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47677- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47678- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47679+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47680+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47681 struct xgbe_channel *channel;
47682 struct xgbe_ring *ring;
47683 struct xgbe_packet_data *packet;
47684@@ -1518,7 +1518,7 @@ tx_netdev_return:
47685 static void xgbe_set_rx_mode(struct net_device *netdev)
47686 {
47687 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47688- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47689+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47690 unsigned int pr_mode, am_mode;
47691
47692 DBGPR("-->xgbe_set_rx_mode\n");
47693@@ -1537,7 +1537,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
47694 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
47695 {
47696 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47697- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47698+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47699 struct sockaddr *saddr = addr;
47700
47701 DBGPR("-->xgbe_set_mac_address\n");
47702@@ -1604,7 +1604,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
47703
47704 DBGPR("-->%s\n", __func__);
47705
47706- pdata->hw_if.read_mmc_stats(pdata);
47707+ pdata->hw_if->read_mmc_stats(pdata);
47708
47709 s->rx_packets = pstats->rxframecount_gb;
47710 s->rx_bytes = pstats->rxoctetcount_gb;
47711@@ -1631,7 +1631,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
47712 u16 vid)
47713 {
47714 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47715- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47716+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47717
47718 DBGPR("-->%s\n", __func__);
47719
47720@@ -1647,7 +1647,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
47721 u16 vid)
47722 {
47723 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47724- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47725+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47726
47727 DBGPR("-->%s\n", __func__);
47728
47729@@ -1713,7 +1713,7 @@ static int xgbe_set_features(struct net_device *netdev,
47730 netdev_features_t features)
47731 {
47732 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47733- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47734+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47735 netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
47736 int ret = 0;
47737
47738@@ -1778,7 +1778,7 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
47739 static void xgbe_rx_refresh(struct xgbe_channel *channel)
47740 {
47741 struct xgbe_prv_data *pdata = channel->pdata;
47742- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47743+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47744 struct xgbe_ring *ring = channel->rx_ring;
47745 struct xgbe_ring_data *rdata;
47746
47747@@ -1819,8 +1819,8 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
47748 static int xgbe_tx_poll(struct xgbe_channel *channel)
47749 {
47750 struct xgbe_prv_data *pdata = channel->pdata;
47751- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47752- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47753+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47754+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47755 struct xgbe_ring *ring = channel->tx_ring;
47756 struct xgbe_ring_data *rdata;
47757 struct xgbe_ring_desc *rdesc;
47758@@ -1891,7 +1891,7 @@ unlock:
47759 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
47760 {
47761 struct xgbe_prv_data *pdata = channel->pdata;
47762- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47763+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47764 struct xgbe_ring *ring = channel->rx_ring;
47765 struct xgbe_ring_data *rdata;
47766 struct xgbe_packet_data *packet;
47767diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47768index ebf4893..28108c7 100644
47769--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47770+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47771@@ -203,7 +203,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
47772
47773 DBGPR("-->%s\n", __func__);
47774
47775- pdata->hw_if.read_mmc_stats(pdata);
47776+ pdata->hw_if->read_mmc_stats(pdata);
47777 for (i = 0; i < XGBE_STATS_COUNT; i++) {
47778 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
47779 *data++ = *(u64 *)stat;
47780@@ -378,7 +378,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
47781 struct ethtool_coalesce *ec)
47782 {
47783 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47784- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47785+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47786 unsigned int riwt;
47787
47788 DBGPR("-->xgbe_get_coalesce\n");
47789@@ -401,7 +401,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
47790 struct ethtool_coalesce *ec)
47791 {
47792 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47793- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47794+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47795 unsigned int rx_frames, rx_riwt, rx_usecs;
47796 unsigned int tx_frames, tx_usecs;
47797
47798diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47799index dbd3850..4e31b38 100644
47800--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47801+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47802@@ -155,12 +155,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
47803 DBGPR("<--xgbe_default_config\n");
47804 }
47805
47806-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
47807-{
47808- xgbe_init_function_ptrs_dev(&pdata->hw_if);
47809- xgbe_init_function_ptrs_desc(&pdata->desc_if);
47810-}
47811-
47812 static int xgbe_probe(struct platform_device *pdev)
47813 {
47814 struct xgbe_prv_data *pdata;
47815@@ -281,9 +275,8 @@ static int xgbe_probe(struct platform_device *pdev)
47816 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
47817
47818 /* Set all the function pointers */
47819- xgbe_init_all_fptrs(pdata);
47820- hw_if = &pdata->hw_if;
47821- desc_if = &pdata->desc_if;
47822+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
47823+ desc_if = pdata->desc_if = &default_xgbe_desc_if;
47824
47825 /* Issue software reset to device */
47826 hw_if->exit(pdata);
47827diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47828index 363b210..b241389 100644
47829--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47830+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47831@@ -126,7 +126,7 @@
47832 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
47833 {
47834 struct xgbe_prv_data *pdata = mii->priv;
47835- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47836+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47837 int mmd_data;
47838
47839 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
47840@@ -143,7 +143,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
47841 u16 mmd_val)
47842 {
47843 struct xgbe_prv_data *pdata = mii->priv;
47844- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47845+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47846 int mmd_data = mmd_val;
47847
47848 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
47849diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47850index a1bf9d1c..84adcab 100644
47851--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47852+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47853@@ -129,7 +129,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
47854 tstamp_cc);
47855 u64 nsec;
47856
47857- nsec = pdata->hw_if.get_tstamp_time(pdata);
47858+ nsec = pdata->hw_if->get_tstamp_time(pdata);
47859
47860 return nsec;
47861 }
47862@@ -158,7 +158,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
47863
47864 spin_lock_irqsave(&pdata->tstamp_lock, flags);
47865
47866- pdata->hw_if.update_tstamp_addend(pdata, addend);
47867+ pdata->hw_if->update_tstamp_addend(pdata, addend);
47868
47869 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
47870
47871diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
47872index f9ec762..988c969 100644
47873--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
47874+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
47875@@ -668,8 +668,8 @@ struct xgbe_prv_data {
47876 int dev_irq;
47877 unsigned int per_channel_irq;
47878
47879- struct xgbe_hw_if hw_if;
47880- struct xgbe_desc_if desc_if;
47881+ const struct xgbe_hw_if *hw_if;
47882+ const struct xgbe_desc_if *desc_if;
47883
47884 /* AXI DMA settings */
47885 unsigned int axdomain;
47886@@ -787,6 +787,9 @@ struct xgbe_prv_data {
47887 #endif
47888 };
47889
47890+extern const struct xgbe_hw_if default_xgbe_hw_if;
47891+extern const struct xgbe_desc_if default_xgbe_desc_if;
47892+
47893 /* Function prototypes*/
47894
47895 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
47896diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47897index adcacda..fa6e0ae 100644
47898--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47899+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47900@@ -1065,7 +1065,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
47901 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
47902 {
47903 /* RX_MODE controlling object */
47904- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
47905+ bnx2x_init_rx_mode_obj(bp);
47906
47907 /* multicast configuration controlling object */
47908 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
47909diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47910index 07cdf9b..b08ecc7 100644
47911--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47912+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47913@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
47914 return rc;
47915 }
47916
47917-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
47918- struct bnx2x_rx_mode_obj *o)
47919+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
47920 {
47921 if (CHIP_IS_E1x(bp)) {
47922- o->wait_comp = bnx2x_empty_rx_mode_wait;
47923- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
47924+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
47925+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
47926 } else {
47927- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
47928- o->config_rx_mode = bnx2x_set_rx_mode_e2;
47929+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
47930+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
47931 }
47932 }
47933
47934diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47935index 86baecb..ff3bb46 100644
47936--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47937+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47938@@ -1411,8 +1411,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
47939
47940 /********************* RX MODE ****************/
47941
47942-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
47943- struct bnx2x_rx_mode_obj *o);
47944+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
47945
47946 /**
47947 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
47948diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
47949index 31c9f82..e65e986 100644
47950--- a/drivers/net/ethernet/broadcom/tg3.h
47951+++ b/drivers/net/ethernet/broadcom/tg3.h
47952@@ -150,6 +150,7 @@
47953 #define CHIPREV_ID_5750_A0 0x4000
47954 #define CHIPREV_ID_5750_A1 0x4001
47955 #define CHIPREV_ID_5750_A3 0x4003
47956+#define CHIPREV_ID_5750_C1 0x4201
47957 #define CHIPREV_ID_5750_C2 0x4202
47958 #define CHIPREV_ID_5752_A0_HW 0x5000
47959 #define CHIPREV_ID_5752_A0 0x6000
47960diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
47961index 903466e..b285864 100644
47962--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
47963+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
47964@@ -1693,10 +1693,10 @@ bna_cb_ioceth_reset(void *arg)
47965 }
47966
47967 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
47968- bna_cb_ioceth_enable,
47969- bna_cb_ioceth_disable,
47970- bna_cb_ioceth_hbfail,
47971- bna_cb_ioceth_reset
47972+ .enable_cbfn = bna_cb_ioceth_enable,
47973+ .disable_cbfn = bna_cb_ioceth_disable,
47974+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
47975+ .reset_cbfn = bna_cb_ioceth_reset
47976 };
47977
47978 static void bna_attr_init(struct bna_ioceth *ioceth)
47979diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47980index 8cffcdf..aadf043 100644
47981--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47982+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47983@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
47984 */
47985 struct l2t_skb_cb {
47986 arp_failure_handler_func arp_failure_handler;
47987-};
47988+} __no_const;
47989
47990 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
47991
47992diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47993index ccf3436..b720d77 100644
47994--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47995+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47996@@ -2277,7 +2277,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
47997
47998 int i;
47999 struct adapter *ap = netdev2adap(dev);
48000- static const unsigned int *reg_ranges;
48001+ const unsigned int *reg_ranges;
48002 int arr_size = 0, buf_size = 0;
48003
48004 if (is_t4(ap->params.chip)) {
48005diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
48006index badff18..e15c4ec 100644
48007--- a/drivers/net/ethernet/dec/tulip/de4x5.c
48008+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
48009@@ -5373,7 +5373,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
48010 for (i=0; i<ETH_ALEN; i++) {
48011 tmp.addr[i] = dev->dev_addr[i];
48012 }
48013- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
48014+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
48015 break;
48016
48017 case DE4X5_SET_HWADDR: /* Set the hardware address */
48018@@ -5413,7 +5413,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
48019 spin_lock_irqsave(&lp->lock, flags);
48020 memcpy(&statbuf, &lp->pktStats, ioc->len);
48021 spin_unlock_irqrestore(&lp->lock, flags);
48022- if (copy_to_user(ioc->data, &statbuf, ioc->len))
48023+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
48024 return -EFAULT;
48025 break;
48026 }
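
The added `ioc->len > sizeof ...` tests above close a classic ioctl infoleak: the length is user-controlled while the source objects (a MAC address array and an on-stack stats struct) have fixed sizes, so an oversized request would copy adjacent kernel stack out to userspace. A userspace sketch of the check, with memcpy() standing in for copy_to_user() (names illustrative):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    struct ioctl_req {
            size_t len;     /* attacker-controlled */
            void *data;
    };

    static int copy_stats_out(struct ioctl_req *ioc)
    {
            char statbuf[16];

            memset(statbuf, 0, sizeof(statbuf));
            if (ioc->len > sizeof(statbuf))    /* the patch's new test */
                    return -EFAULT;
            memcpy(ioc->data, statbuf, ioc->len);
            return 0;
    }

    int main(void)
    {
            char out[64];
            struct ioctl_req ok  = { .len = 16, .data = out };
            struct ioctl_req bad = { .len = 64, .data = out };

            printf("ok=%d bad=%d\n", copy_stats_out(&ok), copy_stats_out(&bad));
            return 0;
    }
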
48027diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
48028index d48806b..41cd80f 100644
48029--- a/drivers/net/ethernet/emulex/benet/be_main.c
48030+++ b/drivers/net/ethernet/emulex/benet/be_main.c
48031@@ -537,7 +537,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
48032
48033 if (wrapped)
48034 newacc += 65536;
48035- ACCESS_ONCE(*acc) = newacc;
48036+ ACCESS_ONCE_RW(*acc) = newacc;
48037 }
48038
48039 static void populate_erx_stats(struct be_adapter *adapter,
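
ACCESS_ONCE_RW() exists because PaX const-qualifies the pointee inside ACCESS_ONCE(), turning accidental lock-free writes into build errors; deliberate writes, as here and in the i40e, ixgbe, mlx4 and sfc hunks below, are spelled with the _RW variant. Minimal stand-ins for the two macros (demo names, not the kernel's):

    #include <stdio.h>

    #define ACCESS_ONCE_DEMO(x)    (*(const volatile __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW_DEMO(x) (*(volatile __typeof__(x) *)&(x))

    static unsigned long acc;

    int main(void)
    {
            ACCESS_ONCE_RW_DEMO(acc) = 65536;        /* write: RW variant */
            unsigned long v = ACCESS_ONCE_DEMO(acc); /* read: const is fine */
            /* ACCESS_ONCE_DEMO(acc) = 0; would not compile: pointee const */
            printf("%lu\n", v);
            return 0;
    }
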
48040diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
48041index 6d0c5d5..55be363 100644
48042--- a/drivers/net/ethernet/faraday/ftgmac100.c
48043+++ b/drivers/net/ethernet/faraday/ftgmac100.c
48044@@ -30,6 +30,8 @@
48045 #include <linux/netdevice.h>
48046 #include <linux/phy.h>
48047 #include <linux/platform_device.h>
48048+#include <linux/interrupt.h>
48049+#include <linux/irqreturn.h>
48050 #include <net/ip.h>
48051
48052 #include "ftgmac100.h"
48053diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
48054index dce5f7b..2433466 100644
48055--- a/drivers/net/ethernet/faraday/ftmac100.c
48056+++ b/drivers/net/ethernet/faraday/ftmac100.c
48057@@ -31,6 +31,8 @@
48058 #include <linux/module.h>
48059 #include <linux/netdevice.h>
48060 #include <linux/platform_device.h>
48061+#include <linux/interrupt.h>
48062+#include <linux/irqreturn.h>
48063
48064 #include "ftmac100.h"
48065
48066diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48067index 6d1ec92..4d5d97d 100644
48068--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48069+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48070@@ -407,7 +407,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
48071 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
48072
48073 /* Update the base adjustement value. */
48074- ACCESS_ONCE(pf->ptp_base_adj) = incval;
48075+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
48076 smp_mb(); /* Force the above update. */
48077 }
48078
48079diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48080index 5fd4b52..87aa34b 100644
48081--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48082+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48083@@ -794,7 +794,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
48084 }
48085
48086 /* update the base incval used to calculate frequency adjustment */
48087- ACCESS_ONCE(adapter->base_incval) = incval;
48088+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
48089 smp_mb();
48090
48091 /* need lock to prevent incorrect read while modifying cyclecounter */
48092diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
48093index e3357bf..d4d5348 100644
48094--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
48095+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
48096@@ -466,8 +466,8 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
48097 wmb();
48098
48099 /* we want to dirty this cache line once */
48100- ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
48101- ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
48102+ ACCESS_ONCE_RW(ring->last_nr_txbb) = last_nr_txbb;
48103+ ACCESS_ONCE_RW(ring->cons) = ring_cons + txbbs_skipped;
48104
48105 netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
48106
48107diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
48108index 2bbd01f..e8baa64 100644
48109--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
48110+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
48111@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
48112 struct __vxge_hw_fifo *fifo;
48113 struct vxge_hw_fifo_config *config;
48114 u32 txdl_size, txdl_per_memblock;
48115- struct vxge_hw_mempool_cbs fifo_mp_callback;
48116+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
48117+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
48118+ };
48119+
48120 struct __vxge_hw_virtualpath *vpath;
48121
48122 if ((vp == NULL) || (attr == NULL)) {
48123@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
48124 goto exit;
48125 }
48126
48127- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
48128-
48129 fifo->mempool =
48130 __vxge_hw_mempool_create(vpath->hldev,
48131 fifo->config->memblock_size,
48132diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48133index 2bb48d5..d1a865d 100644
48134--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48135+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48136@@ -2324,7 +2324,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
48137 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
48138 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
48139 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
48140- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
48141+ pax_open_kernel();
48142+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
48143+ pax_close_kernel();
48144 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48145 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
48146 max_tx_rings = QLCNIC_MAX_TX_RINGS;
48147diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48148index be7d7a6..a8983f8 100644
48149--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48150+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48151@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
48152 case QLCNIC_NON_PRIV_FUNC:
48153 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
48154 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48155- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
48156+ pax_open_kernel();
48157+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
48158+ pax_close_kernel();
48159 break;
48160 case QLCNIC_PRIV_FUNC:
48161 ahw->op_mode = QLCNIC_PRIV_FUNC;
48162 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
48163- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48164+ pax_open_kernel();
48165+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48166+ pax_close_kernel();
48167 break;
48168 case QLCNIC_MGMT_FUNC:
48169 ahw->op_mode = QLCNIC_MGMT_FUNC;
48170 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48171- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48172+ pax_open_kernel();
48173+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48174+ pax_close_kernel();
48175 break;
48176 default:
48177 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
48178diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48179index c9f57fb..208bdc1 100644
48180--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48181+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48182@@ -1285,7 +1285,7 @@ flash_temp:
48183 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
48184 {
48185 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
48186- static const struct qlcnic_dump_operations *fw_dump_ops;
48187+ const struct qlcnic_dump_operations *fw_dump_ops;
48188 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
48189 u32 entry_offset, dump, no_entries, buf_offset = 0;
48190 int i, k, ops_cnt, ops_index, dump_size = 0;
48191diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
48192index 2e2cf80..ebc796d 100644
48193--- a/drivers/net/ethernet/realtek/r8169.c
48194+++ b/drivers/net/ethernet/realtek/r8169.c
48195@@ -788,22 +788,22 @@ struct rtl8169_private {
48196 struct mdio_ops {
48197 void (*write)(struct rtl8169_private *, int, int);
48198 int (*read)(struct rtl8169_private *, int);
48199- } mdio_ops;
48200+ } __no_const mdio_ops;
48201
48202 struct pll_power_ops {
48203 void (*down)(struct rtl8169_private *);
48204 void (*up)(struct rtl8169_private *);
48205- } pll_power_ops;
48206+ } __no_const pll_power_ops;
48207
48208 struct jumbo_ops {
48209 void (*enable)(struct rtl8169_private *);
48210 void (*disable)(struct rtl8169_private *);
48211- } jumbo_ops;
48212+ } __no_const jumbo_ops;
48213
48214 struct csi_ops {
48215 void (*write)(struct rtl8169_private *, int, int);
48216 u32 (*read)(struct rtl8169_private *, int);
48217- } csi_ops;
48218+ } __no_const csi_ops;
48219
48220 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
48221 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
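
__no_const is the inverse marker to __do_const: these r8169 ops tables are filled in per chip version at probe time, so the constify plugin must leave exactly these struct types writable. A sketch of why the mutability is load-bearing (illustrative names):

    #include <stdio.h>

    struct mdio_ops {
            void (*write)(int reg, int val);
            int  (*read)(int reg);
    };

    static void mdio_write_demo(int reg, int val) { printf("w %d=%d\n", reg, val); }
    static int  mdio_read_demo(int reg)           { return reg; }

    static struct mdio_ops mdio_ops;    /* writable on purpose */

    static void probe(int chip_version)
    {
            /* runtime selection is the point of keeping it mutable */
            if (chip_version >= 33) {
                    mdio_ops.write = mdio_write_demo;
                    mdio_ops.read  = mdio_read_demo;
            }
    }

    int main(void)
    {
            probe(33);
            mdio_ops.write(0, 1);
            return mdio_ops.read(0);
    }
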
48222diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
48223index 6b861e3..204ac86 100644
48224--- a/drivers/net/ethernet/sfc/ptp.c
48225+++ b/drivers/net/ethernet/sfc/ptp.c
48226@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
48227 ptp->start.dma_addr);
48228
48229 /* Clear flag that signals MC ready */
48230- ACCESS_ONCE(*start) = 0;
48231+ ACCESS_ONCE_RW(*start) = 0;
48232 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
48233 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
48234 EFX_BUG_ON_PARANOID(rc);
48235diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
48236index 10b6173..b605dfd5 100644
48237--- a/drivers/net/ethernet/sfc/selftest.c
48238+++ b/drivers/net/ethernet/sfc/selftest.c
48239@@ -46,7 +46,7 @@ struct efx_loopback_payload {
48240 struct iphdr ip;
48241 struct udphdr udp;
48242 __be16 iteration;
48243- const char msg[64];
48244+ char msg[64];
48245 } __packed;
48246
48247 /* Loopback test source MAC address */
48248diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48249index 08c483b..2c4a553 100644
48250--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48251+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48252@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
48253
48254 writel(value, ioaddr + MMC_CNTRL);
48255
48256- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
48257- MMC_CNTRL, value);
48258+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
48259+// MMC_CNTRL, value);
48260 }
48261
48262 /* To mask all all interrupts.*/
48263diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
48264index 384ca4f..dd7d4f9 100644
48265--- a/drivers/net/hyperv/hyperv_net.h
48266+++ b/drivers/net/hyperv/hyperv_net.h
48267@@ -171,7 +171,7 @@ struct rndis_device {
48268 enum rndis_device_state state;
48269 bool link_state;
48270 bool link_change;
48271- atomic_t new_req_id;
48272+ atomic_unchecked_t new_req_id;
48273
48274 spinlock_t request_lock;
48275 struct list_head req_list;
48276diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
48277index ec0c40a..c9e42eb 100644
48278--- a/drivers/net/hyperv/rndis_filter.c
48279+++ b/drivers/net/hyperv/rndis_filter.c
48280@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
48281 * template
48282 */
48283 set = &rndis_msg->msg.set_req;
48284- set->req_id = atomic_inc_return(&dev->new_req_id);
48285+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
48286
48287 /* Add to the request list */
48288 spin_lock_irqsave(&dev->request_lock, flags);
48289@@ -912,7 +912,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
48290
48291 /* Setup the rndis set */
48292 halt = &request->request_msg.msg.halt_req;
48293- halt->req_id = atomic_inc_return(&dev->new_req_id);
48294+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
48295
48296 /* Ignore return since this msg is optional. */
48297 rndis_filter_send_request(dev, request);
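
atomic_unchecked_t opts a counter out of PaX's REFCOUNT overflow protection: new_req_id is a message tag, not a reference count, so silent wraparound is harmless and the checked variant's overflow trap would be wrong here. A userspace sketch with the same wrap-by-design semantics:

    #include <stdatomic.h>
    #include <stdio.h>

    /* a request tag that may wrap modulo 2^32 by design */
    static atomic_uint new_req_id;

    static unsigned int next_req_id(void)
    {
            /* mirrors atomic_inc_return_unchecked(): returns new value */
            return atomic_fetch_add(&new_req_id, 1) + 1;
    }

    int main(void)
    {
            printf("%u %u\n", next_req_id(), next_req_id());
            return 0;
    }
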
48298diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
48299index 34f846b..4a0d5b1 100644
48300--- a/drivers/net/ifb.c
48301+++ b/drivers/net/ifb.c
48302@@ -253,7 +253,7 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
48303 return 0;
48304 }
48305
48306-static struct rtnl_link_ops ifb_link_ops __read_mostly = {
48307+static struct rtnl_link_ops ifb_link_ops = {
48308 .kind = "ifb",
48309 .priv_size = sizeof(struct ifb_private),
48310 .setup = ifb_setup,
48311diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
48312index 612e073..a9f5eda 100644
48313--- a/drivers/net/macvlan.c
48314+++ b/drivers/net/macvlan.c
48315@@ -335,7 +335,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
48316 free_nskb:
48317 kfree_skb(nskb);
48318 err:
48319- atomic_long_inc(&skb->dev->rx_dropped);
48320+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
48321 }
48322
48323 static void macvlan_flush_sources(struct macvlan_port *port,
48324@@ -1459,13 +1459,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
48325 int macvlan_link_register(struct rtnl_link_ops *ops)
48326 {
48327 /* common fields */
48328- ops->priv_size = sizeof(struct macvlan_dev);
48329- ops->validate = macvlan_validate;
48330- ops->maxtype = IFLA_MACVLAN_MAX;
48331- ops->policy = macvlan_policy;
48332- ops->changelink = macvlan_changelink;
48333- ops->get_size = macvlan_get_size;
48334- ops->fill_info = macvlan_fill_info;
48335+ pax_open_kernel();
48336+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
48337+ *(void **)&ops->validate = macvlan_validate;
48338+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
48339+ *(const void **)&ops->policy = macvlan_policy;
48340+ *(void **)&ops->changelink = macvlan_changelink;
48341+ *(void **)&ops->get_size = macvlan_get_size;
48342+ *(void **)&ops->fill_info = macvlan_fill_info;
48343+ pax_close_kernel();
48344
48345 return rtnl_link_register(ops);
48346 };
48347@@ -1545,7 +1547,7 @@ static int macvlan_device_event(struct notifier_block *unused,
48348 return NOTIFY_DONE;
48349 }
48350
48351-static struct notifier_block macvlan_notifier_block __read_mostly = {
48352+static struct notifier_block macvlan_notifier_block = {
48353 .notifier_call = macvlan_device_event,
48354 };
48355
48356diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
48357index 4d050ee..012f6dd 100644
48358--- a/drivers/net/macvtap.c
48359+++ b/drivers/net/macvtap.c
48360@@ -436,7 +436,7 @@ static void macvtap_setup(struct net_device *dev)
48361 dev->tx_queue_len = TUN_READQ_SIZE;
48362 }
48363
48364-static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
48365+static struct rtnl_link_ops macvtap_link_ops = {
48366 .kind = "macvtap",
48367 .setup = macvtap_setup,
48368 .newlink = macvtap_newlink,
48369@@ -1033,7 +1033,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
48370
48371 ret = 0;
48372 u = q->flags;
48373- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48374+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48375 put_user(u, &ifr->ifr_flags))
48376 ret = -EFAULT;
48377 macvtap_put_vlan(vlan);
48378@@ -1217,7 +1217,7 @@ static int macvtap_device_event(struct notifier_block *unused,
48379 return NOTIFY_DONE;
48380 }
48381
48382-static struct notifier_block macvtap_notifier_block __read_mostly = {
48383+static struct notifier_block macvtap_notifier_block = {
48384 .notifier_call = macvtap_device_event,
48385 };
48386
48387diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
48388index 34924df..a747360 100644
48389--- a/drivers/net/nlmon.c
48390+++ b/drivers/net/nlmon.c
48391@@ -154,7 +154,7 @@ static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
48392 return 0;
48393 }
48394
48395-static struct rtnl_link_ops nlmon_link_ops __read_mostly = {
48396+static struct rtnl_link_ops nlmon_link_ops = {
48397 .kind = "nlmon",
48398 .priv_size = sizeof(struct nlmon),
48399 .setup = nlmon_setup,
48400diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
48401index 3fc91e8..6c36337 100644
48402--- a/drivers/net/phy/phy_device.c
48403+++ b/drivers/net/phy/phy_device.c
48404@@ -218,7 +218,7 @@ EXPORT_SYMBOL(phy_device_create);
48405 * zero on success.
48406 *
48407 */
48408-static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
48409+static int get_phy_c45_ids(struct mii_bus *bus, int addr, int *phy_id,
48410 struct phy_c45_device_ids *c45_ids) {
48411 int phy_reg;
48412 int i, reg_addr;
48413@@ -288,7 +288,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
48414 * its return value is in turn returned.
48415 *
48416 */
48417-static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
48418+static int get_phy_id(struct mii_bus *bus, int addr, int *phy_id,
48419 bool is_c45, struct phy_c45_device_ids *c45_ids)
48420 {
48421 int phy_reg;
48422@@ -326,7 +326,7 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
48423 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
48424 {
48425 struct phy_c45_device_ids c45_ids = {0};
48426- u32 phy_id = 0;
48427+ int phy_id = 0;
48428 int r;
48429
48430 r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
48431diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
48432index af034db..1611c0b2 100644
48433--- a/drivers/net/ppp/ppp_generic.c
48434+++ b/drivers/net/ppp/ppp_generic.c
48435@@ -1022,7 +1022,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48436 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
48437 struct ppp_stats stats;
48438 struct ppp_comp_stats cstats;
48439- char *vers;
48440
48441 switch (cmd) {
48442 case SIOCGPPPSTATS:
48443@@ -1044,8 +1043,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48444 break;
48445
48446 case SIOCGPPPVER:
48447- vers = PPP_VERSION;
48448- if (copy_to_user(addr, vers, strlen(vers) + 1))
48449+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
48450 break;
48451 err = 0;
48452 break;
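
The SIOCGPPPVER change above replaces a strlen() on the PPP_VERSION string with sizeof on the literal itself, which counts the terminating NUL and is a compile-time constant the size-overflow checker can verify. The same idiom in isolation; the version value is assumed here for illustration:

#include <stdio.h>
#include <string.h>

#define PPP_VERSION "2.4.2"     /* assumed value, for illustration */

int main(void)
{
	char dst[sizeof(PPP_VERSION)];

	/* sizeof on a string literal includes the NUL, so the copy
	 * length is a constant and no runtime strlen() is needed. */
	memcpy(dst, PPP_VERSION, sizeof(PPP_VERSION));
	printf("%s (%zu bytes)\n", dst, sizeof(PPP_VERSION));
	return 0;
}
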
48453diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
48454index 079f7ad..b2a2bfa7 100644
48455--- a/drivers/net/slip/slhc.c
48456+++ b/drivers/net/slip/slhc.c
48457@@ -487,7 +487,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
48458 register struct tcphdr *thp;
48459 register struct iphdr *ip;
48460 register struct cstate *cs;
48461- int len, hdrlen;
48462+ long len, hdrlen;
48463 unsigned char *cp = icp;
48464
48465 /* We've got a compressed packet; read the change byte */
48466diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
48467index 2c087ef..4859007 100644
48468--- a/drivers/net/team/team.c
48469+++ b/drivers/net/team/team.c
48470@@ -2103,7 +2103,7 @@ static unsigned int team_get_num_rx_queues(void)
48471 return TEAM_DEFAULT_NUM_RX_QUEUES;
48472 }
48473
48474-static struct rtnl_link_ops team_link_ops __read_mostly = {
48475+static struct rtnl_link_ops team_link_ops = {
48476 .kind = DRV_NAME,
48477 .priv_size = sizeof(struct team),
48478 .setup = team_setup,
48479@@ -2893,7 +2893,7 @@ static int team_device_event(struct notifier_block *unused,
48480 return NOTIFY_DONE;
48481 }
48482
48483-static struct notifier_block team_notifier_block __read_mostly = {
48484+static struct notifier_block team_notifier_block = {
48485 .notifier_call = team_device_event,
48486 };
48487
48488diff --git a/drivers/net/tun.c b/drivers/net/tun.c
48489index 10f9e40..3515e7e 100644
48490--- a/drivers/net/tun.c
48491+++ b/drivers/net/tun.c
48492@@ -1425,7 +1425,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
48493 return -EINVAL;
48494 }
48495
48496-static struct rtnl_link_ops tun_link_ops __read_mostly = {
48497+static struct rtnl_link_ops tun_link_ops = {
48498 .kind = DRV_NAME,
48499 .priv_size = sizeof(struct tun_struct),
48500 .setup = tun_setup,
48501@@ -1827,7 +1827,7 @@ unlock:
48502 }
48503
48504 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48505- unsigned long arg, int ifreq_len)
48506+ unsigned long arg, size_t ifreq_len)
48507 {
48508 struct tun_file *tfile = file->private_data;
48509 struct tun_struct *tun;
48510@@ -1841,6 +1841,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48511 int le;
48512 int ret;
48513
48514+ if (ifreq_len > sizeof ifr)
48515+ return -EFAULT;
48516+
48517 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
48518 if (copy_from_user(&ifr, argp, ifreq_len))
48519 return -EFAULT;
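
The __tun_chr_ioctl() change widens ifreq_len to size_t and rejects any length larger than the on-stack struct ifreq before copy_from_user() runs. A compact userspace model of that guard; the types and names are simplified stand-ins:

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct ifreq_like { char name[16]; int flags; };

/* The length arrives as size_t (it can never be negative) and is
 * checked against the destination before any bytes move. */
static int checked_copy(struct ifreq_like *dst, const void *src, size_t len)
{
	if (len > sizeof(*dst))
		return -EFAULT;            /* oversized request, reject early */
	memcpy(dst, src, len);
	return 0;
}

int main(void)
{
	struct ifreq_like ifr, in = { "tun0", 1 };

	printf("fits:      %d\n", checked_copy(&ifr, &in, sizeof(in)));
	printf("oversized: %d\n", checked_copy(&ifr, &in, sizeof(in) + 64));
	return 0;
}
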
48520diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
48521index 9c5aa92..8cd0405 100644
48522--- a/drivers/net/usb/hso.c
48523+++ b/drivers/net/usb/hso.c
48524@@ -71,7 +71,7 @@
48525 #include <asm/byteorder.h>
48526 #include <linux/serial_core.h>
48527 #include <linux/serial.h>
48528-
48529+#include <asm/local.h>
48530
48531 #define MOD_AUTHOR "Option Wireless"
48532 #define MOD_DESCRIPTION "USB High Speed Option driver"
48533@@ -1178,7 +1178,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
48534 struct urb *urb;
48535
48536 urb = serial->rx_urb[0];
48537- if (serial->port.count > 0) {
48538+ if (atomic_read(&serial->port.count) > 0) {
48539 count = put_rxbuf_data(urb, serial);
48540 if (count == -1)
48541 return;
48542@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
48543 DUMP1(urb->transfer_buffer, urb->actual_length);
48544
48545 /* Anyone listening? */
48546- if (serial->port.count == 0)
48547+ if (atomic_read(&serial->port.count) == 0)
48548 return;
48549
48550 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
48551@@ -1278,8 +1278,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48552 tty_port_tty_set(&serial->port, tty);
48553
48554 /* check for port already opened, if not set the termios */
48555- serial->port.count++;
48556- if (serial->port.count == 1) {
48557+ if (atomic_inc_return(&serial->port.count) == 1) {
48558 serial->rx_state = RX_IDLE;
48559 /* Force default termio settings */
48560 _hso_serial_set_termios(tty, NULL);
48561@@ -1289,7 +1288,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48562 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
48563 if (result) {
48564 hso_stop_serial_device(serial->parent);
48565- serial->port.count--;
48566+ atomic_dec(&serial->port.count);
48567 kref_put(&serial->parent->ref, hso_serial_ref_free);
48568 }
48569 } else {
48570@@ -1326,10 +1325,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
48571
48572 /* reset the rts and dtr */
48573 /* do the actual close */
48574- serial->port.count--;
48575+ atomic_dec(&serial->port.count);
48576
48577- if (serial->port.count <= 0) {
48578- serial->port.count = 0;
48579+ if (atomic_read(&serial->port.count) <= 0) {
48580+ atomic_set(&serial->port.count, 0);
48581 tty_port_tty_set(&serial->port, NULL);
48582 if (!usb_gone)
48583 hso_stop_serial_device(serial->parent);
48584@@ -1404,7 +1403,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
48585
48586 /* the actual setup */
48587 spin_lock_irqsave(&serial->serial_lock, flags);
48588- if (serial->port.count)
48589+ if (atomic_read(&serial->port.count))
48590 _hso_serial_set_termios(tty, old);
48591 else
48592 tty->termios = *old;
48593@@ -1873,7 +1872,7 @@ static void intr_callback(struct urb *urb)
48594 D1("Pending read interrupt on port %d\n", i);
48595 spin_lock(&serial->serial_lock);
48596 if (serial->rx_state == RX_IDLE &&
48597- serial->port.count > 0) {
48598+ atomic_read(&serial->port.count) > 0) {
48599 /* Setup and send a ctrl req read on
48600 * port i */
48601 if (!serial->rx_urb_filled[0]) {
48602@@ -3046,7 +3045,7 @@ static int hso_resume(struct usb_interface *iface)
48603 /* Start all serial ports */
48604 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
48605 if (serial_table[i] && (serial_table[i]->interface == iface)) {
48606- if (dev2ser(serial_table[i])->port.count) {
48607+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
48608 result =
48609 hso_start_serial_device(serial_table[i], GFP_NOIO);
48610 hso_kick_transmit(dev2ser(serial_table[i]));
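
Throughout the hso hunks, the plain integer port.count becomes an atomic counter so that open/close/resume paths racing on different CPUs cannot lose an update; note especially how the separate ++ and == 1 test collapses into one atomic_inc_return(). A userspace sketch of the same open/close discipline using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_count;       /* was a bare int in the driver */

static void port_open(void)
{
	/* Increment and test in one step: exactly one opener observes
	 * the transition 0 -> 1 and performs the one-time setup. */
	if (atomic_fetch_add(&port_count, 1) + 1 == 1)
		puts("first open: start device");
}

static void port_close(void)
{
	if (atomic_fetch_sub(&port_count, 1) - 1 <= 0) {
		atomic_store(&port_count, 0);
		puts("last close: stop device");
	}
}

int main(void)
{
	port_open();
	port_open();
	port_close();
	port_close();
	return 0;
}
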
48611diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
48612index bf405f1..fd847ee 100644
48613--- a/drivers/net/usb/r8152.c
48614+++ b/drivers/net/usb/r8152.c
48615@@ -571,7 +571,7 @@ struct r8152 {
48616 void (*unload)(struct r8152 *);
48617 int (*eee_get)(struct r8152 *, struct ethtool_eee *);
48618 int (*eee_set)(struct r8152 *, struct ethtool_eee *);
48619- } rtl_ops;
48620+ } __no_const rtl_ops;
48621
48622 int intr_interval;
48623 u32 saved_wolopts;
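
The __no_const annotation on rtl_ops (and on the ath9k/ath10k ops structs further down) is the escape hatch for the constify plugin: structures made up of function pointers are normally forced into read-only memory, and the attribute exempts ones whose members genuinely must be assigned at runtime. A sketch of how such an annotation degrades to a no-op without the plugin; the attribute spelling here is an assumption:

#include <stdio.h>

#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))   /* assumed spelling */
#else
#define __no_const                             /* plain compilers: no-op */
#endif

struct rtl_ops {
	void (*init)(void);
} __no_const;          /* members are filled in per-chip at probe time */

static void r8152_init(void) { puts("init"); }

int main(void)
{
	struct rtl_ops ops;
	ops.init = r8152_init;   /* legal precisely because of __no_const */
	ops.init();
	return 0;
}
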
48624diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
48625index a2515887..6d13233 100644
48626--- a/drivers/net/usb/sierra_net.c
48627+++ b/drivers/net/usb/sierra_net.c
48628@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
48629 /* atomic counter partially included in MAC address to make sure 2 devices
48630 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
48631 */
48632-static atomic_t iface_counter = ATOMIC_INIT(0);
48633+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
48634
48635 /*
48636 * SYNC Timer Delay definition used to set the expiry time
48637@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
48638 dev->net->netdev_ops = &sierra_net_device_ops;
48639
48640 /* change MAC addr to include, ifacenum, and to be unique */
48641- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
48642+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
48643 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
48644
48645 /* we will have to manufacture ethernet headers, prepare template */
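
atomic_unchecked_t is the other half of the PaX REFCOUNT feature: ordinary atomic_t operations trap on overflow to stop reference-count wraps, so counters whose wraparound is harmless, like this one feeding a single MAC byte, are switched to the unchecked variant. A userspace approximation where the wrap is plainly intentional:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for atomic_unchecked_t: overflow here is by design,
 * since only the low byte ends up in the MAC address. */
static atomic_uint iface_counter;

int main(void)
{
	uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00 };

	for (int i = 0; i < 3; i++) {
		mac[4] = (uint8_t)(atomic_fetch_add(&iface_counter, 1) + 1);
		printf("iface %d -> ...:%02x:%02x\n", i, mac[4], mac[5]);
	}
	return 0;
}
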
48646diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
48647index 0ad6c0c..4013638 100644
48648--- a/drivers/net/virtio_net.c
48649+++ b/drivers/net/virtio_net.c
48650@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
48651 #define RECEIVE_AVG_WEIGHT 64
48652
48653 /* Minimum alignment for mergeable packet buffers. */
48654-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
48655+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
48656
48657 #define VIRTNET_DRIVER_VERSION "1.0.0"
48658
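
The 256 -> 256UL change matters because the kernel's max() macro enforces matching argument types via a pointer-comparison trick; L1_CACHE_BYTES expands to an unsigned long on the affected configurations (an assumption here), so the constant must match. A reduced version of the macro showing why:

#include <stdio.h>

/* Kernel-style max(): the (void)(&_x == &_y) line triggers a
 * compile-time diagnostic when the argument types differ, so 256
 * (an int) against an unsigned long is flagged while 256UL passes. */
#define max(x, y) ({            \
	typeof(x) _x = (x);         \
	typeof(y) _y = (y);         \
	(void)(&_x == &_y);         \
	_x > _y ? _x : _y; })

int main(void)
{
	unsigned long l1 = 64UL;    /* stand-in for L1_CACHE_BYTES */
	printf("align = %lu\n", max(l1, 256UL));
	return 0;
}
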
48659diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
48660index a8c755d..a988b71 100644
48661--- a/drivers/net/vxlan.c
48662+++ b/drivers/net/vxlan.c
48663@@ -2702,7 +2702,7 @@ nla_put_failure:
48664 return -EMSGSIZE;
48665 }
48666
48667-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
48668+static struct rtnl_link_ops vxlan_link_ops = {
48669 .kind = "vxlan",
48670 .maxtype = IFLA_VXLAN_MAX,
48671 .policy = vxlan_policy,
48672@@ -2749,7 +2749,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
48673 return NOTIFY_DONE;
48674 }
48675
48676-static struct notifier_block vxlan_notifier_block __read_mostly = {
48677+static struct notifier_block vxlan_notifier_block = {
48678 .notifier_call = vxlan_lowerdev_event,
48679 };
48680
48681diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
48682index 5920c99..ff2e4a5 100644
48683--- a/drivers/net/wan/lmc/lmc_media.c
48684+++ b/drivers/net/wan/lmc/lmc_media.c
48685@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
48686 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
48687
48688 lmc_media_t lmc_ds3_media = {
48689- lmc_ds3_init, /* special media init stuff */
48690- lmc_ds3_default, /* reset to default state */
48691- lmc_ds3_set_status, /* reset status to state provided */
48692- lmc_dummy_set_1, /* set clock source */
48693- lmc_dummy_set2_1, /* set line speed */
48694- lmc_ds3_set_100ft, /* set cable length */
48695- lmc_ds3_set_scram, /* set scrambler */
48696- lmc_ds3_get_link_status, /* get link status */
48697- lmc_dummy_set_1, /* set link status */
48698- lmc_ds3_set_crc_length, /* set CRC length */
48699- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48700- lmc_ds3_watchdog
48701+ .init = lmc_ds3_init, /* special media init stuff */
48702+ .defaults = lmc_ds3_default, /* reset to default state */
48703+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
48704+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
48705+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48706+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
48707+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
48708+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
48709+ .set_link_status = lmc_dummy_set_1, /* set link status */
48710+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
48711+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48712+ .watchdog = lmc_ds3_watchdog
48713 };
48714
48715 lmc_media_t lmc_hssi_media = {
48716- lmc_hssi_init, /* special media init stuff */
48717- lmc_hssi_default, /* reset to default state */
48718- lmc_hssi_set_status, /* reset status to state provided */
48719- lmc_hssi_set_clock, /* set clock source */
48720- lmc_dummy_set2_1, /* set line speed */
48721- lmc_dummy_set_1, /* set cable length */
48722- lmc_dummy_set_1, /* set scrambler */
48723- lmc_hssi_get_link_status, /* get link status */
48724- lmc_hssi_set_link_status, /* set link status */
48725- lmc_hssi_set_crc_length, /* set CRC length */
48726- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48727- lmc_hssi_watchdog
48728+ .init = lmc_hssi_init, /* special media init stuff */
48729+ .defaults = lmc_hssi_default, /* reset to default state */
48730+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
48731+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
48732+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48733+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48734+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48735+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
48736+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
48737+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
48738+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48739+ .watchdog = lmc_hssi_watchdog
48740 };
48741
48742-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
48743- lmc_ssi_default, /* reset to default state */
48744- lmc_ssi_set_status, /* reset status to state provided */
48745- lmc_ssi_set_clock, /* set clock source */
48746- lmc_ssi_set_speed, /* set line speed */
48747- lmc_dummy_set_1, /* set cable length */
48748- lmc_dummy_set_1, /* set scrambler */
48749- lmc_ssi_get_link_status, /* get link status */
48750- lmc_ssi_set_link_status, /* set link status */
48751- lmc_ssi_set_crc_length, /* set CRC length */
48752- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48753- lmc_ssi_watchdog
48754+lmc_media_t lmc_ssi_media = {
48755+ .init = lmc_ssi_init, /* special media init stuff */
48756+ .defaults = lmc_ssi_default, /* reset to default state */
48757+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
48758+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
48759+ .set_speed = lmc_ssi_set_speed, /* set line speed */
48760+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48761+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48762+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
48763+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
48764+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
48765+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48766+ .watchdog = lmc_ssi_watchdog
48767 };
48768
48769 lmc_media_t lmc_t1_media = {
48770- lmc_t1_init, /* special media init stuff */
48771- lmc_t1_default, /* reset to default state */
48772- lmc_t1_set_status, /* reset status to state provided */
48773- lmc_t1_set_clock, /* set clock source */
48774- lmc_dummy_set2_1, /* set line speed */
48775- lmc_dummy_set_1, /* set cable length */
48776- lmc_dummy_set_1, /* set scrambler */
48777- lmc_t1_get_link_status, /* get link status */
48778- lmc_dummy_set_1, /* set link status */
48779- lmc_t1_set_crc_length, /* set CRC length */
48780- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
48781- lmc_t1_watchdog
48782+ .init = lmc_t1_init, /* special media init stuff */
48783+ .defaults = lmc_t1_default, /* reset to default state */
48784+ .set_status = lmc_t1_set_status, /* reset status to state provided */
48785+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
48786+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48787+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48788+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48789+ .get_link_status = lmc_t1_get_link_status, /* get link status */
48790+ .set_link_status = lmc_dummy_set_1, /* set link status */
48791+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
48792+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
48793+ .watchdog = lmc_t1_watchdog
48794 };
48795
48796 static void
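
The lmc_media conversions (and the z85230 ones below) replace positional initializers with designated ones: each handler is bound to a named member, so the tables survive fields being reordered or added to the struct definition. In miniature:

#include <stdio.h>

struct media_ops {
	void (*init)(void);
	void (*watchdog)(void);
};

static void ds3_init(void)     { puts("init"); }
static void ds3_watchdog(void) { puts("watchdog"); }

/* Designated initializers name their target member, so this table
 * stays correct even if media_ops gains or reorders fields. */
static struct media_ops ds3_media = {
	.init     = ds3_init,
	.watchdog = ds3_watchdog,
};

int main(void)
{
	ds3_media.init();
	ds3_media.watchdog();
	return 0;
}
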
48797diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
48798index feacc3b..5bac0de 100644
48799--- a/drivers/net/wan/z85230.c
48800+++ b/drivers/net/wan/z85230.c
48801@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
48802
48803 struct z8530_irqhandler z8530_sync =
48804 {
48805- z8530_rx,
48806- z8530_tx,
48807- z8530_status
48808+ .rx = z8530_rx,
48809+ .tx = z8530_tx,
48810+ .status = z8530_status
48811 };
48812
48813 EXPORT_SYMBOL(z8530_sync);
48814@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
48815 }
48816
48817 static struct z8530_irqhandler z8530_dma_sync = {
48818- z8530_dma_rx,
48819- z8530_dma_tx,
48820- z8530_dma_status
48821+ .rx = z8530_dma_rx,
48822+ .tx = z8530_dma_tx,
48823+ .status = z8530_dma_status
48824 };
48825
48826 static struct z8530_irqhandler z8530_txdma_sync = {
48827- z8530_rx,
48828- z8530_dma_tx,
48829- z8530_dma_status
48830+ .rx = z8530_rx,
48831+ .tx = z8530_dma_tx,
48832+ .status = z8530_dma_status
48833 };
48834
48835 /**
48836@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
48837
48838 struct z8530_irqhandler z8530_nop=
48839 {
48840- z8530_rx_clear,
48841- z8530_tx_clear,
48842- z8530_status_clear
48843+ .rx = z8530_rx_clear,
48844+ .tx = z8530_tx_clear,
48845+ .status = z8530_status_clear
48846 };
48847
48848
48849diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
48850index 0b60295..b8bfa5b 100644
48851--- a/drivers/net/wimax/i2400m/rx.c
48852+++ b/drivers/net/wimax/i2400m/rx.c
48853@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
48854 if (i2400m->rx_roq == NULL)
48855 goto error_roq_alloc;
48856
48857- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
48858+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
48859 GFP_KERNEL);
48860 if (rd == NULL) {
48861 result = -ENOMEM;
48862diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
48863index e71a2ce..2268d61 100644
48864--- a/drivers/net/wireless/airo.c
48865+++ b/drivers/net/wireless/airo.c
48866@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
48867 struct airo_info *ai = dev->ml_priv;
48868 int ridcode;
48869 int enabled;
48870- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
48871+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
48872 unsigned char *iobuf;
48873
48874 /* Only super-user can write RIDs */
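
Dropping static from the writer function pointer in writerids() removes a variable that every concurrent ioctl would have shared; nfcwilink and b43 below get the same treatment. The hazard, isolated in userspace C:

#include <stdio.h>

/* A function-scope static is one shared slot across all calls (and
 * all threads); an automatic variable gives each call its own copy. */
static int *shared_slot(void)
{
	static int value;
	value++;
	return &value;
}

int main(void)
{
	int *a = shared_slot();
	int *b = shared_slot();
	printf("same object: %p == %p, value %d\n",
	       (void *)a, (void *)b, *a);   /* prints 2: calls interfere */
	return 0;
}
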
48875diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
48876index da92bfa..5a9001a 100644
48877--- a/drivers/net/wireless/at76c50x-usb.c
48878+++ b/drivers/net/wireless/at76c50x-usb.c
48879@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
48880 }
48881
48882 /* Convert timeout from the DFU status to jiffies */
48883-static inline unsigned long at76_get_timeout(struct dfu_status *s)
48884+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
48885 {
48886 return msecs_to_jiffies((s->poll_timeout[2] << 16)
48887 | (s->poll_timeout[1] << 8)
48888diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
48889index f1946a6..cd367fb 100644
48890--- a/drivers/net/wireless/ath/ath10k/htc.c
48891+++ b/drivers/net/wireless/ath/ath10k/htc.c
48892@@ -851,7 +851,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
48893 /* registered target arrival callback from the HIF layer */
48894 int ath10k_htc_init(struct ath10k *ar)
48895 {
48896- struct ath10k_hif_cb htc_callbacks;
48897+ static struct ath10k_hif_cb htc_callbacks = {
48898+ .rx_completion = ath10k_htc_rx_completion_handler,
48899+ .tx_completion = ath10k_htc_tx_completion_handler,
48900+ };
48901 struct ath10k_htc_ep *ep = NULL;
48902 struct ath10k_htc *htc = &ar->htc;
48903
48904@@ -860,8 +863,6 @@ int ath10k_htc_init(struct ath10k *ar)
48905 ath10k_htc_reset_endpoint_states(htc);
48906
48907 /* setup HIF layer callbacks */
48908- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
48909- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
48910 htc->ar = ar;
48911
48912 /* Get HIF default pipe for HTC message exchange */
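
Here the HIF callback table moves from a stack variable filled member-by-member to a function-scope static with designated initializers, giving it a stable address and contents fixed at build time, which is what lets the constify machinery treat it as read-only. A sketch of the shape, with illustrative names:

#include <stdio.h>

struct hif_cb { void (*rx)(void); void (*tx)(void); };

static void rx_done(void) { puts("rx complete"); }
static void tx_done(void) { puts("tx complete"); }

static const struct hif_cb *registered;

static void hif_register(const struct hif_cb *cb) { registered = cb; }

static int hif_init(void)
{
	/* static + designated init: the table outlives this call, unlike
	 * the old on-stack copy, and never needs runtime stores. */
	static const struct hif_cb callbacks = {
		.rx = rx_done,
		.tx = tx_done,
	};
	hif_register(&callbacks);
	return 0;
}

int main(void)
{
	hif_init();
	registered->rx();
	registered->tx();
	return 0;
}
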
48913diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
48914index 527179c..a890150 100644
48915--- a/drivers/net/wireless/ath/ath10k/htc.h
48916+++ b/drivers/net/wireless/ath/ath10k/htc.h
48917@@ -270,13 +270,13 @@ enum ath10k_htc_ep_id {
48918
48919 struct ath10k_htc_ops {
48920 void (*target_send_suspend_complete)(struct ath10k *ar);
48921-};
48922+} __no_const;
48923
48924 struct ath10k_htc_ep_ops {
48925 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
48926 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
48927 void (*ep_tx_credits)(struct ath10k *);
48928-};
48929+} __no_const;
48930
48931 /* service connection information */
48932 struct ath10k_htc_svc_conn_req {
48933diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48934index f816909..e56cd8b 100644
48935--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48936+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48937@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48938 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
48939 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
48940
48941- ACCESS_ONCE(ads->ds_link) = i->link;
48942- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
48943+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
48944+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
48945
48946 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
48947 ctl6 = SM(i->keytype, AR_EncrType);
48948@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48949
48950 if ((i->is_first || i->is_last) &&
48951 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
48952- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
48953+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
48954 | set11nTries(i->rates, 1)
48955 | set11nTries(i->rates, 2)
48956 | set11nTries(i->rates, 3)
48957 | (i->dur_update ? AR_DurUpdateEna : 0)
48958 | SM(0, AR_BurstDur);
48959
48960- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
48961+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
48962 | set11nRate(i->rates, 1)
48963 | set11nRate(i->rates, 2)
48964 | set11nRate(i->rates, 3);
48965 } else {
48966- ACCESS_ONCE(ads->ds_ctl2) = 0;
48967- ACCESS_ONCE(ads->ds_ctl3) = 0;
48968+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
48969+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
48970 }
48971
48972 if (!i->is_first) {
48973- ACCESS_ONCE(ads->ds_ctl0) = 0;
48974- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
48975- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
48976+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
48977+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
48978+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
48979 return;
48980 }
48981
48982@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48983 break;
48984 }
48985
48986- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
48987+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
48988 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
48989 | SM(i->txpower[0], AR_XmitPower0)
48990 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
48991@@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48992 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
48993 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
48994
48995- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
48996- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
48997+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
48998+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
48999
49000 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
49001 return;
49002
49003- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
49004+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
49005 | set11nPktDurRTSCTS(i->rates, 1);
49006
49007- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
49008+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
49009 | set11nPktDurRTSCTS(i->rates, 3);
49010
49011- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
49012+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
49013 | set11nRateFlags(i->rates, 1)
49014 | set11nRateFlags(i->rates, 2)
49015 | set11nRateFlags(i->rates, 3)
49016 | SM(i->rtscts_rate, AR_RTSCTSRate);
49017
49018- ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
49019- ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
49020- ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
49021+ ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
49022+ ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
49023+ ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
49024 }
49025
49026 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
49027diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
49028index da84b70..83e4978 100644
49029--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
49030+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
49031@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49032 (i->qcu << AR_TxQcuNum_S) | desc_len;
49033
49034 checksum += val;
49035- ACCESS_ONCE(ads->info) = val;
49036+ ACCESS_ONCE_RW(ads->info) = val;
49037
49038 checksum += i->link;
49039- ACCESS_ONCE(ads->link) = i->link;
49040+ ACCESS_ONCE_RW(ads->link) = i->link;
49041
49042 checksum += i->buf_addr[0];
49043- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
49044+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
49045 checksum += i->buf_addr[1];
49046- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
49047+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
49048 checksum += i->buf_addr[2];
49049- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
49050+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
49051 checksum += i->buf_addr[3];
49052- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
49053+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
49054
49055 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
49056- ACCESS_ONCE(ads->ctl3) = val;
49057+ ACCESS_ONCE_RW(ads->ctl3) = val;
49058 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
49059- ACCESS_ONCE(ads->ctl5) = val;
49060+ ACCESS_ONCE_RW(ads->ctl5) = val;
49061 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
49062- ACCESS_ONCE(ads->ctl7) = val;
49063+ ACCESS_ONCE_RW(ads->ctl7) = val;
49064 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
49065- ACCESS_ONCE(ads->ctl9) = val;
49066+ ACCESS_ONCE_RW(ads->ctl9) = val;
49067
49068 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
49069- ACCESS_ONCE(ads->ctl10) = checksum;
49070+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
49071
49072 if (i->is_first || i->is_last) {
49073- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
49074+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
49075 | set11nTries(i->rates, 1)
49076 | set11nTries(i->rates, 2)
49077 | set11nTries(i->rates, 3)
49078 | (i->dur_update ? AR_DurUpdateEna : 0)
49079 | SM(0, AR_BurstDur);
49080
49081- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
49082+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
49083 | set11nRate(i->rates, 1)
49084 | set11nRate(i->rates, 2)
49085 | set11nRate(i->rates, 3);
49086 } else {
49087- ACCESS_ONCE(ads->ctl13) = 0;
49088- ACCESS_ONCE(ads->ctl14) = 0;
49089+ ACCESS_ONCE_RW(ads->ctl13) = 0;
49090+ ACCESS_ONCE_RW(ads->ctl14) = 0;
49091 }
49092
49093 ads->ctl20 = 0;
49094@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49095
49096 ctl17 = SM(i->keytype, AR_EncrType);
49097 if (!i->is_first) {
49098- ACCESS_ONCE(ads->ctl11) = 0;
49099- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
49100- ACCESS_ONCE(ads->ctl15) = 0;
49101- ACCESS_ONCE(ads->ctl16) = 0;
49102- ACCESS_ONCE(ads->ctl17) = ctl17;
49103- ACCESS_ONCE(ads->ctl18) = 0;
49104- ACCESS_ONCE(ads->ctl19) = 0;
49105+ ACCESS_ONCE_RW(ads->ctl11) = 0;
49106+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
49107+ ACCESS_ONCE_RW(ads->ctl15) = 0;
49108+ ACCESS_ONCE_RW(ads->ctl16) = 0;
49109+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
49110+ ACCESS_ONCE_RW(ads->ctl18) = 0;
49111+ ACCESS_ONCE_RW(ads->ctl19) = 0;
49112 return;
49113 }
49114
49115- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
49116+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
49117 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
49118 | SM(i->txpower[0], AR_XmitPower0)
49119 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
49120@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49121 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
49122 ctl12 |= SM(val, AR_PAPRDChainMask);
49123
49124- ACCESS_ONCE(ads->ctl12) = ctl12;
49125- ACCESS_ONCE(ads->ctl17) = ctl17;
49126+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
49127+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
49128
49129- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
49130+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
49131 | set11nPktDurRTSCTS(i->rates, 1);
49132
49133- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
49134+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
49135 | set11nPktDurRTSCTS(i->rates, 3);
49136
49137- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
49138+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
49139 | set11nRateFlags(i->rates, 1)
49140 | set11nRateFlags(i->rates, 2)
49141 | set11nRateFlags(i->rates, 3)
49142 | SM(i->rtscts_rate, AR_RTSCTSRate);
49143
49144- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
49145+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
49146
49147- ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
49148- ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
49149- ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
49150+ ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
49151+ ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
49152+ ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
49153 }
49154
49155 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
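
The bulk ACCESS_ONCE -> ACCESS_ONCE_RW rewrites in both ath9k MAC files exist because grsecurity redefines ACCESS_ONCE with a const-qualified cast, making plain reads checkable; the _RW variant keeps the store form legal for these intentional DMA-descriptor writes. Plausible shapes of the two macros, assumed here, using GCC's typeof:

#include <stdint.h>
#include <stdio.h>

/* Assumed macro shapes: the read form goes through a const volatile
 * pointer, the write form omits const so stores still compile. */
#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

struct desc { uint32_t ctl0; };

int main(void)
{
	struct desc d = { 0 };

	ACCESS_ONCE_RW(d.ctl0) = 0x1234;    /* volatile store, const-free */
	printf("ctl0 = 0x%x\n", (unsigned)ACCESS_ONCE(d.ctl0));
	return 0;
}
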
49156diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
49157index 1cbd335..27dfb40 100644
49158--- a/drivers/net/wireless/ath/ath9k/hw.h
49159+++ b/drivers/net/wireless/ath/ath9k/hw.h
49160@@ -640,7 +640,7 @@ struct ath_hw_private_ops {
49161
49162 /* ANI */
49163 void (*ani_cache_ini_regs)(struct ath_hw *ah);
49164-};
49165+} __no_const;
49166
49167 /**
49168 * struct ath_spec_scan - parameters for Atheros spectral scan
49169@@ -716,7 +716,7 @@ struct ath_hw_ops {
49170 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
49171 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
49172 #endif
49173-};
49174+} __no_const;
49175
49176 struct ath_nf_limits {
49177 s16 max;
49178diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
49179index 62b0bf4..4ae094c 100644
49180--- a/drivers/net/wireless/ath/ath9k/main.c
49181+++ b/drivers/net/wireless/ath/ath9k/main.c
49182@@ -2546,16 +2546,18 @@ void ath9k_fill_chanctx_ops(void)
49183 if (!ath9k_is_chanctx_enabled())
49184 return;
49185
49186- ath9k_ops.hw_scan = ath9k_hw_scan;
49187- ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
49188- ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
49189- ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
49190- ath9k_ops.add_chanctx = ath9k_add_chanctx;
49191- ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
49192- ath9k_ops.change_chanctx = ath9k_change_chanctx;
49193- ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
49194- ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
49195- ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
49196+ pax_open_kernel();
49197+ *(void **)&ath9k_ops.hw_scan = ath9k_hw_scan;
49198+ *(void **)&ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
49199+ *(void **)&ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
49200+ *(void **)&ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
49201+ *(void **)&ath9k_ops.add_chanctx = ath9k_add_chanctx;
49202+ *(void **)&ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
49203+ *(void **)&ath9k_ops.change_chanctx = ath9k_change_chanctx;
49204+ *(void **)&ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
49205+ *(void **)&ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
49206+ *(void **)&ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
49207+ pax_close_kernel();
49208 }
49209
49210 #endif
49211diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
49212index 058a9f2..d5cb1ba 100644
49213--- a/drivers/net/wireless/b43/phy_lp.c
49214+++ b/drivers/net/wireless/b43/phy_lp.c
49215@@ -2502,7 +2502,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
49216 {
49217 struct ssb_bus *bus = dev->dev->sdev->bus;
49218
49219- static const struct b206x_channel *chandata = NULL;
49220+ const struct b206x_channel *chandata = NULL;
49221 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
49222 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
49223 u16 old_comm15, scale;
49224diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
49225index dc1d20c..f7a4f06 100644
49226--- a/drivers/net/wireless/iwlegacy/3945-mac.c
49227+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
49228@@ -3633,7 +3633,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
49229 */
49230 if (il3945_mod_params.disable_hw_scan) {
49231 D_INFO("Disabling hw_scan\n");
49232- il3945_mac_ops.hw_scan = NULL;
49233+ pax_open_kernel();
49234+ *(void **)&il3945_mac_ops.hw_scan = NULL;
49235+ pax_close_kernel();
49236 }
49237
49238 D_INFO("*** LOAD DRIVER ***\n");
49239diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49240index 0ffb6ff..c0b7f0e 100644
49241--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49242+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49243@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
49244 {
49245 struct iwl_priv *priv = file->private_data;
49246 char buf[64];
49247- int buf_size;
49248+ size_t buf_size;
49249 u32 offset, len;
49250
49251 memset(buf, 0, sizeof(buf));
49252@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
49253 struct iwl_priv *priv = file->private_data;
49254
49255 char buf[8];
49256- int buf_size;
49257+ size_t buf_size;
49258 u32 reset_flag;
49259
49260 memset(buf, 0, sizeof(buf));
49261@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
49262 {
49263 struct iwl_priv *priv = file->private_data;
49264 char buf[8];
49265- int buf_size;
49266+ size_t buf_size;
49267 int ht40;
49268
49269 memset(buf, 0, sizeof(buf));
49270@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
49271 {
49272 struct iwl_priv *priv = file->private_data;
49273 char buf[8];
49274- int buf_size;
49275+ size_t buf_size;
49276 int value;
49277
49278 memset(buf, 0, sizeof(buf));
49279@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
49280 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
49281 DEBUGFS_READ_FILE_OPS(current_sleep_command);
49282
49283-static const char *fmt_value = " %-30s %10u\n";
49284-static const char *fmt_hex = " %-30s 0x%02X\n";
49285-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
49286-static const char *fmt_header =
49287+static const char fmt_value[] = " %-30s %10u\n";
49288+static const char fmt_hex[] = " %-30s 0x%02X\n";
49289+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
49290+static const char fmt_header[] =
49291 "%-32s current cumulative delta max\n";
49292
49293 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
49294@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
49295 {
49296 struct iwl_priv *priv = file->private_data;
49297 char buf[8];
49298- int buf_size;
49299+ size_t buf_size;
49300 int clear;
49301
49302 memset(buf, 0, sizeof(buf));
49303@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
49304 {
49305 struct iwl_priv *priv = file->private_data;
49306 char buf[8];
49307- int buf_size;
49308+ size_t buf_size;
49309 int trace;
49310
49311 memset(buf, 0, sizeof(buf));
49312@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
49313 {
49314 struct iwl_priv *priv = file->private_data;
49315 char buf[8];
49316- int buf_size;
49317+ size_t buf_size;
49318 int missed;
49319
49320 memset(buf, 0, sizeof(buf));
49321@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
49322
49323 struct iwl_priv *priv = file->private_data;
49324 char buf[8];
49325- int buf_size;
49326+ size_t buf_size;
49327 int plcp;
49328
49329 memset(buf, 0, sizeof(buf));
49330@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
49331
49332 struct iwl_priv *priv = file->private_data;
49333 char buf[8];
49334- int buf_size;
49335+ size_t buf_size;
49336 int flush;
49337
49338 memset(buf, 0, sizeof(buf));
49339@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
49340
49341 struct iwl_priv *priv = file->private_data;
49342 char buf[8];
49343- int buf_size;
49344+ size_t buf_size;
49345 int rts;
49346
49347 if (!priv->cfg->ht_params)
49348@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
49349 {
49350 struct iwl_priv *priv = file->private_data;
49351 char buf[8];
49352- int buf_size;
49353+ size_t buf_size;
49354
49355 memset(buf, 0, sizeof(buf));
49356 buf_size = min(count, sizeof(buf) - 1);
49357@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
49358 struct iwl_priv *priv = file->private_data;
49359 u32 event_log_flag;
49360 char buf[8];
49361- int buf_size;
49362+ size_t buf_size;
49363
49364 /* check that the interface is up */
49365 if (!iwl_is_ready(priv))
49366@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
49367 struct iwl_priv *priv = file->private_data;
49368 char buf[8];
49369 u32 calib_disabled;
49370- int buf_size;
49371+ size_t buf_size;
49372
49373 memset(buf, 0, sizeof(buf));
49374 buf_size = min(count, sizeof(buf) - 1);
49375diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
49376index 523fe0c..0d9473b 100644
49377--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
49378+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
49379@@ -1781,7 +1781,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
49380 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
49381
49382 char buf[8];
49383- int buf_size;
49384+ size_t buf_size;
49385 u32 reset_flag;
49386
49387 memset(buf, 0, sizeof(buf));
49388@@ -1802,7 +1802,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
49389 {
49390 struct iwl_trans *trans = file->private_data;
49391 char buf[8];
49392- int buf_size;
49393+ size_t buf_size;
49394 int csr;
49395
49396 memset(buf, 0, sizeof(buf));
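
Every debugfs write handler in these iwlwifi files gets the same one-line change: buf_size goes from int to size_t, matching the count parameter and the copy_from_user() signature so the length stays unsigned end to end instead of passing through a signed type. The conversion hazard the change removes, reduced to userspace:

#include <stdio.h>
#include <string.h>

static void copy_n(char *dst, const char *src, size_t n)
{
	memcpy(dst, src, n);     /* size_t length, like copy_from_user() */
}

int main(void)
{
	char buf[8] = "";
	int bad = -1;            /* a signed length gone negative... */

	/* ...silently becomes SIZE_MAX at an unsigned call boundary.
	 * Carrying the length as size_t from the start removes the
	 * signed stage where that can happen. */
	printf("(size_t)-1 = %zu\n", (size_t)bad);

	copy_n(buf, "ok", 3);    /* well-formed call for contrast */
	puts(buf);
	return 0;
}
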
49397diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
49398index ef58a88..fafa731 100644
49399--- a/drivers/net/wireless/mac80211_hwsim.c
49400+++ b/drivers/net/wireless/mac80211_hwsim.c
49401@@ -3066,20 +3066,20 @@ static int __init init_mac80211_hwsim(void)
49402 if (channels < 1)
49403 return -EINVAL;
49404
49405- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
49406- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49407- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49408- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49409- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49410- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49411- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49412- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49413- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49414- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49415- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
49416- mac80211_hwsim_assign_vif_chanctx;
49417- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
49418- mac80211_hwsim_unassign_vif_chanctx;
49419+ pax_open_kernel();
49420+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
49421+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49422+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49423+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49424+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49425+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49426+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49427+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49428+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49429+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49430+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
49431+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
49432+ pax_close_kernel();
49433
49434 spin_lock_init(&hwsim_radio_lock);
49435 INIT_LIST_HEAD(&hwsim_radios);
49436diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
49437index 1a4facd..a2ecbbd 100644
49438--- a/drivers/net/wireless/rndis_wlan.c
49439+++ b/drivers/net/wireless/rndis_wlan.c
49440@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
49441
49442 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
49443
49444- if (rts_threshold < 0 || rts_threshold > 2347)
49445+ if (rts_threshold > 2347)
49446 rts_threshold = 2347;
49447
49448 tmp = cpu_to_le32(rts_threshold);
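
rts_threshold is a u32, so the dropped rts_threshold < 0 test could never be true; compilers flag exactly this with -Wtype-limits. Only the upper bound does any work:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rts = 4000;

	/* "rts < 0" on an unsigned type is always false, so the clamp
	 * needs only the upper bound. */
	if (rts > 2347)
		rts = 2347;
	printf("rts_threshold = %u\n", rts);
	return 0;
}
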
49449diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
49450index 9bb398b..b0cc047 100644
49451--- a/drivers/net/wireless/rt2x00/rt2x00.h
49452+++ b/drivers/net/wireless/rt2x00/rt2x00.h
49453@@ -375,7 +375,7 @@ struct rt2x00_intf {
49454 * for hardware which doesn't support hardware
49455 * sequence counting.
49456 */
49457- atomic_t seqno;
49458+ atomic_unchecked_t seqno;
49459 };
49460
49461 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
49462diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
49463index 66ff364..3ce34f7 100644
49464--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
49465+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
49466@@ -224,9 +224,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
49467 * sequence counter given by mac80211.
49468 */
49469 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
49470- seqno = atomic_add_return(0x10, &intf->seqno);
49471+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
49472 else
49473- seqno = atomic_read(&intf->seqno);
49474+ seqno = atomic_read_unchecked(&intf->seqno);
49475
49476 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
49477 hdr->seq_ctrl |= cpu_to_le16(seqno);
49478diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
49479index b661f896..ddf7d2b 100644
49480--- a/drivers/net/wireless/ti/wl1251/sdio.c
49481+++ b/drivers/net/wireless/ti/wl1251/sdio.c
49482@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
49483
49484 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
49485
49486- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49487- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49488+ pax_open_kernel();
49489+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49490+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49491+ pax_close_kernel();
49492
49493 wl1251_info("using dedicated interrupt line");
49494 } else {
49495- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49496- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49497+ pax_open_kernel();
49498+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49499+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49500+ pax_close_kernel();
49501
49502 wl1251_info("using SDIO interrupt");
49503 }
49504diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
49505index d6d0d6d..60c23a0 100644
49506--- a/drivers/net/wireless/ti/wl12xx/main.c
49507+++ b/drivers/net/wireless/ti/wl12xx/main.c
49508@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49509 sizeof(wl->conf.mem));
49510
49511 /* read data preparation is only needed by wl127x */
49512- wl->ops->prepare_read = wl127x_prepare_read;
49513+ pax_open_kernel();
49514+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49515+ pax_close_kernel();
49516
49517 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49518 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49519@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49520 sizeof(wl->conf.mem));
49521
49522 /* read data preparation is only needed by wl127x */
49523- wl->ops->prepare_read = wl127x_prepare_read;
49524+ pax_open_kernel();
49525+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49526+ pax_close_kernel();
49527
49528 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49529 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49530diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
49531index 8e56261..9140678 100644
49532--- a/drivers/net/wireless/ti/wl18xx/main.c
49533+++ b/drivers/net/wireless/ti/wl18xx/main.c
49534@@ -1916,8 +1916,10 @@ static int wl18xx_setup(struct wl1271 *wl)
49535 }
49536
49537 if (!checksum_param) {
49538- wl18xx_ops.set_rx_csum = NULL;
49539- wl18xx_ops.init_vif = NULL;
49540+ pax_open_kernel();
49541+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
49542+ *(void **)&wl18xx_ops.init_vif = NULL;
49543+ pax_close_kernel();
49544 }
49545
49546 /* Enable 11a Band only if we have 5G antennas */
49547diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
49548index a912dc0..a8225ba 100644
49549--- a/drivers/net/wireless/zd1211rw/zd_usb.c
49550+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
49551@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
49552 {
49553 struct zd_usb *usb = urb->context;
49554 struct zd_usb_interrupt *intr = &usb->intr;
49555- int len;
49556+ unsigned int len;
49557 u16 int_num;
49558
49559 ZD_ASSERT(in_interrupt());
49560diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
49561index ce2e2cf..f81e500 100644
49562--- a/drivers/nfc/nfcwilink.c
49563+++ b/drivers/nfc/nfcwilink.c
49564@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
49565
49566 static int nfcwilink_probe(struct platform_device *pdev)
49567 {
49568- static struct nfcwilink *drv;
49569+ struct nfcwilink *drv;
49570 int rc;
49571 __u32 protocols;
49572
49573diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
49574index f2596c8..50d53af 100644
49575--- a/drivers/nfc/st21nfca/st21nfca.c
49576+++ b/drivers/nfc/st21nfca/st21nfca.c
49577@@ -559,7 +559,7 @@ static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
49578 goto exit;
49579 }
49580
49581- gate = uid_skb->data;
49582+ memcpy(gate, uid_skb->data, uid_skb->len);
49583 *len = uid_skb->len;
49584 exit:
49585 kfree_skb(uid_skb);
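
The st21nfca change fixes a genuine bug rather than a hardening miss: assigning uid_skb->data to the gate parameter only rebinds the callee's local pointer, so the caller's buffer was never filled; memcpy() actually delivers the UID bytes. The difference, isolated:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Broken shape: writing to the parameter rebinds a local pointer
 * and is lost on return; the caller's buffer stays untouched. */
static void get_uid_broken(uint8_t *out, const uint8_t *data, size_t len)
{
	out = (uint8_t *)data;
	(void)out;
	(void)len;
}

static void get_uid_fixed(uint8_t *out, const uint8_t *data, size_t len)
{
	memcpy(out, data, len);      /* bytes land in the caller's buffer */
}

int main(void)
{
	const uint8_t uid[4] = { 0xde, 0xad, 0xbe, 0xef };
	uint8_t a[4] = { 0 }, b[4] = { 0 };

	get_uid_broken(a, uid, sizeof(uid));
	get_uid_fixed(b, uid, sizeof(uid));
	printf("broken: %02x.. fixed: %02x%02x%02x%02x\n",
	       a[0], b[0], b[1], b[2], b[3]);
	return 0;
}
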
49586diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
49587index 5100742..6ad4e6d 100644
49588--- a/drivers/of/fdt.c
49589+++ b/drivers/of/fdt.c
49590@@ -1118,7 +1118,9 @@ static int __init of_fdt_raw_init(void)
49591 pr_warn("fdt: not creating '/sys/firmware/fdt': CRC check failed\n");
49592 return 0;
49593 }
49594- of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
49595+ pax_open_kernel();
49596+ *(size_t *)&of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
49597+ pax_close_kernel();
49598 return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
49599 }
49600 late_initcall(of_fdt_raw_init);
49601diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
49602index d93b2b6..ae50401 100644
49603--- a/drivers/oprofile/buffer_sync.c
49604+++ b/drivers/oprofile/buffer_sync.c
49605@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
49606 if (cookie == NO_COOKIE)
49607 offset = pc;
49608 if (cookie == INVALID_COOKIE) {
49609- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49610+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49611 offset = pc;
49612 }
49613 if (cookie != last_cookie) {
49614@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
49615 /* add userspace sample */
49616
49617 if (!mm) {
49618- atomic_inc(&oprofile_stats.sample_lost_no_mm);
49619+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
49620 return 0;
49621 }
49622
49623 cookie = lookup_dcookie(mm, s->eip, &offset);
49624
49625 if (cookie == INVALID_COOKIE) {
49626- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49627+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49628 return 0;
49629 }
49630
49631@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
49632 /* ignore backtraces if failed to add a sample */
49633 if (state == sb_bt_start) {
49634 state = sb_bt_ignore;
49635- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
49636+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
49637 }
49638 }
49639 release_mm(mm);
49640diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
49641index c0cc4e7..44d4e54 100644
49642--- a/drivers/oprofile/event_buffer.c
49643+++ b/drivers/oprofile/event_buffer.c
49644@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
49645 }
49646
49647 if (buffer_pos == buffer_size) {
49648- atomic_inc(&oprofile_stats.event_lost_overflow);
49649+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
49650 return;
49651 }
49652
49653diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
49654index ed2c3ec..deda85a 100644
49655--- a/drivers/oprofile/oprof.c
49656+++ b/drivers/oprofile/oprof.c
49657@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
49658 if (oprofile_ops.switch_events())
49659 return;
49660
49661- atomic_inc(&oprofile_stats.multiplex_counter);
49662+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
49663 start_switch_worker();
49664 }
49665
49666diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
49667index ee2cfce..7f8f699 100644
49668--- a/drivers/oprofile/oprofile_files.c
49669+++ b/drivers/oprofile/oprofile_files.c
49670@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
49671
49672 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
49673
49674-static ssize_t timeout_read(struct file *file, char __user *buf,
49675+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
49676 size_t count, loff_t *offset)
49677 {
49678 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
49679diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
49680index 59659ce..6c860a0 100644
49681--- a/drivers/oprofile/oprofile_stats.c
49682+++ b/drivers/oprofile/oprofile_stats.c
49683@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
49684 cpu_buf->sample_invalid_eip = 0;
49685 }
49686
49687- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
49688- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
49689- atomic_set(&oprofile_stats.event_lost_overflow, 0);
49690- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
49691- atomic_set(&oprofile_stats.multiplex_counter, 0);
49692+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
49693+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
49694+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
49695+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
49696+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
49697 }
49698
49699
49700diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
49701index 1fc622b..8c48fc3 100644
49702--- a/drivers/oprofile/oprofile_stats.h
49703+++ b/drivers/oprofile/oprofile_stats.h
49704@@ -13,11 +13,11 @@
49705 #include <linux/atomic.h>
49706
49707 struct oprofile_stat_struct {
49708- atomic_t sample_lost_no_mm;
49709- atomic_t sample_lost_no_mapping;
49710- atomic_t bt_lost_no_mapping;
49711- atomic_t event_lost_overflow;
49712- atomic_t multiplex_counter;
49713+ atomic_unchecked_t sample_lost_no_mm;
49714+ atomic_unchecked_t sample_lost_no_mapping;
49715+ atomic_unchecked_t bt_lost_no_mapping;
49716+ atomic_unchecked_t event_lost_overflow;
49717+ atomic_unchecked_t multiplex_counter;
49718 };
49719
49720 extern struct oprofile_stat_struct oprofile_stats;
49721diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
49722index 3f49345..c750d0b 100644
49723--- a/drivers/oprofile/oprofilefs.c
49724+++ b/drivers/oprofile/oprofilefs.c
49725@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
49726
49727 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
49728 {
49729- atomic_t *val = file->private_data;
49730- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
49731+ atomic_unchecked_t *val = file->private_data;
49732+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
49733 }
49734
49735
49736@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
49737
49738
49739 int oprofilefs_create_ro_atomic(struct dentry *root,
49740- char const *name, atomic_t *val)
49741+ char const *name, atomic_unchecked_t *val)
49742 {
49743 return __oprofilefs_create_file(root, name,
49744 &atomic_ro_fops, 0444, val);
49745diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
49746index bdef916..88c7dee 100644
49747--- a/drivers/oprofile/timer_int.c
49748+++ b/drivers/oprofile/timer_int.c
49749@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
49750 return NOTIFY_OK;
49751 }
49752
49753-static struct notifier_block __refdata oprofile_cpu_notifier = {
49754+static struct notifier_block oprofile_cpu_notifier = {
49755 .notifier_call = oprofile_cpu_notify,
49756 };
49757
49758diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
49759index 3b47080..6cd05dd 100644
49760--- a/drivers/parport/procfs.c
49761+++ b/drivers/parport/procfs.c
49762@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
49763
49764 *ppos += len;
49765
49766- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
49767+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
49768 }
49769
49770 #ifdef CONFIG_PARPORT_1284
49771@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
49772
49773 *ppos += len;
49774
49775- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
49776+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
49777 }
49778 #endif /* IEEE1284.3 support. */
49779
49780diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
49781index 6ca2399..68d866b 100644
49782--- a/drivers/pci/hotplug/acpiphp_ibm.c
49783+++ b/drivers/pci/hotplug/acpiphp_ibm.c
49784@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
49785 goto init_cleanup;
49786 }
49787
49788- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
49789+ pax_open_kernel();
49790+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
49791+ pax_close_kernel();
49792 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
49793
49794 return retval;
49795diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
49796index 66b7bbe..26bee78 100644
49797--- a/drivers/pci/hotplug/cpcihp_generic.c
49798+++ b/drivers/pci/hotplug/cpcihp_generic.c
49799@@ -73,7 +73,6 @@ static u16 port;
49800 static unsigned int enum_bit;
49801 static u8 enum_mask;
49802
49803-static struct cpci_hp_controller_ops generic_hpc_ops;
49804 static struct cpci_hp_controller generic_hpc;
49805
49806 static int __init validate_parameters(void)
49807@@ -139,6 +138,10 @@ static int query_enum(void)
49808 return ((value & enum_mask) == enum_mask);
49809 }
49810
49811+static struct cpci_hp_controller_ops generic_hpc_ops = {
49812+ .query_enum = query_enum,
49813+};
49814+
49815 static int __init cpcihp_generic_init(void)
49816 {
49817 int status;
49818@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
49819 pci_dev_put(dev);
49820
49821 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
49822- generic_hpc_ops.query_enum = query_enum;
49823 generic_hpc.ops = &generic_hpc_ops;
49824
49825 status = cpci_hp_register_controller(&generic_hpc);
49826diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
49827index 7ecf34e..effed62 100644
49828--- a/drivers/pci/hotplug/cpcihp_zt5550.c
49829+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
49830@@ -59,7 +59,6 @@
49831 /* local variables */
49832 static bool debug;
49833 static bool poll;
49834-static struct cpci_hp_controller_ops zt5550_hpc_ops;
49835 static struct cpci_hp_controller zt5550_hpc;
49836
49837 /* Primary cPCI bus bridge device */
49838@@ -204,6 +203,10 @@ static int zt5550_hc_disable_irq(void)
49839 return 0;
49840 }
49841
49842+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
49843+ .query_enum = zt5550_hc_query_enum,
49844+};
49845+
49846 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
49847 {
49848 int status;
49849@@ -215,16 +218,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
49850 dbg("returned from zt5550_hc_config");
49851
49852 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
49853- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
49854 zt5550_hpc.ops = &zt5550_hpc_ops;
49855 if (!poll) {
49856 zt5550_hpc.irq = hc_dev->irq;
49857 zt5550_hpc.irq_flags = IRQF_SHARED;
49858 zt5550_hpc.dev_id = hc_dev;
49859
49860- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
49861- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
49862- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
49863+ pax_open_kernel();
49864+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
49865+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
49866+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
49867+ pax_close_kernel();
49868 } else {
49869 info("using ENUM# polling mode");
49870 }
49871diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
49872index 1e08ff8c..3cd145f 100644
49873--- a/drivers/pci/hotplug/cpqphp_nvram.c
49874+++ b/drivers/pci/hotplug/cpqphp_nvram.c
49875@@ -425,8 +425,10 @@ static u32 store_HRT (void __iomem *rom_start)
49876
49877 void compaq_nvram_init (void __iomem *rom_start)
49878 {
49879+#ifndef CONFIG_PAX_KERNEXEC
49880 if (rom_start)
49881 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
49882+#endif
49883
49884 dbg("int15 entry = %p\n", compaq_int15_entry_point);
49885
49886diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
49887index 56d8486..f26113f 100644
49888--- a/drivers/pci/hotplug/pci_hotplug_core.c
49889+++ b/drivers/pci/hotplug/pci_hotplug_core.c
49890@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
49891 return -EINVAL;
49892 }
49893
49894- slot->ops->owner = owner;
49895- slot->ops->mod_name = mod_name;
49896+ pax_open_kernel();
49897+ *(struct module **)&slot->ops->owner = owner;
49898+ *(const char **)&slot->ops->mod_name = mod_name;
49899+ pax_close_kernel();
49900
49901 mutex_lock(&pci_hp_mutex);
49902 /*
49903diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
49904index 07aa722..84514b4 100644
49905--- a/drivers/pci/hotplug/pciehp_core.c
49906+++ b/drivers/pci/hotplug/pciehp_core.c
49907@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
49908 struct slot *slot = ctrl->slot;
49909 struct hotplug_slot *hotplug = NULL;
49910 struct hotplug_slot_info *info = NULL;
49911- struct hotplug_slot_ops *ops = NULL;
49912+ hotplug_slot_ops_no_const *ops = NULL;
49913 char name[SLOT_NAME_SIZE];
49914 int retval = -ENOMEM;
49915
49916diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
49917index fd60806..ab6c565 100644
49918--- a/drivers/pci/msi.c
49919+++ b/drivers/pci/msi.c
49920@@ -513,8 +513,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
49921 {
49922 struct attribute **msi_attrs;
49923 struct attribute *msi_attr;
49924- struct device_attribute *msi_dev_attr;
49925- struct attribute_group *msi_irq_group;
49926+ device_attribute_no_const *msi_dev_attr;
49927+ attribute_group_no_const *msi_irq_group;
49928 const struct attribute_group **msi_irq_groups;
49929 struct msi_desc *entry;
49930 int ret = -ENOMEM;
49931@@ -573,7 +573,7 @@ error_attrs:
49932 count = 0;
49933 msi_attr = msi_attrs[count];
49934 while (msi_attr) {
49935- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
49936+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
49937 kfree(msi_attr->name);
49938 kfree(msi_dev_attr);
49939 ++count;
49940diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
49941index 312f23a..d21181c 100644
49942--- a/drivers/pci/pci-sysfs.c
49943+++ b/drivers/pci/pci-sysfs.c
49944@@ -1140,7 +1140,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
49945 {
49946 /* allocate attribute structure, piggyback attribute name */
49947 int name_len = write_combine ? 13 : 10;
49948- struct bin_attribute *res_attr;
49949+ bin_attribute_no_const *res_attr;
49950 int retval;
49951
49952 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
49953@@ -1317,7 +1317,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
49954 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
49955 {
49956 int retval;
49957- struct bin_attribute *attr;
49958+ bin_attribute_no_const *attr;
49959
49960 /* If the device has VPD, try to expose it in sysfs. */
49961 if (dev->vpd) {
49962@@ -1364,7 +1364,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
49963 {
49964 int retval;
49965 int rom_size = 0;
49966- struct bin_attribute *attr;
49967+ bin_attribute_no_const *attr;
49968
49969 if (!sysfs_initialized)
49970 return -EACCES;
49971diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
49972index d54632a..198c84d 100644
49973--- a/drivers/pci/pci.h
49974+++ b/drivers/pci/pci.h
49975@@ -93,7 +93,7 @@ struct pci_vpd_ops {
49976 struct pci_vpd {
49977 unsigned int len;
49978 const struct pci_vpd_ops *ops;
49979- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
49980+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
49981 };
49982
49983 int pci_vpd_pci22_init(struct pci_dev *dev);
49984diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
49985index e1e7026..d28dd33 100644
49986--- a/drivers/pci/pcie/aspm.c
49987+++ b/drivers/pci/pcie/aspm.c
49988@@ -27,9 +27,9 @@
49989 #define MODULE_PARAM_PREFIX "pcie_aspm."
49990
49991 /* Note: those are not register definitions */
49992-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
49993-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
49994-#define ASPM_STATE_L1 (4) /* L1 state */
49995+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
49996+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
49997+#define ASPM_STATE_L1 (4U) /* L1 state */
49998 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
49999 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
50000
50001diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
50002index 23212f8..65e945b 100644
50003--- a/drivers/pci/probe.c
50004+++ b/drivers/pci/probe.c
50005@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
50006 u16 orig_cmd;
50007 struct pci_bus_region region, inverted_region;
50008
50009- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
50010+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0U;
50011
50012 /* No printks while decoding is disabled! */
50013 if (!dev->mmio_always_on) {
50014diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
50015index 3f155e7..0f4b1f0 100644
50016--- a/drivers/pci/proc.c
50017+++ b/drivers/pci/proc.c
50018@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
50019 static int __init pci_proc_init(void)
50020 {
50021 struct pci_dev *dev = NULL;
50022+
50023+#ifdef CONFIG_GRKERNSEC_PROC_ADD
50024+#ifdef CONFIG_GRKERNSEC_PROC_USER
50025+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
50026+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50027+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
50028+#endif
50029+#else
50030 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
50031+#endif
50032 proc_create("devices", 0, proc_bus_pci_dir,
50033 &proc_bus_pci_dev_operations);
50034 proc_initialized = 1;
50035diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
50036index b84fdd6..b89d829 100644
50037--- a/drivers/platform/chrome/chromeos_laptop.c
50038+++ b/drivers/platform/chrome/chromeos_laptop.c
50039@@ -479,7 +479,7 @@ static struct chromeos_laptop cr48 = {
50040 .callback = chromeos_laptop_dmi_matched, \
50041 .driver_data = (void *)&board_
50042
50043-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
50044+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
50045 {
50046 .ident = "Samsung Series 5 550",
50047 .matches = {
50048diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
50049index 1e1e594..8fe59c5 100644
50050--- a/drivers/platform/x86/alienware-wmi.c
50051+++ b/drivers/platform/x86/alienware-wmi.c
50052@@ -150,7 +150,7 @@ struct wmax_led_args {
50053 } __packed;
50054
50055 static struct platform_device *platform_device;
50056-static struct device_attribute *zone_dev_attrs;
50057+static device_attribute_no_const *zone_dev_attrs;
50058 static struct attribute **zone_attrs;
50059 static struct platform_zone *zone_data;
50060
50061@@ -160,7 +160,7 @@ static struct platform_driver platform_driver = {
50062 }
50063 };
50064
50065-static struct attribute_group zone_attribute_group = {
50066+static attribute_group_no_const zone_attribute_group = {
50067 .name = "rgb_zones",
50068 };
50069
50070diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
50071index 7543a56..367ca8ed 100644
50072--- a/drivers/platform/x86/asus-wmi.c
50073+++ b/drivers/platform/x86/asus-wmi.c
50074@@ -1589,6 +1589,10 @@ static int show_dsts(struct seq_file *m, void *data)
50075 int err;
50076 u32 retval = -1;
50077
50078+#ifdef CONFIG_GRKERNSEC_KMEM
50079+ return -EPERM;
50080+#endif
50081+
50082 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
50083
50084 if (err < 0)
50085@@ -1605,6 +1609,10 @@ static int show_devs(struct seq_file *m, void *data)
50086 int err;
50087 u32 retval = -1;
50088
50089+#ifdef CONFIG_GRKERNSEC_KMEM
50090+ return -EPERM;
50091+#endif
50092+
50093 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
50094 &retval);
50095
50096@@ -1629,6 +1637,10 @@ static int show_call(struct seq_file *m, void *data)
50097 union acpi_object *obj;
50098 acpi_status status;
50099
50100+#ifdef CONFIG_GRKERNSEC_KMEM
50101+ return -EPERM;
50102+#endif
50103+
50104 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
50105 1, asus->debug.method_id,
50106 &input, &output);
50107diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
50108index 0859877..1cf7d08 100644
50109--- a/drivers/platform/x86/msi-laptop.c
50110+++ b/drivers/platform/x86/msi-laptop.c
50111@@ -999,12 +999,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
50112
50113 if (!quirks->ec_read_only) {
50114 /* allow userland write sysfs file */
50115- dev_attr_bluetooth.store = store_bluetooth;
50116- dev_attr_wlan.store = store_wlan;
50117- dev_attr_threeg.store = store_threeg;
50118- dev_attr_bluetooth.attr.mode |= S_IWUSR;
50119- dev_attr_wlan.attr.mode |= S_IWUSR;
50120- dev_attr_threeg.attr.mode |= S_IWUSR;
50121+ pax_open_kernel();
50122+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
50123+ *(void **)&dev_attr_wlan.store = store_wlan;
50124+ *(void **)&dev_attr_threeg.store = store_threeg;
50125+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
50126+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
50127+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
50128+ pax_close_kernel();
50129 }
50130
50131 /* disable hardware control by fn key */
50132diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
50133index 6d2bac0..ec2b029 100644
50134--- a/drivers/platform/x86/msi-wmi.c
50135+++ b/drivers/platform/x86/msi-wmi.c
50136@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
50137 static void msi_wmi_notify(u32 value, void *context)
50138 {
50139 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
50140- static struct key_entry *key;
50141+ struct key_entry *key;
50142 union acpi_object *obj;
50143 acpi_status status;
50144
50145diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
50146index 6dd1c0e..5d602c7 100644
50147--- a/drivers/platform/x86/sony-laptop.c
50148+++ b/drivers/platform/x86/sony-laptop.c
50149@@ -2526,7 +2526,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
50150 }
50151
50152 /* High speed charging function */
50153-static struct device_attribute *hsc_handle;
50154+static device_attribute_no_const *hsc_handle;
50155
50156 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
50157 struct device_attribute *attr,
50158@@ -2600,7 +2600,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
50159 }
50160
50161 /* low battery function */
50162-static struct device_attribute *lowbatt_handle;
50163+static device_attribute_no_const *lowbatt_handle;
50164
50165 static ssize_t sony_nc_lowbatt_store(struct device *dev,
50166 struct device_attribute *attr,
50167@@ -2666,7 +2666,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
50168 }
50169
50170 /* fan speed function */
50171-static struct device_attribute *fan_handle, *hsf_handle;
50172+static device_attribute_no_const *fan_handle, *hsf_handle;
50173
50174 static ssize_t sony_nc_hsfan_store(struct device *dev,
50175 struct device_attribute *attr,
50176@@ -2773,7 +2773,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
50177 }
50178
50179 /* USB charge function */
50180-static struct device_attribute *uc_handle;
50181+static device_attribute_no_const *uc_handle;
50182
50183 static ssize_t sony_nc_usb_charge_store(struct device *dev,
50184 struct device_attribute *attr,
50185@@ -2847,7 +2847,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
50186 }
50187
50188 /* Panel ID function */
50189-static struct device_attribute *panel_handle;
50190+static device_attribute_no_const *panel_handle;
50191
50192 static ssize_t sony_nc_panelid_show(struct device *dev,
50193 struct device_attribute *attr, char *buffer)
50194@@ -2894,7 +2894,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
50195 }
50196
50197 /* smart connect function */
50198-static struct device_attribute *sc_handle;
50199+static device_attribute_no_const *sc_handle;
50200
50201 static ssize_t sony_nc_smart_conn_store(struct device *dev,
50202 struct device_attribute *attr,
50203diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
50204index c3d11fa..f83cded 100644
50205--- a/drivers/platform/x86/thinkpad_acpi.c
50206+++ b/drivers/platform/x86/thinkpad_acpi.c
50207@@ -2092,7 +2092,7 @@ static int hotkey_mask_get(void)
50208 return 0;
50209 }
50210
50211-void static hotkey_mask_warn_incomplete_mask(void)
50212+static void hotkey_mask_warn_incomplete_mask(void)
50213 {
50214 /* log only what the user can fix... */
50215 const u32 wantedmask = hotkey_driver_mask &
50216@@ -2436,10 +2436,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
50217 && !tp_features.bright_unkfw)
50218 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
50219 }
50220+}
50221
50222 #undef TPACPI_COMPARE_KEY
50223 #undef TPACPI_MAY_SEND_KEY
50224-}
50225
50226 /*
50227 * Polling driver
50228diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
50229index 438d4c7..ca8a2fb 100644
50230--- a/drivers/pnp/pnpbios/bioscalls.c
50231+++ b/drivers/pnp/pnpbios/bioscalls.c
50232@@ -59,7 +59,7 @@ do { \
50233 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
50234 } while(0)
50235
50236-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
50237+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
50238 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
50239
50240 /*
50241@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
50242
50243 cpu = get_cpu();
50244 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
50245+
50246+ pax_open_kernel();
50247 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
50248+ pax_close_kernel();
50249
50250 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
50251 spin_lock_irqsave(&pnp_bios_lock, flags);
50252@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
50253 :"memory");
50254 spin_unlock_irqrestore(&pnp_bios_lock, flags);
50255
50256+ pax_open_kernel();
50257 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
50258+ pax_close_kernel();
50259+
50260 put_cpu();
50261
50262 /* If we get here and this is set then the PnP BIOS faulted on us. */
50263@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
50264 return status;
50265 }
50266
50267-void pnpbios_calls_init(union pnp_bios_install_struct *header)
50268+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
50269 {
50270 int i;
50271
50272@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50273 pnp_bios_callpoint.offset = header->fields.pm16offset;
50274 pnp_bios_callpoint.segment = PNP_CS16;
50275
50276+ pax_open_kernel();
50277+
50278 for_each_possible_cpu(i) {
50279 struct desc_struct *gdt = get_cpu_gdt_table(i);
50280 if (!gdt)
50281@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50282 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
50283 (unsigned long)__va(header->fields.pm16dseg));
50284 }
50285+
50286+ pax_close_kernel();
50287 }
50288diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
50289index 0c52e2a..3421ab7 100644
50290--- a/drivers/power/pda_power.c
50291+++ b/drivers/power/pda_power.c
50292@@ -37,7 +37,11 @@ static int polling;
50293
50294 #if IS_ENABLED(CONFIG_USB_PHY)
50295 static struct usb_phy *transceiver;
50296-static struct notifier_block otg_nb;
50297+static int otg_handle_notification(struct notifier_block *nb,
50298+ unsigned long event, void *unused);
50299+static struct notifier_block otg_nb = {
50300+ .notifier_call = otg_handle_notification
50301+};
50302 #endif
50303
50304 static struct regulator *ac_draw;
50305@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
50306
50307 #if IS_ENABLED(CONFIG_USB_PHY)
50308 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
50309- otg_nb.notifier_call = otg_handle_notification;
50310 ret = usb_register_notifier(transceiver, &otg_nb);
50311 if (ret) {
50312 dev_err(dev, "failure to register otg notifier\n");
50313diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
50314index cc439fd..8fa30df 100644
50315--- a/drivers/power/power_supply.h
50316+++ b/drivers/power/power_supply.h
50317@@ -16,12 +16,12 @@ struct power_supply;
50318
50319 #ifdef CONFIG_SYSFS
50320
50321-extern void power_supply_init_attrs(struct device_type *dev_type);
50322+extern void power_supply_init_attrs(void);
50323 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
50324
50325 #else
50326
50327-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
50328+static inline void power_supply_init_attrs(void) {}
50329 #define power_supply_uevent NULL
50330
50331 #endif /* CONFIG_SYSFS */
50332diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
50333index 694e8cd..9f03483 100644
50334--- a/drivers/power/power_supply_core.c
50335+++ b/drivers/power/power_supply_core.c
50336@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
50337 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
50338 EXPORT_SYMBOL_GPL(power_supply_notifier);
50339
50340-static struct device_type power_supply_dev_type;
50341+extern const struct attribute_group *power_supply_attr_groups[];
50342+static struct device_type power_supply_dev_type = {
50343+ .groups = power_supply_attr_groups,
50344+};
50345
50346 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
50347 struct power_supply *supply)
50348@@ -637,7 +640,7 @@ static int __init power_supply_class_init(void)
50349 return PTR_ERR(power_supply_class);
50350
50351 power_supply_class->dev_uevent = power_supply_uevent;
50352- power_supply_init_attrs(&power_supply_dev_type);
50353+ power_supply_init_attrs();
50354
50355 return 0;
50356 }
50357diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
50358index 62653f5..d0bb485 100644
50359--- a/drivers/power/power_supply_sysfs.c
50360+++ b/drivers/power/power_supply_sysfs.c
50361@@ -238,17 +238,15 @@ static struct attribute_group power_supply_attr_group = {
50362 .is_visible = power_supply_attr_is_visible,
50363 };
50364
50365-static const struct attribute_group *power_supply_attr_groups[] = {
50366+const struct attribute_group *power_supply_attr_groups[] = {
50367 &power_supply_attr_group,
50368 NULL,
50369 };
50370
50371-void power_supply_init_attrs(struct device_type *dev_type)
50372+void power_supply_init_attrs(void)
50373 {
50374 int i;
50375
50376- dev_type->groups = power_supply_attr_groups;
50377-
50378 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
50379 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
50380 }
50381diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
50382index 84419af..268ede8 100644
50383--- a/drivers/powercap/powercap_sys.c
50384+++ b/drivers/powercap/powercap_sys.c
50385@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
50386 struct device_attribute name_attr;
50387 };
50388
50389+static ssize_t show_constraint_name(struct device *dev,
50390+ struct device_attribute *dev_attr,
50391+ char *buf);
50392+
50393 static struct powercap_constraint_attr
50394- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
50395+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
50396+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
50397+ .power_limit_attr = {
50398+ .attr = {
50399+ .name = NULL,
50400+ .mode = S_IWUSR | S_IRUGO
50401+ },
50402+ .show = show_constraint_power_limit_uw,
50403+ .store = store_constraint_power_limit_uw
50404+ },
50405+
50406+ .time_window_attr = {
50407+ .attr = {
50408+ .name = NULL,
50409+ .mode = S_IWUSR | S_IRUGO
50410+ },
50411+ .show = show_constraint_time_window_us,
50412+ .store = store_constraint_time_window_us
50413+ },
50414+
50415+ .max_power_attr = {
50416+ .attr = {
50417+ .name = NULL,
50418+ .mode = S_IRUGO
50419+ },
50420+ .show = show_constraint_max_power_uw,
50421+ .store = NULL
50422+ },
50423+
50424+ .min_power_attr = {
50425+ .attr = {
50426+ .name = NULL,
50427+ .mode = S_IRUGO
50428+ },
50429+ .show = show_constraint_min_power_uw,
50430+ .store = NULL
50431+ },
50432+
50433+ .max_time_window_attr = {
50434+ .attr = {
50435+ .name = NULL,
50436+ .mode = S_IRUGO
50437+ },
50438+ .show = show_constraint_max_time_window_us,
50439+ .store = NULL
50440+ },
50441+
50442+ .min_time_window_attr = {
50443+ .attr = {
50444+ .name = NULL,
50445+ .mode = S_IRUGO
50446+ },
50447+ .show = show_constraint_min_time_window_us,
50448+ .store = NULL
50449+ },
50450+
50451+ .name_attr = {
50452+ .attr = {
50453+ .name = NULL,
50454+ .mode = S_IRUGO
50455+ },
50456+ .show = show_constraint_name,
50457+ .store = NULL
50458+ }
50459+ }
50460+};
50461
50462 /* A list of powercap control_types */
50463 static LIST_HEAD(powercap_cntrl_list);
50464@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
50465 }
50466
50467 static int create_constraint_attribute(int id, const char *name,
50468- int mode,
50469- struct device_attribute *dev_attr,
50470- ssize_t (*show)(struct device *,
50471- struct device_attribute *, char *),
50472- ssize_t (*store)(struct device *,
50473- struct device_attribute *,
50474- const char *, size_t)
50475- )
50476+ struct device_attribute *dev_attr)
50477 {
50478+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
50479
50480- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
50481- id, name);
50482- if (!dev_attr->attr.name)
50483+ if (!name)
50484 return -ENOMEM;
50485- dev_attr->attr.mode = mode;
50486- dev_attr->show = show;
50487- dev_attr->store = store;
50488+
50489+ pax_open_kernel();
50490+ *(const char **)&dev_attr->attr.name = name;
50491+ pax_close_kernel();
50492
50493 return 0;
50494 }
50495@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
50496
50497 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
50498 ret = create_constraint_attribute(i, "power_limit_uw",
50499- S_IWUSR | S_IRUGO,
50500- &constraint_attrs[i].power_limit_attr,
50501- show_constraint_power_limit_uw,
50502- store_constraint_power_limit_uw);
50503+ &constraint_attrs[i].power_limit_attr);
50504 if (ret)
50505 goto err_alloc;
50506 ret = create_constraint_attribute(i, "time_window_us",
50507- S_IWUSR | S_IRUGO,
50508- &constraint_attrs[i].time_window_attr,
50509- show_constraint_time_window_us,
50510- store_constraint_time_window_us);
50511+ &constraint_attrs[i].time_window_attr);
50512 if (ret)
50513 goto err_alloc;
50514- ret = create_constraint_attribute(i, "name", S_IRUGO,
50515- &constraint_attrs[i].name_attr,
50516- show_constraint_name,
50517- NULL);
50518+ ret = create_constraint_attribute(i, "name",
50519+ &constraint_attrs[i].name_attr);
50520 if (ret)
50521 goto err_alloc;
50522- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
50523- &constraint_attrs[i].max_power_attr,
50524- show_constraint_max_power_uw,
50525- NULL);
50526+ ret = create_constraint_attribute(i, "max_power_uw",
50527+ &constraint_attrs[i].max_power_attr);
50528 if (ret)
50529 goto err_alloc;
50530- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
50531- &constraint_attrs[i].min_power_attr,
50532- show_constraint_min_power_uw,
50533- NULL);
50534+ ret = create_constraint_attribute(i, "min_power_uw",
50535+ &constraint_attrs[i].min_power_attr);
50536 if (ret)
50537 goto err_alloc;
50538 ret = create_constraint_attribute(i, "max_time_window_us",
50539- S_IRUGO,
50540- &constraint_attrs[i].max_time_window_attr,
50541- show_constraint_max_time_window_us,
50542- NULL);
50543+ &constraint_attrs[i].max_time_window_attr);
50544 if (ret)
50545 goto err_alloc;
50546 ret = create_constraint_attribute(i, "min_time_window_us",
50547- S_IRUGO,
50548- &constraint_attrs[i].min_time_window_attr,
50549- show_constraint_min_time_window_us,
50550- NULL);
50551+ &constraint_attrs[i].min_time_window_attr);
50552 if (ret)
50553 goto err_alloc;
50554
50555@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
50556 power_zone->zone_dev_attrs[count++] =
50557 &dev_attr_max_energy_range_uj.attr;
50558 if (power_zone->ops->get_energy_uj) {
50559+ pax_open_kernel();
50560 if (power_zone->ops->reset_energy_uj)
50561- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50562+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50563 else
50564- dev_attr_energy_uj.attr.mode = S_IRUGO;
50565+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
50566+ pax_close_kernel();
50567 power_zone->zone_dev_attrs[count++] =
50568 &dev_attr_energy_uj.attr;
50569 }
50570diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
50571index 9c5d414..c7900ce 100644
50572--- a/drivers/ptp/ptp_private.h
50573+++ b/drivers/ptp/ptp_private.h
50574@@ -51,7 +51,7 @@ struct ptp_clock {
50575 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
50576 wait_queue_head_t tsev_wq;
50577 int defunct; /* tells readers to go away when clock is being removed */
50578- struct device_attribute *pin_dev_attr;
50579+ device_attribute_no_const *pin_dev_attr;
50580 struct attribute **pin_attr;
50581 struct attribute_group pin_attr_group;
50582 };
50583diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
50584index 302e626..12579af 100644
50585--- a/drivers/ptp/ptp_sysfs.c
50586+++ b/drivers/ptp/ptp_sysfs.c
50587@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
50588 goto no_pin_attr;
50589
50590 for (i = 0; i < n_pins; i++) {
50591- struct device_attribute *da = &ptp->pin_dev_attr[i];
50592+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
50593 sysfs_attr_init(&da->attr);
50594 da->attr.name = info->pin_config[i].name;
50595 da->attr.mode = 0644;
50596diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
50597index a5761d0..a2a4540 100644
50598--- a/drivers/regulator/core.c
50599+++ b/drivers/regulator/core.c
50600@@ -3591,7 +3591,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50601 {
50602 const struct regulation_constraints *constraints = NULL;
50603 const struct regulator_init_data *init_data;
50604- static atomic_t regulator_no = ATOMIC_INIT(0);
50605+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
50606 struct regulator_dev *rdev;
50607 struct device *dev;
50608 int ret, i;
50609@@ -3665,7 +3665,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50610 rdev->dev.class = &regulator_class;
50611 rdev->dev.parent = dev;
50612 dev_set_name(&rdev->dev, "regulator.%d",
50613- atomic_inc_return(&regulator_no) - 1);
50614+ atomic_inc_return_unchecked(&regulator_no) - 1);
50615 ret = device_register(&rdev->dev);
50616 if (ret != 0) {
50617 put_device(&rdev->dev);
50618diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
50619index 7eee2ca..4024513 100644
50620--- a/drivers/regulator/max8660.c
50621+++ b/drivers/regulator/max8660.c
50622@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
50623 max8660->shadow_regs[MAX8660_OVER1] = 5;
50624 } else {
50625 /* Otherwise devices can be toggled via software */
50626- max8660_dcdc_ops.enable = max8660_dcdc_enable;
50627- max8660_dcdc_ops.disable = max8660_dcdc_disable;
50628+ pax_open_kernel();
50629+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
50630+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
50631+ pax_close_kernel();
50632 }
50633
50634 /*
50635diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
50636index c3d55c2..0dddfe6 100644
50637--- a/drivers/regulator/max8973-regulator.c
50638+++ b/drivers/regulator/max8973-regulator.c
50639@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
50640 if (!pdata || !pdata->enable_ext_control) {
50641 max->desc.enable_reg = MAX8973_VOUT;
50642 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
50643- max->ops.enable = regulator_enable_regmap;
50644- max->ops.disable = regulator_disable_regmap;
50645- max->ops.is_enabled = regulator_is_enabled_regmap;
50646+ pax_open_kernel();
50647+ *(void **)&max->ops.enable = regulator_enable_regmap;
50648+ *(void **)&max->ops.disable = regulator_disable_regmap;
50649+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
50650+ pax_close_kernel();
50651 }
50652
50653 if (pdata) {
50654diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
50655index 0d17c92..a29f627 100644
50656--- a/drivers/regulator/mc13892-regulator.c
50657+++ b/drivers/regulator/mc13892-regulator.c
50658@@ -584,10 +584,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
50659 mc13xxx_unlock(mc13892);
50660
50661 /* update mc13892_vcam ops */
50662- memcpy(&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
50663+ pax_open_kernel();
50664+ memcpy((void *)&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
50665 sizeof(struct regulator_ops));
50666- mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
50667- mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
50668+ *(void **)&mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode;
50669+ *(void **)&mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode;
50670+ pax_close_kernel();
50671 mc13892_regulators[MC13892_VCAM].desc.ops = &mc13892_vcam_ops;
50672
50673 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
50674diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
50675index 5b2e761..c8c8a4a 100644
50676--- a/drivers/rtc/rtc-cmos.c
50677+++ b/drivers/rtc/rtc-cmos.c
50678@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
50679 hpet_rtc_timer_init();
50680
50681 /* export at least the first block of NVRAM */
50682- nvram.size = address_space - NVRAM_OFFSET;
50683+ pax_open_kernel();
50684+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
50685+ pax_close_kernel();
50686 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
50687 if (retval < 0) {
50688 dev_dbg(dev, "can't create nvram file? %d\n", retval);
50689diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
50690index d049393..bb20be0 100644
50691--- a/drivers/rtc/rtc-dev.c
50692+++ b/drivers/rtc/rtc-dev.c
50693@@ -16,6 +16,7 @@
50694 #include <linux/module.h>
50695 #include <linux/rtc.h>
50696 #include <linux/sched.h>
50697+#include <linux/grsecurity.h>
50698 #include "rtc-core.h"
50699
50700 static dev_t rtc_devt;
50701@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
50702 if (copy_from_user(&tm, uarg, sizeof(tm)))
50703 return -EFAULT;
50704
50705+ gr_log_timechange();
50706+
50707 return rtc_set_time(rtc, &tm);
50708
50709 case RTC_PIE_ON:
50710diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
50711index 4ffabb3..1f87fca 100644
50712--- a/drivers/rtc/rtc-ds1307.c
50713+++ b/drivers/rtc/rtc-ds1307.c
50714@@ -107,7 +107,7 @@ struct ds1307 {
50715 u8 offset; /* register's offset */
50716 u8 regs[11];
50717 u16 nvram_offset;
50718- struct bin_attribute *nvram;
50719+ bin_attribute_no_const *nvram;
50720 enum ds_type type;
50721 unsigned long flags;
50722 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
50723diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
50724index 90abb5b..e0bf6dd 100644
50725--- a/drivers/rtc/rtc-m48t59.c
50726+++ b/drivers/rtc/rtc-m48t59.c
50727@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
50728 if (IS_ERR(m48t59->rtc))
50729 return PTR_ERR(m48t59->rtc);
50730
50731- m48t59_nvram_attr.size = pdata->offset;
50732+ pax_open_kernel();
50733+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
50734+ pax_close_kernel();
50735
50736 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
50737 if (ret)
50738diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
50739index e693af6..2e525b6 100644
50740--- a/drivers/scsi/bfa/bfa_fcpim.h
50741+++ b/drivers/scsi/bfa/bfa_fcpim.h
50742@@ -36,7 +36,7 @@ struct bfa_iotag_s {
50743
50744 struct bfa_itn_s {
50745 bfa_isr_func_t isr;
50746-};
50747+} __no_const;
50748
50749 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
50750 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
50751diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
50752index 0f19455..ef7adb5 100644
50753--- a/drivers/scsi/bfa/bfa_fcs.c
50754+++ b/drivers/scsi/bfa/bfa_fcs.c
50755@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
50756 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
50757
50758 static struct bfa_fcs_mod_s fcs_modules[] = {
50759- { bfa_fcs_port_attach, NULL, NULL },
50760- { bfa_fcs_uf_attach, NULL, NULL },
50761- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
50762- bfa_fcs_fabric_modexit },
50763+ {
50764+ .attach = bfa_fcs_port_attach,
50765+ .modinit = NULL,
50766+ .modexit = NULL
50767+ },
50768+ {
50769+ .attach = bfa_fcs_uf_attach,
50770+ .modinit = NULL,
50771+ .modexit = NULL
50772+ },
50773+ {
50774+ .attach = bfa_fcs_fabric_attach,
50775+ .modinit = bfa_fcs_fabric_modinit,
50776+ .modexit = bfa_fcs_fabric_modexit
50777+ },
50778 };
50779
50780 /*
50781diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
50782index ff75ef8..2dfe00a 100644
50783--- a/drivers/scsi/bfa/bfa_fcs_lport.c
50784+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
50785@@ -89,15 +89,26 @@ static struct {
50786 void (*offline) (struct bfa_fcs_lport_s *port);
50787 } __port_action[] = {
50788 {
50789- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
50790- bfa_fcs_lport_unknown_offline}, {
50791- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
50792- bfa_fcs_lport_fab_offline}, {
50793- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
50794- bfa_fcs_lport_n2n_offline}, {
50795- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
50796- bfa_fcs_lport_loop_offline},
50797- };
50798+ .init = bfa_fcs_lport_unknown_init,
50799+ .online = bfa_fcs_lport_unknown_online,
50800+ .offline = bfa_fcs_lport_unknown_offline
50801+ },
50802+ {
50803+ .init = bfa_fcs_lport_fab_init,
50804+ .online = bfa_fcs_lport_fab_online,
50805+ .offline = bfa_fcs_lport_fab_offline
50806+ },
50807+ {
50808+ .init = bfa_fcs_lport_n2n_init,
50809+ .online = bfa_fcs_lport_n2n_online,
50810+ .offline = bfa_fcs_lport_n2n_offline
50811+ },
50812+ {
50813+ .init = bfa_fcs_lport_loop_init,
50814+ .online = bfa_fcs_lport_loop_online,
50815+ .offline = bfa_fcs_lport_loop_offline
50816+ },
50817+};
50818
50819 /*
50820 * fcs_port_sm FCS logical port state machine
50821diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
50822index a38aafa0..fe8f03b 100644
50823--- a/drivers/scsi/bfa/bfa_ioc.h
50824+++ b/drivers/scsi/bfa/bfa_ioc.h
50825@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
50826 bfa_ioc_disable_cbfn_t disable_cbfn;
50827 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
50828 bfa_ioc_reset_cbfn_t reset_cbfn;
50829-};
50830+} __no_const;
50831
50832 /*
50833 * IOC event notification mechanism.
50834@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
50835 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
50836 enum bfi_ioc_state fwstate);
50837 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
50838-};
50839+} __no_const;
50840
50841 /*
50842 * Queue element to wait for room in request queue. FIFO order is
50843diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
50844index a14c784..6de6790 100644
50845--- a/drivers/scsi/bfa/bfa_modules.h
50846+++ b/drivers/scsi/bfa/bfa_modules.h
50847@@ -78,12 +78,12 @@ enum {
50848 \
50849 extern struct bfa_module_s hal_mod_ ## __mod; \
50850 struct bfa_module_s hal_mod_ ## __mod = { \
50851- bfa_ ## __mod ## _meminfo, \
50852- bfa_ ## __mod ## _attach, \
50853- bfa_ ## __mod ## _detach, \
50854- bfa_ ## __mod ## _start, \
50855- bfa_ ## __mod ## _stop, \
50856- bfa_ ## __mod ## _iocdisable, \
50857+ .meminfo = bfa_ ## __mod ## _meminfo, \
50858+ .attach = bfa_ ## __mod ## _attach, \
50859+ .detach = bfa_ ## __mod ## _detach, \
50860+ .start = bfa_ ## __mod ## _start, \
50861+ .stop = bfa_ ## __mod ## _stop, \
50862+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
50863 }
50864
50865 #define BFA_CACHELINE_SZ (256)
50866diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
50867index 045c4e1..13de803 100644
50868--- a/drivers/scsi/fcoe/fcoe_sysfs.c
50869+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
50870@@ -33,8 +33,8 @@
50871 */
50872 #include "libfcoe.h"
50873
50874-static atomic_t ctlr_num;
50875-static atomic_t fcf_num;
50876+static atomic_unchecked_t ctlr_num;
50877+static atomic_unchecked_t fcf_num;
50878
50879 /*
50880 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
50881@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
50882 if (!ctlr)
50883 goto out;
50884
50885- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
50886+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
50887 ctlr->f = f;
50888 ctlr->mode = FIP_CONN_TYPE_FABRIC;
50889 INIT_LIST_HEAD(&ctlr->fcfs);
50890@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
50891 fcf->dev.parent = &ctlr->dev;
50892 fcf->dev.bus = &fcoe_bus_type;
50893 fcf->dev.type = &fcoe_fcf_device_type;
50894- fcf->id = atomic_inc_return(&fcf_num) - 1;
50895+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
50896 fcf->state = FCOE_FCF_STATE_UNKNOWN;
50897
50898 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
50899@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
50900 {
50901 int error;
50902
50903- atomic_set(&ctlr_num, 0);
50904- atomic_set(&fcf_num, 0);
50905+ atomic_set_unchecked(&ctlr_num, 0);
50906+ atomic_set_unchecked(&fcf_num, 0);
50907
50908 error = bus_register(&fcoe_bus_type);
50909 if (error)
50910diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
50911index 8bb173e..20236b4 100644
50912--- a/drivers/scsi/hosts.c
50913+++ b/drivers/scsi/hosts.c
50914@@ -42,7 +42,7 @@
50915 #include "scsi_logging.h"
50916
50917
50918-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
50919+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
50920
50921
50922 static void scsi_host_cls_release(struct device *dev)
50923@@ -392,7 +392,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
50924 * subtract one because we increment first then return, but we need to
50925 * know what the next host number was before increment
50926 */
50927- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
50928+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
50929 shost->dma_channel = 0xff;
50930
50931 /* These three are default values which can be overridden */
50932diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
50933index 6bb4611..0203251 100644
50934--- a/drivers/scsi/hpsa.c
50935+++ b/drivers/scsi/hpsa.c
50936@@ -701,10 +701,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
50937 struct reply_queue_buffer *rq = &h->reply_queue[q];
50938
50939 if (h->transMethod & CFGTBL_Trans_io_accel1)
50940- return h->access.command_completed(h, q);
50941+ return h->access->command_completed(h, q);
50942
50943 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
50944- return h->access.command_completed(h, q);
50945+ return h->access->command_completed(h, q);
50946
50947 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
50948 a = rq->head[rq->current_entry];
50949@@ -5360,7 +5360,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
50950 while (!list_empty(&h->reqQ)) {
50951 c = list_entry(h->reqQ.next, struct CommandList, list);
50952 /* can't do anything if fifo is full */
50953- if ((h->access.fifo_full(h))) {
50954+ if ((h->access->fifo_full(h))) {
50955 h->fifo_recently_full = 1;
50956 dev_warn(&h->pdev->dev, "fifo full\n");
50957 break;
50958@@ -5376,7 +5376,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
50959 atomic_inc(&h->commands_outstanding);
50960 spin_unlock_irqrestore(&h->lock, *flags);
50961 /* Tell the controller execute command */
50962- h->access.submit_command(h, c);
50963+ h->access->submit_command(h, c);
50964 spin_lock_irqsave(&h->lock, *flags);
50965 }
50966 }
50967@@ -5392,17 +5392,17 @@ static void lock_and_start_io(struct ctlr_info *h)
50968
50969 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
50970 {
50971- return h->access.command_completed(h, q);
50972+ return h->access->command_completed(h, q);
50973 }
50974
50975 static inline bool interrupt_pending(struct ctlr_info *h)
50976 {
50977- return h->access.intr_pending(h);
50978+ return h->access->intr_pending(h);
50979 }
50980
50981 static inline long interrupt_not_for_us(struct ctlr_info *h)
50982 {
50983- return (h->access.intr_pending(h) == 0) ||
50984+ return (h->access->intr_pending(h) == 0) ||
50985 (h->interrupts_enabled == 0);
50986 }
50987
50988@@ -6343,7 +6343,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
50989 if (prod_index < 0)
50990 return -ENODEV;
50991 h->product_name = products[prod_index].product_name;
50992- h->access = *(products[prod_index].access);
50993+ h->access = products[prod_index].access;
50994
50995 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
50996 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
50997@@ -6690,7 +6690,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
50998 unsigned long flags;
50999 u32 lockup_detected;
51000
51001- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51002+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51003 spin_lock_irqsave(&h->lock, flags);
51004 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
51005 if (!lockup_detected) {
51006@@ -6937,7 +6937,7 @@ reinit_after_soft_reset:
51007 }
51008
51009 /* make sure the board interrupts are off */
51010- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51011+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51012
51013 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
51014 goto clean2;
51015@@ -6972,7 +6972,7 @@ reinit_after_soft_reset:
51016 * fake ones to scoop up any residual completions.
51017 */
51018 spin_lock_irqsave(&h->lock, flags);
51019- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51020+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51021 spin_unlock_irqrestore(&h->lock, flags);
51022 free_irqs(h);
51023 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
51024@@ -6991,9 +6991,9 @@ reinit_after_soft_reset:
51025 dev_info(&h->pdev->dev, "Board READY.\n");
51026 dev_info(&h->pdev->dev,
51027 "Waiting for stale completions to drain.\n");
51028- h->access.set_intr_mask(h, HPSA_INTR_ON);
51029+ h->access->set_intr_mask(h, HPSA_INTR_ON);
51030 msleep(10000);
51031- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51032+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51033
51034 rc = controller_reset_failed(h->cfgtable);
51035 if (rc)
51036@@ -7019,7 +7019,7 @@ reinit_after_soft_reset:
51037 h->drv_req_rescan = 0;
51038
51039 /* Turn the interrupts on so we can service requests */
51040- h->access.set_intr_mask(h, HPSA_INTR_ON);
51041+ h->access->set_intr_mask(h, HPSA_INTR_ON);
51042
51043 hpsa_hba_inquiry(h);
51044 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
51045@@ -7084,7 +7084,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
51046 * To write all data in the battery backed cache to disks
51047 */
51048 hpsa_flush_cache(h);
51049- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51050+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51051 hpsa_free_irqs_and_disable_msix(h);
51052 }
51053
51054@@ -7202,7 +7202,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
51055 CFGTBL_Trans_enable_directed_msix |
51056 (trans_support & (CFGTBL_Trans_io_accel1 |
51057 CFGTBL_Trans_io_accel2));
51058- struct access_method access = SA5_performant_access;
51059+ struct access_method *access = &SA5_performant_access;
51060
51061 /* This is a bit complicated. There are 8 registers on
51062 * the controller which we write to to tell it 8 different
51063@@ -7244,7 +7244,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
51064 * perform the superfluous readl() after each command submission.
51065 */
51066 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
51067- access = SA5_performant_access_no_read;
51068+ access = &SA5_performant_access_no_read;
51069
51070 /* Controller spec: zero out this buffer. */
51071 for (i = 0; i < h->nreply_queues; i++)
51072@@ -7274,12 +7274,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
51073 * enable outbound interrupt coalescing in accelerator mode;
51074 */
51075 if (trans_support & CFGTBL_Trans_io_accel1) {
51076- access = SA5_ioaccel_mode1_access;
51077+ access = &SA5_ioaccel_mode1_access;
51078 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
51079 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
51080 } else {
51081 if (trans_support & CFGTBL_Trans_io_accel2) {
51082- access = SA5_ioaccel_mode2_access;
51083+ access = &SA5_ioaccel_mode2_access;
51084 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
51085 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
51086 }
51087diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
51088index 8e06d9e..396e0a1 100644
51089--- a/drivers/scsi/hpsa.h
51090+++ b/drivers/scsi/hpsa.h
51091@@ -127,7 +127,7 @@ struct ctlr_info {
51092 unsigned int msix_vector;
51093 unsigned int msi_vector;
51094 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
51095- struct access_method access;
51096+ struct access_method *access;
51097 char hba_mode_enabled;
51098
51099 /* queue and queue Info */
51100@@ -523,43 +523,43 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
51101 }
51102
51103 static struct access_method SA5_access = {
51104- SA5_submit_command,
51105- SA5_intr_mask,
51106- SA5_fifo_full,
51107- SA5_intr_pending,
51108- SA5_completed,
51109+ .submit_command = SA5_submit_command,
51110+ .set_intr_mask = SA5_intr_mask,
51111+ .fifo_full = SA5_fifo_full,
51112+ .intr_pending = SA5_intr_pending,
51113+ .command_completed = SA5_completed,
51114 };
51115
51116 static struct access_method SA5_ioaccel_mode1_access = {
51117- SA5_submit_command,
51118- SA5_performant_intr_mask,
51119- SA5_fifo_full,
51120- SA5_ioaccel_mode1_intr_pending,
51121- SA5_ioaccel_mode1_completed,
51122+ .submit_command = SA5_submit_command,
51123+ .set_intr_mask = SA5_performant_intr_mask,
51124+ .fifo_full = SA5_fifo_full,
51125+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
51126+ .command_completed = SA5_ioaccel_mode1_completed,
51127 };
51128
51129 static struct access_method SA5_ioaccel_mode2_access = {
51130- SA5_submit_command_ioaccel2,
51131- SA5_performant_intr_mask,
51132- SA5_fifo_full,
51133- SA5_performant_intr_pending,
51134- SA5_performant_completed,
51135+ .submit_command = SA5_submit_command_ioaccel2,
51136+ .set_intr_mask = SA5_performant_intr_mask,
51137+ .fifo_full = SA5_fifo_full,
51138+ .intr_pending = SA5_performant_intr_pending,
51139+ .command_completed = SA5_performant_completed,
51140 };
51141
51142 static struct access_method SA5_performant_access = {
51143- SA5_submit_command,
51144- SA5_performant_intr_mask,
51145- SA5_fifo_full,
51146- SA5_performant_intr_pending,
51147- SA5_performant_completed,
51148+ .submit_command = SA5_submit_command,
51149+ .set_intr_mask = SA5_performant_intr_mask,
51150+ .fifo_full = SA5_fifo_full,
51151+ .intr_pending = SA5_performant_intr_pending,
51152+ .command_completed = SA5_performant_completed,
51153 };
51154
51155 static struct access_method SA5_performant_access_no_read = {
51156- SA5_submit_command_no_read,
51157- SA5_performant_intr_mask,
51158- SA5_fifo_full,
51159- SA5_performant_intr_pending,
51160- SA5_performant_completed,
51161+ .submit_command = SA5_submit_command_no_read,
51162+ .set_intr_mask = SA5_performant_intr_mask,
51163+ .fifo_full = SA5_fifo_full,
51164+ .intr_pending = SA5_performant_intr_pending,
51165+ .command_completed = SA5_performant_completed,
51166 };
51167
51168 struct board_type {
51169diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
51170index 1b3a094..068e683 100644
51171--- a/drivers/scsi/libfc/fc_exch.c
51172+++ b/drivers/scsi/libfc/fc_exch.c
51173@@ -101,12 +101,12 @@ struct fc_exch_mgr {
51174 u16 pool_max_index;
51175
51176 struct {
51177- atomic_t no_free_exch;
51178- atomic_t no_free_exch_xid;
51179- atomic_t xid_not_found;
51180- atomic_t xid_busy;
51181- atomic_t seq_not_found;
51182- atomic_t non_bls_resp;
51183+ atomic_unchecked_t no_free_exch;
51184+ atomic_unchecked_t no_free_exch_xid;
51185+ atomic_unchecked_t xid_not_found;
51186+ atomic_unchecked_t xid_busy;
51187+ atomic_unchecked_t seq_not_found;
51188+ atomic_unchecked_t non_bls_resp;
51189 } stats;
51190 };
51191
51192@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
51193 /* allocate memory for exchange */
51194 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
51195 if (!ep) {
51196- atomic_inc(&mp->stats.no_free_exch);
51197+ atomic_inc_unchecked(&mp->stats.no_free_exch);
51198 goto out;
51199 }
51200 memset(ep, 0, sizeof(*ep));
51201@@ -874,7 +874,7 @@ out:
51202 return ep;
51203 err:
51204 spin_unlock_bh(&pool->lock);
51205- atomic_inc(&mp->stats.no_free_exch_xid);
51206+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
51207 mempool_free(ep, mp->ep_pool);
51208 return NULL;
51209 }
51210@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51211 xid = ntohs(fh->fh_ox_id); /* we originated exch */
51212 ep = fc_exch_find(mp, xid);
51213 if (!ep) {
51214- atomic_inc(&mp->stats.xid_not_found);
51215+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51216 reject = FC_RJT_OX_ID;
51217 goto out;
51218 }
51219@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51220 ep = fc_exch_find(mp, xid);
51221 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
51222 if (ep) {
51223- atomic_inc(&mp->stats.xid_busy);
51224+ atomic_inc_unchecked(&mp->stats.xid_busy);
51225 reject = FC_RJT_RX_ID;
51226 goto rel;
51227 }
51228@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51229 }
51230 xid = ep->xid; /* get our XID */
51231 } else if (!ep) {
51232- atomic_inc(&mp->stats.xid_not_found);
51233+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51234 reject = FC_RJT_RX_ID; /* XID not found */
51235 goto out;
51236 }
51237@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51238 } else {
51239 sp = &ep->seq;
51240 if (sp->id != fh->fh_seq_id) {
51241- atomic_inc(&mp->stats.seq_not_found);
51242+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51243 if (f_ctl & FC_FC_END_SEQ) {
51244 /*
51245 * Update sequence_id based on incoming last
51246@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51247
51248 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
51249 if (!ep) {
51250- atomic_inc(&mp->stats.xid_not_found);
51251+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51252 goto out;
51253 }
51254 if (ep->esb_stat & ESB_ST_COMPLETE) {
51255- atomic_inc(&mp->stats.xid_not_found);
51256+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51257 goto rel;
51258 }
51259 if (ep->rxid == FC_XID_UNKNOWN)
51260 ep->rxid = ntohs(fh->fh_rx_id);
51261 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
51262- atomic_inc(&mp->stats.xid_not_found);
51263+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51264 goto rel;
51265 }
51266 if (ep->did != ntoh24(fh->fh_s_id) &&
51267 ep->did != FC_FID_FLOGI) {
51268- atomic_inc(&mp->stats.xid_not_found);
51269+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51270 goto rel;
51271 }
51272 sof = fr_sof(fp);
51273@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51274 sp->ssb_stat |= SSB_ST_RESP;
51275 sp->id = fh->fh_seq_id;
51276 } else if (sp->id != fh->fh_seq_id) {
51277- atomic_inc(&mp->stats.seq_not_found);
51278+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51279 goto rel;
51280 }
51281
51282@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51283 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
51284
51285 if (!sp)
51286- atomic_inc(&mp->stats.xid_not_found);
51287+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51288 else
51289- atomic_inc(&mp->stats.non_bls_resp);
51290+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
51291
51292 fc_frame_free(fp);
51293 }
51294@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
51295
51296 list_for_each_entry(ema, &lport->ema_list, ema_list) {
51297 mp = ema->mp;
51298- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
51299+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
51300 st->fc_no_free_exch_xid +=
51301- atomic_read(&mp->stats.no_free_exch_xid);
51302- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
51303- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
51304- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
51305- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
51306+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
51307+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
51308+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
51309+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
51310+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
51311 }
51312 }
51313 EXPORT_SYMBOL(fc_exch_update_stats);
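The hunks above show the single most common transformation in this patch: statistics counters move from atomic_t to atomic_unchecked_t. Under PaX's REFCOUNT hardening, the regular atomic operations are instrumented to detect overflow and saturate, which blocks reference-count-overflow exploits; counters such as xid_not_found are pure statistics where wraparound is harmless, so they switch to the unchecked variants that skip the instrumentation and its overhead. As a minimal sketch of the compatibility side, assuming a configuration without PAX_REFCOUNT where the unchecked API simply aliases the plain one:

#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)            atomic_read(v)
#define atomic_set_unchecked(v, i)          atomic_set((v), (i))
#define atomic_inc_unchecked(v)             atomic_inc(v)
#define atomic_inc_return_unchecked(v)      atomic_inc_return(v)
#define atomic_add_return_unchecked(i, v)   atomic_add_return((i), (v))
#define atomic_dec_return_unchecked(v)      atomic_dec_return(v)
#endif

The same checked/unchecked split recurs throughout the SCSI, staging, and TTY hunks that follow.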
51314diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
51315index 932d9cc..50c7ee9 100644
51316--- a/drivers/scsi/libsas/sas_ata.c
51317+++ b/drivers/scsi/libsas/sas_ata.c
51318@@ -535,7 +535,7 @@ static struct ata_port_operations sas_sata_ops = {
51319 .postreset = ata_std_postreset,
51320 .error_handler = ata_std_error_handler,
51321 .post_internal_cmd = sas_ata_post_internal,
51322- .qc_defer = ata_std_qc_defer,
51323+ .qc_defer = ata_std_qc_defer,
51324 .qc_prep = ata_noop_qc_prep,
51325 .qc_issue = sas_ata_qc_issue,
51326 .qc_fill_rtf = sas_ata_qc_fill_rtf,
51327diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
51328index 434e903..5a4a79b 100644
51329--- a/drivers/scsi/lpfc/lpfc.h
51330+++ b/drivers/scsi/lpfc/lpfc.h
51331@@ -430,7 +430,7 @@ struct lpfc_vport {
51332 struct dentry *debug_nodelist;
51333 struct dentry *vport_debugfs_root;
51334 struct lpfc_debugfs_trc *disc_trc;
51335- atomic_t disc_trc_cnt;
51336+ atomic_unchecked_t disc_trc_cnt;
51337 #endif
51338 uint8_t stat_data_enabled;
51339 uint8_t stat_data_blocked;
51340@@ -880,8 +880,8 @@ struct lpfc_hba {
51341 struct timer_list fabric_block_timer;
51342 unsigned long bit_flags;
51343 #define FABRIC_COMANDS_BLOCKED 0
51344- atomic_t num_rsrc_err;
51345- atomic_t num_cmd_success;
51346+ atomic_unchecked_t num_rsrc_err;
51347+ atomic_unchecked_t num_cmd_success;
51348 unsigned long last_rsrc_error_time;
51349 unsigned long last_ramp_down_time;
51350 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
51351@@ -916,7 +916,7 @@ struct lpfc_hba {
51352
51353 struct dentry *debug_slow_ring_trc;
51354 struct lpfc_debugfs_trc *slow_ring_trc;
51355- atomic_t slow_ring_trc_cnt;
51356+ atomic_unchecked_t slow_ring_trc_cnt;
51357 /* iDiag debugfs sub-directory */
51358 struct dentry *idiag_root;
51359 struct dentry *idiag_pci_cfg;
51360diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
51361index 5633e7d..8272114 100644
51362--- a/drivers/scsi/lpfc/lpfc_debugfs.c
51363+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
51364@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
51365
51366 #include <linux/debugfs.h>
51367
51368-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51369+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51370 static unsigned long lpfc_debugfs_start_time = 0L;
51371
51372 /* iDiag */
51373@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
51374 lpfc_debugfs_enable = 0;
51375
51376 len = 0;
51377- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
51378+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
51379 (lpfc_debugfs_max_disc_trc - 1);
51380 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
51381 dtp = vport->disc_trc + i;
51382@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
51383 lpfc_debugfs_enable = 0;
51384
51385 len = 0;
51386- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
51387+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
51388 (lpfc_debugfs_max_slow_ring_trc - 1);
51389 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
51390 dtp = phba->slow_ring_trc + i;
51391@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
51392 !vport || !vport->disc_trc)
51393 return;
51394
51395- index = atomic_inc_return(&vport->disc_trc_cnt) &
51396+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
51397 (lpfc_debugfs_max_disc_trc - 1);
51398 dtp = vport->disc_trc + index;
51399 dtp->fmt = fmt;
51400 dtp->data1 = data1;
51401 dtp->data2 = data2;
51402 dtp->data3 = data3;
51403- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51404+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51405 dtp->jif = jiffies;
51406 #endif
51407 return;
51408@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
51409 !phba || !phba->slow_ring_trc)
51410 return;
51411
51412- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
51413+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
51414 (lpfc_debugfs_max_slow_ring_trc - 1);
51415 dtp = phba->slow_ring_trc + index;
51416 dtp->fmt = fmt;
51417 dtp->data1 = data1;
51418 dtp->data2 = data2;
51419 dtp->data3 = data3;
51420- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51421+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51422 dtp->jif = jiffies;
51423 #endif
51424 return;
51425@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51426 "slow_ring buffer\n");
51427 goto debug_failed;
51428 }
51429- atomic_set(&phba->slow_ring_trc_cnt, 0);
51430+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
51431 memset(phba->slow_ring_trc, 0,
51432 (sizeof(struct lpfc_debugfs_trc) *
51433 lpfc_debugfs_max_slow_ring_trc));
51434@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51435 "buffer\n");
51436 goto debug_failed;
51437 }
51438- atomic_set(&vport->disc_trc_cnt, 0);
51439+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
51440
51441 snprintf(name, sizeof(name), "discovery_trace");
51442 vport->debug_disc_trc =
51443diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
51444index 0b2c53a..aec2b45 100644
51445--- a/drivers/scsi/lpfc/lpfc_init.c
51446+++ b/drivers/scsi/lpfc/lpfc_init.c
51447@@ -11290,8 +11290,10 @@ lpfc_init(void)
51448 "misc_register returned with status %d", error);
51449
51450 if (lpfc_enable_npiv) {
51451- lpfc_transport_functions.vport_create = lpfc_vport_create;
51452- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51453+ pax_open_kernel();
51454+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
51455+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51456+ pax_close_kernel();
51457 }
51458 lpfc_transport_template =
51459 fc_attach_transport(&lpfc_transport_functions);
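lpfc patches its fc_function_template at module init, but with the constify plugin such ops structures become read-only data. The rewrite therefore stores through *(void **) casts, which sidestep the const qualifier, and brackets the stores with pax_open_kernel()/pax_close_kernel(), which temporarily permit writes to read-only kernel memory (on x86 by toggling CR0.WP). A sketch of the fallback, assuming a build without KERNEXEC where the brackets collapse to no-ops:

#ifndef CONFIG_PAX_KERNEXEC
static inline unsigned long pax_open_kernel(void) { return 0; }
static inline unsigned long pax_close_kernel(void) { return 0; }
#endif

The same open/write/close pattern reappears below in qla2xxx, int3400_thermal, and of-thermal.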
51460diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
51461index 4f9222e..f1850e3 100644
51462--- a/drivers/scsi/lpfc/lpfc_scsi.c
51463+++ b/drivers/scsi/lpfc/lpfc_scsi.c
51464@@ -261,7 +261,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
51465 unsigned long expires;
51466
51467 spin_lock_irqsave(&phba->hbalock, flags);
51468- atomic_inc(&phba->num_rsrc_err);
51469+ atomic_inc_unchecked(&phba->num_rsrc_err);
51470 phba->last_rsrc_error_time = jiffies;
51471
51472 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
51473@@ -303,8 +303,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51474 unsigned long num_rsrc_err, num_cmd_success;
51475 int i;
51476
51477- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
51478- num_cmd_success = atomic_read(&phba->num_cmd_success);
51479+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
51480+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
51481
51482 /*
51483 * The error and success command counters are global per
51484@@ -331,8 +331,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51485 }
51486 }
51487 lpfc_destroy_vport_work_array(phba, vports);
51488- atomic_set(&phba->num_rsrc_err, 0);
51489- atomic_set(&phba->num_cmd_success, 0);
51490+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
51491+ atomic_set_unchecked(&phba->num_cmd_success, 0);
51492 }
51493
51494 /**
51495diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51496index 6a1c036..38e0e8d 100644
51497--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51498+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51499@@ -1508,7 +1508,7 @@ _scsih_get_resync(struct device *dev)
51500 {
51501 struct scsi_device *sdev = to_scsi_device(dev);
51502 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51503- static struct _raid_device *raid_device;
51504+ struct _raid_device *raid_device;
51505 unsigned long flags;
51506 Mpi2RaidVolPage0_t vol_pg0;
51507 Mpi2ConfigReply_t mpi_reply;
51508@@ -1560,7 +1560,7 @@ _scsih_get_state(struct device *dev)
51509 {
51510 struct scsi_device *sdev = to_scsi_device(dev);
51511 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51512- static struct _raid_device *raid_device;
51513+ struct _raid_device *raid_device;
51514 unsigned long flags;
51515 Mpi2RaidVolPage0_t vol_pg0;
51516 Mpi2ConfigReply_t mpi_reply;
51517@@ -6602,7 +6602,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
51518 Mpi2EventDataIrOperationStatus_t *event_data =
51519 (Mpi2EventDataIrOperationStatus_t *)
51520 fw_event->event_data;
51521- static struct _raid_device *raid_device;
51522+ struct _raid_device *raid_device;
51523 unsigned long flags;
51524 u16 handle;
51525
51526@@ -7073,7 +7073,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
51527 u64 sas_address;
51528 struct _sas_device *sas_device;
51529 struct _sas_node *expander_device;
51530- static struct _raid_device *raid_device;
51531+ struct _raid_device *raid_device;
51532 u8 retry_count;
51533 unsigned long flags;
51534
51535diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
51536index 8c27b6a..607f56e 100644
51537--- a/drivers/scsi/pmcraid.c
51538+++ b/drivers/scsi/pmcraid.c
51539@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
51540 res->scsi_dev = scsi_dev;
51541 scsi_dev->hostdata = res;
51542 res->change_detected = 0;
51543- atomic_set(&res->read_failures, 0);
51544- atomic_set(&res->write_failures, 0);
51545+ atomic_set_unchecked(&res->read_failures, 0);
51546+ atomic_set_unchecked(&res->write_failures, 0);
51547 rc = 0;
51548 }
51549 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
51550@@ -2646,9 +2646,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
51551
51552 /* If this was a SCSI read/write command keep count of errors */
51553 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
51554- atomic_inc(&res->read_failures);
51555+ atomic_inc_unchecked(&res->read_failures);
51556 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
51557- atomic_inc(&res->write_failures);
51558+ atomic_inc_unchecked(&res->write_failures);
51559
51560 if (!RES_IS_GSCSI(res->cfg_entry) &&
51561 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
51562@@ -3474,7 +3474,7 @@ static int pmcraid_queuecommand_lck(
51563 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51564 * hrrq_id assigned here in queuecommand
51565 */
51566- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51567+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51568 pinstance->num_hrrq;
51569 cmd->cmd_done = pmcraid_io_done;
51570
51571@@ -3788,7 +3788,7 @@ static long pmcraid_ioctl_passthrough(
51572 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51573 * hrrq_id assigned here in queuecommand
51574 */
51575- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51576+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51577 pinstance->num_hrrq;
51578
51579 if (request_size) {
51580@@ -4426,7 +4426,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
51581
51582 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
51583 /* add resources only after host is added into system */
51584- if (!atomic_read(&pinstance->expose_resources))
51585+ if (!atomic_read_unchecked(&pinstance->expose_resources))
51586 return;
51587
51588 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
51589@@ -5243,8 +5243,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
51590 init_waitqueue_head(&pinstance->reset_wait_q);
51591
51592 atomic_set(&pinstance->outstanding_cmds, 0);
51593- atomic_set(&pinstance->last_message_id, 0);
51594- atomic_set(&pinstance->expose_resources, 0);
51595+ atomic_set_unchecked(&pinstance->last_message_id, 0);
51596+ atomic_set_unchecked(&pinstance->expose_resources, 0);
51597
51598 INIT_LIST_HEAD(&pinstance->free_res_q);
51599 INIT_LIST_HEAD(&pinstance->used_res_q);
51600@@ -5957,7 +5957,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
51601 /* Schedule worker thread to handle CCN and take care of adding and
51602 * removing devices to OS
51603 */
51604- atomic_set(&pinstance->expose_resources, 1);
51605+ atomic_set_unchecked(&pinstance->expose_resources, 1);
51606 schedule_work(&pinstance->worker_q);
51607 return rc;
51608
51609diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
51610index e1d150f..6c6df44 100644
51611--- a/drivers/scsi/pmcraid.h
51612+++ b/drivers/scsi/pmcraid.h
51613@@ -748,7 +748,7 @@ struct pmcraid_instance {
51614 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
51615
51616 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
51617- atomic_t last_message_id;
51618+ atomic_unchecked_t last_message_id;
51619
51620 /* configuration table */
51621 struct pmcraid_config_table *cfg_table;
51622@@ -777,7 +777,7 @@ struct pmcraid_instance {
51623 atomic_t outstanding_cmds;
51624
51625 /* should add/delete resources to mid-layer now ?*/
51626- atomic_t expose_resources;
51627+ atomic_unchecked_t expose_resources;
51628
51629
51630
51631@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
51632 struct pmcraid_config_table_entry_ext cfg_entry_ext;
51633 };
51634 struct scsi_device *scsi_dev; /* Link scsi_device structure */
51635- atomic_t read_failures; /* count of failed READ commands */
51636- atomic_t write_failures; /* count of failed WRITE commands */
51637+ atomic_unchecked_t read_failures; /* count of failed READ commands */
51638+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
51639
51640 /* To indicate add/delete/modify during CCN */
51641 u8 change_detected;
51642diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
51643index 82b92c4..3178171 100644
51644--- a/drivers/scsi/qla2xxx/qla_attr.c
51645+++ b/drivers/scsi/qla2xxx/qla_attr.c
51646@@ -2192,7 +2192,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
51647 return 0;
51648 }
51649
51650-struct fc_function_template qla2xxx_transport_functions = {
51651+fc_function_template_no_const qla2xxx_transport_functions = {
51652
51653 .show_host_node_name = 1,
51654 .show_host_port_name = 1,
51655@@ -2240,7 +2240,7 @@ struct fc_function_template qla2xxx_transport_functions = {
51656 .bsg_timeout = qla24xx_bsg_timeout,
51657 };
51658
51659-struct fc_function_template qla2xxx_transport_vport_functions = {
51660+fc_function_template_no_const qla2xxx_transport_vport_functions = {
51661
51662 .show_host_node_name = 1,
51663 .show_host_port_name = 1,
51664diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
51665index 7686bfe..4710893 100644
51666--- a/drivers/scsi/qla2xxx/qla_gbl.h
51667+++ b/drivers/scsi/qla2xxx/qla_gbl.h
51668@@ -571,8 +571,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
51669 struct device_attribute;
51670 extern struct device_attribute *qla2x00_host_attrs[];
51671 struct fc_function_template;
51672-extern struct fc_function_template qla2xxx_transport_functions;
51673-extern struct fc_function_template qla2xxx_transport_vport_functions;
51674+extern fc_function_template_no_const qla2xxx_transport_functions;
51675+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
51676 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
51677 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
51678 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
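qla2xxx fills these transport templates in at runtime (see the qla_os.c hunk just below), so they cannot stay constified. The _no_const typedef is the patch's escape hatch: the same struct, marked so the constify plugin keeps instances of it writable. A plausible form of the typedef, assuming it mirrors the other _no_const typedefs this patch adds to the transport headers:

typedef struct fc_function_template __no_const fc_function_template_no_const;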
51679diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
51680index cce1cbc..5b9f0fe 100644
51681--- a/drivers/scsi/qla2xxx/qla_os.c
51682+++ b/drivers/scsi/qla2xxx/qla_os.c
51683@@ -1435,8 +1435,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
51684 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
51685 /* Ok, a 64bit DMA mask is applicable. */
51686 ha->flags.enable_64bit_addressing = 1;
51687- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
51688- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
51689+ pax_open_kernel();
51690+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
51691+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
51692+ pax_close_kernel();
51693 return;
51694 }
51695 }
51696diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
51697index 8f6d0fb..1b21097 100644
51698--- a/drivers/scsi/qla4xxx/ql4_def.h
51699+++ b/drivers/scsi/qla4xxx/ql4_def.h
51700@@ -305,7 +305,7 @@ struct ddb_entry {
51701 * (4000 only) */
51702 atomic_t relogin_timer; /* Max Time to wait for
51703 * relogin to complete */
51704- atomic_t relogin_retry_count; /* Num of times relogin has been
51705+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
51706 * retried */
51707 uint32_t default_time2wait; /* Default Min time between
51708 * relogins (+aens) */
51709diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
51710index 6d25879..3031a9f 100644
51711--- a/drivers/scsi/qla4xxx/ql4_os.c
51712+++ b/drivers/scsi/qla4xxx/ql4_os.c
51713@@ -4491,12 +4491,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
51714 */
51715 if (!iscsi_is_session_online(cls_sess)) {
51716 /* Reset retry relogin timer */
51717- atomic_inc(&ddb_entry->relogin_retry_count);
51718+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
51719 DEBUG2(ql4_printk(KERN_INFO, ha,
51720 "%s: index[%d] relogin timed out-retrying"
51721 " relogin (%d), retry (%d)\n", __func__,
51722 ddb_entry->fw_ddb_index,
51723- atomic_read(&ddb_entry->relogin_retry_count),
51724+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
51725 ddb_entry->default_time2wait + 4));
51726 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
51727 atomic_set(&ddb_entry->retry_relogin_timer,
51728@@ -6604,7 +6604,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
51729
51730 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
51731 atomic_set(&ddb_entry->relogin_timer, 0);
51732- atomic_set(&ddb_entry->relogin_retry_count, 0);
51733+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
51734 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
51735 ddb_entry->default_relogin_timeout =
51736 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
51737diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
51738index 7129701..b49c4e5 100644
51739--- a/drivers/scsi/scsi_lib.c
51740+++ b/drivers/scsi/scsi_lib.c
51741@@ -1597,7 +1597,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
51742 shost = sdev->host;
51743 scsi_init_cmd_errh(cmd);
51744 cmd->result = DID_NO_CONNECT << 16;
51745- atomic_inc(&cmd->device->iorequest_cnt);
51746+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
51747
51748 /*
51749 * SCSI request completion path will do scsi_device_unbusy(),
51750@@ -1620,9 +1620,9 @@ static void scsi_softirq_done(struct request *rq)
51751
51752 INIT_LIST_HEAD(&cmd->eh_entry);
51753
51754- atomic_inc(&cmd->device->iodone_cnt);
51755+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
51756 if (cmd->result)
51757- atomic_inc(&cmd->device->ioerr_cnt);
51758+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
51759
51760 disposition = scsi_decide_disposition(cmd);
51761 if (disposition != SUCCESS &&
51762@@ -1663,7 +1663,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
51763 struct Scsi_Host *host = cmd->device->host;
51764 int rtn = 0;
51765
51766- atomic_inc(&cmd->device->iorequest_cnt);
51767+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
51768
51769 /* check if the device is still usable */
51770 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
51771diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
51772index 1ac38e7..6acc656 100644
51773--- a/drivers/scsi/scsi_sysfs.c
51774+++ b/drivers/scsi/scsi_sysfs.c
51775@@ -788,7 +788,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
51776 char *buf) \
51777 { \
51778 struct scsi_device *sdev = to_scsi_device(dev); \
51779- unsigned long long count = atomic_read(&sdev->field); \
51780+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
51781 return snprintf(buf, 20, "0x%llx\n", count); \
51782 } \
51783 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
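scsi_sysfs generates one sysfs show function per counter with this macro, so the only change needed here is the read accessor; the counters themselves become unchecked in the scsi_device definition elsewhere in the patch. Expanded by hand for one field (iodone_cnt), the macro yields roughly:

static ssize_t show_iostat_iodone_cnt(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct scsi_device *sdev = to_scsi_device(dev);
        unsigned long long count = atomic_read_unchecked(&sdev->iodone_cnt);

        return snprintf(buf, 20, "0x%llx\n", count);
}
static DEVICE_ATTR(iodone_cnt, S_IRUGO, show_iostat_iodone_cnt, NULL);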
51784diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
51785index 5d6f348..18778a6b 100644
51786--- a/drivers/scsi/scsi_transport_fc.c
51787+++ b/drivers/scsi/scsi_transport_fc.c
51788@@ -501,7 +501,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
51789 * Netlink Infrastructure
51790 */
51791
51792-static atomic_t fc_event_seq;
51793+static atomic_unchecked_t fc_event_seq;
51794
51795 /**
51796 * fc_get_event_number - Obtain the next sequential FC event number
51797@@ -514,7 +514,7 @@ static atomic_t fc_event_seq;
51798 u32
51799 fc_get_event_number(void)
51800 {
51801- return atomic_add_return(1, &fc_event_seq);
51802+ return atomic_add_return_unchecked(1, &fc_event_seq);
51803 }
51804 EXPORT_SYMBOL(fc_get_event_number);
51805
51806@@ -658,7 +658,7 @@ static __init int fc_transport_init(void)
51807 {
51808 int error;
51809
51810- atomic_set(&fc_event_seq, 0);
51811+ atomic_set_unchecked(&fc_event_seq, 0);
51812
51813 error = transport_class_register(&fc_host_class);
51814 if (error)
51815@@ -848,7 +848,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
51816 char *cp;
51817
51818 *val = simple_strtoul(buf, &cp, 0);
51819- if ((*cp && (*cp != '\n')) || (*val < 0))
51820+ if (*cp && (*cp != '\n'))
51821 return -EINVAL;
51822 /*
51823 * Check for overflow; dev_loss_tmo is u32
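The clause dropped from fc_str_to_dev_loss() is a plain correctness fix rather than hardening: *val is an unsigned long filled by simple_strtoul(), so (*val < 0) can never be true and only earns a tautological-comparison warning. The overflow concern in the trailing comment is handled by a separate range check; a minimal sketch of the intended validation, with the range check written out (the exact error code is outside this hunk):

unsigned long val;
char *cp;

val = simple_strtoul(buf, &cp, 0);
if (*cp && *cp != '\n')         /* trailing garbage after the number */
        return -EINVAL;
if (val > UINT_MAX)             /* dev_loss_tmo is a u32 */
        return -EINVAL;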
51824diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
51825index 67d43e3..8cee73c 100644
51826--- a/drivers/scsi/scsi_transport_iscsi.c
51827+++ b/drivers/scsi/scsi_transport_iscsi.c
51828@@ -79,7 +79,7 @@ struct iscsi_internal {
51829 struct transport_container session_cont;
51830 };
51831
51832-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
51833+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
51834 static struct workqueue_struct *iscsi_eh_timer_workq;
51835
51836 static DEFINE_IDA(iscsi_sess_ida);
51837@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
51838 int err;
51839
51840 ihost = shost->shost_data;
51841- session->sid = atomic_add_return(1, &iscsi_session_nr);
51842+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
51843
51844 if (target_id == ISCSI_MAX_TARGET) {
51845 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
51846@@ -4515,7 +4515,7 @@ static __init int iscsi_transport_init(void)
51847 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
51848 ISCSI_TRANSPORT_VERSION);
51849
51850- atomic_set(&iscsi_session_nr, 0);
51851+ atomic_set_unchecked(&iscsi_session_nr, 0);
51852
51853 err = class_register(&iscsi_transport_class);
51854 if (err)
51855diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
51856index ae45bd9..c32a586 100644
51857--- a/drivers/scsi/scsi_transport_srp.c
51858+++ b/drivers/scsi/scsi_transport_srp.c
51859@@ -35,7 +35,7 @@
51860 #include "scsi_priv.h"
51861
51862 struct srp_host_attrs {
51863- atomic_t next_port_id;
51864+ atomic_unchecked_t next_port_id;
51865 };
51866 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
51867
51868@@ -100,7 +100,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
51869 struct Scsi_Host *shost = dev_to_shost(dev);
51870 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
51871
51872- atomic_set(&srp_host->next_port_id, 0);
51873+ atomic_set_unchecked(&srp_host->next_port_id, 0);
51874 return 0;
51875 }
51876
51877@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
51878 rport_fast_io_fail_timedout);
51879 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
51880
51881- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
51882+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
51883 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
51884
51885 transport_setup_device(&rport->dev);
51886diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
51887index 05ea0d4..5af8049 100644
51888--- a/drivers/scsi/sd.c
51889+++ b/drivers/scsi/sd.c
51890@@ -3006,7 +3006,7 @@ static int sd_probe(struct device *dev)
51891 sdkp->disk = gd;
51892 sdkp->index = index;
51893 atomic_set(&sdkp->openers, 0);
51894- atomic_set(&sdkp->device->ioerr_cnt, 0);
51895+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
51896
51897 if (!sdp->request_queue->rq_timeout) {
51898 if (sdp->type != TYPE_MOD)
51899diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
51900index dbf8e77..0d565c7 100644
51901--- a/drivers/scsi/sg.c
51902+++ b/drivers/scsi/sg.c
51903@@ -1098,7 +1098,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
51904 sdp->disk->disk_name,
51905 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
51906 NULL,
51907- (char *)arg);
51908+ (char __user *)arg);
51909 case BLKTRACESTART:
51910 return blk_trace_startstop(sdp->device->request_queue, 1);
51911 case BLKTRACESTOP:
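The sg ioctl hunk only adds the __user qualifier: arg is a userspace pointer handed through to blk_trace_setup(), which expects one. The annotation compiles to nothing on a normal build but lets sparse (make C=1) and the user-copy hardening in this patch see the address-space crossing. For reference, the stock definition from the kernel's compiler headers:

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif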
51912diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
51913index 011a336..fb2b7a0 100644
51914--- a/drivers/soc/tegra/fuse/fuse-tegra.c
51915+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
51916@@ -71,7 +71,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
51917 return i;
51918 }
51919
51920-static struct bin_attribute fuse_bin_attr = {
51921+static bin_attribute_no_const fuse_bin_attr = {
51922 .attr = { .name = "fuse", .mode = S_IRUGO, },
51923 .read = fuse_read,
51924 };
51925diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
51926index a17f533..a2ff039 100644
51927--- a/drivers/spi/spi.c
51928+++ b/drivers/spi/spi.c
51929@@ -2239,7 +2239,7 @@ int spi_bus_unlock(struct spi_master *master)
51930 EXPORT_SYMBOL_GPL(spi_bus_unlock);
51931
51932 /* portable code must never pass more than 32 bytes */
51933-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
51934+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
51935
51936 static u8 *buf;
51937
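SMP_CACHE_BYTES expands to an unsigned long expression on some architectures, and the kernel's max() deliberately rejects operands of different types, so the literal gains a UL suffix. The type check works by comparing the addresses of two temporaries, which warns whenever the pointee types differ; the classic kernel.h definition:

#define max(x, y) ({                            \
        typeof(x) _max1 = (x);                  \
        typeof(y) _max2 = (y);                  \
        (void) (&_max1 == &_max2);              \
        _max1 > _max2 ? _max1 : _max2; })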
51938diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
51939index b41429f..2de5373 100644
51940--- a/drivers/staging/android/timed_output.c
51941+++ b/drivers/staging/android/timed_output.c
51942@@ -25,7 +25,7 @@
51943 #include "timed_output.h"
51944
51945 static struct class *timed_output_class;
51946-static atomic_t device_count;
51947+static atomic_unchecked_t device_count;
51948
51949 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
51950 char *buf)
51951@@ -65,7 +65,7 @@ static int create_timed_output_class(void)
51952 timed_output_class = class_create(THIS_MODULE, "timed_output");
51953 if (IS_ERR(timed_output_class))
51954 return PTR_ERR(timed_output_class);
51955- atomic_set(&device_count, 0);
51956+ atomic_set_unchecked(&device_count, 0);
51957 timed_output_class->dev_groups = timed_output_groups;
51958 }
51959
51960@@ -83,7 +83,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
51961 if (ret < 0)
51962 return ret;
51963
51964- tdev->index = atomic_inc_return(&device_count);
51965+ tdev->index = atomic_inc_return_unchecked(&device_count);
51966 tdev->dev = device_create(timed_output_class, NULL,
51967 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
51968 if (IS_ERR(tdev->dev))
51969diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
51970index f143cb6..6fb8255 100644
51971--- a/drivers/staging/comedi/comedi_fops.c
51972+++ b/drivers/staging/comedi/comedi_fops.c
51973@@ -273,8 +273,8 @@ static void comedi_file_reset(struct file *file)
51974 }
51975 cfp->last_attached = dev->attached;
51976 cfp->last_detach_count = dev->detach_count;
51977- ACCESS_ONCE(cfp->read_subdev) = read_s;
51978- ACCESS_ONCE(cfp->write_subdev) = write_s;
51979+ ACCESS_ONCE_RW(cfp->read_subdev) = read_s;
51980+ ACCESS_ONCE_RW(cfp->write_subdev) = write_s;
51981 }
51982
51983 static void comedi_file_check(struct file *file)
51984@@ -1885,7 +1885,7 @@ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
51985 !(s_old->async->cmd.flags & CMDF_WRITE))
51986 return -EBUSY;
51987
51988- ACCESS_ONCE(cfp->read_subdev) = s_new;
51989+ ACCESS_ONCE_RW(cfp->read_subdev) = s_new;
51990 return 0;
51991 }
51992
51993@@ -1927,7 +1927,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
51994 (s_old->async->cmd.flags & CMDF_WRITE))
51995 return -EBUSY;
51996
51997- ACCESS_ONCE(cfp->write_subdev) = s_new;
51998+ ACCESS_ONCE_RW(cfp->write_subdev) = s_new;
51999 return 0;
52000 }
52001
52002diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
52003index 001348c..cfaac8a 100644
52004--- a/drivers/staging/gdm724x/gdm_tty.c
52005+++ b/drivers/staging/gdm724x/gdm_tty.c
52006@@ -44,7 +44,7 @@
52007 #define gdm_tty_send_control(n, r, v, d, l) (\
52008 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
52009
52010-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
52011+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
52012
52013 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
52014 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
52015diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
52016index 503b2d7..c904931 100644
52017--- a/drivers/staging/line6/driver.c
52018+++ b/drivers/staging/line6/driver.c
52019@@ -463,7 +463,7 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
52020 {
52021 struct usb_device *usbdev = line6->usbdev;
52022 int ret;
52023- unsigned char len;
52024+ unsigned char *plen;
52025
52026 /* query the serial number: */
52027 ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
52028@@ -476,27 +476,34 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
52029 return ret;
52030 }
52031
52032+ plen = kmalloc(1, GFP_KERNEL);
52033+ if (plen == NULL)
52034+ return -ENOMEM;
52035+
52036 /* Wait for data length. We'll get 0xff until length arrives. */
52037 do {
52038 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
52039 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
52040 USB_DIR_IN,
52041- 0x0012, 0x0000, &len, 1,
52042+ 0x0012, 0x0000, plen, 1,
52043 LINE6_TIMEOUT * HZ);
52044 if (ret < 0) {
52045 dev_err(line6->ifcdev,
52046 "receive length failed (error %d)\n", ret);
52047+ kfree(plen);
52048 return ret;
52049 }
52050- } while (len == 0xff);
52051+ } while (*plen == 0xff);
52052
52053- if (len != datalen) {
52054+ if (*plen != datalen) {
52055 /* should be equal or something went wrong */
52056 dev_err(line6->ifcdev,
52057 "length mismatch (expected %d, got %d)\n",
52058- (int)datalen, (int)len);
52059+ (int)datalen, (int)*plen);
52060+ kfree(plen);
52061 return -EINVAL;
52062 }
52063+ kfree(plen);
52064
52065 /* receive the result: */
52066 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
52067@@ -520,7 +527,7 @@ int line6_write_data(struct usb_line6 *line6, int address, void *data,
52068 {
52069 struct usb_device *usbdev = line6->usbdev;
52070 int ret;
52071- unsigned char status;
52072+ unsigned char *status;
52073
52074 ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
52075 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
52076@@ -533,26 +540,34 @@ int line6_write_data(struct usb_line6 *line6, int address, void *data,
52077 return ret;
52078 }
52079
52080+ status = kmalloc(1, GFP_KERNEL);
52081+ if (status == NULL)
52082+ return -ENOMEM;
52083+
52084 do {
52085 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
52086 0x67,
52087 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
52088 USB_DIR_IN,
52089 0x0012, 0x0000,
52090- &status, 1, LINE6_TIMEOUT * HZ);
52091+ status, 1, LINE6_TIMEOUT * HZ);
52092
52093 if (ret < 0) {
52094 dev_err(line6->ifcdev,
52095 "receiving status failed (error %d)\n", ret);
52096+ kfree(status);
52097 return ret;
52098 }
52099- } while (status == 0xff);
52100+ } while (*status == 0xff);
52101
52102- if (status != 0) {
52103+ if (*status != 0) {
52104 dev_err(line6->ifcdev, "write failed (error %d)\n", ret);
52105+ kfree(status);
52106 return -EINVAL;
52107 }
52108
52109+ kfree(status);
52110+
52111 return 0;
52112 }
52113
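Both line6 helpers used a single byte on the stack as the USB transfer buffer. usb_control_msg() hands that buffer to the host controller, which may DMA into it, and the USB documentation forbids on-stack transfer buffers; the fix is the standard pattern of a small kmalloc() buffer that must be freed on every exit path. Condensed, with hypothetical local names:

u8 status;
int ret;
u8 *buf = kmalloc(1, GFP_KERNEL);

if (!buf)
        return -ENOMEM;
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
                      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
                      0x0012, 0x0000, buf, 1, LINE6_TIMEOUT * HZ);
status = *buf;          /* copy out before freeing */
kfree(buf);
if (ret < 0)
        return ret;

The toneport.c hunk that follows applies the same treatment to the four-byte ticks value.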
52114diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
52115index 6943715..0a93632 100644
52116--- a/drivers/staging/line6/toneport.c
52117+++ b/drivers/staging/line6/toneport.c
52118@@ -11,6 +11,7 @@
52119 */
52120
52121 #include <linux/wait.h>
52122+#include <linux/slab.h>
52123 #include <sound/control.h>
52124
52125 #include "audio.h"
52126@@ -307,14 +308,20 @@ static void toneport_destruct(struct usb_interface *interface)
52127 */
52128 static void toneport_setup(struct usb_line6_toneport *toneport)
52129 {
52130- int ticks;
52131+ int *ticks;
52132 struct usb_line6 *line6 = &toneport->line6;
52133 struct usb_device *usbdev = line6->usbdev;
52134 u16 idProduct = le16_to_cpu(usbdev->descriptor.idProduct);
52135
52136+ ticks = kmalloc(sizeof(int), GFP_KERNEL);
52137+ if (ticks == NULL)
52138+ return;
52139+
52140 /* sync time on device with host: */
52141- ticks = (int)get_seconds();
52142- line6_write_data(line6, 0x80c6, &ticks, 4);
52143+ *ticks = (int)get_seconds();
52144+ line6_write_data(line6, 0x80c6, ticks, sizeof(int));
52145+
52146+ kfree(ticks);
52147
52148 /* enable device: */
52149 toneport_send_cmd(usbdev, 0x0301, 0x0000);
52150diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
52151index 463da07..e791ce9 100644
52152--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
52153+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
52154@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
52155 return 0;
52156 }
52157
52158-sfw_test_client_ops_t brw_test_client;
52159-void brw_init_test_client(void)
52160-{
52161- brw_test_client.tso_init = brw_client_init;
52162- brw_test_client.tso_fini = brw_client_fini;
52163- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
52164- brw_test_client.tso_done_rpc = brw_client_done_rpc;
52165+sfw_test_client_ops_t brw_test_client = {
52166+ .tso_init = brw_client_init,
52167+ .tso_fini = brw_client_fini,
52168+ .tso_prep_rpc = brw_client_prep_rpc,
52169+ .tso_done_rpc = brw_client_done_rpc,
52170 };
52171
52172 srpc_service_t brw_test_service;
52173diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
52174index cc9d182..8fabce3 100644
52175--- a/drivers/staging/lustre/lnet/selftest/framework.c
52176+++ b/drivers/staging/lustre/lnet/selftest/framework.c
52177@@ -1628,12 +1628,10 @@ static srpc_service_t sfw_services[] = {
52178
52179 extern sfw_test_client_ops_t ping_test_client;
52180 extern srpc_service_t ping_test_service;
52181-extern void ping_init_test_client(void);
52182 extern void ping_init_test_service(void);
52183
52184 extern sfw_test_client_ops_t brw_test_client;
52185 extern srpc_service_t brw_test_service;
52186-extern void brw_init_test_client(void);
52187 extern void brw_init_test_service(void);
52188
52189
52190@@ -1675,12 +1673,10 @@ sfw_startup (void)
52191 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
52192 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
52193
52194- brw_init_test_client();
52195 brw_init_test_service();
52196 rc = sfw_register_test(&brw_test_service, &brw_test_client);
52197 LASSERT (rc == 0);
52198
52199- ping_init_test_client();
52200 ping_init_test_service();
52201 rc = sfw_register_test(&ping_test_service, &ping_test_client);
52202 LASSERT (rc == 0);
52203diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
52204index d8c0df6..5041cbb 100644
52205--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
52206+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
52207@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
52208 return 0;
52209 }
52210
52211-sfw_test_client_ops_t ping_test_client;
52212-void ping_init_test_client(void)
52213-{
52214- ping_test_client.tso_init = ping_client_init;
52215- ping_test_client.tso_fini = ping_client_fini;
52216- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
52217- ping_test_client.tso_done_rpc = ping_client_done_rpc;
52218-}
52219+sfw_test_client_ops_t ping_test_client = {
52220+ .tso_init = ping_client_init,
52221+ .tso_fini = ping_client_fini,
52222+ .tso_prep_rpc = ping_client_prep_rpc,
52223+ .tso_done_rpc = ping_client_done_rpc,
52224+};
52225
52226 srpc_service_t ping_test_service;
52227 void ping_init_test_service(void)
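Both selftest clients (brw_test_client above and ping_test_client here) change from "declare empty, fill in at startup" to a static designated initializer, which removes the init functions and, more to the point for this patch, leaves a write-once object the constify plugin can place in read-only memory. The shape of the transformation, on a hypothetical ops struct:

struct widget_ops {
        int  (*start)(void);
        void (*stop)(void);
};

static int  w_start(void) { return 0; }
static void w_stop(void)  { }

/* before: assembled at runtime, so the object must stay writable */
struct widget_ops w_ops;
void w_ops_init(void) { w_ops.start = w_start; w_ops.stop = w_stop; }

/* after: fixed at compile time, eligible for .rodata */
struct widget_ops w_ops2 = {
        .start = w_start,
        .stop  = w_stop,
};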
52228diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
52229index 83bc0a9..12ba00a 100644
52230--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
52231+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
52232@@ -1139,7 +1139,7 @@ struct ldlm_callback_suite {
52233 ldlm_completion_callback lcs_completion;
52234 ldlm_blocking_callback lcs_blocking;
52235 ldlm_glimpse_callback lcs_glimpse;
52236-};
52237+} __no_const;
52238
52239 /* ldlm_lockd.c */
52240 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
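Here __no_const is applied to the struct definition itself rather than through a typedef: ldlm_callback_suite values are built on the fly by the locking code (see the ldlm_flock.c hunk below), so every instance of the type must stay writable. The annotation only means something when the constify plugin runs; a sketch of how such a macro is typically guarded in the compiler headers (the authoritative spelling lives elsewhere in this patch):

#ifdef CONSTIFY_PLUGIN
# define __no_const __attribute__((no_const))
#else
# define __no_const
#endif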
52241diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
52242index 2a88b80..62e7e5f 100644
52243--- a/drivers/staging/lustre/lustre/include/obd.h
52244+++ b/drivers/staging/lustre/lustre/include/obd.h
52245@@ -1362,7 +1362,7 @@ struct md_ops {
52246 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
52247 * wrapper function in include/linux/obd_class.h.
52248 */
52249-};
52250+} __no_const;
52251
52252 struct lsm_operations {
52253 void (*lsm_free)(struct lov_stripe_md *);
52254diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52255index a4c252f..b21acac 100644
52256--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52257+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52258@@ -258,7 +258,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
52259 int added = (mode == LCK_NL);
52260 int overlaps = 0;
52261 int splitted = 0;
52262- const struct ldlm_callback_suite null_cbs = { NULL };
52263+ const struct ldlm_callback_suite null_cbs = { };
52264
52265 CDEBUG(D_DLMTRACE,
52266 "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
52267diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52268index 83d3f08..b03adad 100644
52269--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52270+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52271@@ -236,7 +236,7 @@ int proc_console_max_delay_cs(struct ctl_table *table, int write,
52272 void __user *buffer, size_t *lenp, loff_t *ppos)
52273 {
52274 int rc, max_delay_cs;
52275- struct ctl_table dummy = *table;
52276+ ctl_table_no_const dummy = *table;
52277 long d;
52278
52279 dummy.data = &max_delay_cs;
52280@@ -268,7 +268,7 @@ int proc_console_min_delay_cs(struct ctl_table *table, int write,
52281 void __user *buffer, size_t *lenp, loff_t *ppos)
52282 {
52283 int rc, min_delay_cs;
52284- struct ctl_table dummy = *table;
52285+ ctl_table_no_const dummy = *table;
52286 long d;
52287
52288 dummy.data = &min_delay_cs;
52289@@ -300,7 +300,7 @@ int proc_console_backoff(struct ctl_table *table, int write,
52290 void __user *buffer, size_t *lenp, loff_t *ppos)
52291 {
52292 int rc, backoff;
52293- struct ctl_table dummy = *table;
52294+ ctl_table_no_const dummy = *table;
52295
52296 dummy.data = &backoff;
52297 dummy.proc_handler = &proc_dointvec;
52298diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
52299index 2c4fc74..b04ca79 100644
52300--- a/drivers/staging/lustre/lustre/libcfs/module.c
52301+++ b/drivers/staging/lustre/lustre/libcfs/module.c
52302@@ -315,11 +315,11 @@ out:
52303
52304
52305 struct cfs_psdev_ops libcfs_psdev_ops = {
52306- libcfs_psdev_open,
52307- libcfs_psdev_release,
52308- NULL,
52309- NULL,
52310- libcfs_ioctl
52311+ .p_open = libcfs_psdev_open,
52312+ .p_close = libcfs_psdev_release,
52313+ .p_read = NULL,
52314+ .p_write = NULL,
52315+ .p_ioctl = libcfs_ioctl
52316 };
52317
52318 extern int insert_proc(void);
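libcfs_psdev_ops keeps the same five members; only the initializer style changes. Designated initializers bind by field name, so they stay correct if the layout changes, which matters once structure layouts can be randomized, and they make the intentionally NULL slots explicit rather than positional. A small illustration on a hypothetical struct:

struct psdev_like {
        int (*p_open)(void);
        int (*p_close)(void);
        int (*p_ioctl)(void);
};

static int my_open(void)  { return 0; }
static int my_close(void) { return 0; }

/* positional: silently rebinds if fields are ever reordered */
static struct psdev_like a = { my_open, my_close, NULL };

/* designated: order-independent; untouched fields default to NULL */
static struct psdev_like b = { .p_open = my_open, .p_close = my_close };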
52319diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
52320index fcbe836..8a7ada4 100644
52321--- a/drivers/staging/octeon/ethernet-rx.c
52322+++ b/drivers/staging/octeon/ethernet-rx.c
52323@@ -352,14 +352,14 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52324 /* Increment RX stats for virtual ports */
52325 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
52326 #ifdef CONFIG_64BIT
52327- atomic64_add(1,
52328+ atomic64_add_unchecked(1,
52329 (atomic64_t *)&priv->stats.rx_packets);
52330- atomic64_add(skb->len,
52331+ atomic64_add_unchecked(skb->len,
52332 (atomic64_t *)&priv->stats.rx_bytes);
52333 #else
52334- atomic_add(1,
52335+ atomic_add_unchecked(1,
52336 (atomic_t *)&priv->stats.rx_packets);
52337- atomic_add(skb->len,
52338+ atomic_add_unchecked(skb->len,
52339 (atomic_t *)&priv->stats.rx_bytes);
52340 #endif
52341 }
52342@@ -371,10 +371,10 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52343 dev->name);
52344 */
52345 #ifdef CONFIG_64BIT
52346- atomic64_add(1,
52347+ atomic64_add_unchecked(1,
52348 (atomic64_t *)&priv->stats.rx_dropped);
52349 #else
52350- atomic_add(1,
52351+ atomic_add_unchecked(1,
52352 (atomic_t *)&priv->stats.rx_dropped);
52353 #endif
52354 dev_kfree_skb_irq(skb);
52355diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
52356index ee32149..052d1836 100644
52357--- a/drivers/staging/octeon/ethernet.c
52358+++ b/drivers/staging/octeon/ethernet.c
52359@@ -241,11 +241,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
52360 * since the RX tasklet also increments it.
52361 */
52362 #ifdef CONFIG_64BIT
52363- atomic64_add(rx_status.dropped_packets,
52364- (atomic64_t *)&priv->stats.rx_dropped);
52365+ atomic64_add_unchecked(rx_status.dropped_packets,
52366+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
52367 #else
52368- atomic_add(rx_status.dropped_packets,
52369- (atomic_t *)&priv->stats.rx_dropped);
52370+ atomic_add_unchecked(rx_status.dropped_packets,
52371+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
52372 #endif
52373 }
52374
52375diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
52376index 3b476d8..f522d68 100644
52377--- a/drivers/staging/rtl8188eu/include/hal_intf.h
52378+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
52379@@ -225,7 +225,7 @@ struct hal_ops {
52380
52381 void (*hal_notch_filter)(struct adapter *adapter, bool enable);
52382 void (*hal_reset_security_engine)(struct adapter *adapter);
52383-};
52384+} __no_const;
52385
52386 enum rt_eeprom_type {
52387 EEPROM_93C46,
52388diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
52389index 070cc03..6806e37 100644
52390--- a/drivers/staging/rtl8712/rtl871x_io.h
52391+++ b/drivers/staging/rtl8712/rtl871x_io.h
52392@@ -108,7 +108,7 @@ struct _io_ops {
52393 u8 *pmem);
52394 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
52395 u8 *pmem);
52396-};
52397+} __no_const;
52398
52399 struct io_req {
52400 struct list_head list;
52401diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
52402index 46dad63..fe4acdc 100644
52403--- a/drivers/staging/unisys/visorchipset/visorchipset.h
52404+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
52405@@ -226,7 +226,7 @@ struct visorchipset_busdev_notifiers {
52406 void (*device_resume)(ulong bus_no, ulong dev_no);
52407 int (*get_channel_info)(uuid_le type_uuid, ulong *min_size,
52408 ulong *max_size);
52409-};
52410+} __no_const;
52411
52412 /* These functions live inside visorchipset, and will be called to indicate
52413 * responses to specific events (by code outside of visorchipset).
52414@@ -241,7 +241,7 @@ struct visorchipset_busdev_responders {
52415 void (*device_destroy)(ulong bus_no, ulong dev_no, int response);
52416 void (*device_pause)(ulong bus_no, ulong dev_no, int response);
52417 void (*device_resume)(ulong bus_no, ulong dev_no, int response);
52418-};
52419+} __no_const;
52420
52421 /** Register functions (in the bus driver) to get called by visorchipset
52422 * whenever a bus or device appears for which this service partition is
52423diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
52424index 9512af6..045bf5a 100644
52425--- a/drivers/target/sbp/sbp_target.c
52426+++ b/drivers/target/sbp/sbp_target.c
52427@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
52428
52429 #define SESSION_MAINTENANCE_INTERVAL HZ
52430
52431-static atomic_t login_id = ATOMIC_INIT(0);
52432+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
52433
52434 static void session_maintenance_work(struct work_struct *);
52435 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
52436@@ -444,7 +444,7 @@ static void sbp_management_request_login(
52437 login->lun = se_lun;
52438 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
52439 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
52440- login->login_id = atomic_inc_return(&login_id);
52441+ login->login_id = atomic_inc_return_unchecked(&login_id);
52442
52443 login->tgt_agt = sbp_target_agent_register(login);
52444 if (IS_ERR(login->tgt_agt)) {
52445diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
52446index 54da2a4..3dd6f57 100644
52447--- a/drivers/target/target_core_device.c
52448+++ b/drivers/target/target_core_device.c
52449@@ -1469,7 +1469,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
52450 spin_lock_init(&dev->se_tmr_lock);
52451 spin_lock_init(&dev->qf_cmd_lock);
52452 sema_init(&dev->caw_sem, 1);
52453- atomic_set(&dev->dev_ordered_id, 0);
52454+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
52455 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
52456 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
52457 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
52458diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
52459index ac3cbab..f0d1dd2 100644
52460--- a/drivers/target/target_core_transport.c
52461+++ b/drivers/target/target_core_transport.c
52462@@ -1168,7 +1168,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
52463 * Used to determine when ORDERED commands should go from
52464 * Dormant to Active status.
52465 */
52466- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
52467+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
52468 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
52469 cmd->se_ordered_id, cmd->sam_task_attr,
52470 dev->transport->name);
52471diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
52472index 65a98a9..d93d3a8 100644
52473--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
52474+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
52475@@ -277,8 +277,10 @@ static int int3400_thermal_probe(struct platform_device *pdev)
52476 platform_set_drvdata(pdev, priv);
52477
52478 if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
52479- int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
52480- int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
52481+ pax_open_kernel();
52482+ *(void **)&int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
52483+ *(void **)&int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
52484+ pax_close_kernel();
52485 }
52486 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
52487 priv, &int3400_thermal_ops,
52488diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
52489index d717f3d..cae1cc3e 100644
52490--- a/drivers/thermal/of-thermal.c
52491+++ b/drivers/thermal/of-thermal.c
52492@@ -31,6 +31,7 @@
52493 #include <linux/export.h>
52494 #include <linux/string.h>
52495 #include <linux/thermal.h>
52496+#include <linux/mm.h>
52497
52498 #include "thermal_core.h"
52499
52500@@ -412,9 +413,11 @@ thermal_zone_of_add_sensor(struct device_node *zone,
52501 tz->ops = ops;
52502 tz->sensor_data = data;
52503
52504- tzd->ops->get_temp = of_thermal_get_temp;
52505- tzd->ops->get_trend = of_thermal_get_trend;
52506- tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
52507+ pax_open_kernel();
52508+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
52509+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
52510+ *(void **)&tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
52511+ pax_close_kernel();
52512 mutex_unlock(&tzd->lock);
52513
52514 return tzd;
52515@@ -541,9 +544,11 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
52516 return;
52517
52518 mutex_lock(&tzd->lock);
52519- tzd->ops->get_temp = NULL;
52520- tzd->ops->get_trend = NULL;
52521- tzd->ops->set_emul_temp = NULL;
52522+ pax_open_kernel();
52523+ *(void **)&tzd->ops->get_temp = NULL;
52524+ *(void **)&tzd->ops->get_trend = NULL;
52525+ *(void **)&tzd->ops->set_emul_temp = NULL;
52526+ pax_close_kernel();
52527
52528 tz->ops = NULL;
52529 tz->sensor_data = NULL;
52530diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
52531index fd66f57..48e6376 100644
52532--- a/drivers/tty/cyclades.c
52533+++ b/drivers/tty/cyclades.c
52534@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
52535 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
52536 info->port.count);
52537 #endif
52538- info->port.count++;
52539+ atomic_inc(&info->port.count);
52540 #ifdef CY_DEBUG_COUNT
52541 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
52542- current->pid, info->port.count);
52543+ current->pid, atomic_read(&info->port.count));
52544 #endif
52545
52546 /*
52547@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
52548 for (j = 0; j < cy_card[i].nports; j++) {
52549 info = &cy_card[i].ports[j];
52550
52551- if (info->port.count) {
52552+ if (atomic_read(&info->port.count)) {
52553 /* XXX is the ldisc num worth this? */
52554 struct tty_struct *tty;
52555 struct tty_ldisc *ld;
52556diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
52557index 4fcec1d..5a036f7 100644
52558--- a/drivers/tty/hvc/hvc_console.c
52559+++ b/drivers/tty/hvc/hvc_console.c
52560@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
52561
52562 spin_lock_irqsave(&hp->port.lock, flags);
52563 /* Check and then increment for fast path open. */
52564- if (hp->port.count++ > 0) {
52565+ if (atomic_inc_return(&hp->port.count) > 1) {
52566 spin_unlock_irqrestore(&hp->port.lock, flags);
52567 hvc_kick();
52568 return 0;
52569@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52570
52571 spin_lock_irqsave(&hp->port.lock, flags);
52572
52573- if (--hp->port.count == 0) {
52574+ if (atomic_dec_return(&hp->port.count) == 0) {
52575 spin_unlock_irqrestore(&hp->port.lock, flags);
52576 /* We are done with the tty pointer now. */
52577 tty_port_tty_set(&hp->port, NULL);
52578@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52579 */
52580 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
52581 } else {
52582- if (hp->port.count < 0)
52583+ if (atomic_read(&hp->port.count) < 0)
52584 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
52585- hp->vtermno, hp->port.count);
52586+ hp->vtermno, atomic_read(&hp->port.count));
52587 spin_unlock_irqrestore(&hp->port.lock, flags);
52588 }
52589 }
52590@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
52591 * open->hangup case this can be called after the final close so prevent
52592 * that from happening for now.
52593 */
52594- if (hp->port.count <= 0) {
52595+ if (atomic_read(&hp->port.count) <= 0) {
52596 spin_unlock_irqrestore(&hp->port.lock, flags);
52597 return;
52598 }
52599
52600- hp->port.count = 0;
52601+ atomic_set(&hp->port.count, 0);
52602 spin_unlock_irqrestore(&hp->port.lock, flags);
52603 tty_port_tty_set(&hp->port, NULL);
52604
52605@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
52606 return -EPIPE;
52607
52608 /* FIXME what's this (unprotected) check for? */
52609- if (hp->port.count <= 0)
52610+ if (atomic_read(&hp->port.count) <= 0)
52611 return -EIO;
52612
52613 spin_lock_irqsave(&hp->lock, flags);
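The hvc conversion (like cyclades above and hvcs below) follows from one change made elsewhere in this patch: the count field of struct tty_port becomes an atomic_t, so every driver that touches it directly is converted in lockstep. Raw ++/-- become the atomic helpers, and the open/close fast paths test the returned value instead of re-reading the field. Condensed, assuming the atomic field:

/* open: only the first opener performs the setup work */
if (atomic_inc_return(&hp->port.count) > 1)
        return 0;       /* port already open */

/* close: only the last closer tears the port down */
if (atomic_dec_return(&hp->port.count) == 0)
        tty_port_tty_set(&hp->port, NULL);

hvsi.c below instead applies the earlier atomic_unchecked_t pattern to its packet sequence number, which is a counter, not an open count.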
52614diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
52615index 81ff7e1..dfb7b71 100644
52616--- a/drivers/tty/hvc/hvcs.c
52617+++ b/drivers/tty/hvc/hvcs.c
52618@@ -83,6 +83,7 @@
52619 #include <asm/hvcserver.h>
52620 #include <asm/uaccess.h>
52621 #include <asm/vio.h>
52622+#include <asm/local.h>
52623
52624 /*
52625 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
52626@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
52627
52628 spin_lock_irqsave(&hvcsd->lock, flags);
52629
52630- if (hvcsd->port.count > 0) {
52631+ if (atomic_read(&hvcsd->port.count) > 0) {
52632 spin_unlock_irqrestore(&hvcsd->lock, flags);
52633 printk(KERN_INFO "HVCS: vterm state unchanged. "
52634 "The hvcs device node is still in use.\n");
52635@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
52636 }
52637 }
52638
52639- hvcsd->port.count = 0;
52640+ atomic_set(&hvcsd->port.count, 0);
52641 hvcsd->port.tty = tty;
52642 tty->driver_data = hvcsd;
52643
52644@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
52645 unsigned long flags;
52646
52647 spin_lock_irqsave(&hvcsd->lock, flags);
52648- hvcsd->port.count++;
52649+ atomic_inc(&hvcsd->port.count);
52650 hvcsd->todo_mask |= HVCS_SCHED_READ;
52651 spin_unlock_irqrestore(&hvcsd->lock, flags);
52652
52653@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52654 hvcsd = tty->driver_data;
52655
52656 spin_lock_irqsave(&hvcsd->lock, flags);
52657- if (--hvcsd->port.count == 0) {
52658+ if (atomic_dec_and_test(&hvcsd->port.count)) {
52659
52660 vio_disable_interrupts(hvcsd->vdev);
52661
52662@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52663
52664 free_irq(irq, hvcsd);
52665 return;
52666- } else if (hvcsd->port.count < 0) {
52667+ } else if (atomic_read(&hvcsd->port.count) < 0) {
52668 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
52669 " is missmanaged.\n",
52670- hvcsd->vdev->unit_address, hvcsd->port.count);
52671+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
52672 }
52673
52674 spin_unlock_irqrestore(&hvcsd->lock, flags);
52675@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52676
52677 spin_lock_irqsave(&hvcsd->lock, flags);
52678 /* Preserve this so that we know how many kref refs to put */
52679- temp_open_count = hvcsd->port.count;
52680+ temp_open_count = atomic_read(&hvcsd->port.count);
52681
52682 /*
52683 * Don't kref put inside the spinlock because the destruction
52684@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52685 tty->driver_data = NULL;
52686 hvcsd->port.tty = NULL;
52687
52688- hvcsd->port.count = 0;
52689+ atomic_set(&hvcsd->port.count, 0);
52690
52691 /* This will drop any buffered data on the floor which is OK in a hangup
52692 * scenario. */
52693@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
52694 * the middle of a write operation? This is a crummy place to do this
52695 * but we want to keep it all in the spinlock.
52696 */
52697- if (hvcsd->port.count <= 0) {
52698+ if (atomic_read(&hvcsd->port.count) <= 0) {
52699 spin_unlock_irqrestore(&hvcsd->lock, flags);
52700 return -ENODEV;
52701 }
52702@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
52703 {
52704 struct hvcs_struct *hvcsd = tty->driver_data;
52705
52706- if (!hvcsd || hvcsd->port.count <= 0)
52707+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
52708 return 0;
52709
52710 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
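
hvcs_close above maps "--hvcsd->port.count == 0" onto atomic_dec_and_test(), and both fire exactly when the decrement brings the count to zero. A runnable check of that equivalence, again with C11 atomics standing in for the kernel primitives:

    #include <assert.h>
    #include <stdatomic.h>

    /* Classic form: true when this close drops the last reference. */
    static int close_last_old(int *count)
    {
        return --*count == 0;
    }

    /* fetch_sub returns the old value, so old - 1 == 0 iff old == 1. */
    static int close_last_new(atomic_int *count)
    {
        return atomic_fetch_sub(count, 1) == 1;
    }

    int main(void)
    {
        int c = 2;
        atomic_int ac = 2;
        assert(close_last_old(&c) == close_last_new(&ac)); /* 2 -> 1: not last */
        assert(close_last_old(&c) == close_last_new(&ac)); /* 1 -> 0: last */
        return 0;
    }
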
52711diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
52712index 4190199..06d5bfa 100644
52713--- a/drivers/tty/hvc/hvsi.c
52714+++ b/drivers/tty/hvc/hvsi.c
52715@@ -85,7 +85,7 @@ struct hvsi_struct {
52716 int n_outbuf;
52717 uint32_t vtermno;
52718 uint32_t virq;
52719- atomic_t seqno; /* HVSI packet sequence number */
52720+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
52721 uint16_t mctrl;
52722 uint8_t state; /* HVSI protocol state */
52723 uint8_t flags;
52724@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
52725
52726 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
52727 packet.hdr.len = sizeof(struct hvsi_query_response);
52728- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52729+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52730 packet.verb = VSV_SEND_VERSION_NUMBER;
52731 packet.u.version = HVSI_VERSION;
52732 packet.query_seqno = query_seqno+1;
52733@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
52734
52735 packet.hdr.type = VS_QUERY_PACKET_HEADER;
52736 packet.hdr.len = sizeof(struct hvsi_query);
52737- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52738+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52739 packet.verb = verb;
52740
52741 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
52742@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
52743 int wrote;
52744
52745 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
52746- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52747+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52748 packet.hdr.len = sizeof(struct hvsi_control);
52749 packet.verb = VSV_SET_MODEM_CTL;
52750 packet.mask = HVSI_TSDTR;
52751@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
52752 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
52753
52754 packet.hdr.type = VS_DATA_PACKET_HEADER;
52755- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52756+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52757 packet.hdr.len = count + sizeof(struct hvsi_header);
52758 memcpy(&packet.data, buf, count);
52759
52760@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
52761 struct hvsi_control packet __ALIGNED__;
52762
52763 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
52764- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52765+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52766 packet.hdr.len = 6;
52767 packet.verb = VSV_CLOSE_PROTOCOL;
52768
52769@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
52770
52771 tty_port_tty_set(&hp->port, tty);
52772 spin_lock_irqsave(&hp->lock, flags);
52773- hp->port.count++;
52774+ atomic_inc(&hp->port.count);
52775 atomic_set(&hp->seqno, 0);
52776 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
52777 spin_unlock_irqrestore(&hp->lock, flags);
52778@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
52779
52780 spin_lock_irqsave(&hp->lock, flags);
52781
52782- if (--hp->port.count == 0) {
52783+ if (atomic_dec_return(&hp->port.count) == 0) {
52784 tty_port_tty_set(&hp->port, NULL);
52785 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
52786
52787@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
52788
52789 spin_lock_irqsave(&hp->lock, flags);
52790 }
52791- } else if (hp->port.count < 0)
52792+ } else if (atomic_read(&hp->port.count) < 0)
52793 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
52794- hp - hvsi_ports, hp->port.count);
52795+ hp - hvsi_ports, atomic_read(&hp->port.count));
52796
52797 spin_unlock_irqrestore(&hp->lock, flags);
52798 }
52799@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
52800 tty_port_tty_set(&hp->port, NULL);
52801
52802 spin_lock_irqsave(&hp->lock, flags);
52803- hp->port.count = 0;
52804+ atomic_set(&hp->port.count, 0);
52805 hp->n_outbuf = 0;
52806 spin_unlock_irqrestore(&hp->lock, flags);
52807 }
52808diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
52809index a270f04..7c77b5d 100644
52810--- a/drivers/tty/hvc/hvsi_lib.c
52811+++ b/drivers/tty/hvc/hvsi_lib.c
52812@@ -8,7 +8,7 @@
52813
52814 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
52815 {
52816- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
52817+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
52818
52819 /* Assumes that always succeeds, works in practice */
52820 return pv->put_chars(pv->termno, (char *)packet, packet->len);
52821@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
52822
52823 /* Reset state */
52824 pv->established = 0;
52825- atomic_set(&pv->seqno, 0);
52826+ atomic_set_unchecked(&pv->seqno, 0);
52827
52828 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
52829
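
The hvsi and hvsi_lib hunks above move the packet sequence number from atomic_t to atomic_unchecked_t. Under PaX's REFCOUNT hardening, plain atomic_t operations trap on overflow to catch reference-count bugs; a wire sequence number is expected to wrap, so it is switched to the _unchecked variants that opt out of the check. With REFCOUNT disabled, the unchecked API degrades to the ordinary one, roughly in this shape (a sketch of the fallback, not the patch's own definition):

    /* Fallback when CONFIG_PAX_REFCOUNT is off: the unchecked
     * atomics are simply the normal ones (sketch). */
    #ifndef CONFIG_PAX_REFCOUNT
    typedef atomic_t atomic_unchecked_t;
    #define atomic_read_unchecked(v)        atomic_read(v)
    #define atomic_set_unchecked(v, i)      atomic_set((v), (i))
    #define atomic_inc_unchecked(v)         atomic_inc(v)
    #define atomic_dec_unchecked(v)         atomic_dec(v)
    #define atomic_inc_return_unchecked(v)  atomic_inc_return(v)
    #endif
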
52830diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
52831index 345cebb..d5a1e9e 100644
52832--- a/drivers/tty/ipwireless/tty.c
52833+++ b/drivers/tty/ipwireless/tty.c
52834@@ -28,6 +28,7 @@
52835 #include <linux/tty_driver.h>
52836 #include <linux/tty_flip.h>
52837 #include <linux/uaccess.h>
52838+#include <asm/local.h>
52839
52840 #include "tty.h"
52841 #include "network.h"
52842@@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
52843 return -ENODEV;
52844
52845 mutex_lock(&tty->ipw_tty_mutex);
52846- if (tty->port.count == 0)
52847+ if (atomic_read(&tty->port.count) == 0)
52848 tty->tx_bytes_queued = 0;
52849
52850- tty->port.count++;
52851+ atomic_inc(&tty->port.count);
52852
52853 tty->port.tty = linux_tty;
52854 linux_tty->driver_data = tty;
52855@@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
52856
52857 static void do_ipw_close(struct ipw_tty *tty)
52858 {
52859- tty->port.count--;
52860-
52861- if (tty->port.count == 0) {
52862+ if (atomic_dec_return(&tty->port.count) == 0) {
52863 struct tty_struct *linux_tty = tty->port.tty;
52864
52865 if (linux_tty != NULL) {
52866@@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
52867 return;
52868
52869 mutex_lock(&tty->ipw_tty_mutex);
52870- if (tty->port.count == 0) {
52871+ if (atomic_read(&tty->port.count) == 0) {
52872 mutex_unlock(&tty->ipw_tty_mutex);
52873 return;
52874 }
52875@@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
52876
52877 mutex_lock(&tty->ipw_tty_mutex);
52878
52879- if (!tty->port.count) {
52880+ if (!atomic_read(&tty->port.count)) {
52881 mutex_unlock(&tty->ipw_tty_mutex);
52882 return;
52883 }
52884@@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty,
52885 return -ENODEV;
52886
52887 mutex_lock(&tty->ipw_tty_mutex);
52888- if (!tty->port.count) {
52889+ if (!atomic_read(&tty->port.count)) {
52890 mutex_unlock(&tty->ipw_tty_mutex);
52891 return -EINVAL;
52892 }
52893@@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
52894 if (!tty)
52895 return -ENODEV;
52896
52897- if (!tty->port.count)
52898+ if (!atomic_read(&tty->port.count))
52899 return -EINVAL;
52900
52901 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
52902@@ -279,7 +278,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
52903 if (!tty)
52904 return 0;
52905
52906- if (!tty->port.count)
52907+ if (!atomic_read(&tty->port.count))
52908 return 0;
52909
52910 return tty->tx_bytes_queued;
52911@@ -360,7 +359,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
52912 if (!tty)
52913 return -ENODEV;
52914
52915- if (!tty->port.count)
52916+ if (!atomic_read(&tty->port.count))
52917 return -EINVAL;
52918
52919 return get_control_lines(tty);
52920@@ -376,7 +375,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
52921 if (!tty)
52922 return -ENODEV;
52923
52924- if (!tty->port.count)
52925+ if (!atomic_read(&tty->port.count))
52926 return -EINVAL;
52927
52928 return set_control_lines(tty, set, clear);
52929@@ -390,7 +389,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
52930 if (!tty)
52931 return -ENODEV;
52932
52933- if (!tty->port.count)
52934+ if (!atomic_read(&tty->port.count))
52935 return -EINVAL;
52936
52937 /* FIXME: Exactly how is the tty object locked here .. */
52938@@ -546,7 +545,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
52939 * are gone */
52940 mutex_lock(&ttyj->ipw_tty_mutex);
52941 }
52942- while (ttyj->port.count)
52943+ while (atomic_read(&ttyj->port.count))
52944 do_ipw_close(ttyj);
52945 ipwireless_disassociate_network_ttys(network,
52946 ttyj->channel_idx);
52947diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
52948index 14c54e0..1efd4f2 100644
52949--- a/drivers/tty/moxa.c
52950+++ b/drivers/tty/moxa.c
52951@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
52952 }
52953
52954 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
52955- ch->port.count++;
52956+ atomic_inc(&ch->port.count);
52957 tty->driver_data = ch;
52958 tty_port_tty_set(&ch->port, tty);
52959 mutex_lock(&ch->port.mutex);
52960diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
52961index c434376..114ce13 100644
52962--- a/drivers/tty/n_gsm.c
52963+++ b/drivers/tty/n_gsm.c
52964@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
52965 spin_lock_init(&dlci->lock);
52966 mutex_init(&dlci->mutex);
52967 dlci->fifo = &dlci->_fifo;
52968- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
52969+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
52970 kfree(dlci);
52971 return NULL;
52972 }
52973@@ -2958,7 +2958,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
52974 struct gsm_dlci *dlci = tty->driver_data;
52975 struct tty_port *port = &dlci->port;
52976
52977- port->count++;
52978+ atomic_inc(&port->count);
52979 tty_port_tty_set(port, tty);
52980
52981 dlci->modem_rx = 0;
52982diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
52983index 6f8cf3a..c690dfb 100644
52984--- a/drivers/tty/n_tty.c
52985+++ b/drivers/tty/n_tty.c
52986@@ -115,7 +115,7 @@ struct n_tty_data {
52987 int minimum_to_wake;
52988
52989 /* consumer-published */
52990- size_t read_tail;
52991+ size_t read_tail __intentional_overflow(-1);
52992 size_t line_start;
52993
52994 /* protected by output lock */
52995@@ -2555,6 +2555,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
52996 {
52997 *ops = tty_ldisc_N_TTY;
52998 ops->owner = NULL;
52999- ops->refcount = ops->flags = 0;
53000+ atomic_set(&ops->refcount, 0);
53001+ ops->flags = 0;
53002 }
53003 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
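
read_tail above gains an __intentional_overflow(-1) annotation, a marker for grsecurity's size_overflow GCC plugin: the plugin instruments integer arithmetic and traps on unexpected overflow, and the annotation exempts values like this free-running ring-buffer index, which is consumed modulo the buffer size and wraps by design. Builds without the plugin need the attribute to expand to nothing, along these lines (a hedged sketch of the fallback, not the patch's own definition):

    /* No-op fallback so annotated declarations still compile when
     * the size_overflow plugin is not in use (sketch). */
    #ifndef __intentional_overflow
    #define __intentional_overflow(...)
    #endif
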
53004diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
53005index 6e1f150..c3ba598 100644
53006--- a/drivers/tty/pty.c
53007+++ b/drivers/tty/pty.c
53008@@ -850,8 +850,10 @@ static void __init unix98_pty_init(void)
53009 panic("Couldn't register Unix98 pts driver");
53010
53011 /* Now create the /dev/ptmx special device */
53012+ pax_open_kernel();
53013 tty_default_fops(&ptmx_fops);
53014- ptmx_fops.open = ptmx_open;
53015+ *(void **)&ptmx_fops.open = ptmx_open;
53016+ pax_close_kernel();
53017
53018 cdev_init(&ptmx_cdev, &ptmx_fops);
53019 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
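
The unix98_pty_init hunk above wraps the one-time assignment of ptmx_fops.open in pax_open_kernel()/pax_close_kernel(). With PaX structure constification, file_operations instances are moved to read-only memory after init; the pair briefly re-enables writes (on x86 by toggling CR0.WP) and the *(void **)& cast strips the const qualifier so the single assignment compiles. The tty_default_fops() change later in this section, a memcpy through a void * cast, is the matching half that copies the now-const tty_fops template. A userspace analogue of the open/write/close window, with mprotect in place of the PaX primitives and all names hypothetical:

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct file_ops { int (*open)(void); };

    static int demo_open(void) { return 42; }

    int main(void)
    {
        long pg = sysconf(_SC_PAGESIZE);
        struct file_ops *ops = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ops == MAP_FAILED)
            return 1;
        mprotect(ops, pg, PROT_READ);               /* "constified" state */
        mprotect(ops, pg, PROT_READ | PROT_WRITE);  /* pax_open_kernel() */
        ops->open = demo_open;                      /* the one-time write */
        mprotect(ops, pg, PROT_READ);               /* pax_close_kernel() */
        printf("handler installed, returns %d\n", ops->open());
        return 0;
    }
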
53020diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
53021index 383c4c7..d408e21 100644
53022--- a/drivers/tty/rocket.c
53023+++ b/drivers/tty/rocket.c
53024@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
53025 tty->driver_data = info;
53026 tty_port_tty_set(port, tty);
53027
53028- if (port->count++ == 0) {
53029+ if (atomic_inc_return(&port->count) == 1) {
53030 atomic_inc(&rp_num_ports_open);
53031
53032 #ifdef ROCKET_DEBUG_OPEN
53033@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
53034 #endif
53035 }
53036 #ifdef ROCKET_DEBUG_OPEN
53037- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
53038+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
53038+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
53039 #endif
53040
53041 /*
53042@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
53043 spin_unlock_irqrestore(&info->port.lock, flags);
53044 return;
53045 }
53046- if (info->port.count)
53047+ if (atomic_read(&info->port.count))
53048 atomic_dec(&rp_num_ports_open);
53049 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
53050 spin_unlock_irqrestore(&info->port.lock, flags);
53051diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
53052index aa28209..e08fb85 100644
53053--- a/drivers/tty/serial/ioc4_serial.c
53054+++ b/drivers/tty/serial/ioc4_serial.c
53055@@ -437,7 +437,7 @@ struct ioc4_soft {
53056 } is_intr_info[MAX_IOC4_INTR_ENTS];
53057
53058 /* Number of entries active in the above array */
53059- atomic_t is_num_intrs;
53060+ atomic_unchecked_t is_num_intrs;
53061 } is_intr_type[IOC4_NUM_INTR_TYPES];
53062
53063 /* is_ir_lock must be held while
53064@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
53065 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
53066 || (type == IOC4_OTHER_INTR_TYPE)));
53067
53068- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
53069+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
53070 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
53071
53072 /* Save off the lower level interrupt handler */
53073@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
53074
53075 soft = arg;
53076 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
53077- num_intrs = (int)atomic_read(
53078+ num_intrs = (int)atomic_read_unchecked(
53079 &soft->is_intr_type[intr_type].is_num_intrs);
53080
53081 this_mir = this_ir = pending_intrs(soft, intr_type);
53082diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
53083index 129dc5b..1da5bb8 100644
53084--- a/drivers/tty/serial/kgdb_nmi.c
53085+++ b/drivers/tty/serial/kgdb_nmi.c
53086@@ -53,7 +53,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
53087 * I/O utilities that messages sent to the console will automatically
53088 * be displayed on the dbg_io.
53089 */
53090- dbg_io_ops->is_console = true;
53091+ pax_open_kernel();
53092+ *(int *)&dbg_io_ops->is_console = true;
53093+ pax_close_kernel();
53094
53095 return 0;
53096 }
53097diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
53098index a260cde..6b2b5ce 100644
53099--- a/drivers/tty/serial/kgdboc.c
53100+++ b/drivers/tty/serial/kgdboc.c
53101@@ -24,8 +24,9 @@
53102 #define MAX_CONFIG_LEN 40
53103
53104 static struct kgdb_io kgdboc_io_ops;
53105+static struct kgdb_io kgdboc_io_ops_console;
53106
53107-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
53108+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
53109 static int configured = -1;
53110
53111 static char config[MAX_CONFIG_LEN];
53112@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
53113 kgdboc_unregister_kbd();
53114 if (configured == 1)
53115 kgdb_unregister_io_module(&kgdboc_io_ops);
53116+ else if (configured == 2)
53117+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
53118 }
53119
53120 static int configure_kgdboc(void)
53121@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
53122 int err;
53123 char *cptr = config;
53124 struct console *cons;
53125+ int is_console = 0;
53126
53127 err = kgdboc_option_setup(config);
53128 if (err || !strlen(config) || isspace(config[0]))
53129 goto noconfig;
53130
53131 err = -ENODEV;
53132- kgdboc_io_ops.is_console = 0;
53133 kgdb_tty_driver = NULL;
53134
53135 kgdboc_use_kms = 0;
53136@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
53137 int idx;
53138 if (cons->device && cons->device(cons, &idx) == p &&
53139 idx == tty_line) {
53140- kgdboc_io_ops.is_console = 1;
53141+ is_console = 1;
53142 break;
53143 }
53144 cons = cons->next;
53145@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
53146 kgdb_tty_line = tty_line;
53147
53148 do_register:
53149- err = kgdb_register_io_module(&kgdboc_io_ops);
53150+ if (is_console) {
53151+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
53152+ configured = 2;
53153+ } else {
53154+ err = kgdb_register_io_module(&kgdboc_io_ops);
53155+ configured = 1;
53156+ }
53157 if (err)
53158 goto noconfig;
53159
53160@@ -205,8 +214,6 @@ do_register:
53161 if (err)
53162 goto nmi_con_failed;
53163
53164- configured = 1;
53165-
53166 return 0;
53167
53168 nmi_con_failed:
53169@@ -223,7 +230,7 @@ noconfig:
53170 static int __init init_kgdboc(void)
53171 {
53172 /* Already configured? */
53173- if (configured == 1)
53174+ if (configured >= 1)
53175 return 0;
53176
53177 return configure_kgdboc();
53178@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
53179 if (config[len - 1] == '\n')
53180 config[len - 1] = '\0';
53181
53182- if (configured == 1)
53183+ if (configured >= 1)
53184 cleanup_kgdboc();
53185
53186 /* Go and configure with the new params. */
53187@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
53188 .post_exception = kgdboc_post_exp_handler,
53189 };
53190
53191+static struct kgdb_io kgdboc_io_ops_console = {
53192+ .name = "kgdboc",
53193+ .read_char = kgdboc_get_char,
53194+ .write_char = kgdboc_put_char,
53195+ .pre_exception = kgdboc_pre_exp_handler,
53196+ .post_exception = kgdboc_post_exp_handler,
53197+ .is_console = 1
53198+};
53199+
53200 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
53201 /* This is only available if kgdboc is a built in for early debugging */
53202 static int __init kgdboc_early_init(char *opt)
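
Rather than flipping kgdboc_io_ops.is_console at runtime, which a constified kgdb_io no longer permits, the kgdboc changes above keep two static instances, one with is_console set, and register whichever applies; "configured" is widened to 1 or 2 so cleanup_kgdboc() unregisters the matching instance. The ehci-dbgp hunk at the end of this section applies the same two-instance pattern. A runnable miniature of selecting one of two const structures (names hypothetical):

    #include <stdio.h>

    struct io_ops { const char *name; int is_console; };

    static const struct io_ops ops_plain   = { "demo", 0 };
    static const struct io_ops ops_console = { "demo", 1 };

    static const struct io_ops *registered;
    static int configured;  /* 0 = none, 1 = plain, 2 = console */

    static void do_register(int is_console)
    {
        registered = is_console ? &ops_console : &ops_plain;
        configured = is_console ? 2 : 1;
    }

    int main(void)
    {
        do_register(1);
        printf("configured=%d is_console=%d\n",
               configured, registered->is_console);
        return 0;
    }
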
53203diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
53204index c88b522..e763029 100644
53205--- a/drivers/tty/serial/msm_serial.c
53206+++ b/drivers/tty/serial/msm_serial.c
53207@@ -1028,7 +1028,7 @@ static struct uart_driver msm_uart_driver = {
53208 .cons = MSM_CONSOLE,
53209 };
53210
53211-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
53212+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
53213
53214 static const struct of_device_id msm_uartdm_table[] = {
53215 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
53216@@ -1052,7 +1052,7 @@ static int msm_serial_probe(struct platform_device *pdev)
53217 line = pdev->id;
53218
53219 if (line < 0)
53220- line = atomic_inc_return(&msm_uart_next_id) - 1;
53221+ line = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
53222
53223 if (unlikely(line < 0 || line >= UART_NR))
53224 return -ENXIO;
53225diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
53226index 107e807..d4a02fa 100644
53227--- a/drivers/tty/serial/samsung.c
53228+++ b/drivers/tty/serial/samsung.c
53229@@ -480,11 +480,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
53230 }
53231 }
53232
53233+static int s3c64xx_serial_startup(struct uart_port *port);
53234 static int s3c24xx_serial_startup(struct uart_port *port)
53235 {
53236 struct s3c24xx_uart_port *ourport = to_ourport(port);
53237 int ret;
53238
53239+ /* Startup sequence is different for s3c64xx and higher SoC's */
53240+ if (s3c24xx_serial_has_interrupt_mask(port))
53241+ return s3c64xx_serial_startup(port);
53242+
53243 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
53244 port, (unsigned long long)port->mapbase, port->membase);
53245
53246@@ -1169,10 +1174,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
53247 /* setup info for port */
53248 port->dev = &platdev->dev;
53249
53250- /* Startup sequence is different for s3c64xx and higher SoC's */
53251- if (s3c24xx_serial_has_interrupt_mask(port))
53252- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
53253-
53254 port->uartclk = 1;
53255
53256 if (cfg->uart_flags & UPF_CONS_FLOW) {
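
The samsung.c change above serves the same constification goal: instead of patching s3c24xx_serial_ops.startup during port init, s3c24xx_serial_startup() now tests s3c24xx_serial_has_interrupt_mask() and tail-calls the s3c64xx variant, so the ops table can stay read-only. A small userspace analogue of moving the choice from init time to call time (all names hypothetical):

    #include <stdio.h>

    struct port { int has_irq_mask; };

    static int startup_v2(struct port *p) { (void)p; return 2; }

    /* Dispatch on each call instead of rewriting an ops table once. */
    static int startup_v1(struct port *p)
    {
        if (p->has_irq_mask)    /* newer SoC: take the v2 path */
            return startup_v2(p);
        return 1;               /* classic path */
    }

    int main(void)
    {
        struct port old = { 0 }, new = { 1 };
        printf("%d %d\n", startup_v1(&old), startup_v1(&new)); /* 1 2 */
        return 0;
    }
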
53257diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
53258index 984605b..e538330 100644
53259--- a/drivers/tty/serial/serial_core.c
53260+++ b/drivers/tty/serial/serial_core.c
53261@@ -1396,7 +1396,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
53262 state = drv->state + tty->index;
53263 port = &state->port;
53264 spin_lock_irq(&port->lock);
53265- --port->count;
53266+ atomic_dec(&port->count);
53267 spin_unlock_irq(&port->lock);
53268 return;
53269 }
53270@@ -1406,7 +1406,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
53271
53272 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
53273
53274- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
53275+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
53276 return;
53277
53278 /*
53279@@ -1530,7 +1530,7 @@ static void uart_hangup(struct tty_struct *tty)
53280 uart_flush_buffer(tty);
53281 uart_shutdown(tty, state);
53282 spin_lock_irqsave(&port->lock, flags);
53283- port->count = 0;
53284+ atomic_set(&port->count, 0);
53285 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
53286 spin_unlock_irqrestore(&port->lock, flags);
53287 tty_port_tty_set(port, NULL);
53288@@ -1617,7 +1617,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
53289 pr_debug("uart_open(%d) called\n", line);
53290
53291 spin_lock_irq(&port->lock);
53292- ++port->count;
53293+ atomic_inc(&port->count);
53294 spin_unlock_irq(&port->lock);
53295
53296 /*
53297diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
53298index b799170..87dafd5 100644
53299--- a/drivers/tty/synclink.c
53300+++ b/drivers/tty/synclink.c
53301@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53302
53303 if (debug_level >= DEBUG_LEVEL_INFO)
53304 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
53305- __FILE__,__LINE__, info->device_name, info->port.count);
53306+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53307
53308 if (tty_port_close_start(&info->port, tty, filp) == 0)
53309 goto cleanup;
53310@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53311 cleanup:
53312 if (debug_level >= DEBUG_LEVEL_INFO)
53313 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
53314- tty->driver->name, info->port.count);
53315+ tty->driver->name, atomic_read(&info->port.count));
53316
53317 } /* end of mgsl_close() */
53318
53319@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
53320
53321 mgsl_flush_buffer(tty);
53322 shutdown(info);
53323-
53324- info->port.count = 0;
53325+
53326+ atomic_set(&info->port.count, 0);
53327 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53328 info->port.tty = NULL;
53329
53330@@ -3296,10 +3296,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53331
53332 if (debug_level >= DEBUG_LEVEL_INFO)
53333 printk("%s(%d):block_til_ready before block on %s count=%d\n",
53334- __FILE__,__LINE__, tty->driver->name, port->count );
53335+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53336
53337 spin_lock_irqsave(&info->irq_spinlock, flags);
53338- port->count--;
53339+ atomic_dec(&port->count);
53340 spin_unlock_irqrestore(&info->irq_spinlock, flags);
53341 port->blocked_open++;
53342
53343@@ -3327,7 +3327,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53344
53345 if (debug_level >= DEBUG_LEVEL_INFO)
53346 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
53347- __FILE__,__LINE__, tty->driver->name, port->count );
53348+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53349
53350 tty_unlock(tty);
53351 schedule();
53352@@ -3339,12 +3339,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53353
53354 /* FIXME: Racy on hangup during close wait */
53355 if (!tty_hung_up_p(filp))
53356- port->count++;
53357+ atomic_inc(&port->count);
53358 port->blocked_open--;
53359
53360 if (debug_level >= DEBUG_LEVEL_INFO)
53361 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
53362- __FILE__,__LINE__, tty->driver->name, port->count );
53363+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53364
53365 if (!retval)
53366 port->flags |= ASYNC_NORMAL_ACTIVE;
53367@@ -3396,7 +3396,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53368
53369 if (debug_level >= DEBUG_LEVEL_INFO)
53370 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
53371- __FILE__,__LINE__,tty->driver->name, info->port.count);
53372+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53373
53374 /* If port is closing, signal caller to try again */
53375 if (info->port.flags & ASYNC_CLOSING){
53376@@ -3415,10 +3415,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53377 spin_unlock_irqrestore(&info->netlock, flags);
53378 goto cleanup;
53379 }
53380- info->port.count++;
53381+ atomic_inc(&info->port.count);
53382 spin_unlock_irqrestore(&info->netlock, flags);
53383
53384- if (info->port.count == 1) {
53385+ if (atomic_read(&info->port.count) == 1) {
53386 /* 1st open on this device, init hardware */
53387 retval = startup(info);
53388 if (retval < 0)
53389@@ -3442,8 +3442,8 @@ cleanup:
53390 if (retval) {
53391 if (tty->count == 1)
53392 info->port.tty = NULL; /* tty layer will release tty struct */
53393- if(info->port.count)
53394- info->port.count--;
53395+ if (atomic_read(&info->port.count))
53396+ atomic_dec(&info->port.count);
53397 }
53398
53399 return retval;
53400@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53401 unsigned short new_crctype;
53402
53403 /* return error if TTY interface open */
53404- if (info->port.count)
53405+ if (atomic_read(&info->port.count))
53406 return -EBUSY;
53407
53408 switch (encoding)
53409@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
53410
53411 /* arbitrate between network and tty opens */
53412 spin_lock_irqsave(&info->netlock, flags);
53413- if (info->port.count != 0 || info->netcount != 0) {
53414+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53415 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53416 spin_unlock_irqrestore(&info->netlock, flags);
53417 return -EBUSY;
53418@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53419 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53420
53421 /* return error if TTY interface open */
53422- if (info->port.count)
53423+ if (atomic_read(&info->port.count))
53424 return -EBUSY;
53425
53426 if (cmd != SIOCWANDEV)
53427diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
53428index 0e8c39b..e0cb171 100644
53429--- a/drivers/tty/synclink_gt.c
53430+++ b/drivers/tty/synclink_gt.c
53431@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53432 tty->driver_data = info;
53433 info->port.tty = tty;
53434
53435- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
53436+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
53437
53438 /* If port is closing, signal caller to try again */
53439 if (info->port.flags & ASYNC_CLOSING){
53440@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53441 mutex_unlock(&info->port.mutex);
53442 goto cleanup;
53443 }
53444- info->port.count++;
53445+ atomic_inc(&info->port.count);
53446 spin_unlock_irqrestore(&info->netlock, flags);
53447
53448- if (info->port.count == 1) {
53449+ if (atomic_read(&info->port.count) == 1) {
53450 /* 1st open on this device, init hardware */
53451 retval = startup(info);
53452 if (retval < 0) {
53453@@ -715,8 +715,8 @@ cleanup:
53454 if (retval) {
53455 if (tty->count == 1)
53456 info->port.tty = NULL; /* tty layer will release tty struct */
53457- if(info->port.count)
53458- info->port.count--;
53459+ if(atomic_read(&info->port.count))
53460+ atomic_dec(&info->port.count);
53461 }
53462
53463 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
53464@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53465
53466 if (sanity_check(info, tty->name, "close"))
53467 return;
53468- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
53469+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
53470
53471 if (tty_port_close_start(&info->port, tty, filp) == 0)
53472 goto cleanup;
53473@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53474 tty_port_close_end(&info->port, tty);
53475 info->port.tty = NULL;
53476 cleanup:
53477- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
53478+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
53479 }
53480
53481 static void hangup(struct tty_struct *tty)
53482@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
53483 shutdown(info);
53484
53485 spin_lock_irqsave(&info->port.lock, flags);
53486- info->port.count = 0;
53487+ atomic_set(&info->port.count, 0);
53488 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53489 info->port.tty = NULL;
53490 spin_unlock_irqrestore(&info->port.lock, flags);
53491@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53492 unsigned short new_crctype;
53493
53494 /* return error if TTY interface open */
53495- if (info->port.count)
53496+ if (atomic_read(&info->port.count))
53497 return -EBUSY;
53498
53499 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
53500@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
53501
53502 /* arbitrate between network and tty opens */
53503 spin_lock_irqsave(&info->netlock, flags);
53504- if (info->port.count != 0 || info->netcount != 0) {
53505+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53506 DBGINFO(("%s hdlc_open busy\n", dev->name));
53507 spin_unlock_irqrestore(&info->netlock, flags);
53508 return -EBUSY;
53509@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53510 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
53511
53512 /* return error if TTY interface open */
53513- if (info->port.count)
53514+ if (atomic_read(&info->port.count))
53515 return -EBUSY;
53516
53517 if (cmd != SIOCWANDEV)
53518@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
53519 if (port == NULL)
53520 continue;
53521 spin_lock(&port->lock);
53522- if ((port->port.count || port->netcount) &&
53523+ if ((atomic_read(&port->port.count) || port->netcount) &&
53524 port->pending_bh && !port->bh_running &&
53525 !port->bh_requested) {
53526 DBGISR(("%s bh queued\n", port->device_name));
53527@@ -3299,7 +3299,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53528 add_wait_queue(&port->open_wait, &wait);
53529
53530 spin_lock_irqsave(&info->lock, flags);
53531- port->count--;
53532+ atomic_dec(&port->count);
53533 spin_unlock_irqrestore(&info->lock, flags);
53534 port->blocked_open++;
53535
53536@@ -3335,7 +3335,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53537 remove_wait_queue(&port->open_wait, &wait);
53538
53539 if (!tty_hung_up_p(filp))
53540- port->count++;
53541+ atomic_inc(&port->count);
53542 port->blocked_open--;
53543
53544 if (!retval)
53545diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
53546index c3f9091..abe4601 100644
53547--- a/drivers/tty/synclinkmp.c
53548+++ b/drivers/tty/synclinkmp.c
53549@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53550
53551 if (debug_level >= DEBUG_LEVEL_INFO)
53552 printk("%s(%d):%s open(), old ref count = %d\n",
53553- __FILE__,__LINE__,tty->driver->name, info->port.count);
53554+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53555
53556 /* If port is closing, signal caller to try again */
53557 if (info->port.flags & ASYNC_CLOSING){
53558@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53559 spin_unlock_irqrestore(&info->netlock, flags);
53560 goto cleanup;
53561 }
53562- info->port.count++;
53563+ atomic_inc(&info->port.count);
53564 spin_unlock_irqrestore(&info->netlock, flags);
53565
53566- if (info->port.count == 1) {
53567+ if (atomic_read(&info->port.count) == 1) {
53568 /* 1st open on this device, init hardware */
53569 retval = startup(info);
53570 if (retval < 0)
53571@@ -796,8 +796,8 @@ cleanup:
53572 if (retval) {
53573 if (tty->count == 1)
53574 info->port.tty = NULL; /* tty layer will release tty struct */
53575- if(info->port.count)
53576- info->port.count--;
53577+ if(atomic_read(&info->port.count))
53578+ atomic_dec(&info->port.count);
53579 }
53580
53581 return retval;
53582@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53583
53584 if (debug_level >= DEBUG_LEVEL_INFO)
53585 printk("%s(%d):%s close() entry, count=%d\n",
53586- __FILE__,__LINE__, info->device_name, info->port.count);
53587+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53588
53589 if (tty_port_close_start(&info->port, tty, filp) == 0)
53590 goto cleanup;
53591@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53592 cleanup:
53593 if (debug_level >= DEBUG_LEVEL_INFO)
53594 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
53595- tty->driver->name, info->port.count);
53596+ tty->driver->name, atomic_read(&info->port.count));
53597 }
53598
53599 /* Called by tty_hangup() when a hangup is signaled.
53600@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
53601 shutdown(info);
53602
53603 spin_lock_irqsave(&info->port.lock, flags);
53604- info->port.count = 0;
53605+ atomic_set(&info->port.count, 0);
53606 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53607 info->port.tty = NULL;
53608 spin_unlock_irqrestore(&info->port.lock, flags);
53609@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53610 unsigned short new_crctype;
53611
53612 /* return error if TTY interface open */
53613- if (info->port.count)
53614+ if (atomic_read(&info->port.count))
53615 return -EBUSY;
53616
53617 switch (encoding)
53618@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
53619
53620 /* arbitrate between network and tty opens */
53621 spin_lock_irqsave(&info->netlock, flags);
53622- if (info->port.count != 0 || info->netcount != 0) {
53623+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53624 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53625 spin_unlock_irqrestore(&info->netlock, flags);
53626 return -EBUSY;
53627@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53628 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53629
53630 /* return error if TTY interface open */
53631- if (info->port.count)
53632+ if (atomic_read(&info->port.count))
53633 return -EBUSY;
53634
53635 if (cmd != SIOCWANDEV)
53636@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
53637 * do not request bottom half processing if the
53638 * device is not open in a normal mode.
53639 */
53640- if ( port && (port->port.count || port->netcount) &&
53641+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
53642 port->pending_bh && !port->bh_running &&
53643 !port->bh_requested ) {
53644 if ( debug_level >= DEBUG_LEVEL_ISR )
53645@@ -3318,10 +3318,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53646
53647 if (debug_level >= DEBUG_LEVEL_INFO)
53648 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
53649- __FILE__,__LINE__, tty->driver->name, port->count );
53650+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53651
53652 spin_lock_irqsave(&info->lock, flags);
53653- port->count--;
53654+ atomic_dec(&port->count);
53655 spin_unlock_irqrestore(&info->lock, flags);
53656 port->blocked_open++;
53657
53658@@ -3349,7 +3349,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53659
53660 if (debug_level >= DEBUG_LEVEL_INFO)
53661 printk("%s(%d):%s block_til_ready() count=%d\n",
53662- __FILE__,__LINE__, tty->driver->name, port->count );
53663+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53664
53665 tty_unlock(tty);
53666 schedule();
53667@@ -3359,12 +3359,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53668 set_current_state(TASK_RUNNING);
53669 remove_wait_queue(&port->open_wait, &wait);
53670 if (!tty_hung_up_p(filp))
53671- port->count++;
53672+ atomic_inc(&port->count);
53673 port->blocked_open--;
53674
53675 if (debug_level >= DEBUG_LEVEL_INFO)
53676 printk("%s(%d):%s block_til_ready() after, count=%d\n",
53677- __FILE__,__LINE__, tty->driver->name, port->count );
53678+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53679
53680 if (!retval)
53681 port->flags |= ASYNC_NORMAL_ACTIVE;
53682diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
53683index 42bad18..447d7a2 100644
53684--- a/drivers/tty/sysrq.c
53685+++ b/drivers/tty/sysrq.c
53686@@ -1084,7 +1084,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
53687 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
53688 size_t count, loff_t *ppos)
53689 {
53690- if (count) {
53691+ if (count && capable(CAP_SYS_ADMIN)) {
53692 char c;
53693
53694 if (get_user(c, buf))
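
The sysrq hunk above gates /proc/sysrq-trigger behind capable(CAP_SYS_ADMIN): unprivileged writes still return their byte count but no longer reach a handler. A userspace stand-in that treats euid 0 as the capability (a simplification; real capability checks are finer-grained):

    #include <stdio.h>
    #include <unistd.h>

    static int capable_admin(void)
    {
        return geteuid() == 0;  /* stand-in for capable(CAP_SYS_ADMIN) */
    }

    static long write_trigger(const char *buf, long count)
    {
        if (count && capable_admin())
            printf("sysrq '%c' handled\n", buf[0]);
        return count;  /* unprivileged writes succeed but do nothing */
    }

    int main(void)
    {
        char c = 'h';
        return write_trigger(&c, 1) == 1 ? 0 : 1;
    }
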
53695diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
53696index 2bb4dfc..a7f6e86 100644
53697--- a/drivers/tty/tty_io.c
53698+++ b/drivers/tty/tty_io.c
53699@@ -3503,7 +3503,7 @@ EXPORT_SYMBOL(tty_devnum);
53700
53701 void tty_default_fops(struct file_operations *fops)
53702 {
53703- *fops = tty_fops;
53704+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
53705 }
53706
53707 /*
53708diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
53709index 3737f55..7cef448 100644
53710--- a/drivers/tty/tty_ldisc.c
53711+++ b/drivers/tty/tty_ldisc.c
53712@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
53713 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53714 tty_ldiscs[disc] = new_ldisc;
53715 new_ldisc->num = disc;
53716- new_ldisc->refcount = 0;
53717+ atomic_set(&new_ldisc->refcount, 0);
53718 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53719
53720 return ret;
53721@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
53722 return -EINVAL;
53723
53724 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53725- if (tty_ldiscs[disc]->refcount)
53726+ if (atomic_read(&tty_ldiscs[disc]->refcount))
53727 ret = -EBUSY;
53728 else
53729 tty_ldiscs[disc] = NULL;
53730@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
53731 if (ldops) {
53732 ret = ERR_PTR(-EAGAIN);
53733 if (try_module_get(ldops->owner)) {
53734- ldops->refcount++;
53735+ atomic_inc(&ldops->refcount);
53736 ret = ldops;
53737 }
53738 }
53739@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
53740 unsigned long flags;
53741
53742 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53743- ldops->refcount--;
53744+ atomic_dec(&ldops->refcount);
53745 module_put(ldops->owner);
53746 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53747 }
53748diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
53749index 40b31835..94d92ae 100644
53750--- a/drivers/tty/tty_port.c
53751+++ b/drivers/tty/tty_port.c
53752@@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port)
53753 unsigned long flags;
53754
53755 spin_lock_irqsave(&port->lock, flags);
53756- port->count = 0;
53757+ atomic_set(&port->count, 0);
53758 port->flags &= ~ASYNC_NORMAL_ACTIVE;
53759 tty = port->tty;
53760 if (tty)
53761@@ -398,7 +398,7 @@ int tty_port_block_til_ready(struct tty_port *port,
53762
53763 /* The port lock protects the port counts */
53764 spin_lock_irqsave(&port->lock, flags);
53765- port->count--;
53766+ atomic_dec(&port->count);
53767 port->blocked_open++;
53768 spin_unlock_irqrestore(&port->lock, flags);
53769
53770@@ -440,7 +440,7 @@ int tty_port_block_til_ready(struct tty_port *port,
53771 we must not mess that up further */
53772 spin_lock_irqsave(&port->lock, flags);
53773 if (!tty_hung_up_p(filp))
53774- port->count++;
53775+ atomic_inc(&port->count);
53776 port->blocked_open--;
53777 if (retval == 0)
53778 port->flags |= ASYNC_NORMAL_ACTIVE;
53779@@ -476,19 +476,19 @@ int tty_port_close_start(struct tty_port *port,
53780 return 0;
53781
53782 spin_lock_irqsave(&port->lock, flags);
53783- if (tty->count == 1 && port->count != 1) {
53784+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
53785 printk(KERN_WARNING
53786 "tty_port_close_start: tty->count = 1 port count = %d.\n",
53787- port->count);
53788- port->count = 1;
53789+ atomic_read(&port->count));
53790+ atomic_set(&port->count, 1);
53791 }
53792- if (--port->count < 0) {
53793+ if (atomic_dec_return(&port->count) < 0) {
53794 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
53795- port->count);
53796- port->count = 0;
53797+ atomic_read(&port->count));
53798+ atomic_set(&port->count, 0);
53799 }
53800
53801- if (port->count) {
53802+ if (atomic_read(&port->count)) {
53803 spin_unlock_irqrestore(&port->lock, flags);
53804 return 0;
53805 }
53806@@ -590,7 +590,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
53807 struct file *filp)
53808 {
53809 spin_lock_irq(&port->lock);
53810- ++port->count;
53811+ atomic_inc(&port->count);
53812 spin_unlock_irq(&port->lock);
53813 tty_port_tty_set(port, tty);
53814
53815diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
53816index 8a89f6e..50b32af 100644
53817--- a/drivers/tty/vt/keyboard.c
53818+++ b/drivers/tty/vt/keyboard.c
53819@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
53820 kbd->kbdmode == VC_OFF) &&
53821 value != KVAL(K_SAK))
53822 return; /* SAK is allowed even in raw mode */
53823+
53824+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53825+ {
53826+ void *func = fn_handler[value];
53827+ if (func == fn_show_state || func == fn_show_ptregs ||
53828+ func == fn_show_mem)
53829+ return;
53830+ }
53831+#endif
53832+
53833 fn_handler[value](vc);
53834 }
53835
53836@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
53837 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
53838 return -EFAULT;
53839
53840- if (!capable(CAP_SYS_TTY_CONFIG))
53841- perm = 0;
53842-
53843 switch (cmd) {
53844 case KDGKBENT:
53845 /* Ensure another thread doesn't free it under us */
53846@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
53847 spin_unlock_irqrestore(&kbd_event_lock, flags);
53848 return put_user(val, &user_kbe->kb_value);
53849 case KDSKBENT:
53850+ if (!capable(CAP_SYS_TTY_CONFIG))
53851+ perm = 0;
53852+
53853 if (!perm)
53854 return -EPERM;
53855 if (!i && v == K_NOSUCHMAP) {
53856@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
53857 int i, j, k;
53858 int ret;
53859
53860- if (!capable(CAP_SYS_TTY_CONFIG))
53861- perm = 0;
53862-
53863 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
53864 if (!kbs) {
53865 ret = -ENOMEM;
53866@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
53867 kfree(kbs);
53868 return ((p && *p) ? -EOVERFLOW : 0);
53869 case KDSKBSENT:
53870+ if (!capable(CAP_SYS_TTY_CONFIG))
53871+ perm = 0;
53872+
53873 if (!perm) {
53874 ret = -EPERM;
53875 goto reterr;
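
The keyboard.c hunks above move the capable(CAP_SYS_TTY_CONFIG) test from the top of vt_do_kdsk_ioctl()/vt_do_kdgkb_ioctl() into the set cases (KDSKBENT/KDSKBSENT), so reading a keymap entry no longer demands the capability while writing still does; the k_spec hunk additionally blocks the show_state/show_ptregs/show_mem console keys when the GRKERNSEC_PROC options hide the same information elsewhere. A toy model of the read/write permission split (hypothetical names):

    #include <errno.h>
    #include <stdio.h>

    enum { KDGKBENT, KDSKBENT };

    /* Reads stay unprivileged; only writes check the capability. */
    static int handle(int cmd, int perm)
    {
        switch (cmd) {
        case KDGKBENT:                  /* get: always allowed */
            return 0;
        case KDSKBENT:                  /* set: gated here */
            return perm ? 0 : -EPERM;
        }
        return -EINVAL;
    }

    int main(void)
    {
        printf("get=%d set(unpriv)=%d\n",
               handle(KDGKBENT, 0), handle(KDSKBENT, 0));
        return 0;
    }
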
53876diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
53877index 6276f13..84f2449 100644
53878--- a/drivers/uio/uio.c
53879+++ b/drivers/uio/uio.c
53880@@ -25,6 +25,7 @@
53881 #include <linux/kobject.h>
53882 #include <linux/cdev.h>
53883 #include <linux/uio_driver.h>
53884+#include <asm/local.h>
53885
53886 #define UIO_MAX_DEVICES (1U << MINORBITS)
53887
53888@@ -231,7 +232,7 @@ static ssize_t event_show(struct device *dev,
53889 struct device_attribute *attr, char *buf)
53890 {
53891 struct uio_device *idev = dev_get_drvdata(dev);
53892- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
53893+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
53894 }
53895 static DEVICE_ATTR_RO(event);
53896
53897@@ -393,7 +394,7 @@ void uio_event_notify(struct uio_info *info)
53898 {
53899 struct uio_device *idev = info->uio_dev;
53900
53901- atomic_inc(&idev->event);
53902+ atomic_inc_unchecked(&idev->event);
53903 wake_up_interruptible(&idev->wait);
53904 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
53905 }
53906@@ -446,7 +447,7 @@ static int uio_open(struct inode *inode, struct file *filep)
53907 }
53908
53909 listener->dev = idev;
53910- listener->event_count = atomic_read(&idev->event);
53911+ listener->event_count = atomic_read_unchecked(&idev->event);
53912 filep->private_data = listener;
53913
53914 if (idev->info->open) {
53915@@ -497,7 +498,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
53916 return -EIO;
53917
53918 poll_wait(filep, &idev->wait, wait);
53919- if (listener->event_count != atomic_read(&idev->event))
53920+ if (listener->event_count != atomic_read_unchecked(&idev->event))
53921 return POLLIN | POLLRDNORM;
53922 return 0;
53923 }
53924@@ -522,7 +523,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
53925 do {
53926 set_current_state(TASK_INTERRUPTIBLE);
53927
53928- event_count = atomic_read(&idev->event);
53929+ event_count = atomic_read_unchecked(&idev->event);
53930 if (event_count != listener->event_count) {
53931 if (copy_to_user(buf, &event_count, count))
53932 retval = -EFAULT;
53933@@ -579,9 +580,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
53934 static int uio_find_mem_index(struct vm_area_struct *vma)
53935 {
53936 struct uio_device *idev = vma->vm_private_data;
53937+ unsigned long size;
53938
53939 if (vma->vm_pgoff < MAX_UIO_MAPS) {
53940- if (idev->info->mem[vma->vm_pgoff].size == 0)
53941+ size = idev->info->mem[vma->vm_pgoff].size;
53942+ if (size == 0)
53943+ return -1;
53944+ if (vma->vm_end - vma->vm_start > size)
53945 return -1;
53946 return (int)vma->vm_pgoff;
53947 }
53948@@ -813,7 +818,7 @@ int __uio_register_device(struct module *owner,
53949 idev->owner = owner;
53950 idev->info = info;
53951 init_waitqueue_head(&idev->wait);
53952- atomic_set(&idev->event, 0);
53953+ atomic_set_unchecked(&idev->event, 0);
53954
53955 ret = uio_get_minor(idev);
53956 if (ret)
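
uio_find_mem_index() above gains a bounds check: an mmap request whose VMA is larger than the backing memory region is now rejected instead of mapping whatever follows the region. The added comparison in isolation (userspace types, runnable):

    #include <stdio.h>

    /* Returns the map index, or -1 when the region is absent or the
     * requested VMA would overrun it (mirrors the new check). */
    static int find_mem_index(unsigned long vm_start, unsigned long vm_end,
                              unsigned long pgoff, unsigned long region_size)
    {
        if (region_size == 0)
            return -1;
        if (vm_end - vm_start > region_size)  /* the added bound */
            return -1;
        return (int)pgoff;
    }

    int main(void)
    {
        printf("%d\n", find_mem_index(0, 4096, 0, 4096)); /* 0: fits */
        printf("%d\n", find_mem_index(0, 8192, 0, 4096)); /* -1: overruns */
        return 0;
    }
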
53957diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
53958index 813d4d3..a71934f 100644
53959--- a/drivers/usb/atm/cxacru.c
53960+++ b/drivers/usb/atm/cxacru.c
53961@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
53962 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
53963 if (ret < 2)
53964 return -EINVAL;
53965- if (index < 0 || index > 0x7f)
53966+ if (index > 0x7f)
53967 return -EINVAL;
53968 pos += tmp;
53969
53970diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
53971index dada014..1d0d517 100644
53972--- a/drivers/usb/atm/usbatm.c
53973+++ b/drivers/usb/atm/usbatm.c
53974@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53975 if (printk_ratelimit())
53976 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
53977 __func__, vpi, vci);
53978- atomic_inc(&vcc->stats->rx_err);
53979+ atomic_inc_unchecked(&vcc->stats->rx_err);
53980 return;
53981 }
53982
53983@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53984 if (length > ATM_MAX_AAL5_PDU) {
53985 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
53986 __func__, length, vcc);
53987- atomic_inc(&vcc->stats->rx_err);
53988+ atomic_inc_unchecked(&vcc->stats->rx_err);
53989 goto out;
53990 }
53991
53992@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53993 if (sarb->len < pdu_length) {
53994 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
53995 __func__, pdu_length, sarb->len, vcc);
53996- atomic_inc(&vcc->stats->rx_err);
53997+ atomic_inc_unchecked(&vcc->stats->rx_err);
53998 goto out;
53999 }
54000
54001 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
54002 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
54003 __func__, vcc);
54004- atomic_inc(&vcc->stats->rx_err);
54005+ atomic_inc_unchecked(&vcc->stats->rx_err);
54006 goto out;
54007 }
54008
54009@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54010 if (printk_ratelimit())
54011 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
54012 __func__, length);
54013- atomic_inc(&vcc->stats->rx_drop);
54014+ atomic_inc_unchecked(&vcc->stats->rx_drop);
54015 goto out;
54016 }
54017
54018@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54019
54020 vcc->push(vcc, skb);
54021
54022- atomic_inc(&vcc->stats->rx);
54023+ atomic_inc_unchecked(&vcc->stats->rx);
54024 out:
54025 skb_trim(sarb, 0);
54026 }
54027@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
54028 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
54029
54030 usbatm_pop(vcc, skb);
54031- atomic_inc(&vcc->stats->tx);
54032+ atomic_inc_unchecked(&vcc->stats->tx);
54033
54034 skb = skb_dequeue(&instance->sndqueue);
54035 }
54036@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
54037 if (!left--)
54038 return sprintf(page,
54039 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
54040- atomic_read(&atm_dev->stats.aal5.tx),
54041- atomic_read(&atm_dev->stats.aal5.tx_err),
54042- atomic_read(&atm_dev->stats.aal5.rx),
54043- atomic_read(&atm_dev->stats.aal5.rx_err),
54044- atomic_read(&atm_dev->stats.aal5.rx_drop));
54045+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
54046+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
54047+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
54048+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
54049+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
54050
54051 if (!left--) {
54052 if (instance->disconnected)
54053diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
54054index 2a3bbdf..91d72cf 100644
54055--- a/drivers/usb/core/devices.c
54056+++ b/drivers/usb/core/devices.c
54057@@ -126,7 +126,7 @@ static const char format_endpt[] =
54058 * time it gets called.
54059 */
54060 static struct device_connect_event {
54061- atomic_t count;
54062+ atomic_unchecked_t count;
54063 wait_queue_head_t wait;
54064 } device_event = {
54065 .count = ATOMIC_INIT(1),
54066@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
54067
54068 void usbfs_conn_disc_event(void)
54069 {
54070- atomic_add(2, &device_event.count);
54071+ atomic_add_unchecked(2, &device_event.count);
54072 wake_up(&device_event.wait);
54073 }
54074
54075@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
54076
54077 poll_wait(file, &device_event.wait, wait);
54078
54079- event_count = atomic_read(&device_event.count);
54080+ event_count = atomic_read_unchecked(&device_event.count);
54081 if (file->f_version != event_count) {
54082 file->f_version = event_count;
54083 return POLLIN | POLLRDNORM;
54084diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
54085index e500243..401300f 100644
54086--- a/drivers/usb/core/devio.c
54087+++ b/drivers/usb/core/devio.c
54088@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
54089 struct usb_dev_state *ps = file->private_data;
54090 struct usb_device *dev = ps->dev;
54091 ssize_t ret = 0;
54092- unsigned len;
54093+ size_t len;
54094 loff_t pos;
54095 int i;
54096
54097@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
54098 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
54099 struct usb_config_descriptor *config =
54100 (struct usb_config_descriptor *)dev->rawdescriptors[i];
54101- unsigned int length = le16_to_cpu(config->wTotalLength);
54102+ size_t length = le16_to_cpu(config->wTotalLength);
54103
54104 if (*ppos < pos + length) {
54105
54106 /* The descriptor may claim to be longer than it
54107 * really is. Here is the actual allocated length. */
54108- unsigned alloclen =
54109+ size_t alloclen =
54110 le16_to_cpu(dev->config[i].desc.wTotalLength);
54111
54112- len = length - (*ppos - pos);
54113+ len = length + pos - *ppos;
54114 if (len > nbytes)
54115 len = nbytes;
54116
54117 /* Simply don't write (skip over) unallocated parts */
54118 if (alloclen > (*ppos - pos)) {
54119- alloclen -= (*ppos - pos);
54120+ alloclen = alloclen + pos - *ppos;
54121 if (copy_to_user(buf,
54122 dev->rawdescriptors[i] + (*ppos - pos),
54123 min(len, alloclen))) {
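
The usbdev_read changes above widen the descriptor length arithmetic from unsigned int to size_t and reorder "length - (*ppos - pos)" into "length + pos - *ppos". Both forms agree mathematically; the rewrite keeps the whole computation in one wide unsigned type with non-negative intermediates under the "*ppos < pos + length" guard, the form the size_overflow plugin can verify. A sketch of the reordered expression (assumes pos <= *ppos < pos + length, as the surrounding guards arrange):

    #include <assert.h>
    #include <stddef.h>

    /* Remaining descriptor bytes at file offset ppos, where the
     * descriptor of size length starts at offset pos (sketch). */
    static size_t remaining(size_t length, long long pos, long long ppos)
    {
        return length + pos - ppos;
    }

    int main(void)
    {
        assert(remaining(100, 40, 60) == 80);   /* 20 bytes consumed */
        assert(remaining(100, 40, 40) == 100);  /* nothing consumed */
        return 0;
    }
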
54124diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
54125index 45a915c..09f9735 100644
54126--- a/drivers/usb/core/hcd.c
54127+++ b/drivers/usb/core/hcd.c
54128@@ -1551,7 +1551,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
54129 */
54130 usb_get_urb(urb);
54131 atomic_inc(&urb->use_count);
54132- atomic_inc(&urb->dev->urbnum);
54133+ atomic_inc_unchecked(&urb->dev->urbnum);
54134 usbmon_urb_submit(&hcd->self, urb);
54135
54136 /* NOTE requirements on root-hub callers (usbfs and the hub
54137@@ -1578,7 +1578,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
54138 urb->hcpriv = NULL;
54139 INIT_LIST_HEAD(&urb->urb_list);
54140 atomic_dec(&urb->use_count);
54141- atomic_dec(&urb->dev->urbnum);
54142+ atomic_dec_unchecked(&urb->dev->urbnum);
54143 if (atomic_read(&urb->reject))
54144 wake_up(&usb_kill_urb_queue);
54145 usb_put_urb(urb);
54146diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
54147index b4bfa3a..008f926 100644
54148--- a/drivers/usb/core/hub.c
54149+++ b/drivers/usb/core/hub.c
54150@@ -26,6 +26,7 @@
54151 #include <linux/mutex.h>
54152 #include <linux/random.h>
54153 #include <linux/pm_qos.h>
54154+#include <linux/grsecurity.h>
54155
54156 #include <asm/uaccess.h>
54157 #include <asm/byteorder.h>
54158@@ -4664,6 +4665,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
54159 goto done;
54160 return;
54161 }
54162+
54163+ if (gr_handle_new_usb())
54164+ goto done;
54165+
54166 if (hub_is_superspeed(hub->hdev))
54167 unit_load = 150;
54168 else
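
hub_port_connect() above gains a gr_handle_new_usb() hook, the attachment point for grsecurity's GRKERNSEC_DENYUSB feature: once the deny_new_usb sysctl is flipped on, newly connected devices are refused before enumeration. The hook's body lives elsewhere in the patch; its usual shape is roughly the following (hedged sketch, not the exact patch text):

    int gr_handle_new_usb(void)
    {
    #ifdef CONFIG_GRKERNSEC_DENYUSB
        if (grsec_deny_new_usb) {
            printk(KERN_ALERT "grsec: denied insert of new USB device\n");
            return 1;
        }
    #endif
        return 0;
    }
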
54169diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
54170index f368d20..0c30ac5 100644
54171--- a/drivers/usb/core/message.c
54172+++ b/drivers/usb/core/message.c
54173@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
54174 * Return: If successful, the number of bytes transferred. Otherwise, a negative
54175 * error number.
54176 */
54177-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
54178+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
54179 __u8 requesttype, __u16 value, __u16 index, void *data,
54180 __u16 size, int timeout)
54181 {
54182@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
54183 * If successful, 0. Otherwise a negative error number. The number of actual
54184 * bytes transferred will be stored in the @actual_length parameter.
54185 */
54186-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
54187+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
54188 void *data, int len, int *actual_length, int timeout)
54189 {
54190 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
54191@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
54192 * bytes transferred will be stored in the @actual_length parameter.
54193 *
54194 */
54195-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
54196+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
54197 void *data, int len, int *actual_length, int timeout)
54198 {
54199 struct urb *urb;
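__intentional_overflow() is an annotation consumed by PaX's size_overflow GCC plugin; the argument list names which values may legitimately overflow, with -1 conventionally denoting the return value. For these USB message helpers the transfer-length arithmetic is allowed to wrap without tripping the plugin's runtime check. Outside plugin builds the marker must expand to nothing; a paraphrased sketch of the definition:

    #ifdef SIZE_OVERFLOW_PLUGIN
    # define __intentional_overflow(...) \
        __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    # define __intentional_overflow(...)
    #endif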
54200diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
54201index d269738..7340cd7 100644
54202--- a/drivers/usb/core/sysfs.c
54203+++ b/drivers/usb/core/sysfs.c
54204@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
54205 struct usb_device *udev;
54206
54207 udev = to_usb_device(dev);
54208- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
54209+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
54210 }
54211 static DEVICE_ATTR_RO(urbnum);
54212
54213diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
54214index b1fb9ae..4224885 100644
54215--- a/drivers/usb/core/usb.c
54216+++ b/drivers/usb/core/usb.c
54217@@ -431,7 +431,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
54218 set_dev_node(&dev->dev, dev_to_node(bus->controller));
54219 dev->state = USB_STATE_ATTACHED;
54220 dev->lpm_disable_count = 1;
54221- atomic_set(&dev->urbnum, 0);
54222+ atomic_set_unchecked(&dev->urbnum, 0);
54223
54224 INIT_LIST_HEAD(&dev->ep0.urb_list);
54225 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
54226diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
54227index 8cfc319..4868255 100644
54228--- a/drivers/usb/early/ehci-dbgp.c
54229+++ b/drivers/usb/early/ehci-dbgp.c
54230@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
54231
54232 #ifdef CONFIG_KGDB
54233 static struct kgdb_io kgdbdbgp_io_ops;
54234-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
54235+static struct kgdb_io kgdbdbgp_io_ops_console;
54236+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
54237 #else
54238 #define dbgp_kgdb_mode (0)
54239 #endif
54240@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
54241 .write_char = kgdbdbgp_write_char,
54242 };
54243
54244+static struct kgdb_io kgdbdbgp_io_ops_console = {
54245+ .name = "kgdbdbgp",
54246+ .read_char = kgdbdbgp_read_char,
54247+ .write_char = kgdbdbgp_write_char,
54248+ .is_console = 1
54249+};
54250+
54251 static int kgdbdbgp_wait_time;
54252
54253 static int __init kgdbdbgp_parse_config(char *str)
54254@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
54255 ptr++;
54256 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
54257 }
54258- kgdb_register_io_module(&kgdbdbgp_io_ops);
54259- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
54260+ if (early_dbgp_console.index != -1)
54261+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
54262+ else
54263+ kgdb_register_io_module(&kgdbdbgp_io_ops);
54264
54265 return 0;
54266 }
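The ehci-dbgp change is a knock-on effect of constification: this patch makes ops structures such as kgdb_io read-only after init, so the old code's runtime assignment to kgdbdbgp_io_ops.is_console no longer works. Two statically initialized variants are defined instead, and registration picks the right one. The pattern in miniature, with a stand-in type:

    struct io_ops { int is_console; /* ...function pointers... */ };

    static const struct io_ops ops_plain   = { .is_console = 0 };
    static const struct io_ops ops_console = { .is_console = 1 };

    static const struct io_ops *pick_ops(int have_console)
    {
        /* select a pre-built read-only variant instead of
           patching a single structure in place */
        return have_console ? &ops_console : &ops_plain;
    }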
54267diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
54268index e971584..03495ab 100644
54269--- a/drivers/usb/gadget/function/f_uac1.c
54270+++ b/drivers/usb/gadget/function/f_uac1.c
54271@@ -14,6 +14,7 @@
54272 #include <linux/module.h>
54273 #include <linux/device.h>
54274 #include <linux/atomic.h>
54275+#include <linux/module.h>
54276
54277 #include "u_uac1.h"
54278
54279diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
54280index 491082a..dfd7d17 100644
54281--- a/drivers/usb/gadget/function/u_serial.c
54282+++ b/drivers/usb/gadget/function/u_serial.c
54283@@ -729,9 +729,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54284 spin_lock_irq(&port->port_lock);
54285
54286 /* already open? Great. */
54287- if (port->port.count) {
54288+ if (atomic_read(&port->port.count)) {
54289 status = 0;
54290- port->port.count++;
54291+ atomic_inc(&port->port.count);
54292
54293 /* currently opening/closing? wait ... */
54294 } else if (port->openclose) {
54295@@ -790,7 +790,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54296 tty->driver_data = port;
54297 port->port.tty = tty;
54298
54299- port->port.count = 1;
54300+ atomic_set(&port->port.count, 1);
54301 port->openclose = false;
54302
54303 /* if connected, start the I/O stream */
54304@@ -832,11 +832,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54305
54306 spin_lock_irq(&port->port_lock);
54307
54308- if (port->port.count != 1) {
54309- if (port->port.count == 0)
54310+ if (atomic_read(&port->port.count) != 1) {
54311+ if (atomic_read(&port->port.count) == 0)
54312 WARN_ON(1);
54313 else
54314- --port->port.count;
54315+ atomic_dec(&port->port.count);
54316 goto exit;
54317 }
54318
54319@@ -846,7 +846,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54320 * and sleep if necessary
54321 */
54322 port->openclose = true;
54323- port->port.count = 0;
54324+ atomic_set(&port->port.count, 0);
54325
54326 gser = port->port_usb;
54327 if (gser && gser->disconnect)
54328@@ -1062,7 +1062,7 @@ static int gs_closed(struct gs_port *port)
54329 int cond;
54330
54331 spin_lock_irq(&port->port_lock);
54332- cond = (port->port.count == 0) && !port->openclose;
54333+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
54334 spin_unlock_irq(&port->port_lock);
54335 return cond;
54336 }
54337@@ -1205,7 +1205,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
54338 /* if it's already open, start I/O ... and notify the serial
54339 * protocol about open/close status (connect/disconnect).
54340 */
54341- if (port->port.count) {
54342+ if (atomic_read(&port->port.count)) {
54343 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
54344 gs_start_io(port);
54345 if (gser->connect)
54346@@ -1252,7 +1252,7 @@ void gserial_disconnect(struct gserial *gser)
54347
54348 port->port_usb = NULL;
54349 gser->ioport = NULL;
54350- if (port->port.count > 0 || port->openclose) {
54351+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
54352 wake_up_interruptible(&port->drain_wait);
54353 if (port->port.tty)
54354 tty_hangup(port->port.tty);
54355@@ -1268,7 +1268,7 @@ void gserial_disconnect(struct gserial *gser)
54356
54357 /* finally, free any unused/unusable I/O buffers */
54358 spin_lock_irqsave(&port->port_lock, flags);
54359- if (port->port.count == 0 && !port->openclose)
54360+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
54361 gs_buf_free(&port->port_write_buf);
54362 gs_free_requests(gser->out, &port->read_pool, NULL);
54363 gs_free_requests(gser->out, &port->read_queue, NULL);
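The u_serial changes are one instance of a tree-wide conversion: struct tty_port's open count becomes an atomic_t elsewhere in this patch, so every driver that touched the plain int is rewritten with atomic_read()/atomic_inc()/atomic_set(). Besides matching the new type, atomic ops make any count reads taken outside port_lock well-defined. A userspace analogue with C11 atomics:

    #include <stdatomic.h>

    static atomic_int open_count;

    void on_open(void)  { atomic_fetch_add(&open_count, 1); }
    void on_close(void) { atomic_fetch_sub(&open_count, 1); }

    int is_open(void)   /* safe even without holding the port lock */
    {
        return atomic_load(&open_count) > 0;
    }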
54364diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
54365index 53842a1..2bef3b6 100644
54366--- a/drivers/usb/gadget/function/u_uac1.c
54367+++ b/drivers/usb/gadget/function/u_uac1.c
54368@@ -17,6 +17,7 @@
54369 #include <linux/ctype.h>
54370 #include <linux/random.h>
54371 #include <linux/syscalls.h>
54372+#include <linux/module.h>
54373
54374 #include "u_uac1.h"
54375
54376diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
54377index 118edb7..7a6415f 100644
54378--- a/drivers/usb/host/ehci-hub.c
54379+++ b/drivers/usb/host/ehci-hub.c
54380@@ -769,7 +769,7 @@ static struct urb *request_single_step_set_feature_urb(
54381 urb->transfer_flags = URB_DIR_IN;
54382 usb_get_urb(urb);
54383 atomic_inc(&urb->use_count);
54384- atomic_inc(&urb->dev->urbnum);
54385+ atomic_inc_unchecked(&urb->dev->urbnum);
54386 urb->setup_dma = dma_map_single(
54387 hcd->self.controller,
54388 urb->setup_packet,
54389@@ -836,7 +836,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
54390 urb->status = -EINPROGRESS;
54391 usb_get_urb(urb);
54392 atomic_inc(&urb->use_count);
54393- atomic_inc(&urb->dev->urbnum);
54394+ atomic_inc_unchecked(&urb->dev->urbnum);
54395 retval = submit_single_step_set_feature(hcd, urb, 0);
54396 if (!retval && !wait_for_completion_timeout(&done,
54397 msecs_to_jiffies(2000))) {
54398diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
54399index 1db0626..4948782 100644
54400--- a/drivers/usb/host/hwa-hc.c
54401+++ b/drivers/usb/host/hwa-hc.c
54402@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54403 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
54404 struct wahc *wa = &hwahc->wa;
54405 struct device *dev = &wa->usb_iface->dev;
54406- u8 mas_le[UWB_NUM_MAS/8];
54407+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
54408+
54409+ if (mas_le == NULL)
54410+ return -ENOMEM;
54411
54412 /* Set the stream index */
54413 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
54414@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54415 WUSB_REQ_SET_WUSB_MAS,
54416 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
54417 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
54418- mas_le, 32, USB_CTRL_SET_TIMEOUT);
54419+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
54420 if (result < 0)
54421 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
54422 out:
54423+ kfree(mas_le);
54424+
54425 return result;
54426 }
54427
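The __hwahc_op_bwa_set() hunk moves the transfer buffer from the stack to kmalloc() and sizes the control message with UWB_NUM_MAS/8 instead of a hard-coded 32. USB transfer buffers may be DMA-mapped, which on-stack memory cannot safely be, and every exit path must now free the allocation, as the hunk's out: label does. The shape of the pattern, using usb_control_msg()'s signature as shown in the message.c hunk above and a hypothetical request value:

    static int send_table(struct usb_device *udev, unsigned int pipe,
                          const u8 *src, u16 n)
    {
        u8 *buf = kmalloc(n, GFP_KERNEL);
        int ret;

        if (!buf)
            return -ENOMEM;
        memcpy(buf, src, n);
        ret = usb_control_msg(udev, pipe, 0x2a /* hypothetical request */,
                              USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
                              0, 0, buf, n, 5000 /* ms */);
        kfree(buf);
        return ret;
    }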
54428diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
54429index b3d245e..99549ed 100644
54430--- a/drivers/usb/misc/appledisplay.c
54431+++ b/drivers/usb/misc/appledisplay.c
54432@@ -84,7 +84,7 @@ struct appledisplay {
54433 struct mutex sysfslock; /* concurrent read and write */
54434 };
54435
54436-static atomic_t count_displays = ATOMIC_INIT(0);
54437+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
54438 static struct workqueue_struct *wq;
54439
54440 static void appledisplay_complete(struct urb *urb)
54441@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
54442
54443 /* Register backlight device */
54444 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
54445- atomic_inc_return(&count_displays) - 1);
54446+ atomic_inc_return_unchecked(&count_displays) - 1);
54447 memset(&props, 0, sizeof(struct backlight_properties));
54448 props.type = BACKLIGHT_RAW;
54449 props.max_brightness = 0xff;
54450diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
54451index 29fa1c3..a57b08e 100644
54452--- a/drivers/usb/serial/console.c
54453+++ b/drivers/usb/serial/console.c
54454@@ -125,7 +125,7 @@ static int usb_console_setup(struct console *co, char *options)
54455
54456 info->port = port;
54457
54458- ++port->port.count;
54459+ atomic_inc(&port->port.count);
54460 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
54461 if (serial->type->set_termios) {
54462 /*
54463@@ -173,7 +173,7 @@ static int usb_console_setup(struct console *co, char *options)
54464 }
54465 /* Now that any required fake tty operations are completed restore
54466 * the tty port count */
54467- --port->port.count;
54468+ atomic_dec(&port->port.count);
54469 /* The console is special in terms of closing the device so
54470 * indicate this port is now acting as a system console. */
54471 port->port.console = 1;
54472@@ -186,7 +186,7 @@ static int usb_console_setup(struct console *co, char *options)
54473 put_tty:
54474 tty_kref_put(tty);
54475 reset_open_count:
54476- port->port.count = 0;
54477+ atomic_set(&port->port.count, 0);
54478 usb_autopm_put_interface(serial->interface);
54479 error_get_interface:
54480 usb_serial_put(serial);
54481@@ -197,7 +197,7 @@ static int usb_console_setup(struct console *co, char *options)
54482 static void usb_console_write(struct console *co,
54483 const char *buf, unsigned count)
54484 {
54485- static struct usbcons_info *info = &usbcons_info;
54486+ struct usbcons_info *info = &usbcons_info;
54487 struct usb_serial_port *port = info->port;
54488 struct usb_serial *serial;
54489 int retval = -ENODEV;
54490diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
54491index 307e339..6aa97cb 100644
54492--- a/drivers/usb/storage/usb.h
54493+++ b/drivers/usb/storage/usb.h
54494@@ -63,7 +63,7 @@ struct us_unusual_dev {
54495 __u8 useProtocol;
54496 __u8 useTransport;
54497 int (*initFunction)(struct us_data *);
54498-};
54499+} __do_const;
54500
54501
54502 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
54503diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
54504index a863a98..d272795 100644
54505--- a/drivers/usb/usbip/vhci.h
54506+++ b/drivers/usb/usbip/vhci.h
54507@@ -83,7 +83,7 @@ struct vhci_hcd {
54508 unsigned resuming:1;
54509 unsigned long re_timeout;
54510
54511- atomic_t seqnum;
54512+ atomic_unchecked_t seqnum;
54513
54514 /*
54515 * NOTE:
54516diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
54517index 1ae9d40..c62604b 100644
54518--- a/drivers/usb/usbip/vhci_hcd.c
54519+++ b/drivers/usb/usbip/vhci_hcd.c
54520@@ -439,7 +439,7 @@ static void vhci_tx_urb(struct urb *urb)
54521
54522 spin_lock(&vdev->priv_lock);
54523
54524- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
54525+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54526 if (priv->seqnum == 0xffff)
54527 dev_info(&urb->dev->dev, "seqnum max\n");
54528
54529@@ -684,7 +684,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
54530 return -ENOMEM;
54531 }
54532
54533- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
54534+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54535 if (unlink->seqnum == 0xffff)
54536 pr_info("seqnum max\n");
54537
54538@@ -888,7 +888,7 @@ static int vhci_start(struct usb_hcd *hcd)
54539 vdev->rhport = rhport;
54540 }
54541
54542- atomic_set(&vhci->seqnum, 0);
54543+ atomic_set_unchecked(&vhci->seqnum, 0);
54544 spin_lock_init(&vhci->lock);
54545
54546 hcd->power_budget = 0; /* no limit */
54547diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
54548index 00e4a54..d676f85 100644
54549--- a/drivers/usb/usbip/vhci_rx.c
54550+++ b/drivers/usb/usbip/vhci_rx.c
54551@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
54552 if (!urb) {
54553 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
54554 pr_info("max seqnum %d\n",
54555- atomic_read(&the_controller->seqnum));
54556+ atomic_read_unchecked(&the_controller->seqnum));
54557 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
54558 return;
54559 }
54560diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
54561index edc7267..9f65ce2 100644
54562--- a/drivers/usb/wusbcore/wa-hc.h
54563+++ b/drivers/usb/wusbcore/wa-hc.h
54564@@ -240,7 +240,7 @@ struct wahc {
54565 spinlock_t xfer_list_lock;
54566 struct work_struct xfer_enqueue_work;
54567 struct work_struct xfer_error_work;
54568- atomic_t xfer_id_count;
54569+ atomic_unchecked_t xfer_id_count;
54570
54571 kernel_ulong_t quirks;
54572 };
54573@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
54574 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
54575 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
54576 wa->dto_in_use = 0;
54577- atomic_set(&wa->xfer_id_count, 1);
54578+ atomic_set_unchecked(&wa->xfer_id_count, 1);
54579 /* init the buf in URBs */
54580 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
54581 usb_init_urb(&(wa->buf_in_urbs[index]));
54582diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
54583index 69af4fd..da390d7 100644
54584--- a/drivers/usb/wusbcore/wa-xfer.c
54585+++ b/drivers/usb/wusbcore/wa-xfer.c
54586@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
54587 */
54588 static void wa_xfer_id_init(struct wa_xfer *xfer)
54589 {
54590- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
54591+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
54592 }
54593
54594 /* Return the xfer's ID. */
54595diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
54596index f018d8d..ccab63f 100644
54597--- a/drivers/vfio/vfio.c
54598+++ b/drivers/vfio/vfio.c
54599@@ -481,7 +481,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
54600 return 0;
54601
54602 /* TODO Prevent device auto probing */
54603- WARN("Device %s added to live group %d!\n", dev_name(dev),
54604+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
54605 iommu_group_id(group->iommu_group));
54606
54607 return 0;
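The vfio change is a straight bug fix: WARN()'s first parameter is the trigger condition and the format string starts at the second, so the original call used the format string as the (always-true) condition and shifted every printk argument one slot left. A heavily simplified userspace paraphrase of the macro's shape:

    #include <stdio.h>

    #define WARN(condition, fmt, ...) ({                 \
        int __ret = !!(condition);                       \
        if (__ret)                                       \
            fprintf(stderr, fmt, ##__VA_ARGS__);         \
        __ret;                                           \
    })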
54608diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
54609index 9484d56..d415d69 100644
54610--- a/drivers/vhost/net.c
54611+++ b/drivers/vhost/net.c
54612@@ -650,10 +650,8 @@ static void handle_rx(struct vhost_net *net)
54613 break;
54614 }
54615 /* TODO: Should check and handle checksum. */
54616-
54617- hdr.num_buffers = cpu_to_vhost16(vq, headcount);
54618 if (likely(mergeable) &&
54619- memcpy_toiovecend(nvq->hdr, (void *)&hdr.num_buffers,
54620+ memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
54621 offsetof(typeof(hdr), num_buffers),
54622 sizeof hdr.num_buffers)) {
54623 vq_err(vq, "Failed num_buffers write");
54624diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
54625index 3bb02c6..a01ff38 100644
54626--- a/drivers/vhost/vringh.c
54627+++ b/drivers/vhost/vringh.c
54628@@ -551,7 +551,7 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
54629 static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
54630 {
54631 __virtio16 v = 0;
54632- int rc = get_user(v, (__force __virtio16 __user *)p);
54633+ int rc = get_user(v, (__force_user __virtio16 *)p);
54634 *val = vringh16_to_cpu(vrh, v);
54635 return rc;
54636 }
54637@@ -559,12 +559,12 @@ static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio
54638 static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
54639 {
54640 __virtio16 v = cpu_to_vringh16(vrh, val);
54641- return put_user(v, (__force __virtio16 __user *)p);
54642+ return put_user(v, (__force_user __virtio16 *)p);
54643 }
54644
54645 static inline int copydesc_user(void *dst, const void *src, size_t len)
54646 {
54647- return copy_from_user(dst, (__force void __user *)src, len) ?
54648+ return copy_from_user(dst, (void __force_user *)src, len) ?
54649 -EFAULT : 0;
54650 }
54651
54652@@ -572,19 +572,19 @@ static inline int putused_user(struct vring_used_elem *dst,
54653 const struct vring_used_elem *src,
54654 unsigned int num)
54655 {
54656- return copy_to_user((__force void __user *)dst, src,
54657+ return copy_to_user((void __force_user *)dst, src,
54658 sizeof(*dst) * num) ? -EFAULT : 0;
54659 }
54660
54661 static inline int xfer_from_user(void *src, void *dst, size_t len)
54662 {
54663- return copy_from_user(dst, (__force void __user *)src, len) ?
54664+ return copy_from_user(dst, (void __force_user *)src, len) ?
54665 -EFAULT : 0;
54666 }
54667
54668 static inline int xfer_to_user(void *dst, void *src, size_t len)
54669 {
54670- return copy_to_user((__force void __user *)dst, src, len) ?
54671+ return copy_to_user((void __force_user *)dst, src, len) ?
54672 -EFAULT : 0;
54673 }
54674
54675@@ -621,9 +621,9 @@ int vringh_init_user(struct vringh *vrh, u64 features,
54676 vrh->last_used_idx = 0;
54677 vrh->vring.num = num;
54678 /* vring expects kernel addresses, but only used via accessors. */
54679- vrh->vring.desc = (__force struct vring_desc *)desc;
54680- vrh->vring.avail = (__force struct vring_avail *)avail;
54681- vrh->vring.used = (__force struct vring_used *)used;
54682+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
54683+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
54684+ vrh->vring.used = (__force_kernel struct vring_used *)used;
54685 return 0;
54686 }
54687 EXPORT_SYMBOL(vringh_init_user);
54688@@ -826,7 +826,7 @@ static inline int getu16_kern(const struct vringh *vrh,
54689
54690 static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
54691 {
54692- ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
54693+ ACCESS_ONCE_RW(*p) = cpu_to_vringh16(vrh, val);
54694 return 0;
54695 }
54696
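The vringh casts trade the open-coded `(__force ... __user *)` spelling for the patch's combined __force_user/__force_kernel annotations. Under UDEREF/SEGMEXEC the user and kernel address spaces are genuinely disjoint, so grsecurity leans harder on sparse's address-space checking and adds shorthand markers for deliberate crossings. Paraphrased definitions; all of them compile away outside checker builds:

    #ifdef __CHECKER__
    # define __user         __attribute__((noderef, address_space(1)))
    # define __kernel       __attribute__((address_space(0)))
    # define __force        __attribute__((force))
    # define __force_user   __force __user
    # define __force_kernel __force __kernel
    #else
    # define __user
    # define __kernel
    # define __force
    # define __force_user
    # define __force_kernel
    #endif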
54697diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
54698index 84a110a..96312c3 100644
54699--- a/drivers/video/backlight/kb3886_bl.c
54700+++ b/drivers/video/backlight/kb3886_bl.c
54701@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
54702 static unsigned long kb3886bl_flags;
54703 #define KB3886BL_SUSPENDED 0x01
54704
54705-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
54706+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
54707 {
54708 .ident = "Sahara Touch-iT",
54709 .matches = {
54710diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
54711index 1b0b233..6f34c2c 100644
54712--- a/drivers/video/fbdev/arcfb.c
54713+++ b/drivers/video/fbdev/arcfb.c
54714@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
54715 return -ENOSPC;
54716
54717 err = 0;
54718- if ((count + p) > fbmemlength) {
54719+ if (count > (fbmemlength - p)) {
54720 count = fbmemlength - p;
54721 err = -ENOSPC;
54722 }
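The arcfb_write() hunk is a textbook overflow-safe bounds check: `count + p` can wrap for large count values, letting the comparison pass and the subsequent copy run far past fbmemlength, while `count > fbmemlength - p` cannot wrap once p has been validated against fbmemlength. A runnable demonstration with invented values:

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
        unsigned long fbmemlength = 4096;       /* buffer size */
        unsigned long p = 100;                  /* offset, already <= fbmemlength */
        unsigned long count = ULONG_MAX - 50;   /* hostile write length */

        if ((count + p) > fbmemlength)          /* wraps to 49: check passes */
            puts("old check: rejected");
        else
            puts("old check: accepted an impossible write");

        if (count > (fbmemlength - p))          /* cannot wrap */
            puts("new check: rejected");
        return 0;
    }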
54723diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
54724index aedf2fb..47c9aca 100644
54725--- a/drivers/video/fbdev/aty/aty128fb.c
54726+++ b/drivers/video/fbdev/aty/aty128fb.c
54727@@ -149,7 +149,7 @@ enum {
54728 };
54729
54730 /* Must match above enum */
54731-static char * const r128_family[] = {
54732+static const char * const r128_family[] = {
54733 "AGP",
54734 "PCI",
54735 "PRO AGP",
54736diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
54737index 37ec09b..98f8862 100644
54738--- a/drivers/video/fbdev/aty/atyfb_base.c
54739+++ b/drivers/video/fbdev/aty/atyfb_base.c
54740@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
54741 par->accel_flags = var->accel_flags; /* hack */
54742
54743 if (var->accel_flags) {
54744- info->fbops->fb_sync = atyfb_sync;
54745+ pax_open_kernel();
54746+ *(void **)&info->fbops->fb_sync = atyfb_sync;
54747+ pax_close_kernel();
54748 info->flags &= ~FBINFO_HWACCEL_DISABLED;
54749 } else {
54750- info->fbops->fb_sync = NULL;
54751+ pax_open_kernel();
54752+ *(void **)&info->fbops->fb_sync = NULL;
54753+ pax_close_kernel();
54754 info->flags |= FBINFO_HWACCEL_DISABLED;
54755 }
54756
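This atyfb hunk is the first of a long series below (mach64_cursor, fb_defio, mb862xx, nvidia, omap2/dss, s1d13xxx, smscufx, udlfb, uvesafb, vesafb): the patch's constify plugin moves ops tables such as fb_ops into read-only memory, so a driver that legitimately swaps a function pointer at runtime must open a write window first, and the `*(void **)&` cast discards the const the field's type has gained. A runnable userspace analogue of the open/write/close sequence, using mprotect() in place of pax_open_kernel()/pax_close_kernel():

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct ops { int (*hook)(void); };

    static int real_hook(void) { return 42; }

    int main(void)
    {
        long pg = sysconf(_SC_PAGESIZE);
        struct ops *t = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        t->hook = NULL;
        mprotect(t, pg, PROT_READ);              /* table is now read-only */

        mprotect(t, pg, PROT_READ | PROT_WRITE); /* ~ pax_open_kernel() */
        t->hook = real_hook;
        mprotect(t, pg, PROT_READ);              /* ~ pax_close_kernel() */

        printf("%d\n", t->hook());
        return 0;
    }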
54757diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
54758index 2fa0317..4983f2a 100644
54759--- a/drivers/video/fbdev/aty/mach64_cursor.c
54760+++ b/drivers/video/fbdev/aty/mach64_cursor.c
54761@@ -8,6 +8,7 @@
54762 #include "../core/fb_draw.h"
54763
54764 #include <asm/io.h>
54765+#include <asm/pgtable.h>
54766
54767 #ifdef __sparc__
54768 #include <asm/fbio.h>
54769@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
54770 info->sprite.buf_align = 16; /* and 64 lines tall. */
54771 info->sprite.flags = FB_PIXMAP_IO;
54772
54773- info->fbops->fb_cursor = atyfb_cursor;
54774+ pax_open_kernel();
54775+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
54776+ pax_close_kernel();
54777
54778 return 0;
54779 }
54780diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
54781index d6cab1f..112f680 100644
54782--- a/drivers/video/fbdev/core/fb_defio.c
54783+++ b/drivers/video/fbdev/core/fb_defio.c
54784@@ -207,7 +207,9 @@ void fb_deferred_io_init(struct fb_info *info)
54785
54786 BUG_ON(!fbdefio);
54787 mutex_init(&fbdefio->lock);
54788- info->fbops->fb_mmap = fb_deferred_io_mmap;
54789+ pax_open_kernel();
54790+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
54791+ pax_close_kernel();
54792 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
54793 INIT_LIST_HEAD(&fbdefio->pagelist);
54794 if (fbdefio->delay == 0) /* set a default of 1 s */
54795@@ -238,7 +240,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
54796 page->mapping = NULL;
54797 }
54798
54799- info->fbops->fb_mmap = NULL;
54800+ *(void **)&info->fbops->fb_mmap = NULL;
54801 mutex_destroy(&fbdefio->lock);
54802 }
54803 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
54804diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
54805index 0705d88..d9429bf 100644
54806--- a/drivers/video/fbdev/core/fbmem.c
54807+++ b/drivers/video/fbdev/core/fbmem.c
54808@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
54809 __u32 data;
54810 int err;
54811
54812- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
54813+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
54814
54815 data = (__u32) (unsigned long) fix->smem_start;
54816 err |= put_user(data, &fix32->smem_start);
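In the fbmem.c hunk, `&fix32->id` and `fix32->id` name the same address, because id is an array; the patch prefers the decayed form so the destination pointer's type matches the character data being copied instead of being a pointer-to-array. The generated code is identical; only the typing that the checker sees improves. A two-line demonstration:

    #include <stdio.h>

    struct fix { char id[16]; };

    int main(void)
    {
        struct fix f;
        /* same address, different types: f.id decays to char *,
           while &f.id has type char (*)[16] */
        printf("%p %p\n", (void *)f.id, (void *)&f.id);
        return 0;
    }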
54817diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
54818index 4254336..282567e 100644
54819--- a/drivers/video/fbdev/hyperv_fb.c
54820+++ b/drivers/video/fbdev/hyperv_fb.c
54821@@ -240,7 +240,7 @@ static uint screen_fb_size;
54822 static inline int synthvid_send(struct hv_device *hdev,
54823 struct synthvid_msg *msg)
54824 {
54825- static atomic64_t request_id = ATOMIC64_INIT(0);
54826+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
54827 int ret;
54828
54829 msg->pipe_hdr.type = PIPE_MSG_DATA;
54830@@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev,
54831
54832 ret = vmbus_sendpacket(hdev->channel, msg,
54833 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
54834- atomic64_inc_return(&request_id),
54835+ atomic64_inc_return_unchecked(&request_id),
54836 VM_PKT_DATA_INBAND, 0);
54837
54838 if (ret)
54839diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
54840index 7672d2e..b56437f 100644
54841--- a/drivers/video/fbdev/i810/i810_accel.c
54842+++ b/drivers/video/fbdev/i810/i810_accel.c
54843@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
54844 }
54845 }
54846 printk("ringbuffer lockup!!!\n");
54847+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
54848 i810_report_error(mmio);
54849 par->dev_flags |= LOCKUP;
54850 info->pixmap.scan_align = 1;
54851diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54852index a01147f..5d896f8 100644
54853--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54854+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54855@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
54856
54857 #ifdef CONFIG_FB_MATROX_MYSTIQUE
54858 struct matrox_switch matrox_mystique = {
54859- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
54860+ .preinit = MGA1064_preinit,
54861+ .reset = MGA1064_reset,
54862+ .init = MGA1064_init,
54863+ .restore = MGA1064_restore,
54864 };
54865 EXPORT_SYMBOL(matrox_mystique);
54866 #endif
54867
54868 #ifdef CONFIG_FB_MATROX_G
54869 struct matrox_switch matrox_G100 = {
54870- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
54871+ .preinit = MGAG100_preinit,
54872+ .reset = MGAG100_reset,
54873+ .init = MGAG100_init,
54874+ .restore = MGAG100_restore,
54875 };
54876 EXPORT_SYMBOL(matrox_G100);
54877 #endif
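The matrox_switch rewrite here, like the Ti3026 and sh_mobile_lcdc hunks below, swaps positional initializers for designated ones. Positional initialization silently breaks when member order changes, and this patch's structure-layout randomization (randomize_layout) can reorder exactly such function-pointer structs; naming every field keeps the initializers order-independent. An illustration with a stand-in type:

    struct ops_like {                 /* stand-in, not the kernel struct */
        int  (*preinit)(void *);
        void (*reset)(void *);
        int  (*init)(void *);
        void (*restore)(void *);
    };

    static int  my_preinit(void *p) { (void)p; return 0; }
    static void my_reset(void *p)   { (void)p; }
    static int  my_init(void *p)    { (void)p; return 0; }
    static void my_restore(void *p) { (void)p; }

    static const struct ops_like sw = {
        .preinit = my_preinit,        /* survives any field reordering */
        .reset   = my_reset,
        .init    = my_init,
        .restore = my_restore,
    };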
54878diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54879index 195ad7c..09743fc 100644
54880--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54881+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54882@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
54883 }
54884
54885 struct matrox_switch matrox_millennium = {
54886- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
54887+ .preinit = Ti3026_preinit,
54888+ .reset = Ti3026_reset,
54889+ .init = Ti3026_init,
54890+ .restore = Ti3026_restore
54891 };
54892 EXPORT_SYMBOL(matrox_millennium);
54893 #endif
54894diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54895index fe92eed..106e085 100644
54896--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54897+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54898@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
54899 struct mb862xxfb_par *par = info->par;
54900
54901 if (info->var.bits_per_pixel == 32) {
54902- info->fbops->fb_fillrect = cfb_fillrect;
54903- info->fbops->fb_copyarea = cfb_copyarea;
54904- info->fbops->fb_imageblit = cfb_imageblit;
54905+ pax_open_kernel();
54906+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
54907+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
54908+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
54909+ pax_close_kernel();
54910 } else {
54911 outreg(disp, GC_L0EM, 3);
54912- info->fbops->fb_fillrect = mb86290fb_fillrect;
54913- info->fbops->fb_copyarea = mb86290fb_copyarea;
54914- info->fbops->fb_imageblit = mb86290fb_imageblit;
54915+ pax_open_kernel();
54916+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
54917+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
54918+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
54919+ pax_close_kernel();
54920 }
54921 outreg(draw, GDC_REG_DRAW_BASE, 0);
54922 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
54923diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
54924index def0412..fed6529 100644
54925--- a/drivers/video/fbdev/nvidia/nvidia.c
54926+++ b/drivers/video/fbdev/nvidia/nvidia.c
54927@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
54928 info->fix.line_length = (info->var.xres_virtual *
54929 info->var.bits_per_pixel) >> 3;
54930 if (info->var.accel_flags) {
54931- info->fbops->fb_imageblit = nvidiafb_imageblit;
54932- info->fbops->fb_fillrect = nvidiafb_fillrect;
54933- info->fbops->fb_copyarea = nvidiafb_copyarea;
54934- info->fbops->fb_sync = nvidiafb_sync;
54935+ pax_open_kernel();
54936+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
54937+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
54938+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
54939+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
54940+ pax_close_kernel();
54941 info->pixmap.scan_align = 4;
54942 info->flags &= ~FBINFO_HWACCEL_DISABLED;
54943 info->flags |= FBINFO_READS_FAST;
54944 NVResetGraphics(info);
54945 } else {
54946- info->fbops->fb_imageblit = cfb_imageblit;
54947- info->fbops->fb_fillrect = cfb_fillrect;
54948- info->fbops->fb_copyarea = cfb_copyarea;
54949- info->fbops->fb_sync = NULL;
54950+ pax_open_kernel();
54951+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
54952+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
54953+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
54954+ *(void **)&info->fbops->fb_sync = NULL;
54955+ pax_close_kernel();
54956 info->pixmap.scan_align = 1;
54957 info->flags |= FBINFO_HWACCEL_DISABLED;
54958 info->flags &= ~FBINFO_READS_FAST;
54959@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
54960 info->pixmap.size = 8 * 1024;
54961 info->pixmap.flags = FB_PIXMAP_SYSTEM;
54962
54963- if (!hwcur)
54964- info->fbops->fb_cursor = NULL;
54965+ if (!hwcur) {
54966+ pax_open_kernel();
54967+ *(void **)&info->fbops->fb_cursor = NULL;
54968+ pax_close_kernel();
54969+ }
54970
54971 info->var.accel_flags = (!noaccel);
54972
54973diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
54974index 2412a0d..294215b 100644
54975--- a/drivers/video/fbdev/omap2/dss/display.c
54976+++ b/drivers/video/fbdev/omap2/dss/display.c
54977@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
54978 if (dssdev->name == NULL)
54979 dssdev->name = dssdev->alias;
54980
54981+ pax_open_kernel();
54982 if (drv && drv->get_resolution == NULL)
54983- drv->get_resolution = omapdss_default_get_resolution;
54984+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
54985 if (drv && drv->get_recommended_bpp == NULL)
54986- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
54987+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
54988 if (drv && drv->get_timings == NULL)
54989- drv->get_timings = omapdss_default_get_timings;
54990+ *(void **)&drv->get_timings = omapdss_default_get_timings;
54991+ pax_close_kernel();
54992
54993 mutex_lock(&panel_list_mutex);
54994 list_add_tail(&dssdev->panel_list, &panel_list);
54995diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
54996index 83433cb..71e9b98 100644
54997--- a/drivers/video/fbdev/s1d13xxxfb.c
54998+++ b/drivers/video/fbdev/s1d13xxxfb.c
54999@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
55000
55001 switch(prod_id) {
55002 case S1D13506_PROD_ID: /* activate acceleration */
55003- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
55004- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
55005+ pax_open_kernel();
55006+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
55007+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
55008+ pax_close_kernel();
55009 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
55010 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
55011 break;
55012diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
55013index d3013cd..95b8285 100644
55014--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
55015+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
55016@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
55017 }
55018
55019 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
55020- lcdc_sys_write_index,
55021- lcdc_sys_write_data,
55022- lcdc_sys_read_data,
55023+ .write_index = lcdc_sys_write_index,
55024+ .write_data = lcdc_sys_write_data,
55025+ .read_data = lcdc_sys_read_data,
55026 };
55027
55028 static int sh_mobile_lcdc_sginit(struct fb_info *info,
55029diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
55030index 9279e5f..d5f5276 100644
55031--- a/drivers/video/fbdev/smscufx.c
55032+++ b/drivers/video/fbdev/smscufx.c
55033@@ -1174,7 +1174,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
55034 fb_deferred_io_cleanup(info);
55035 kfree(info->fbdefio);
55036 info->fbdefio = NULL;
55037- info->fbops->fb_mmap = ufx_ops_mmap;
55038+ pax_open_kernel();
55039+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
55040+ pax_close_kernel();
55041 }
55042
55043 pr_debug("released /dev/fb%d user=%d count=%d",
55044diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
55045index ff2b873..626a8d5 100644
55046--- a/drivers/video/fbdev/udlfb.c
55047+++ b/drivers/video/fbdev/udlfb.c
55048@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
55049 dlfb_urb_completion(urb);
55050
55051 error:
55052- atomic_add(bytes_sent, &dev->bytes_sent);
55053- atomic_add(bytes_identical, &dev->bytes_identical);
55054- atomic_add(width*height*2, &dev->bytes_rendered);
55055+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
55056+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
55057+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
55058 end_cycles = get_cycles();
55059- atomic_add(((unsigned int) ((end_cycles - start_cycles)
55060+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
55061 >> 10)), /* Kcycles */
55062 &dev->cpu_kcycles_used);
55063
55064@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
55065 dlfb_urb_completion(urb);
55066
55067 error:
55068- atomic_add(bytes_sent, &dev->bytes_sent);
55069- atomic_add(bytes_identical, &dev->bytes_identical);
55070- atomic_add(bytes_rendered, &dev->bytes_rendered);
55071+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
55072+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
55073+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
55074 end_cycles = get_cycles();
55075- atomic_add(((unsigned int) ((end_cycles - start_cycles)
55076+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
55077 >> 10)), /* Kcycles */
55078 &dev->cpu_kcycles_used);
55079 }
55080@@ -991,7 +991,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
55081 fb_deferred_io_cleanup(info);
55082 kfree(info->fbdefio);
55083 info->fbdefio = NULL;
55084- info->fbops->fb_mmap = dlfb_ops_mmap;
55085+ pax_open_kernel();
55086+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
55087+ pax_close_kernel();
55088 }
55089
55090 pr_warn("released /dev/fb%d user=%d count=%d\n",
55091@@ -1373,7 +1375,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
55092 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55093 struct dlfb_data *dev = fb_info->par;
55094 return snprintf(buf, PAGE_SIZE, "%u\n",
55095- atomic_read(&dev->bytes_rendered));
55096+ atomic_read_unchecked(&dev->bytes_rendered));
55097 }
55098
55099 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
55100@@ -1381,7 +1383,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
55101 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55102 struct dlfb_data *dev = fb_info->par;
55103 return snprintf(buf, PAGE_SIZE, "%u\n",
55104- atomic_read(&dev->bytes_identical));
55105+ atomic_read_unchecked(&dev->bytes_identical));
55106 }
55107
55108 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
55109@@ -1389,7 +1391,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
55110 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55111 struct dlfb_data *dev = fb_info->par;
55112 return snprintf(buf, PAGE_SIZE, "%u\n",
55113- atomic_read(&dev->bytes_sent));
55114+ atomic_read_unchecked(&dev->bytes_sent));
55115 }
55116
55117 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
55118@@ -1397,7 +1399,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
55119 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55120 struct dlfb_data *dev = fb_info->par;
55121 return snprintf(buf, PAGE_SIZE, "%u\n",
55122- atomic_read(&dev->cpu_kcycles_used));
55123+ atomic_read_unchecked(&dev->cpu_kcycles_used));
55124 }
55125
55126 static ssize_t edid_show(
55127@@ -1457,10 +1459,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
55128 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55129 struct dlfb_data *dev = fb_info->par;
55130
55131- atomic_set(&dev->bytes_rendered, 0);
55132- atomic_set(&dev->bytes_identical, 0);
55133- atomic_set(&dev->bytes_sent, 0);
55134- atomic_set(&dev->cpu_kcycles_used, 0);
55135+ atomic_set_unchecked(&dev->bytes_rendered, 0);
55136+ atomic_set_unchecked(&dev->bytes_identical, 0);
55137+ atomic_set_unchecked(&dev->bytes_sent, 0);
55138+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
55139
55140 return count;
55141 }
55142diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
55143index d32d1c4..46722e6 100644
55144--- a/drivers/video/fbdev/uvesafb.c
55145+++ b/drivers/video/fbdev/uvesafb.c
55146@@ -19,6 +19,7 @@
55147 #include <linux/io.h>
55148 #include <linux/mutex.h>
55149 #include <linux/slab.h>
55150+#include <linux/moduleloader.h>
55151 #include <video/edid.h>
55152 #include <video/uvesafb.h>
55153 #ifdef CONFIG_X86
55154@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
55155 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
55156 par->pmi_setpal = par->ypan = 0;
55157 } else {
55158+
55159+#ifdef CONFIG_PAX_KERNEXEC
55160+#ifdef CONFIG_MODULES
55161+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
55162+#endif
55163+ if (!par->pmi_code) {
55164+ par->pmi_setpal = par->ypan = 0;
55165+ return 0;
55166+ }
55167+#endif
55168+
55169 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
55170 + task->t.regs.edi);
55171+
55172+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55173+ pax_open_kernel();
55174+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
55175+ pax_close_kernel();
55176+
55177+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
55178+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
55179+#else
55180 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
55181 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
55182+#endif
55183+
55184 printk(KERN_INFO "uvesafb: protected mode interface info at "
55185 "%04x:%04x\n",
55186 (u16)task->t.regs.es, (u16)task->t.regs.edi);
55187@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
55188 par->ypan = ypan;
55189
55190 if (par->pmi_setpal || par->ypan) {
55191+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
55192 if (__supported_pte_mask & _PAGE_NX) {
55193 par->pmi_setpal = par->ypan = 0;
55194 printk(KERN_WARNING "uvesafb: NX protection is active, "
55195 "better not use the PMI.\n");
55196- } else {
55197+ } else
55198+#endif
55199 uvesafb_vbe_getpmi(task, par);
55200- }
55201 }
55202 #else
55203 /* The protected mode interface is not available on non-x86. */
55204@@ -1452,8 +1476,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55205 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
55206
55207 /* Disable blanking if the user requested so. */
55208- if (!blank)
55209- info->fbops->fb_blank = NULL;
55210+ if (!blank) {
55211+ pax_open_kernel();
55212+ *(void **)&info->fbops->fb_blank = NULL;
55213+ pax_close_kernel();
55214+ }
55215
55216 /*
55217 * Find out how much IO memory is required for the mode with
55218@@ -1524,8 +1551,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55219 info->flags = FBINFO_FLAG_DEFAULT |
55220 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
55221
55222- if (!par->ypan)
55223- info->fbops->fb_pan_display = NULL;
55224+ if (!par->ypan) {
55225+ pax_open_kernel();
55226+ *(void **)&info->fbops->fb_pan_display = NULL;
55227+ pax_close_kernel();
55228+ }
55229 }
55230
55231 static void uvesafb_init_mtrr(struct fb_info *info)
55232@@ -1786,6 +1816,11 @@ out_mode:
55233 out:
55234 kfree(par->vbe_modes);
55235
55236+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55237+ if (par->pmi_code)
55238+ module_memfree_exec(par->pmi_code);
55239+#endif
55240+
55241 framebuffer_release(info);
55242 return err;
55243 }
55244@@ -1810,6 +1845,11 @@ static int uvesafb_remove(struct platform_device *dev)
55245 kfree(par->vbe_state_orig);
55246 kfree(par->vbe_state_saved);
55247
55248+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55249+ if (par->pmi_code)
55250+ module_memfree_exec(par->pmi_code);
55251+#endif
55252+
55253 framebuffer_release(info);
55254 }
55255 return 0;
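The uvesafb changes, and the vesafb ones just below, handle the same KERNEXEC consequence: writable memory is never executable, so the VESA protected-mode interface code the BIOS left in low memory can no longer be called in place. The patch copies it into executable module space with module_alloc_exec(), writes through a pax_open_kernel() window, and translates the entry points with ktva_ktla(). A runnable userspace analogue of copy-then-execute under a W^X policy, for x86-64 Linux:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        /* machine code for: mov eax, 42 ; ret */
        static const unsigned char payload[] =
            { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };

        unsigned char *exec = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        memcpy(exec, payload, sizeof(payload));      /* write first... */
        mprotect(exec, 4096, PROT_READ | PROT_EXEC); /* ...then drop write */

        int (*fn)(void) = (int (*)(void))exec;
        printf("%d\n", fn());                        /* prints 42 */
        return 0;
    }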
55256diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
55257index d79a0ac..2d0c3d4 100644
55258--- a/drivers/video/fbdev/vesafb.c
55259+++ b/drivers/video/fbdev/vesafb.c
55260@@ -9,6 +9,7 @@
55261 */
55262
55263 #include <linux/module.h>
55264+#include <linux/moduleloader.h>
55265 #include <linux/kernel.h>
55266 #include <linux/errno.h>
55267 #include <linux/string.h>
55268@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
55269 static int vram_total; /* Set total amount of memory */
55270 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
55271 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
55272-static void (*pmi_start)(void) __read_mostly;
55273-static void (*pmi_pal) (void) __read_mostly;
55274+static void (*pmi_start)(void) __read_only;
55275+static void (*pmi_pal) (void) __read_only;
55276 static int depth __read_mostly;
55277 static int vga_compat __read_mostly;
55278 /* --------------------------------------------------------------------- */
55279@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
55280 unsigned int size_remap;
55281 unsigned int size_total;
55282 char *option = NULL;
55283+ void *pmi_code = NULL;
55284
55285 /* ignore error return of fb_get_options */
55286 fb_get_options("vesafb", &option);
55287@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
55288 size_remap = size_total;
55289 vesafb_fix.smem_len = size_remap;
55290
55291-#ifndef __i386__
55292- screen_info.vesapm_seg = 0;
55293-#endif
55294-
55295 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
55296 printk(KERN_WARNING
55297 "vesafb: cannot reserve video memory at 0x%lx\n",
55298@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
55299 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
55300 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
55301
55302+#ifdef __i386__
55303+
55304+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55305+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
55306+ if (!pmi_code)
55307+#elif !defined(CONFIG_PAX_KERNEXEC)
55308+ if (0)
55309+#endif
55310+
55311+#endif
55312+ screen_info.vesapm_seg = 0;
55313+
55314 if (screen_info.vesapm_seg) {
55315- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
55316- screen_info.vesapm_seg,screen_info.vesapm_off);
55317+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
55318+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
55319 }
55320
55321 if (screen_info.vesapm_seg < 0xc000)
55322@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
55323
55324 if (ypan || pmi_setpal) {
55325 unsigned short *pmi_base;
55326+
55327 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
55328- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
55329- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
55330+
55331+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55332+ pax_open_kernel();
55333+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
55334+#else
55335+ pmi_code = pmi_base;
55336+#endif
55337+
55338+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
55339+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
55340+
55341+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55342+ pmi_start = ktva_ktla(pmi_start);
55343+ pmi_pal = ktva_ktla(pmi_pal);
55344+ pax_close_kernel();
55345+#endif
55346+
55347 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
55348 if (pmi_base[3]) {
55349 printk(KERN_INFO "vesafb: pmi: ports = ");
55350@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
55351 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
55352 (ypan ? FBINFO_HWACCEL_YPAN : 0);
55353
55354- if (!ypan)
55355- info->fbops->fb_pan_display = NULL;
55356+ if (!ypan) {
55357+ pax_open_kernel();
55358+ *(void **)&info->fbops->fb_pan_display = NULL;
55359+ pax_close_kernel();
55360+ }
55361
55362 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
55363 err = -ENOMEM;
55364@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
55365 fb_info(info, "%s frame buffer device\n", info->fix.id);
55366 return 0;
55367 err:
55368+
55369+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55370+ module_memfree_exec(pmi_code);
55371+#endif
55372+
55373 if (info->screen_base)
55374 iounmap(info->screen_base);
55375 framebuffer_release(info);
55376diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
55377index 88714ae..16c2e11 100644
55378--- a/drivers/video/fbdev/via/via_clock.h
55379+++ b/drivers/video/fbdev/via/via_clock.h
55380@@ -56,7 +56,7 @@ struct via_clock {
55381
55382 void (*set_engine_pll_state)(u8 state);
55383 void (*set_engine_pll)(struct via_pll_config config);
55384-};
55385+} __no_const;
55386
55387
55388 static inline u32 get_pll_internal_frequency(u32 ref_freq,
55389diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
55390index 3c14e43..2630570 100644
55391--- a/drivers/video/logo/logo_linux_clut224.ppm
55392+++ b/drivers/video/logo/logo_linux_clut224.ppm
55393@@ -2,1603 +2,1123 @@ P3
55394 # Standard 224-color Linux logo
55395 80 80
55396 255
 [PPM pixel data: 1603 rows of colour triplets removed, 1123 added; the patch replaces the stock 80x80, 224-colour boot logo bitmap]
55733- 0 0 0 0 0 0 0 0 0 0 0 0
55734- 0 0 0 0 0 0 0 0 0 0 0 0
55735- 0 0 0 0 0 0 0 0 0 0 0 0
55736- 0 0 0 0 0 0 0 0 0 0 0 0
55737- 0 0 0 0 0 0 0 0 0 0 0 0
55738- 0 0 0 0 0 0 0 0 0 0 0 0
55739- 0 0 0 0 0 0 0 0 0 0 0 0
55740- 0 0 0 0 0 0 0 0 0 0 0 0
55741- 0 0 0 0 0 0 0 0 0 0 0 0
55742- 0 0 0 0 0 0 0 0 0 0 0 0
55743- 0 0 0 0 0 0 0 0 0 14 14 14
55744- 46 46 46 82 82 82 2 2 6 106 106 106
55745-170 170 170 26 26 26 86 86 86 226 226 226
55746-123 123 123 10 10 10 14 14 14 46 46 46
55747-231 231 231 190 190 190 6 6 6 70 70 70
55748- 90 90 90 238 238 238 158 158 158 2 2 6
55749- 2 2 6 2 2 6 2 2 6 2 2 6
55750- 70 70 70 58 58 58 22 22 22 6 6 6
55751- 0 0 0 0 0 0 0 0 0 0 0 0
55752- 0 0 0 0 0 0 0 0 0 0 0 0
55753- 0 0 0 0 0 0 0 0 0 0 0 0
55754- 0 0 0 0 0 0 0 0 0 0 0 0
55755- 0 0 0 0 0 0 0 0 0 0 0 0
55756- 0 0 0 0 0 0 0 0 0 0 0 0
55757- 0 0 0 0 0 0 0 0 1 0 0 0
55758- 0 0 1 0 0 1 0 0 1 0 0 0
55759- 0 0 0 0 0 0 0 0 0 0 0 0
55760- 0 0 0 0 0 0 0 0 0 0 0 0
55761- 0 0 0 0 0 0 0 0 0 0 0 0
55762- 0 0 0 0 0 0 0 0 0 0 0 0
55763- 0 0 0 0 0 0 0 0 0 14 14 14
55764- 42 42 42 86 86 86 6 6 6 116 116 116
55765-106 106 106 6 6 6 70 70 70 149 149 149
55766-128 128 128 18 18 18 38 38 38 54 54 54
55767-221 221 221 106 106 106 2 2 6 14 14 14
55768- 46 46 46 190 190 190 198 198 198 2 2 6
55769- 2 2 6 2 2 6 2 2 6 2 2 6
55770- 74 74 74 62 62 62 22 22 22 6 6 6
55771- 0 0 0 0 0 0 0 0 0 0 0 0
55772- 0 0 0 0 0 0 0 0 0 0 0 0
55773- 0 0 0 0 0 0 0 0 0 0 0 0
55774- 0 0 0 0 0 0 0 0 0 0 0 0
55775- 0 0 0 0 0 0 0 0 0 0 0 0
55776- 0 0 0 0 0 0 0 0 0 0 0 0
55777- 0 0 0 0 0 0 0 0 1 0 0 0
55778- 0 0 1 0 0 0 0 0 1 0 0 0
55779- 0 0 0 0 0 0 0 0 0 0 0 0
55780- 0 0 0 0 0 0 0 0 0 0 0 0
55781- 0 0 0 0 0 0 0 0 0 0 0 0
55782- 0 0 0 0 0 0 0 0 0 0 0 0
55783- 0 0 0 0 0 0 0 0 0 14 14 14
55784- 42 42 42 94 94 94 14 14 14 101 101 101
55785-128 128 128 2 2 6 18 18 18 116 116 116
55786-118 98 46 121 92 8 121 92 8 98 78 10
55787-162 162 162 106 106 106 2 2 6 2 2 6
55788- 2 2 6 195 195 195 195 195 195 6 6 6
55789- 2 2 6 2 2 6 2 2 6 2 2 6
55790- 74 74 74 62 62 62 22 22 22 6 6 6
55791- 0 0 0 0 0 0 0 0 0 0 0 0
55792- 0 0 0 0 0 0 0 0 0 0 0 0
55793- 0 0 0 0 0 0 0 0 0 0 0 0
55794- 0 0 0 0 0 0 0 0 0 0 0 0
55795- 0 0 0 0 0 0 0 0 0 0 0 0
55796- 0 0 0 0 0 0 0 0 0 0 0 0
55797- 0 0 0 0 0 0 0 0 1 0 0 1
55798- 0 0 1 0 0 0 0 0 1 0 0 0
55799- 0 0 0 0 0 0 0 0 0 0 0 0
55800- 0 0 0 0 0 0 0 0 0 0 0 0
55801- 0 0 0 0 0 0 0 0 0 0 0 0
55802- 0 0 0 0 0 0 0 0 0 0 0 0
55803- 0 0 0 0 0 0 0 0 0 10 10 10
55804- 38 38 38 90 90 90 14 14 14 58 58 58
55805-210 210 210 26 26 26 54 38 6 154 114 10
55806-226 170 11 236 186 11 225 175 15 184 144 12
55807-215 174 15 175 146 61 37 26 9 2 2 6
55808- 70 70 70 246 246 246 138 138 138 2 2 6
55809- 2 2 6 2 2 6 2 2 6 2 2 6
55810- 70 70 70 66 66 66 26 26 26 6 6 6
55811- 0 0 0 0 0 0 0 0 0 0 0 0
55812- 0 0 0 0 0 0 0 0 0 0 0 0
55813- 0 0 0 0 0 0 0 0 0 0 0 0
55814- 0 0 0 0 0 0 0 0 0 0 0 0
55815- 0 0 0 0 0 0 0 0 0 0 0 0
55816- 0 0 0 0 0 0 0 0 0 0 0 0
55817- 0 0 0 0 0 0 0 0 0 0 0 0
55818- 0 0 0 0 0 0 0 0 0 0 0 0
55819- 0 0 0 0 0 0 0 0 0 0 0 0
55820- 0 0 0 0 0 0 0 0 0 0 0 0
55821- 0 0 0 0 0 0 0 0 0 0 0 0
55822- 0 0 0 0 0 0 0 0 0 0 0 0
55823- 0 0 0 0 0 0 0 0 0 10 10 10
55824- 38 38 38 86 86 86 14 14 14 10 10 10
55825-195 195 195 188 164 115 192 133 9 225 175 15
55826-239 182 13 234 190 10 232 195 16 232 200 30
55827-245 207 45 241 208 19 232 195 16 184 144 12
55828-218 194 134 211 206 186 42 42 42 2 2 6
55829- 2 2 6 2 2 6 2 2 6 2 2 6
55830- 50 50 50 74 74 74 30 30 30 6 6 6
55831- 0 0 0 0 0 0 0 0 0 0 0 0
55832- 0 0 0 0 0 0 0 0 0 0 0 0
55833- 0 0 0 0 0 0 0 0 0 0 0 0
55834- 0 0 0 0 0 0 0 0 0 0 0 0
55835- 0 0 0 0 0 0 0 0 0 0 0 0
55836- 0 0 0 0 0 0 0 0 0 0 0 0
55837- 0 0 0 0 0 0 0 0 0 0 0 0
55838- 0 0 0 0 0 0 0 0 0 0 0 0
55839- 0 0 0 0 0 0 0 0 0 0 0 0
55840- 0 0 0 0 0 0 0 0 0 0 0 0
55841- 0 0 0 0 0 0 0 0 0 0 0 0
55842- 0 0 0 0 0 0 0 0 0 0 0 0
55843- 0 0 0 0 0 0 0 0 0 10 10 10
55844- 34 34 34 86 86 86 14 14 14 2 2 6
55845-121 87 25 192 133 9 219 162 10 239 182 13
55846-236 186 11 232 195 16 241 208 19 244 214 54
55847-246 218 60 246 218 38 246 215 20 241 208 19
55848-241 208 19 226 184 13 121 87 25 2 2 6
55849- 2 2 6 2 2 6 2 2 6 2 2 6
55850- 50 50 50 82 82 82 34 34 34 10 10 10
55851- 0 0 0 0 0 0 0 0 0 0 0 0
55852- 0 0 0 0 0 0 0 0 0 0 0 0
55853- 0 0 0 0 0 0 0 0 0 0 0 0
55854- 0 0 0 0 0 0 0 0 0 0 0 0
55855- 0 0 0 0 0 0 0 0 0 0 0 0
55856- 0 0 0 0 0 0 0 0 0 0 0 0
55857- 0 0 0 0 0 0 0 0 0 0 0 0
55858- 0 0 0 0 0 0 0 0 0 0 0 0
55859- 0 0 0 0 0 0 0 0 0 0 0 0
55860- 0 0 0 0 0 0 0 0 0 0 0 0
55861- 0 0 0 0 0 0 0 0 0 0 0 0
55862- 0 0 0 0 0 0 0 0 0 0 0 0
55863- 0 0 0 0 0 0 0 0 0 10 10 10
55864- 34 34 34 82 82 82 30 30 30 61 42 6
55865-180 123 7 206 145 10 230 174 11 239 182 13
55866-234 190 10 238 202 15 241 208 19 246 218 74
55867-246 218 38 246 215 20 246 215 20 246 215 20
55868-226 184 13 215 174 15 184 144 12 6 6 6
55869- 2 2 6 2 2 6 2 2 6 2 2 6
55870- 26 26 26 94 94 94 42 42 42 14 14 14
55871- 0 0 0 0 0 0 0 0 0 0 0 0
55872- 0 0 0 0 0 0 0 0 0 0 0 0
55873- 0 0 0 0 0 0 0 0 0 0 0 0
55874- 0 0 0 0 0 0 0 0 0 0 0 0
55875- 0 0 0 0 0 0 0 0 0 0 0 0
55876- 0 0 0 0 0 0 0 0 0 0 0 0
55877- 0 0 0 0 0 0 0 0 0 0 0 0
55878- 0 0 0 0 0 0 0 0 0 0 0 0
55879- 0 0 0 0 0 0 0 0 0 0 0 0
55880- 0 0 0 0 0 0 0 0 0 0 0 0
55881- 0 0 0 0 0 0 0 0 0 0 0 0
55882- 0 0 0 0 0 0 0 0 0 0 0 0
55883- 0 0 0 0 0 0 0 0 0 10 10 10
55884- 30 30 30 78 78 78 50 50 50 104 69 6
55885-192 133 9 216 158 10 236 178 12 236 186 11
55886-232 195 16 241 208 19 244 214 54 245 215 43
55887-246 215 20 246 215 20 241 208 19 198 155 10
55888-200 144 11 216 158 10 156 118 10 2 2 6
55889- 2 2 6 2 2 6 2 2 6 2 2 6
55890- 6 6 6 90 90 90 54 54 54 18 18 18
55891- 6 6 6 0 0 0 0 0 0 0 0 0
55892- 0 0 0 0 0 0 0 0 0 0 0 0
55893- 0 0 0 0 0 0 0 0 0 0 0 0
55894- 0 0 0 0 0 0 0 0 0 0 0 0
55895- 0 0 0 0 0 0 0 0 0 0 0 0
55896- 0 0 0 0 0 0 0 0 0 0 0 0
55897- 0 0 0 0 0 0 0 0 0 0 0 0
55898- 0 0 0 0 0 0 0 0 0 0 0 0
55899- 0 0 0 0 0 0 0 0 0 0 0 0
55900- 0 0 0 0 0 0 0 0 0 0 0 0
55901- 0 0 0 0 0 0 0 0 0 0 0 0
55902- 0 0 0 0 0 0 0 0 0 0 0 0
55903- 0 0 0 0 0 0 0 0 0 10 10 10
55904- 30 30 30 78 78 78 46 46 46 22 22 22
55905-137 92 6 210 162 10 239 182 13 238 190 10
55906-238 202 15 241 208 19 246 215 20 246 215 20
55907-241 208 19 203 166 17 185 133 11 210 150 10
55908-216 158 10 210 150 10 102 78 10 2 2 6
55909- 6 6 6 54 54 54 14 14 14 2 2 6
55910- 2 2 6 62 62 62 74 74 74 30 30 30
55911- 10 10 10 0 0 0 0 0 0 0 0 0
55912- 0 0 0 0 0 0 0 0 0 0 0 0
55913- 0 0 0 0 0 0 0 0 0 0 0 0
55914- 0 0 0 0 0 0 0 0 0 0 0 0
55915- 0 0 0 0 0 0 0 0 0 0 0 0
55916- 0 0 0 0 0 0 0 0 0 0 0 0
55917- 0 0 0 0 0 0 0 0 0 0 0 0
55918- 0 0 0 0 0 0 0 0 0 0 0 0
55919- 0 0 0 0 0 0 0 0 0 0 0 0
55920- 0 0 0 0 0 0 0 0 0 0 0 0
55921- 0 0 0 0 0 0 0 0 0 0 0 0
55922- 0 0 0 0 0 0 0 0 0 0 0 0
55923- 0 0 0 0 0 0 0 0 0 10 10 10
55924- 34 34 34 78 78 78 50 50 50 6 6 6
55925- 94 70 30 139 102 15 190 146 13 226 184 13
55926-232 200 30 232 195 16 215 174 15 190 146 13
55927-168 122 10 192 133 9 210 150 10 213 154 11
55928-202 150 34 182 157 106 101 98 89 2 2 6
55929- 2 2 6 78 78 78 116 116 116 58 58 58
55930- 2 2 6 22 22 22 90 90 90 46 46 46
55931- 18 18 18 6 6 6 0 0 0 0 0 0
55932- 0 0 0 0 0 0 0 0 0 0 0 0
55933- 0 0 0 0 0 0 0 0 0 0 0 0
55934- 0 0 0 0 0 0 0 0 0 0 0 0
55935- 0 0 0 0 0 0 0 0 0 0 0 0
55936- 0 0 0 0 0 0 0 0 0 0 0 0
55937- 0 0 0 0 0 0 0 0 0 0 0 0
55938- 0 0 0 0 0 0 0 0 0 0 0 0
55939- 0 0 0 0 0 0 0 0 0 0 0 0
55940- 0 0 0 0 0 0 0 0 0 0 0 0
55941- 0 0 0 0 0 0 0 0 0 0 0 0
55942- 0 0 0 0 0 0 0 0 0 0 0 0
55943- 0 0 0 0 0 0 0 0 0 10 10 10
55944- 38 38 38 86 86 86 50 50 50 6 6 6
55945-128 128 128 174 154 114 156 107 11 168 122 10
55946-198 155 10 184 144 12 197 138 11 200 144 11
55947-206 145 10 206 145 10 197 138 11 188 164 115
55948-195 195 195 198 198 198 174 174 174 14 14 14
55949- 2 2 6 22 22 22 116 116 116 116 116 116
55950- 22 22 22 2 2 6 74 74 74 70 70 70
55951- 30 30 30 10 10 10 0 0 0 0 0 0
55952- 0 0 0 0 0 0 0 0 0 0 0 0
55953- 0 0 0 0 0 0 0 0 0 0 0 0
55954- 0 0 0 0 0 0 0 0 0 0 0 0
55955- 0 0 0 0 0 0 0 0 0 0 0 0
55956- 0 0 0 0 0 0 0 0 0 0 0 0
55957- 0 0 0 0 0 0 0 0 0 0 0 0
55958- 0 0 0 0 0 0 0 0 0 0 0 0
55959- 0 0 0 0 0 0 0 0 0 0 0 0
55960- 0 0 0 0 0 0 0 0 0 0 0 0
55961- 0 0 0 0 0 0 0 0 0 0 0 0
55962- 0 0 0 0 0 0 0 0 0 0 0 0
55963- 0 0 0 0 0 0 6 6 6 18 18 18
55964- 50 50 50 101 101 101 26 26 26 10 10 10
55965-138 138 138 190 190 190 174 154 114 156 107 11
55966-197 138 11 200 144 11 197 138 11 192 133 9
55967-180 123 7 190 142 34 190 178 144 187 187 187
55968-202 202 202 221 221 221 214 214 214 66 66 66
55969- 2 2 6 2 2 6 50 50 50 62 62 62
55970- 6 6 6 2 2 6 10 10 10 90 90 90
55971- 50 50 50 18 18 18 6 6 6 0 0 0
55972- 0 0 0 0 0 0 0 0 0 0 0 0
55973- 0 0 0 0 0 0 0 0 0 0 0 0
55974- 0 0 0 0 0 0 0 0 0 0 0 0
55975- 0 0 0 0 0 0 0 0 0 0 0 0
55976- 0 0 0 0 0 0 0 0 0 0 0 0
55977- 0 0 0 0 0 0 0 0 0 0 0 0
55978- 0 0 0 0 0 0 0 0 0 0 0 0
55979- 0 0 0 0 0 0 0 0 0 0 0 0
55980- 0 0 0 0 0 0 0 0 0 0 0 0
55981- 0 0 0 0 0 0 0 0 0 0 0 0
55982- 0 0 0 0 0 0 0 0 0 0 0 0
55983- 0 0 0 0 0 0 10 10 10 34 34 34
55984- 74 74 74 74 74 74 2 2 6 6 6 6
55985-144 144 144 198 198 198 190 190 190 178 166 146
55986-154 121 60 156 107 11 156 107 11 168 124 44
55987-174 154 114 187 187 187 190 190 190 210 210 210
55988-246 246 246 253 253 253 253 253 253 182 182 182
55989- 6 6 6 2 2 6 2 2 6 2 2 6
55990- 2 2 6 2 2 6 2 2 6 62 62 62
55991- 74 74 74 34 34 34 14 14 14 0 0 0
55992- 0 0 0 0 0 0 0 0 0 0 0 0
55993- 0 0 0 0 0 0 0 0 0 0 0 0
55994- 0 0 0 0 0 0 0 0 0 0 0 0
55995- 0 0 0 0 0 0 0 0 0 0 0 0
55996- 0 0 0 0 0 0 0 0 0 0 0 0
55997- 0 0 0 0 0 0 0 0 0 0 0 0
55998- 0 0 0 0 0 0 0 0 0 0 0 0
55999- 0 0 0 0 0 0 0 0 0 0 0 0
56000- 0 0 0 0 0 0 0 0 0 0 0 0
56001- 0 0 0 0 0 0 0 0 0 0 0 0
56002- 0 0 0 0 0 0 0 0 0 0 0 0
56003- 0 0 0 10 10 10 22 22 22 54 54 54
56004- 94 94 94 18 18 18 2 2 6 46 46 46
56005-234 234 234 221 221 221 190 190 190 190 190 190
56006-190 190 190 187 187 187 187 187 187 190 190 190
56007-190 190 190 195 195 195 214 214 214 242 242 242
56008-253 253 253 253 253 253 253 253 253 253 253 253
56009- 82 82 82 2 2 6 2 2 6 2 2 6
56010- 2 2 6 2 2 6 2 2 6 14 14 14
56011- 86 86 86 54 54 54 22 22 22 6 6 6
56012- 0 0 0 0 0 0 0 0 0 0 0 0
56013- 0 0 0 0 0 0 0 0 0 0 0 0
56014- 0 0 0 0 0 0 0 0 0 0 0 0
56015- 0 0 0 0 0 0 0 0 0 0 0 0
56016- 0 0 0 0 0 0 0 0 0 0 0 0
56017- 0 0 0 0 0 0 0 0 0 0 0 0
56018- 0 0 0 0 0 0 0 0 0 0 0 0
56019- 0 0 0 0 0 0 0 0 0 0 0 0
56020- 0 0 0 0 0 0 0 0 0 0 0 0
56021- 0 0 0 0 0 0 0 0 0 0 0 0
56022- 0 0 0 0 0 0 0 0 0 0 0 0
56023- 6 6 6 18 18 18 46 46 46 90 90 90
56024- 46 46 46 18 18 18 6 6 6 182 182 182
56025-253 253 253 246 246 246 206 206 206 190 190 190
56026-190 190 190 190 190 190 190 190 190 190 190 190
56027-206 206 206 231 231 231 250 250 250 253 253 253
56028-253 253 253 253 253 253 253 253 253 253 253 253
56029-202 202 202 14 14 14 2 2 6 2 2 6
56030- 2 2 6 2 2 6 2 2 6 2 2 6
56031- 42 42 42 86 86 86 42 42 42 18 18 18
56032- 6 6 6 0 0 0 0 0 0 0 0 0
56033- 0 0 0 0 0 0 0 0 0 0 0 0
56034- 0 0 0 0 0 0 0 0 0 0 0 0
56035- 0 0 0 0 0 0 0 0 0 0 0 0
56036- 0 0 0 0 0 0 0 0 0 0 0 0
56037- 0 0 0 0 0 0 0 0 0 0 0 0
56038- 0 0 0 0 0 0 0 0 0 0 0 0
56039- 0 0 0 0 0 0 0 0 0 0 0 0
56040- 0 0 0 0 0 0 0 0 0 0 0 0
56041- 0 0 0 0 0 0 0 0 0 0 0 0
56042- 0 0 0 0 0 0 0 0 0 6 6 6
56043- 14 14 14 38 38 38 74 74 74 66 66 66
56044- 2 2 6 6 6 6 90 90 90 250 250 250
56045-253 253 253 253 253 253 238 238 238 198 198 198
56046-190 190 190 190 190 190 195 195 195 221 221 221
56047-246 246 246 253 253 253 253 253 253 253 253 253
56048-253 253 253 253 253 253 253 253 253 253 253 253
56049-253 253 253 82 82 82 2 2 6 2 2 6
56050- 2 2 6 2 2 6 2 2 6 2 2 6
56051- 2 2 6 78 78 78 70 70 70 34 34 34
56052- 14 14 14 6 6 6 0 0 0 0 0 0
56053- 0 0 0 0 0 0 0 0 0 0 0 0
56054- 0 0 0 0 0 0 0 0 0 0 0 0
56055- 0 0 0 0 0 0 0 0 0 0 0 0
56056- 0 0 0 0 0 0 0 0 0 0 0 0
56057- 0 0 0 0 0 0 0 0 0 0 0 0
56058- 0 0 0 0 0 0 0 0 0 0 0 0
56059- 0 0 0 0 0 0 0 0 0 0 0 0
56060- 0 0 0 0 0 0 0 0 0 0 0 0
56061- 0 0 0 0 0 0 0 0 0 0 0 0
56062- 0 0 0 0 0 0 0 0 0 14 14 14
56063- 34 34 34 66 66 66 78 78 78 6 6 6
56064- 2 2 6 18 18 18 218 218 218 253 253 253
56065-253 253 253 253 253 253 253 253 253 246 246 246
56066-226 226 226 231 231 231 246 246 246 253 253 253
56067-253 253 253 253 253 253 253 253 253 253 253 253
56068-253 253 253 253 253 253 253 253 253 253 253 253
56069-253 253 253 178 178 178 2 2 6 2 2 6
56070- 2 2 6 2 2 6 2 2 6 2 2 6
56071- 2 2 6 18 18 18 90 90 90 62 62 62
56072- 30 30 30 10 10 10 0 0 0 0 0 0
56073- 0 0 0 0 0 0 0 0 0 0 0 0
56074- 0 0 0 0 0 0 0 0 0 0 0 0
56075- 0 0 0 0 0 0 0 0 0 0 0 0
56076- 0 0 0 0 0 0 0 0 0 0 0 0
56077- 0 0 0 0 0 0 0 0 0 0 0 0
56078- 0 0 0 0 0 0 0 0 0 0 0 0
56079- 0 0 0 0 0 0 0 0 0 0 0 0
56080- 0 0 0 0 0 0 0 0 0 0 0 0
56081- 0 0 0 0 0 0 0 0 0 0 0 0
56082- 0 0 0 0 0 0 10 10 10 26 26 26
56083- 58 58 58 90 90 90 18 18 18 2 2 6
56084- 2 2 6 110 110 110 253 253 253 253 253 253
56085-253 253 253 253 253 253 253 253 253 253 253 253
56086-250 250 250 253 253 253 253 253 253 253 253 253
56087-253 253 253 253 253 253 253 253 253 253 253 253
56088-253 253 253 253 253 253 253 253 253 253 253 253
56089-253 253 253 231 231 231 18 18 18 2 2 6
56090- 2 2 6 2 2 6 2 2 6 2 2 6
56091- 2 2 6 2 2 6 18 18 18 94 94 94
56092- 54 54 54 26 26 26 10 10 10 0 0 0
56093- 0 0 0 0 0 0 0 0 0 0 0 0
56094- 0 0 0 0 0 0 0 0 0 0 0 0
56095- 0 0 0 0 0 0 0 0 0 0 0 0
56096- 0 0 0 0 0 0 0 0 0 0 0 0
56097- 0 0 0 0 0 0 0 0 0 0 0 0
56098- 0 0 0 0 0 0 0 0 0 0 0 0
56099- 0 0 0 0 0 0 0 0 0 0 0 0
56100- 0 0 0 0 0 0 0 0 0 0 0 0
56101- 0 0 0 0 0 0 0 0 0 0 0 0
56102- 0 0 0 6 6 6 22 22 22 50 50 50
56103- 90 90 90 26 26 26 2 2 6 2 2 6
56104- 14 14 14 195 195 195 250 250 250 253 253 253
56105-253 253 253 253 253 253 253 253 253 253 253 253
56106-253 253 253 253 253 253 253 253 253 253 253 253
56107-253 253 253 253 253 253 253 253 253 253 253 253
56108-253 253 253 253 253 253 253 253 253 253 253 253
56109-250 250 250 242 242 242 54 54 54 2 2 6
56110- 2 2 6 2 2 6 2 2 6 2 2 6
56111- 2 2 6 2 2 6 2 2 6 38 38 38
56112- 86 86 86 50 50 50 22 22 22 6 6 6
56113- 0 0 0 0 0 0 0 0 0 0 0 0
56114- 0 0 0 0 0 0 0 0 0 0 0 0
56115- 0 0 0 0 0 0 0 0 0 0 0 0
56116- 0 0 0 0 0 0 0 0 0 0 0 0
56117- 0 0 0 0 0 0 0 0 0 0 0 0
56118- 0 0 0 0 0 0 0 0 0 0 0 0
56119- 0 0 0 0 0 0 0 0 0 0 0 0
56120- 0 0 0 0 0 0 0 0 0 0 0 0
56121- 0 0 0 0 0 0 0 0 0 0 0 0
56122- 6 6 6 14 14 14 38 38 38 82 82 82
56123- 34 34 34 2 2 6 2 2 6 2 2 6
56124- 42 42 42 195 195 195 246 246 246 253 253 253
56125-253 253 253 253 253 253 253 253 253 250 250 250
56126-242 242 242 242 242 242 250 250 250 253 253 253
56127-253 253 253 253 253 253 253 253 253 253 253 253
56128-253 253 253 250 250 250 246 246 246 238 238 238
56129-226 226 226 231 231 231 101 101 101 6 6 6
56130- 2 2 6 2 2 6 2 2 6 2 2 6
56131- 2 2 6 2 2 6 2 2 6 2 2 6
56132- 38 38 38 82 82 82 42 42 42 14 14 14
56133- 6 6 6 0 0 0 0 0 0 0 0 0
56134- 0 0 0 0 0 0 0 0 0 0 0 0
56135- 0 0 0 0 0 0 0 0 0 0 0 0
56136- 0 0 0 0 0 0 0 0 0 0 0 0
56137- 0 0 0 0 0 0 0 0 0 0 0 0
56138- 0 0 0 0 0 0 0 0 0 0 0 0
56139- 0 0 0 0 0 0 0 0 0 0 0 0
56140- 0 0 0 0 0 0 0 0 0 0 0 0
56141- 0 0 0 0 0 0 0 0 0 0 0 0
56142- 10 10 10 26 26 26 62 62 62 66 66 66
56143- 2 2 6 2 2 6 2 2 6 6 6 6
56144- 70 70 70 170 170 170 206 206 206 234 234 234
56145-246 246 246 250 250 250 250 250 250 238 238 238
56146-226 226 226 231 231 231 238 238 238 250 250 250
56147-250 250 250 250 250 250 246 246 246 231 231 231
56148-214 214 214 206 206 206 202 202 202 202 202 202
56149-198 198 198 202 202 202 182 182 182 18 18 18
56150- 2 2 6 2 2 6 2 2 6 2 2 6
56151- 2 2 6 2 2 6 2 2 6 2 2 6
56152- 2 2 6 62 62 62 66 66 66 30 30 30
56153- 10 10 10 0 0 0 0 0 0 0 0 0
56154- 0 0 0 0 0 0 0 0 0 0 0 0
56155- 0 0 0 0 0 0 0 0 0 0 0 0
56156- 0 0 0 0 0 0 0 0 0 0 0 0
56157- 0 0 0 0 0 0 0 0 0 0 0 0
56158- 0 0 0 0 0 0 0 0 0 0 0 0
56159- 0 0 0 0 0 0 0 0 0 0 0 0
56160- 0 0 0 0 0 0 0 0 0 0 0 0
56161- 0 0 0 0 0 0 0 0 0 0 0 0
56162- 14 14 14 42 42 42 82 82 82 18 18 18
56163- 2 2 6 2 2 6 2 2 6 10 10 10
56164- 94 94 94 182 182 182 218 218 218 242 242 242
56165-250 250 250 253 253 253 253 253 253 250 250 250
56166-234 234 234 253 253 253 253 253 253 253 253 253
56167-253 253 253 253 253 253 253 253 253 246 246 246
56168-238 238 238 226 226 226 210 210 210 202 202 202
56169-195 195 195 195 195 195 210 210 210 158 158 158
56170- 6 6 6 14 14 14 50 50 50 14 14 14
56171- 2 2 6 2 2 6 2 2 6 2 2 6
56172- 2 2 6 6 6 6 86 86 86 46 46 46
56173- 18 18 18 6 6 6 0 0 0 0 0 0
56174- 0 0 0 0 0 0 0 0 0 0 0 0
56175- 0 0 0 0 0 0 0 0 0 0 0 0
56176- 0 0 0 0 0 0 0 0 0 0 0 0
56177- 0 0 0 0 0 0 0 0 0 0 0 0
56178- 0 0 0 0 0 0 0 0 0 0 0 0
56179- 0 0 0 0 0 0 0 0 0 0 0 0
56180- 0 0 0 0 0 0 0 0 0 0 0 0
56181- 0 0 0 0 0 0 0 0 0 6 6 6
56182- 22 22 22 54 54 54 70 70 70 2 2 6
56183- 2 2 6 10 10 10 2 2 6 22 22 22
56184-166 166 166 231 231 231 250 250 250 253 253 253
56185-253 253 253 253 253 253 253 253 253 250 250 250
56186-242 242 242 253 253 253 253 253 253 253 253 253
56187-253 253 253 253 253 253 253 253 253 253 253 253
56188-253 253 253 253 253 253 253 253 253 246 246 246
56189-231 231 231 206 206 206 198 198 198 226 226 226
56190- 94 94 94 2 2 6 6 6 6 38 38 38
56191- 30 30 30 2 2 6 2 2 6 2 2 6
56192- 2 2 6 2 2 6 62 62 62 66 66 66
56193- 26 26 26 10 10 10 0 0 0 0 0 0
56194- 0 0 0 0 0 0 0 0 0 0 0 0
56195- 0 0 0 0 0 0 0 0 0 0 0 0
56196- 0 0 0 0 0 0 0 0 0 0 0 0
56197- 0 0 0 0 0 0 0 0 0 0 0 0
56198- 0 0 0 0 0 0 0 0 0 0 0 0
56199- 0 0 0 0 0 0 0 0 0 0 0 0
56200- 0 0 0 0 0 0 0 0 0 0 0 0
56201- 0 0 0 0 0 0 0 0 0 10 10 10
56202- 30 30 30 74 74 74 50 50 50 2 2 6
56203- 26 26 26 26 26 26 2 2 6 106 106 106
56204-238 238 238 253 253 253 253 253 253 253 253 253
56205-253 253 253 253 253 253 253 253 253 253 253 253
56206-253 253 253 253 253 253 253 253 253 253 253 253
56207-253 253 253 253 253 253 253 253 253 253 253 253
56208-253 253 253 253 253 253 253 253 253 253 253 253
56209-253 253 253 246 246 246 218 218 218 202 202 202
56210-210 210 210 14 14 14 2 2 6 2 2 6
56211- 30 30 30 22 22 22 2 2 6 2 2 6
56212- 2 2 6 2 2 6 18 18 18 86 86 86
56213- 42 42 42 14 14 14 0 0 0 0 0 0
56214- 0 0 0 0 0 0 0 0 0 0 0 0
56215- 0 0 0 0 0 0 0 0 0 0 0 0
56216- 0 0 0 0 0 0 0 0 0 0 0 0
56217- 0 0 0 0 0 0 0 0 0 0 0 0
56218- 0 0 0 0 0 0 0 0 0 0 0 0
56219- 0 0 0 0 0 0 0 0 0 0 0 0
56220- 0 0 0 0 0 0 0 0 0 0 0 0
56221- 0 0 0 0 0 0 0 0 0 14 14 14
56222- 42 42 42 90 90 90 22 22 22 2 2 6
56223- 42 42 42 2 2 6 18 18 18 218 218 218
56224-253 253 253 253 253 253 253 253 253 253 253 253
56225-253 253 253 253 253 253 253 253 253 253 253 253
56226-253 253 253 253 253 253 253 253 253 253 253 253
56227-253 253 253 253 253 253 253 253 253 253 253 253
56228-253 253 253 253 253 253 253 253 253 253 253 253
56229-253 253 253 253 253 253 250 250 250 221 221 221
56230-218 218 218 101 101 101 2 2 6 14 14 14
56231- 18 18 18 38 38 38 10 10 10 2 2 6
56232- 2 2 6 2 2 6 2 2 6 78 78 78
56233- 58 58 58 22 22 22 6 6 6 0 0 0
56234- 0 0 0 0 0 0 0 0 0 0 0 0
56235- 0 0 0 0 0 0 0 0 0 0 0 0
56236- 0 0 0 0 0 0 0 0 0 0 0 0
56237- 0 0 0 0 0 0 0 0 0 0 0 0
56238- 0 0 0 0 0 0 0 0 0 0 0 0
56239- 0 0 0 0 0 0 0 0 0 0 0 0
56240- 0 0 0 0 0 0 0 0 0 0 0 0
56241- 0 0 0 0 0 0 6 6 6 18 18 18
56242- 54 54 54 82 82 82 2 2 6 26 26 26
56243- 22 22 22 2 2 6 123 123 123 253 253 253
56244-253 253 253 253 253 253 253 253 253 253 253 253
56245-253 253 253 253 253 253 253 253 253 253 253 253
56246-253 253 253 253 253 253 253 253 253 253 253 253
56247-253 253 253 253 253 253 253 253 253 253 253 253
56248-253 253 253 253 253 253 253 253 253 253 253 253
56249-253 253 253 253 253 253 253 253 253 250 250 250
56250-238 238 238 198 198 198 6 6 6 38 38 38
56251- 58 58 58 26 26 26 38 38 38 2 2 6
56252- 2 2 6 2 2 6 2 2 6 46 46 46
56253- 78 78 78 30 30 30 10 10 10 0 0 0
56254- 0 0 0 0 0 0 0 0 0 0 0 0
56255- 0 0 0 0 0 0 0 0 0 0 0 0
56256- 0 0 0 0 0 0 0 0 0 0 0 0
56257- 0 0 0 0 0 0 0 0 0 0 0 0
56258- 0 0 0 0 0 0 0 0 0 0 0 0
56259- 0 0 0 0 0 0 0 0 0 0 0 0
56260- 0 0 0 0 0 0 0 0 0 0 0 0
56261- 0 0 0 0 0 0 10 10 10 30 30 30
56262- 74 74 74 58 58 58 2 2 6 42 42 42
56263- 2 2 6 22 22 22 231 231 231 253 253 253
56264-253 253 253 253 253 253 253 253 253 253 253 253
56265-253 253 253 253 253 253 253 253 253 250 250 250
56266-253 253 253 253 253 253 253 253 253 253 253 253
56267-253 253 253 253 253 253 253 253 253 253 253 253
56268-253 253 253 253 253 253 253 253 253 253 253 253
56269-253 253 253 253 253 253 253 253 253 253 253 253
56270-253 253 253 246 246 246 46 46 46 38 38 38
56271- 42 42 42 14 14 14 38 38 38 14 14 14
56272- 2 2 6 2 2 6 2 2 6 6 6 6
56273- 86 86 86 46 46 46 14 14 14 0 0 0
56274- 0 0 0 0 0 0 0 0 0 0 0 0
56275- 0 0 0 0 0 0 0 0 0 0 0 0
56276- 0 0 0 0 0 0 0 0 0 0 0 0
56277- 0 0 0 0 0 0 0 0 0 0 0 0
56278- 0 0 0 0 0 0 0 0 0 0 0 0
56279- 0 0 0 0 0 0 0 0 0 0 0 0
56280- 0 0 0 0 0 0 0 0 0 0 0 0
56281- 0 0 0 6 6 6 14 14 14 42 42 42
56282- 90 90 90 18 18 18 18 18 18 26 26 26
56283- 2 2 6 116 116 116 253 253 253 253 253 253
56284-253 253 253 253 253 253 253 253 253 253 253 253
56285-253 253 253 253 253 253 250 250 250 238 238 238
56286-253 253 253 253 253 253 253 253 253 253 253 253
56287-253 253 253 253 253 253 253 253 253 253 253 253
56288-253 253 253 253 253 253 253 253 253 253 253 253
56289-253 253 253 253 253 253 253 253 253 253 253 253
56290-253 253 253 253 253 253 94 94 94 6 6 6
56291- 2 2 6 2 2 6 10 10 10 34 34 34
56292- 2 2 6 2 2 6 2 2 6 2 2 6
56293- 74 74 74 58 58 58 22 22 22 6 6 6
56294- 0 0 0 0 0 0 0 0 0 0 0 0
56295- 0 0 0 0 0 0 0 0 0 0 0 0
56296- 0 0 0 0 0 0 0 0 0 0 0 0
56297- 0 0 0 0 0 0 0 0 0 0 0 0
56298- 0 0 0 0 0 0 0 0 0 0 0 0
56299- 0 0 0 0 0 0 0 0 0 0 0 0
56300- 0 0 0 0 0 0 0 0 0 0 0 0
56301- 0 0 0 10 10 10 26 26 26 66 66 66
56302- 82 82 82 2 2 6 38 38 38 6 6 6
56303- 14 14 14 210 210 210 253 253 253 253 253 253
56304-253 253 253 253 253 253 253 253 253 253 253 253
56305-253 253 253 253 253 253 246 246 246 242 242 242
56306-253 253 253 253 253 253 253 253 253 253 253 253
56307-253 253 253 253 253 253 253 253 253 253 253 253
56308-253 253 253 253 253 253 253 253 253 253 253 253
56309-253 253 253 253 253 253 253 253 253 253 253 253
56310-253 253 253 253 253 253 144 144 144 2 2 6
56311- 2 2 6 2 2 6 2 2 6 46 46 46
56312- 2 2 6 2 2 6 2 2 6 2 2 6
56313- 42 42 42 74 74 74 30 30 30 10 10 10
56314- 0 0 0 0 0 0 0 0 0 0 0 0
56315- 0 0 0 0 0 0 0 0 0 0 0 0
56316- 0 0 0 0 0 0 0 0 0 0 0 0
56317- 0 0 0 0 0 0 0 0 0 0 0 0
56318- 0 0 0 0 0 0 0 0 0 0 0 0
56319- 0 0 0 0 0 0 0 0 0 0 0 0
56320- 0 0 0 0 0 0 0 0 0 0 0 0
56321- 6 6 6 14 14 14 42 42 42 90 90 90
56322- 26 26 26 6 6 6 42 42 42 2 2 6
56323- 74 74 74 250 250 250 253 253 253 253 253 253
56324-253 253 253 253 253 253 253 253 253 253 253 253
56325-253 253 253 253 253 253 242 242 242 242 242 242
56326-253 253 253 253 253 253 253 253 253 253 253 253
56327-253 253 253 253 253 253 253 253 253 253 253 253
56328-253 253 253 253 253 253 253 253 253 253 253 253
56329-253 253 253 253 253 253 253 253 253 253 253 253
56330-253 253 253 253 253 253 182 182 182 2 2 6
56331- 2 2 6 2 2 6 2 2 6 46 46 46
56332- 2 2 6 2 2 6 2 2 6 2 2 6
56333- 10 10 10 86 86 86 38 38 38 10 10 10
56334- 0 0 0 0 0 0 0 0 0 0 0 0
56335- 0 0 0 0 0 0 0 0 0 0 0 0
56336- 0 0 0 0 0 0 0 0 0 0 0 0
56337- 0 0 0 0 0 0 0 0 0 0 0 0
56338- 0 0 0 0 0 0 0 0 0 0 0 0
56339- 0 0 0 0 0 0 0 0 0 0 0 0
56340- 0 0 0 0 0 0 0 0 0 0 0 0
56341- 10 10 10 26 26 26 66 66 66 82 82 82
56342- 2 2 6 22 22 22 18 18 18 2 2 6
56343-149 149 149 253 253 253 253 253 253 253 253 253
56344-253 253 253 253 253 253 253 253 253 253 253 253
56345-253 253 253 253 253 253 234 234 234 242 242 242
56346-253 253 253 253 253 253 253 253 253 253 253 253
56347-253 253 253 253 253 253 253 253 253 253 253 253
56348-253 253 253 253 253 253 253 253 253 253 253 253
56349-253 253 253 253 253 253 253 253 253 253 253 253
56350-253 253 253 253 253 253 206 206 206 2 2 6
56351- 2 2 6 2 2 6 2 2 6 38 38 38
56352- 2 2 6 2 2 6 2 2 6 2 2 6
56353- 6 6 6 86 86 86 46 46 46 14 14 14
56354- 0 0 0 0 0 0 0 0 0 0 0 0
56355- 0 0 0 0 0 0 0 0 0 0 0 0
56356- 0 0 0 0 0 0 0 0 0 0 0 0
56357- 0 0 0 0 0 0 0 0 0 0 0 0
56358- 0 0 0 0 0 0 0 0 0 0 0 0
56359- 0 0 0 0 0 0 0 0 0 0 0 0
56360- 0 0 0 0 0 0 0 0 0 6 6 6
56361- 18 18 18 46 46 46 86 86 86 18 18 18
56362- 2 2 6 34 34 34 10 10 10 6 6 6
56363-210 210 210 253 253 253 253 253 253 253 253 253
56364-253 253 253 253 253 253 253 253 253 253 253 253
56365-253 253 253 253 253 253 234 234 234 242 242 242
56366-253 253 253 253 253 253 253 253 253 253 253 253
56367-253 253 253 253 253 253 253 253 253 253 253 253
56368-253 253 253 253 253 253 253 253 253 253 253 253
56369-253 253 253 253 253 253 253 253 253 253 253 253
56370-253 253 253 253 253 253 221 221 221 6 6 6
56371- 2 2 6 2 2 6 6 6 6 30 30 30
56372- 2 2 6 2 2 6 2 2 6 2 2 6
56373- 2 2 6 82 82 82 54 54 54 18 18 18
56374- 6 6 6 0 0 0 0 0 0 0 0 0
56375- 0 0 0 0 0 0 0 0 0 0 0 0
56376- 0 0 0 0 0 0 0 0 0 0 0 0
56377- 0 0 0 0 0 0 0 0 0 0 0 0
56378- 0 0 0 0 0 0 0 0 0 0 0 0
56379- 0 0 0 0 0 0 0 0 0 0 0 0
56380- 0 0 0 0 0 0 0 0 0 10 10 10
56381- 26 26 26 66 66 66 62 62 62 2 2 6
56382- 2 2 6 38 38 38 10 10 10 26 26 26
56383-238 238 238 253 253 253 253 253 253 253 253 253
56384-253 253 253 253 253 253 253 253 253 253 253 253
56385-253 253 253 253 253 253 231 231 231 238 238 238
56386-253 253 253 253 253 253 253 253 253 253 253 253
56387-253 253 253 253 253 253 253 253 253 253 253 253
56388-253 253 253 253 253 253 253 253 253 253 253 253
56389-253 253 253 253 253 253 253 253 253 253 253 253
56390-253 253 253 253 253 253 231 231 231 6 6 6
56391- 2 2 6 2 2 6 10 10 10 30 30 30
56392- 2 2 6 2 2 6 2 2 6 2 2 6
56393- 2 2 6 66 66 66 58 58 58 22 22 22
56394- 6 6 6 0 0 0 0 0 0 0 0 0
56395- 0 0 0 0 0 0 0 0 0 0 0 0
56396- 0 0 0 0 0 0 0 0 0 0 0 0
56397- 0 0 0 0 0 0 0 0 0 0 0 0
56398- 0 0 0 0 0 0 0 0 0 0 0 0
56399- 0 0 0 0 0 0 0 0 0 0 0 0
56400- 0 0 0 0 0 0 0 0 0 10 10 10
56401- 38 38 38 78 78 78 6 6 6 2 2 6
56402- 2 2 6 46 46 46 14 14 14 42 42 42
56403-246 246 246 253 253 253 253 253 253 253 253 253
56404-253 253 253 253 253 253 253 253 253 253 253 253
56405-253 253 253 253 253 253 231 231 231 242 242 242
56406-253 253 253 253 253 253 253 253 253 253 253 253
56407-253 253 253 253 253 253 253 253 253 253 253 253
56408-253 253 253 253 253 253 253 253 253 253 253 253
56409-253 253 253 253 253 253 253 253 253 253 253 253
56410-253 253 253 253 253 253 234 234 234 10 10 10
56411- 2 2 6 2 2 6 22 22 22 14 14 14
56412- 2 2 6 2 2 6 2 2 6 2 2 6
56413- 2 2 6 66 66 66 62 62 62 22 22 22
56414- 6 6 6 0 0 0 0 0 0 0 0 0
56415- 0 0 0 0 0 0 0 0 0 0 0 0
56416- 0 0 0 0 0 0 0 0 0 0 0 0
56417- 0 0 0 0 0 0 0 0 0 0 0 0
56418- 0 0 0 0 0 0 0 0 0 0 0 0
56419- 0 0 0 0 0 0 0 0 0 0 0 0
56420- 0 0 0 0 0 0 6 6 6 18 18 18
56421- 50 50 50 74 74 74 2 2 6 2 2 6
56422- 14 14 14 70 70 70 34 34 34 62 62 62
56423-250 250 250 253 253 253 253 253 253 253 253 253
56424-253 253 253 253 253 253 253 253 253 253 253 253
56425-253 253 253 253 253 253 231 231 231 246 246 246
56426-253 253 253 253 253 253 253 253 253 253 253 253
56427-253 253 253 253 253 253 253 253 253 253 253 253
56428-253 253 253 253 253 253 253 253 253 253 253 253
56429-253 253 253 253 253 253 253 253 253 253 253 253
56430-253 253 253 253 253 253 234 234 234 14 14 14
56431- 2 2 6 2 2 6 30 30 30 2 2 6
56432- 2 2 6 2 2 6 2 2 6 2 2 6
56433- 2 2 6 66 66 66 62 62 62 22 22 22
56434- 6 6 6 0 0 0 0 0 0 0 0 0
56435- 0 0 0 0 0 0 0 0 0 0 0 0
56436- 0 0 0 0 0 0 0 0 0 0 0 0
56437- 0 0 0 0 0 0 0 0 0 0 0 0
56438- 0 0 0 0 0 0 0 0 0 0 0 0
56439- 0 0 0 0 0 0 0 0 0 0 0 0
56440- 0 0 0 0 0 0 6 6 6 18 18 18
56441- 54 54 54 62 62 62 2 2 6 2 2 6
56442- 2 2 6 30 30 30 46 46 46 70 70 70
56443-250 250 250 253 253 253 253 253 253 253 253 253
56444-253 253 253 253 253 253 253 253 253 253 253 253
56445-253 253 253 253 253 253 231 231 231 246 246 246
56446-253 253 253 253 253 253 253 253 253 253 253 253
56447-253 253 253 253 253 253 253 253 253 253 253 253
56448-253 253 253 253 253 253 253 253 253 253 253 253
56449-253 253 253 253 253 253 253 253 253 253 253 253
56450-253 253 253 253 253 253 226 226 226 10 10 10
56451- 2 2 6 6 6 6 30 30 30 2 2 6
56452- 2 2 6 2 2 6 2 2 6 2 2 6
56453- 2 2 6 66 66 66 58 58 58 22 22 22
56454- 6 6 6 0 0 0 0 0 0 0 0 0
56455- 0 0 0 0 0 0 0 0 0 0 0 0
56456- 0 0 0 0 0 0 0 0 0 0 0 0
56457- 0 0 0 0 0 0 0 0 0 0 0 0
56458- 0 0 0 0 0 0 0 0 0 0 0 0
56459- 0 0 0 0 0 0 0 0 0 0 0 0
56460- 0 0 0 0 0 0 6 6 6 22 22 22
56461- 58 58 58 62 62 62 2 2 6 2 2 6
56462- 2 2 6 2 2 6 30 30 30 78 78 78
56463-250 250 250 253 253 253 253 253 253 253 253 253
56464-253 253 253 253 253 253 253 253 253 253 253 253
56465-253 253 253 253 253 253 231 231 231 246 246 246
56466-253 253 253 253 253 253 253 253 253 253 253 253
56467-253 253 253 253 253 253 253 253 253 253 253 253
56468-253 253 253 253 253 253 253 253 253 253 253 253
56469-253 253 253 253 253 253 253 253 253 253 253 253
56470-253 253 253 253 253 253 206 206 206 2 2 6
56471- 22 22 22 34 34 34 18 14 6 22 22 22
56472- 26 26 26 18 18 18 6 6 6 2 2 6
56473- 2 2 6 82 82 82 54 54 54 18 18 18
56474- 6 6 6 0 0 0 0 0 0 0 0 0
56475- 0 0 0 0 0 0 0 0 0 0 0 0
56476- 0 0 0 0 0 0 0 0 0 0 0 0
56477- 0 0 0 0 0 0 0 0 0 0 0 0
56478- 0 0 0 0 0 0 0 0 0 0 0 0
56479- 0 0 0 0 0 0 0 0 0 0 0 0
56480- 0 0 0 0 0 0 6 6 6 26 26 26
56481- 62 62 62 106 106 106 74 54 14 185 133 11
56482-210 162 10 121 92 8 6 6 6 62 62 62
56483-238 238 238 253 253 253 253 253 253 253 253 253
56484-253 253 253 253 253 253 253 253 253 253 253 253
56485-253 253 253 253 253 253 231 231 231 246 246 246
56486-253 253 253 253 253 253 253 253 253 253 253 253
56487-253 253 253 253 253 253 253 253 253 253 253 253
56488-253 253 253 253 253 253 253 253 253 253 253 253
56489-253 253 253 253 253 253 253 253 253 253 253 253
56490-253 253 253 253 253 253 158 158 158 18 18 18
56491- 14 14 14 2 2 6 2 2 6 2 2 6
56492- 6 6 6 18 18 18 66 66 66 38 38 38
56493- 6 6 6 94 94 94 50 50 50 18 18 18
56494- 6 6 6 0 0 0 0 0 0 0 0 0
56495- 0 0 0 0 0 0 0 0 0 0 0 0
56496- 0 0 0 0 0 0 0 0 0 0 0 0
56497- 0 0 0 0 0 0 0 0 0 0 0 0
56498- 0 0 0 0 0 0 0 0 0 0 0 0
56499- 0 0 0 0 0 0 0 0 0 6 6 6
56500- 10 10 10 10 10 10 18 18 18 38 38 38
56501- 78 78 78 142 134 106 216 158 10 242 186 14
56502-246 190 14 246 190 14 156 118 10 10 10 10
56503- 90 90 90 238 238 238 253 253 253 253 253 253
56504-253 253 253 253 253 253 253 253 253 253 253 253
56505-253 253 253 253 253 253 231 231 231 250 250 250
56506-253 253 253 253 253 253 253 253 253 253 253 253
56507-253 253 253 253 253 253 253 253 253 253 253 253
56508-253 253 253 253 253 253 253 253 253 253 253 253
56509-253 253 253 253 253 253 253 253 253 246 230 190
56510-238 204 91 238 204 91 181 142 44 37 26 9
56511- 2 2 6 2 2 6 2 2 6 2 2 6
56512- 2 2 6 2 2 6 38 38 38 46 46 46
56513- 26 26 26 106 106 106 54 54 54 18 18 18
56514- 6 6 6 0 0 0 0 0 0 0 0 0
56515- 0 0 0 0 0 0 0 0 0 0 0 0
56516- 0 0 0 0 0 0 0 0 0 0 0 0
56517- 0 0 0 0 0 0 0 0 0 0 0 0
56518- 0 0 0 0 0 0 0 0 0 0 0 0
56519- 0 0 0 6 6 6 14 14 14 22 22 22
56520- 30 30 30 38 38 38 50 50 50 70 70 70
56521-106 106 106 190 142 34 226 170 11 242 186 14
56522-246 190 14 246 190 14 246 190 14 154 114 10
56523- 6 6 6 74 74 74 226 226 226 253 253 253
56524-253 253 253 253 253 253 253 253 253 253 253 253
56525-253 253 253 253 253 253 231 231 231 250 250 250
56526-253 253 253 253 253 253 253 253 253 253 253 253
56527-253 253 253 253 253 253 253 253 253 253 253 253
56528-253 253 253 253 253 253 253 253 253 253 253 253
56529-253 253 253 253 253 253 253 253 253 228 184 62
56530-241 196 14 241 208 19 232 195 16 38 30 10
56531- 2 2 6 2 2 6 2 2 6 2 2 6
56532- 2 2 6 6 6 6 30 30 30 26 26 26
56533-203 166 17 154 142 90 66 66 66 26 26 26
56534- 6 6 6 0 0 0 0 0 0 0 0 0
56535- 0 0 0 0 0 0 0 0 0 0 0 0
56536- 0 0 0 0 0 0 0 0 0 0 0 0
56537- 0 0 0 0 0 0 0 0 0 0 0 0
56538- 0 0 0 0 0 0 0 0 0 0 0 0
56539- 6 6 6 18 18 18 38 38 38 58 58 58
56540- 78 78 78 86 86 86 101 101 101 123 123 123
56541-175 146 61 210 150 10 234 174 13 246 186 14
56542-246 190 14 246 190 14 246 190 14 238 190 10
56543-102 78 10 2 2 6 46 46 46 198 198 198
56544-253 253 253 253 253 253 253 253 253 253 253 253
56545-253 253 253 253 253 253 234 234 234 242 242 242
56546-253 253 253 253 253 253 253 253 253 253 253 253
56547-253 253 253 253 253 253 253 253 253 253 253 253
56548-253 253 253 253 253 253 253 253 253 253 253 253
56549-253 253 253 253 253 253 253 253 253 224 178 62
56550-242 186 14 241 196 14 210 166 10 22 18 6
56551- 2 2 6 2 2 6 2 2 6 2 2 6
56552- 2 2 6 2 2 6 6 6 6 121 92 8
56553-238 202 15 232 195 16 82 82 82 34 34 34
56554- 10 10 10 0 0 0 0 0 0 0 0 0
56555- 0 0 0 0 0 0 0 0 0 0 0 0
56556- 0 0 0 0 0 0 0 0 0 0 0 0
56557- 0 0 0 0 0 0 0 0 0 0 0 0
56558- 0 0 0 0 0 0 0 0 0 0 0 0
56559- 14 14 14 38 38 38 70 70 70 154 122 46
56560-190 142 34 200 144 11 197 138 11 197 138 11
56561-213 154 11 226 170 11 242 186 14 246 190 14
56562-246 190 14 246 190 14 246 190 14 246 190 14
56563-225 175 15 46 32 6 2 2 6 22 22 22
56564-158 158 158 250 250 250 253 253 253 253 253 253
56565-253 253 253 253 253 253 253 253 253 253 253 253
56566-253 253 253 253 253 253 253 253 253 253 253 253
56567-253 253 253 253 253 253 253 253 253 253 253 253
56568-253 253 253 253 253 253 253 253 253 253 253 253
56569-253 253 253 250 250 250 242 242 242 224 178 62
56570-239 182 13 236 186 11 213 154 11 46 32 6
56571- 2 2 6 2 2 6 2 2 6 2 2 6
56572- 2 2 6 2 2 6 61 42 6 225 175 15
56573-238 190 10 236 186 11 112 100 78 42 42 42
56574- 14 14 14 0 0 0 0 0 0 0 0 0
56575- 0 0 0 0 0 0 0 0 0 0 0 0
56576- 0 0 0 0 0 0 0 0 0 0 0 0
56577- 0 0 0 0 0 0 0 0 0 0 0 0
56578- 0 0 0 0 0 0 0 0 0 6 6 6
56579- 22 22 22 54 54 54 154 122 46 213 154 11
56580-226 170 11 230 174 11 226 170 11 226 170 11
56581-236 178 12 242 186 14 246 190 14 246 190 14
56582-246 190 14 246 190 14 246 190 14 246 190 14
56583-241 196 14 184 144 12 10 10 10 2 2 6
56584- 6 6 6 116 116 116 242 242 242 253 253 253
56585-253 253 253 253 253 253 253 253 253 253 253 253
56586-253 253 253 253 253 253 253 253 253 253 253 253
56587-253 253 253 253 253 253 253 253 253 253 253 253
56588-253 253 253 253 253 253 253 253 253 253 253 253
56589-253 253 253 231 231 231 198 198 198 214 170 54
56590-236 178 12 236 178 12 210 150 10 137 92 6
56591- 18 14 6 2 2 6 2 2 6 2 2 6
56592- 6 6 6 70 47 6 200 144 11 236 178 12
56593-239 182 13 239 182 13 124 112 88 58 58 58
56594- 22 22 22 6 6 6 0 0 0 0 0 0
56595- 0 0 0 0 0 0 0 0 0 0 0 0
56596- 0 0 0 0 0 0 0 0 0 0 0 0
56597- 0 0 0 0 0 0 0 0 0 0 0 0
56598- 0 0 0 0 0 0 0 0 0 10 10 10
56599- 30 30 30 70 70 70 180 133 36 226 170 11
56600-239 182 13 242 186 14 242 186 14 246 186 14
56601-246 190 14 246 190 14 246 190 14 246 190 14
56602-246 190 14 246 190 14 246 190 14 246 190 14
56603-246 190 14 232 195 16 98 70 6 2 2 6
56604- 2 2 6 2 2 6 66 66 66 221 221 221
56605-253 253 253 253 253 253 253 253 253 253 253 253
56606-253 253 253 253 253 253 253 253 253 253 253 253
56607-253 253 253 253 253 253 253 253 253 253 253 253
56608-253 253 253 253 253 253 253 253 253 253 253 253
56609-253 253 253 206 206 206 198 198 198 214 166 58
56610-230 174 11 230 174 11 216 158 10 192 133 9
56611-163 110 8 116 81 8 102 78 10 116 81 8
56612-167 114 7 197 138 11 226 170 11 239 182 13
56613-242 186 14 242 186 14 162 146 94 78 78 78
56614- 34 34 34 14 14 14 6 6 6 0 0 0
56615- 0 0 0 0 0 0 0 0 0 0 0 0
56616- 0 0 0 0 0 0 0 0 0 0 0 0
56617- 0 0 0 0 0 0 0 0 0 0 0 0
56618- 0 0 0 0 0 0 0 0 0 6 6 6
56619- 30 30 30 78 78 78 190 142 34 226 170 11
56620-239 182 13 246 190 14 246 190 14 246 190 14
56621-246 190 14 246 190 14 246 190 14 246 190 14
56622-246 190 14 246 190 14 246 190 14 246 190 14
56623-246 190 14 241 196 14 203 166 17 22 18 6
56624- 2 2 6 2 2 6 2 2 6 38 38 38
56625-218 218 218 253 253 253 253 253 253 253 253 253
56626-253 253 253 253 253 253 253 253 253 253 253 253
56627-253 253 253 253 253 253 253 253 253 253 253 253
56628-253 253 253 253 253 253 253 253 253 253 253 253
56629-250 250 250 206 206 206 198 198 198 202 162 69
56630-226 170 11 236 178 12 224 166 10 210 150 10
56631-200 144 11 197 138 11 192 133 9 197 138 11
56632-210 150 10 226 170 11 242 186 14 246 190 14
56633-246 190 14 246 186 14 225 175 15 124 112 88
56634- 62 62 62 30 30 30 14 14 14 6 6 6
56635- 0 0 0 0 0 0 0 0 0 0 0 0
56636- 0 0 0 0 0 0 0 0 0 0 0 0
56637- 0 0 0 0 0 0 0 0 0 0 0 0
56638- 0 0 0 0 0 0 0 0 0 10 10 10
56639- 30 30 30 78 78 78 174 135 50 224 166 10
56640-239 182 13 246 190 14 246 190 14 246 190 14
56641-246 190 14 246 190 14 246 190 14 246 190 14
56642-246 190 14 246 190 14 246 190 14 246 190 14
56643-246 190 14 246 190 14 241 196 14 139 102 15
56644- 2 2 6 2 2 6 2 2 6 2 2 6
56645- 78 78 78 250 250 250 253 253 253 253 253 253
56646-253 253 253 253 253 253 253 253 253 253 253 253
56647-253 253 253 253 253 253 253 253 253 253 253 253
56648-253 253 253 253 253 253 253 253 253 253 253 253
56649-250 250 250 214 214 214 198 198 198 190 150 46
56650-219 162 10 236 178 12 234 174 13 224 166 10
56651-216 158 10 213 154 11 213 154 11 216 158 10
56652-226 170 11 239 182 13 246 190 14 246 190 14
56653-246 190 14 246 190 14 242 186 14 206 162 42
56654-101 101 101 58 58 58 30 30 30 14 14 14
56655- 6 6 6 0 0 0 0 0 0 0 0 0
56656- 0 0 0 0 0 0 0 0 0 0 0 0
56657- 0 0 0 0 0 0 0 0 0 0 0 0
56658- 0 0 0 0 0 0 0 0 0 10 10 10
56659- 30 30 30 74 74 74 174 135 50 216 158 10
56660-236 178 12 246 190 14 246 190 14 246 190 14
56661-246 190 14 246 190 14 246 190 14 246 190 14
56662-246 190 14 246 190 14 246 190 14 246 190 14
56663-246 190 14 246 190 14 241 196 14 226 184 13
56664- 61 42 6 2 2 6 2 2 6 2 2 6
56665- 22 22 22 238 238 238 253 253 253 253 253 253
56666-253 253 253 253 253 253 253 253 253 253 253 253
56667-253 253 253 253 253 253 253 253 253 253 253 253
56668-253 253 253 253 253 253 253 253 253 253 253 253
56669-253 253 253 226 226 226 187 187 187 180 133 36
56670-216 158 10 236 178 12 239 182 13 236 178 12
56671-230 174 11 226 170 11 226 170 11 230 174 11
56672-236 178 12 242 186 14 246 190 14 246 190 14
56673-246 190 14 246 190 14 246 186 14 239 182 13
56674-206 162 42 106 106 106 66 66 66 34 34 34
56675- 14 14 14 6 6 6 0 0 0 0 0 0
56676- 0 0 0 0 0 0 0 0 0 0 0 0
56677- 0 0 0 0 0 0 0 0 0 0 0 0
56678- 0 0 0 0 0 0 0 0 0 6 6 6
56679- 26 26 26 70 70 70 163 133 67 213 154 11
56680-236 178 12 246 190 14 246 190 14 246 190 14
56681-246 190 14 246 190 14 246 190 14 246 190 14
56682-246 190 14 246 190 14 246 190 14 246 190 14
56683-246 190 14 246 190 14 246 190 14 241 196 14
56684-190 146 13 18 14 6 2 2 6 2 2 6
56685- 46 46 46 246 246 246 253 253 253 253 253 253
56686-253 253 253 253 253 253 253 253 253 253 253 253
56687-253 253 253 253 253 253 253 253 253 253 253 253
56688-253 253 253 253 253 253 253 253 253 253 253 253
56689-253 253 253 221 221 221 86 86 86 156 107 11
56690-216 158 10 236 178 12 242 186 14 246 186 14
56691-242 186 14 239 182 13 239 182 13 242 186 14
56692-242 186 14 246 186 14 246 190 14 246 190 14
56693-246 190 14 246 190 14 246 190 14 246 190 14
56694-242 186 14 225 175 15 142 122 72 66 66 66
56695- 30 30 30 10 10 10 0 0 0 0 0 0
56696- 0 0 0 0 0 0 0 0 0 0 0 0
56697- 0 0 0 0 0 0 0 0 0 0 0 0
56698- 0 0 0 0 0 0 0 0 0 6 6 6
56699- 26 26 26 70 70 70 163 133 67 210 150 10
56700-236 178 12 246 190 14 246 190 14 246 190 14
56701-246 190 14 246 190 14 246 190 14 246 190 14
56702-246 190 14 246 190 14 246 190 14 246 190 14
56703-246 190 14 246 190 14 246 190 14 246 190 14
56704-232 195 16 121 92 8 34 34 34 106 106 106
56705-221 221 221 253 253 253 253 253 253 253 253 253
56706-253 253 253 253 253 253 253 253 253 253 253 253
56707-253 253 253 253 253 253 253 253 253 253 253 253
56708-253 253 253 253 253 253 253 253 253 253 253 253
56709-242 242 242 82 82 82 18 14 6 163 110 8
56710-216 158 10 236 178 12 242 186 14 246 190 14
56711-246 190 14 246 190 14 246 190 14 246 190 14
56712-246 190 14 246 190 14 246 190 14 246 190 14
56713-246 190 14 246 190 14 246 190 14 246 190 14
56714-246 190 14 246 190 14 242 186 14 163 133 67
56715- 46 46 46 18 18 18 6 6 6 0 0 0
56716- 0 0 0 0 0 0 0 0 0 0 0 0
56717- 0 0 0 0 0 0 0 0 0 0 0 0
56718- 0 0 0 0 0 0 0 0 0 10 10 10
56719- 30 30 30 78 78 78 163 133 67 210 150 10
56720-236 178 12 246 186 14 246 190 14 246 190 14
56721-246 190 14 246 190 14 246 190 14 246 190 14
56722-246 190 14 246 190 14 246 190 14 246 190 14
56723-246 190 14 246 190 14 246 190 14 246 190 14
56724-241 196 14 215 174 15 190 178 144 253 253 253
56725-253 253 253 253 253 253 253 253 253 253 253 253
56726-253 253 253 253 253 253 253 253 253 253 253 253
56727-253 253 253 253 253 253 253 253 253 253 253 253
56728-253 253 253 253 253 253 253 253 253 218 218 218
56729- 58 58 58 2 2 6 22 18 6 167 114 7
56730-216 158 10 236 178 12 246 186 14 246 190 14
56731-246 190 14 246 190 14 246 190 14 246 190 14
56732-246 190 14 246 190 14 246 190 14 246 190 14
56733-246 190 14 246 190 14 246 190 14 246 190 14
56734-246 190 14 246 186 14 242 186 14 190 150 46
56735- 54 54 54 22 22 22 6 6 6 0 0 0
56736- 0 0 0 0 0 0 0 0 0 0 0 0
56737- 0 0 0 0 0 0 0 0 0 0 0 0
56738- 0 0 0 0 0 0 0 0 0 14 14 14
56739- 38 38 38 86 86 86 180 133 36 213 154 11
56740-236 178 12 246 186 14 246 190 14 246 190 14
56741-246 190 14 246 190 14 246 190 14 246 190 14
56742-246 190 14 246 190 14 246 190 14 246 190 14
56743-246 190 14 246 190 14 246 190 14 246 190 14
56744-246 190 14 232 195 16 190 146 13 214 214 214
56745-253 253 253 253 253 253 253 253 253 253 253 253
56746-253 253 253 253 253 253 253 253 253 253 253 253
56747-253 253 253 253 253 253 253 253 253 253 253 253
56748-253 253 253 250 250 250 170 170 170 26 26 26
56749- 2 2 6 2 2 6 37 26 9 163 110 8
56750-219 162 10 239 182 13 246 186 14 246 190 14
56751-246 190 14 246 190 14 246 190 14 246 190 14
56752-246 190 14 246 190 14 246 190 14 246 190 14
56753-246 190 14 246 190 14 246 190 14 246 190 14
56754-246 186 14 236 178 12 224 166 10 142 122 72
56755- 46 46 46 18 18 18 6 6 6 0 0 0
56756- 0 0 0 0 0 0 0 0 0 0 0 0
56757- 0 0 0 0 0 0 0 0 0 0 0 0
56758- 0 0 0 0 0 0 6 6 6 18 18 18
56759- 50 50 50 109 106 95 192 133 9 224 166 10
56760-242 186 14 246 190 14 246 190 14 246 190 14
56761-246 190 14 246 190 14 246 190 14 246 190 14
56762-246 190 14 246 190 14 246 190 14 246 190 14
56763-246 190 14 246 190 14 246 190 14 246 190 14
56764-242 186 14 226 184 13 210 162 10 142 110 46
56765-226 226 226 253 253 253 253 253 253 253 253 253
56766-253 253 253 253 253 253 253 253 253 253 253 253
56767-253 253 253 253 253 253 253 253 253 253 253 253
56768-198 198 198 66 66 66 2 2 6 2 2 6
56769- 2 2 6 2 2 6 50 34 6 156 107 11
56770-219 162 10 239 182 13 246 186 14 246 190 14
56771-246 190 14 246 190 14 246 190 14 246 190 14
56772-246 190 14 246 190 14 246 190 14 246 190 14
56773-246 190 14 246 190 14 246 190 14 242 186 14
56774-234 174 13 213 154 11 154 122 46 66 66 66
56775- 30 30 30 10 10 10 0 0 0 0 0 0
56776- 0 0 0 0 0 0 0 0 0 0 0 0
56777- 0 0 0 0 0 0 0 0 0 0 0 0
56778- 0 0 0 0 0 0 6 6 6 22 22 22
56779- 58 58 58 154 121 60 206 145 10 234 174 13
56780-242 186 14 246 186 14 246 190 14 246 190 14
56781-246 190 14 246 190 14 246 190 14 246 190 14
56782-246 190 14 246 190 14 246 190 14 246 190 14
56783-246 190 14 246 190 14 246 190 14 246 190 14
56784-246 186 14 236 178 12 210 162 10 163 110 8
56785- 61 42 6 138 138 138 218 218 218 250 250 250
56786-253 253 253 253 253 253 253 253 253 250 250 250
56787-242 242 242 210 210 210 144 144 144 66 66 66
56788- 6 6 6 2 2 6 2 2 6 2 2 6
56789- 2 2 6 2 2 6 61 42 6 163 110 8
56790-216 158 10 236 178 12 246 190 14 246 190 14
56791-246 190 14 246 190 14 246 190 14 246 190 14
56792-246 190 14 246 190 14 246 190 14 246 190 14
56793-246 190 14 239 182 13 230 174 11 216 158 10
56794-190 142 34 124 112 88 70 70 70 38 38 38
56795- 18 18 18 6 6 6 0 0 0 0 0 0
56796- 0 0 0 0 0 0 0 0 0 0 0 0
56797- 0 0 0 0 0 0 0 0 0 0 0 0
56798- 0 0 0 0 0 0 6 6 6 22 22 22
56799- 62 62 62 168 124 44 206 145 10 224 166 10
56800-236 178 12 239 182 13 242 186 14 242 186 14
56801-246 186 14 246 190 14 246 190 14 246 190 14
56802-246 190 14 246 190 14 246 190 14 246 190 14
56803-246 190 14 246 190 14 246 190 14 246 190 14
56804-246 190 14 236 178 12 216 158 10 175 118 6
56805- 80 54 7 2 2 6 6 6 6 30 30 30
56806- 54 54 54 62 62 62 50 50 50 38 38 38
56807- 14 14 14 2 2 6 2 2 6 2 2 6
56808- 2 2 6 2 2 6 2 2 6 2 2 6
56809- 2 2 6 6 6 6 80 54 7 167 114 7
56810-213 154 11 236 178 12 246 190 14 246 190 14
56811-246 190 14 246 190 14 246 190 14 246 190 14
56812-246 190 14 242 186 14 239 182 13 239 182 13
56813-230 174 11 210 150 10 174 135 50 124 112 88
56814- 82 82 82 54 54 54 34 34 34 18 18 18
56815- 6 6 6 0 0 0 0 0 0 0 0 0
56816- 0 0 0 0 0 0 0 0 0 0 0 0
56817- 0 0 0 0 0 0 0 0 0 0 0 0
56818- 0 0 0 0 0 0 6 6 6 18 18 18
56819- 50 50 50 158 118 36 192 133 9 200 144 11
56820-216 158 10 219 162 10 224 166 10 226 170 11
56821-230 174 11 236 178 12 239 182 13 239 182 13
56822-242 186 14 246 186 14 246 190 14 246 190 14
56823-246 190 14 246 190 14 246 190 14 246 190 14
56824-246 186 14 230 174 11 210 150 10 163 110 8
56825-104 69 6 10 10 10 2 2 6 2 2 6
56826- 2 2 6 2 2 6 2 2 6 2 2 6
56827- 2 2 6 2 2 6 2 2 6 2 2 6
56828- 2 2 6 2 2 6 2 2 6 2 2 6
56829- 2 2 6 6 6 6 91 60 6 167 114 7
56830-206 145 10 230 174 11 242 186 14 246 190 14
56831-246 190 14 246 190 14 246 186 14 242 186 14
56832-239 182 13 230 174 11 224 166 10 213 154 11
56833-180 133 36 124 112 88 86 86 86 58 58 58
56834- 38 38 38 22 22 22 10 10 10 6 6 6
56835- 0 0 0 0 0 0 0 0 0 0 0 0
56836- 0 0 0 0 0 0 0 0 0 0 0 0
56837- 0 0 0 0 0 0 0 0 0 0 0 0
56838- 0 0 0 0 0 0 0 0 0 14 14 14
56839- 34 34 34 70 70 70 138 110 50 158 118 36
56840-167 114 7 180 123 7 192 133 9 197 138 11
56841-200 144 11 206 145 10 213 154 11 219 162 10
56842-224 166 10 230 174 11 239 182 13 242 186 14
56843-246 186 14 246 186 14 246 186 14 246 186 14
56844-239 182 13 216 158 10 185 133 11 152 99 6
56845-104 69 6 18 14 6 2 2 6 2 2 6
56846- 2 2 6 2 2 6 2 2 6 2 2 6
56847- 2 2 6 2 2 6 2 2 6 2 2 6
56848- 2 2 6 2 2 6 2 2 6 2 2 6
56849- 2 2 6 6 6 6 80 54 7 152 99 6
56850-192 133 9 219 162 10 236 178 12 239 182 13
56851-246 186 14 242 186 14 239 182 13 236 178 12
56852-224 166 10 206 145 10 192 133 9 154 121 60
56853- 94 94 94 62 62 62 42 42 42 22 22 22
56854- 14 14 14 6 6 6 0 0 0 0 0 0
56855- 0 0 0 0 0 0 0 0 0 0 0 0
56856- 0 0 0 0 0 0 0 0 0 0 0 0
56857- 0 0 0 0 0 0 0 0 0 0 0 0
56858- 0 0 0 0 0 0 0 0 0 6 6 6
56859- 18 18 18 34 34 34 58 58 58 78 78 78
56860-101 98 89 124 112 88 142 110 46 156 107 11
56861-163 110 8 167 114 7 175 118 6 180 123 7
56862-185 133 11 197 138 11 210 150 10 219 162 10
56863-226 170 11 236 178 12 236 178 12 234 174 13
56864-219 162 10 197 138 11 163 110 8 130 83 6
56865- 91 60 6 10 10 10 2 2 6 2 2 6
56866- 18 18 18 38 38 38 38 38 38 38 38 38
56867- 38 38 38 38 38 38 38 38 38 38 38 38
56868- 38 38 38 38 38 38 26 26 26 2 2 6
56869- 2 2 6 6 6 6 70 47 6 137 92 6
56870-175 118 6 200 144 11 219 162 10 230 174 11
56871-234 174 13 230 174 11 219 162 10 210 150 10
56872-192 133 9 163 110 8 124 112 88 82 82 82
56873- 50 50 50 30 30 30 14 14 14 6 6 6
56874- 0 0 0 0 0 0 0 0 0 0 0 0
56875- 0 0 0 0 0 0 0 0 0 0 0 0
56876- 0 0 0 0 0 0 0 0 0 0 0 0
56877- 0 0 0 0 0 0 0 0 0 0 0 0
56878- 0 0 0 0 0 0 0 0 0 0 0 0
56879- 6 6 6 14 14 14 22 22 22 34 34 34
56880- 42 42 42 58 58 58 74 74 74 86 86 86
56881-101 98 89 122 102 70 130 98 46 121 87 25
56882-137 92 6 152 99 6 163 110 8 180 123 7
56883-185 133 11 197 138 11 206 145 10 200 144 11
56884-180 123 7 156 107 11 130 83 6 104 69 6
56885- 50 34 6 54 54 54 110 110 110 101 98 89
56886- 86 86 86 82 82 82 78 78 78 78 78 78
56887- 78 78 78 78 78 78 78 78 78 78 78 78
56888- 78 78 78 82 82 82 86 86 86 94 94 94
56889-106 106 106 101 101 101 86 66 34 124 80 6
56890-156 107 11 180 123 7 192 133 9 200 144 11
56891-206 145 10 200 144 11 192 133 9 175 118 6
56892-139 102 15 109 106 95 70 70 70 42 42 42
56893- 22 22 22 10 10 10 0 0 0 0 0 0
56894- 0 0 0 0 0 0 0 0 0 0 0 0
56895- 0 0 0 0 0 0 0 0 0 0 0 0
56896- 0 0 0 0 0 0 0 0 0 0 0 0
56897- 0 0 0 0 0 0 0 0 0 0 0 0
56898- 0 0 0 0 0 0 0 0 0 0 0 0
56899- 0 0 0 0 0 0 6 6 6 10 10 10
56900- 14 14 14 22 22 22 30 30 30 38 38 38
56901- 50 50 50 62 62 62 74 74 74 90 90 90
56902-101 98 89 112 100 78 121 87 25 124 80 6
56903-137 92 6 152 99 6 152 99 6 152 99 6
56904-138 86 6 124 80 6 98 70 6 86 66 30
56905-101 98 89 82 82 82 58 58 58 46 46 46
56906- 38 38 38 34 34 34 34 34 34 34 34 34
56907- 34 34 34 34 34 34 34 34 34 34 34 34
56908- 34 34 34 34 34 34 38 38 38 42 42 42
56909- 54 54 54 82 82 82 94 86 76 91 60 6
56910-134 86 6 156 107 11 167 114 7 175 118 6
56911-175 118 6 167 114 7 152 99 6 121 87 25
56912-101 98 89 62 62 62 34 34 34 18 18 18
56913- 6 6 6 0 0 0 0 0 0 0 0 0
56914- 0 0 0 0 0 0 0 0 0 0 0 0
56915- 0 0 0 0 0 0 0 0 0 0 0 0
56916- 0 0 0 0 0 0 0 0 0 0 0 0
56917- 0 0 0 0 0 0 0 0 0 0 0 0
56918- 0 0 0 0 0 0 0 0 0 0 0 0
56919- 0 0 0 0 0 0 0 0 0 0 0 0
56920- 0 0 0 6 6 6 6 6 6 10 10 10
56921- 18 18 18 22 22 22 30 30 30 42 42 42
56922- 50 50 50 66 66 66 86 86 86 101 98 89
56923-106 86 58 98 70 6 104 69 6 104 69 6
56924-104 69 6 91 60 6 82 62 34 90 90 90
56925- 62 62 62 38 38 38 22 22 22 14 14 14
56926- 10 10 10 10 10 10 10 10 10 10 10 10
56927- 10 10 10 10 10 10 6 6 6 10 10 10
56928- 10 10 10 10 10 10 10 10 10 14 14 14
56929- 22 22 22 42 42 42 70 70 70 89 81 66
56930- 80 54 7 104 69 6 124 80 6 137 92 6
56931-134 86 6 116 81 8 100 82 52 86 86 86
56932- 58 58 58 30 30 30 14 14 14 6 6 6
56933- 0 0 0 0 0 0 0 0 0 0 0 0
56934- 0 0 0 0 0 0 0 0 0 0 0 0
56935- 0 0 0 0 0 0 0 0 0 0 0 0
56936- 0 0 0 0 0 0 0 0 0 0 0 0
56937- 0 0 0 0 0 0 0 0 0 0 0 0
56938- 0 0 0 0 0 0 0 0 0 0 0 0
56939- 0 0 0 0 0 0 0 0 0 0 0 0
56940- 0 0 0 0 0 0 0 0 0 0 0 0
56941- 0 0 0 6 6 6 10 10 10 14 14 14
56942- 18 18 18 26 26 26 38 38 38 54 54 54
56943- 70 70 70 86 86 86 94 86 76 89 81 66
56944- 89 81 66 86 86 86 74 74 74 50 50 50
56945- 30 30 30 14 14 14 6 6 6 0 0 0
56946- 0 0 0 0 0 0 0 0 0 0 0 0
56947- 0 0 0 0 0 0 0 0 0 0 0 0
56948- 0 0 0 0 0 0 0 0 0 0 0 0
56949- 6 6 6 18 18 18 34 34 34 58 58 58
56950- 82 82 82 89 81 66 89 81 66 89 81 66
56951- 94 86 66 94 86 76 74 74 74 50 50 50
56952- 26 26 26 14 14 14 6 6 6 0 0 0
56953- 0 0 0 0 0 0 0 0 0 0 0 0
56954- 0 0 0 0 0 0 0 0 0 0 0 0
56955- 0 0 0 0 0 0 0 0 0 0 0 0
56956- 0 0 0 0 0 0 0 0 0 0 0 0
56957- 0 0 0 0 0 0 0 0 0 0 0 0
56958- 0 0 0 0 0 0 0 0 0 0 0 0
56959- 0 0 0 0 0 0 0 0 0 0 0 0
56960- 0 0 0 0 0 0 0 0 0 0 0 0
56961- 0 0 0 0 0 0 0 0 0 0 0 0
56962- 6 6 6 6 6 6 14 14 14 18 18 18
56963- 30 30 30 38 38 38 46 46 46 54 54 54
56964- 50 50 50 42 42 42 30 30 30 18 18 18
56965- 10 10 10 0 0 0 0 0 0 0 0 0
56966- 0 0 0 0 0 0 0 0 0 0 0 0
56967- 0 0 0 0 0 0 0 0 0 0 0 0
56968- 0 0 0 0 0 0 0 0 0 0 0 0
56969- 0 0 0 6 6 6 14 14 14 26 26 26
56970- 38 38 38 50 50 50 58 58 58 58 58 58
56971- 54 54 54 42 42 42 30 30 30 18 18 18
56972- 10 10 10 0 0 0 0 0 0 0 0 0
56973- 0 0 0 0 0 0 0 0 0 0 0 0
56974- 0 0 0 0 0 0 0 0 0 0 0 0
56975- 0 0 0 0 0 0 0 0 0 0 0 0
56976- 0 0 0 0 0 0 0 0 0 0 0 0
56977- 0 0 0 0 0 0 0 0 0 0 0 0
56978- 0 0 0 0 0 0 0 0 0 0 0 0
56979- 0 0 0 0 0 0 0 0 0 0 0 0
56980- 0 0 0 0 0 0 0 0 0 0 0 0
56981- 0 0 0 0 0 0 0 0 0 0 0 0
56982- 0 0 0 0 0 0 0 0 0 6 6 6
56983- 6 6 6 10 10 10 14 14 14 18 18 18
56984- 18 18 18 14 14 14 10 10 10 6 6 6
56985- 0 0 0 0 0 0 0 0 0 0 0 0
56986- 0 0 0 0 0 0 0 0 0 0 0 0
56987- 0 0 0 0 0 0 0 0 0 0 0 0
56988- 0 0 0 0 0 0 0 0 0 0 0 0
56989- 0 0 0 0 0 0 0 0 0 6 6 6
56990- 14 14 14 18 18 18 22 22 22 22 22 22
56991- 18 18 18 14 14 14 10 10 10 6 6 6
56992- 0 0 0 0 0 0 0 0 0 0 0 0
56993- 0 0 0 0 0 0 0 0 0 0 0 0
56994- 0 0 0 0 0 0 0 0 0 0 0 0
56995- 0 0 0 0 0 0 0 0 0 0 0 0
56996- 0 0 0 0 0 0 0 0 0 0 0 0
56997+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
56999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57002+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57003+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57010+4 4 4 4 4 4
57011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57024+4 4 4 4 4 4
57025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57038+4 4 4 4 4 4
57039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57049+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57051+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57052+4 4 4 4 4 4
57053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57065+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57066+4 4 4 4 4 4
57067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57079+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57080+4 4 4 4 4 4
57081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57085+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
57086+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
57087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57088+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57090+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
57091+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
57092+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
57093+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57094+4 4 4 4 4 4
57095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57099+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
57100+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
57101+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57102+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57103+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57104+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
57105+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
57106+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
57107+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57108+4 4 4 4 4 4
57109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57113+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
57114+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
57115+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
57116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57117+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57118+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
57119+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
57120+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
57121+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
57122+4 4 4 4 4 4
57123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57126+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
57127+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
57128+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
57129+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
57130+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57131+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
57132+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
57133+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
57134+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
57135+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
57136+4 4 4 4 4 4
57137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57140+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
57141+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
57142+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
57143+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
57144+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
57145+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
57146+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
57147+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
57148+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
57149+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
57150+4 4 4 4 4 4
57151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
57154+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
57155+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
57156+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
57157+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
57158+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
57159+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
57160+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
57161+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
57162+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
57163+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
57164+4 4 4 4 4 4
57165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57167+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
57168+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
57169+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
57170+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
57171+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
57172+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
57173+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
57174+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
57175+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
57176+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
57177+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
57178+4 4 4 4 4 4
57179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57181+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
57182+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
57183+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
57184+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
57185+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
57186+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
57187+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
57188+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
57189+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
57190+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
57191+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
57192+4 4 4 4 4 4
57193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57195+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
57196+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
57197+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
57198+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
57199+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
57200+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
57201+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
57202+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
57203+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
57204+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
57205+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
57206+4 4 4 4 4 4
57207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57209+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
57210+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
57211+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
57212+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
57213+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
57214+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
57215+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
57216+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
57217+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
57218+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
57219+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
57220+4 4 4 4 4 4
57221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57222+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
57223+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
57224+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
57225+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
57226+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
57227+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
57228+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
57229+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
57230+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
57231+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
57232+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
57233+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
57234+4 4 4 4 4 4
57235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57236+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
57237+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
57238+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
57239+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
57240+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
57241+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
57242+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
57243+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
57244+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
57245+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
57246+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
57247+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
57248+0 0 0 4 4 4
57249+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
57250+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
57251+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
57252+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
57253+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
57254+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
57255+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
57256+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
57257+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
57258+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
57259+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
57260+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
57261+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
57262+2 0 0 0 0 0
57263+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
57264+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
57265+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
57266+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
57267+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
57268+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
57269+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
57270+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
57271+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
57272+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
57273+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
57274+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
57275+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
57276+37 38 37 0 0 0
57277+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
57278+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
57279+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
57280+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
57281+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
57282+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
57283+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
57284+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
57285+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
57286+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
57287+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
57288+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
57289+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
57290+85 115 134 4 0 0
57291+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
57292+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
57293+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
57294+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
57295+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
57296+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
57297+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
57298+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
57299+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
57300+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
57301+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
57302+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
57303+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
57304+60 73 81 4 0 0
57305+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
57306+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
57307+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
57308+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
57309+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
57310+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
57311+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
57312+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
57313+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
57314+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
57315+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
57316+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
57317+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
57318+16 19 21 4 0 0
57319+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
57320+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
57321+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
57322+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
57323+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
57324+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
57325+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
57326+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
57327+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
57328+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
57329+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
57330+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
57331+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
57332+4 0 0 4 3 3
57333+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
57334+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
57335+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
57336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
57337+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
57338+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
57339+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
57340+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
57341+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
57342+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
57343+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
57344+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
57345+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
57346+3 2 2 4 4 4
57347+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
57348+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
57349+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
57350+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
57351+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
57352+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
57353+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
57354+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
57355+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
57356+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
57357+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
57358+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
57359+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
57360+4 4 4 4 4 4
57361+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
57362+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
57363+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
57364+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
57365+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
57366+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
57367+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
57368+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
57369+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
57370+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
57371+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
57372+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
57373+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
57374+4 4 4 4 4 4
57375+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
57376+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
57377+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
57378+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
57379+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
57380+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
57381+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
57382+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
57383+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
57384+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
57385+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
57386+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
57387+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
57388+5 5 5 5 5 5
57389+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
57390+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
57391+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
57392+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
57393+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
57394+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57395+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
57396+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
57397+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
57398+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
57399+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
57400+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
57401+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
57402+5 5 5 4 4 4
57403+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
57404+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
57405+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
57406+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
57407+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
57408+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
57409+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
57410+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
57411+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
57412+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
57413+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
57414+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
57415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57416+4 4 4 4 4 4
57417+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
57418+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
57419+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
57420+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
57421+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
57422+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57423+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57424+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
57425+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
57426+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
57427+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
57428+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
57429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57430+4 4 4 4 4 4
57431+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
57432+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
57433+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
57434+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
57435+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
57436+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
57437+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
57438+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
57439+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
57440+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
57441+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
57442+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57444+4 4 4 4 4 4
57445+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
57446+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
57447+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
57448+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
57449+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
57450+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57451+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57452+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
57453+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
57454+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
57455+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
57456+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57458+4 4 4 4 4 4
57459+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
57460+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
57461+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
57462+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
57463+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
57464+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
57465+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
57466+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
57467+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
57468+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
57469+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57470+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57472+4 4 4 4 4 4
57473+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
57474+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
57475+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
57476+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
57477+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
57478+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
57479+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
57480+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
57481+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
57482+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
57483+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
57484+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57486+4 4 4 4 4 4
57487+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
57488+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
57489+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
57490+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
57491+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
57492+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
57493+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
57494+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
57495+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
57496+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
57497+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
57498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57500+4 4 4 4 4 4
57501+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
57502+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
57503+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
57504+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
57505+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
57506+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
57507+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
57508+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
57509+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
57510+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
57511+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57514+4 4 4 4 4 4
57515+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
57516+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
57517+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
57518+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
57519+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57520+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
57521+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
57522+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
57523+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
57524+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
57525+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57526+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57527+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57528+4 4 4 4 4 4
57529+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
57530+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
57531+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
57532+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
57533+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57534+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
57535+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
57536+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
57537+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
57538+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
57539+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57540+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57541+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57542+4 4 4 4 4 4
57543+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
57544+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
57545+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
57546+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
57547+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57548+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
57549+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
57550+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
57551+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
57552+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57553+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57554+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57555+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57556+4 4 4 4 4 4
57557+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
57558+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
57559+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
57560+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
57561+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
57562+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
57563+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
57564+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
57565+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57566+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57567+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57569+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57570+4 4 4 4 4 4
57571+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
57572+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
57573+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
57574+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
57575+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57576+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
57577+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
57578+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
57579+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
57580+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57581+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57584+4 4 4 4 4 4
57585+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
57586+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
57587+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
57588+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
57589+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
57590+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
57591+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
57592+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
57593+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57594+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57595+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57596+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57597+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57598+4 4 4 4 4 4
57599+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
57600+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
57601+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
57602+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
57603+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
57604+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
57605+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
57606+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
57607+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
57608+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57609+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57610+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57611+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57612+4 4 4 4 4 4
57613+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
57614+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
57615+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
57616+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
57617+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
57618+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
57619+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
57620+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
57621+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57622+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57623+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57624+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57625+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57626+4 4 4 4 4 4
57627+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
57628+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
57629+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
57630+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
57631+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
57632+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
57633+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
57634+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
57635+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
57636+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57637+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57638+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57639+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57640+4 4 4 4 4 4
57641+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
57642+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
57643+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
57644+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
57645+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
57646+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
57647+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
57648+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
57649+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57650+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57651+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57652+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57653+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57654+4 4 4 4 4 4
57655+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57656+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
57657+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
57658+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
57659+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
57660+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
57661+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
57662+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
57663+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
57664+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57665+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57666+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57667+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57668+4 4 4 4 4 4
57669+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
57670+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
57671+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
57672+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
57673+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
57674+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
57675+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57676+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
57677+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57678+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57679+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57680+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57681+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57682+4 4 4 4 4 4
57683+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57684+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
57685+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
57686+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
57687+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
57688+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
57689+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57690+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
57691+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
57692+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57693+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57694+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57695+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57696+4 4 4 4 4 4
57697+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
57698+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
57699+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
57700+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
57701+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
57702+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
57703+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
57704+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
57705+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
57706+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57707+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57708+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57709+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57710+4 4 4 4 4 4
57711+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57712+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
57713+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
57714+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
57715+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
57716+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
57717+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
57718+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
57719+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
57720+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57721+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57722+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57723+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57724+4 4 4 4 4 4
57725+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
57726+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
57727+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
57728+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
57729+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
57730+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
57731+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
57732+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
57733+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
57734+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57735+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57736+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57737+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57738+4 4 4 4 4 4
57739+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57740+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
57741+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
57742+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
57743+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
57744+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
57745+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
57746+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
57747+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
57748+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57749+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57750+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57751+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57752+4 4 4 4 4 4
57753+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
57754+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
57755+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
57756+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
57757+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
57758+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
57759+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
57760+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
57761+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
57762+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
57763+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57764+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57765+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57766+4 4 4 4 4 4
57767+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
57768+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
57769+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
57770+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
57771+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
57772+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
57773+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
57774+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
57775+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
57776+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
57777+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57778+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57779+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57780+4 4 4 4 4 4
57781+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
57782+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
57783+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
57784+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
57785+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
57786+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
57787+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57788+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
57789+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
57790+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
57791+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
57792+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57793+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57794+4 4 4 4 4 4
57795+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
57796+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
57797+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
57798+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
57799+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
57800+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
57801+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
57802+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
57803+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
57804+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
57805+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57806+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57807+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57808+4 4 4 4 4 4
57809+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
57810+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
57811+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
57812+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
57813+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
57814+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
57815+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
57816+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
57817+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
57818+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
57819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57820+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57822+4 4 4 4 4 4
57823+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
57824+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
57825+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
57826+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
57827+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
57828+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
57829+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
57830+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
57831+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
57832+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
57833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57834+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57836+4 4 4 4 4 4
57837+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
57838+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
57839+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
57840+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
57841+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
57842+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
57843+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
57844+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
57845+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
57846+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
57847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57848+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57849+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57850+4 4 4 4 4 4
57851+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
57852+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
57853+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
57854+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
57855+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
57856+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
57857+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
57858+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
57859+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
57860+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
57861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57862+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57863+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57864+4 4 4 4 4 4
57865+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
57866+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
57867+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
57868+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
57869+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
57870+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
57871+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
57872+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
57873+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
57874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57875+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57876+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57877+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57878+4 4 4 4 4 4
57879+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
57880+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
57881+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
57882+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
57883+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
57884+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
57885+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
57886+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
57887+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
57888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57889+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57890+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57891+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57892+4 4 4 4 4 4
57893+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
57894+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
57895+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
57896+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
57897+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
57898+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
57899+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
57900+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
57901+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57903+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57904+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57905+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57906+4 4 4 4 4 4
57907+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
57908+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
57909+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
57910+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
57911+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
57912+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
57913+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
57914+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
57915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57917+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57918+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57919+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57920+4 4 4 4 4 4
57921+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
57922+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
57923+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
57924+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
57925+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
57926+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
57927+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
57928+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
57929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57931+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57932+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57933+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57934+4 4 4 4 4 4
57935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
57936+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
57937+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
57938+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
57939+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
57940+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
57941+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
57942+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
57943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57945+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57946+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57947+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57948+4 4 4 4 4 4
57949+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57950+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
57951+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
57952+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
57953+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
57954+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
57955+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
57956+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
57957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57959+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57960+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57961+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57962+4 4 4 4 4 4
57963+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57964+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
57965+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
57966+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
57967+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
57968+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
57969+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
57970+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
57971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57974+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57975+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57976+4 4 4 4 4 4
57977+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57979+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
57980+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
57981+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
57982+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
57983+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
57984+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
57985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57988+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57989+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57990+4 4 4 4 4 4
57991+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57992+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
57994+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
57995+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
57996+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
57997+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
57998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58002+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58003+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58004+4 4 4 4 4 4
58005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58008+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
58009+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
58010+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
58011+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
58012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58018+4 4 4 4 4 4
58019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58022+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
58023+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
58024+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
58025+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
58026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58032+4 4 4 4 4 4
58033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58036+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
58037+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
58038+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
58039+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
58040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58046+4 4 4 4 4 4
58047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58049+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58051+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
58052+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
58053+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58060+4 4 4 4 4 4
58061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58065+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
58066+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
58067+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
58068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58074+4 4 4 4 4 4
58075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58079+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
58080+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
58081+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58088+4 4 4 4 4 4
58089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58090+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58091+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58092+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58093+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
58094+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
58095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58101+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58102+4 4 4 4 4 4
58103+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58104+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58105+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58106+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58107+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
58108+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
58109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58116+4 4 4 4 4 4
58117diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
58118index fef20db..d28b1ab 100644
58119--- a/drivers/xen/xenfs/xenstored.c
58120+++ b/drivers/xen/xenfs/xenstored.c
58121@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
58122 static int xsd_kva_open(struct inode *inode, struct file *file)
58123 {
58124 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
58125+#ifdef CONFIG_GRKERNSEC_HIDESYM
58126+ NULL);
58127+#else
58128 xen_store_interface);
58129+#endif
58130+
58131 if (!file->private_data)
58132 return -ENOMEM;
58133 return 0;
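The xenstored hunk above is a kernel-pointer-leak fix: xsd_kva_open() formats the address of xen_store_interface into a name that userspace can read back, and CONFIG_GRKERNSEC_HIDESYM substitutes NULL so no kernel address escapes. A minimal userspace sketch of the pattern, assuming a stand-in HIDESYM macro and an illustrative format_iface_addr() helper (neither is in the patch):

/*
 * Minimal userspace sketch of the HIDESYM pattern: format a pointer
 * only when symbol hiding is off, otherwise emit a dummy value.
 * HIDESYM and format_iface_addr() are illustrative, not from the patch.
 */
#include <stdio.h>
#include <stdlib.h>

#define HIDESYM 1	/* stands in for CONFIG_GRKERNSEC_HIDESYM */

static char *format_iface_addr(const void *iface)
{
	char *buf = malloc(32);

	if (!buf)
		return NULL;
#if HIDESYM
	/* leak nothing; glibc renders this as "0x(nil)" */
	snprintf(buf, 32, "0x%p", (void *)NULL);
#else
	snprintf(buf, 32, "0x%p", iface);	/* the real address */
#endif
	return buf;
}

int main(void)
{
	int obj;
	char *s = format_iface_addr(&obj);

	if (!s)
		return 1;	/* mirrors the -ENOMEM path in xsd_kva_open() */
	puts(s);
	free(s);
	return 0;
}

The interface still hands back a well-formed string, so consumers keep working; only the information content is removed.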
58134diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
58135index eb14e05..5156de7 100644
58136--- a/fs/9p/vfs_addr.c
58137+++ b/fs/9p/vfs_addr.c
58138@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
58139
58140 retval = v9fs_file_write_internal(inode,
58141 v9inode->writeback_fid,
58142- (__force const char __user *)buffer,
58143+ (const char __force_user *)buffer,
58144 len, &offset, 0);
58145 if (retval > 0)
58146 retval = 0;
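The (const char __force_user *) cast above is an annotation change, not a behavioural one: grsecurity composes __force_user from the sparse qualifiers __force and __user, so the static checker sees an explicit, deliberate crossing between kernel and user address spaces. A compilable sketch of how such annotations work — the macro definitions mirror the usual sparse setup, and fake_write() is illustrative:

/*
 * Under the `sparse` checker, __user places a pointer in a distinct
 * address space and __force marks a deliberate crossing; with a normal
 * compiler the macros expand to nothing.
 */
#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user	__force __user

static long fake_write(const char __user *buf, unsigned long len)
{
	(void)buf;	/* a real implementation would copy from buf */
	return (long)len;
}

int main(void)
{
	char kbuf[4] = "hi";

	/* a kernel buffer deliberately passed through a user-pointer API */
	return fake_write((const char __force_user *)kbuf, 2) == 2 ? 0 : 1;
}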
58147diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
58148index 9ee5343..5165e3c 100644
58149--- a/fs/9p/vfs_inode.c
58150+++ b/fs/9p/vfs_inode.c
58151@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
58152 void
58153 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
58154 {
58155- char *s = nd_get_link(nd);
58156+ const char *s = nd_get_link(nd);
58157
58158 p9_debug(P9_DEBUG_VFS, " %pd %s\n",
58159 dentry, IS_ERR(s) ? "<error>" : s);
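Qualifying the nd_get_link() result as const above documents that the link body must not be modified through this pointer. A toy analog, where get_link() is illustrative rather than the kernel helper:

/*
 * The string returned by the link helper must not be modified through
 * this pointer, and `const char *s` lets the compiler enforce that.
 */
#include <stdio.h>

static const char *get_link(void)
{
	return "/some/target";
}

int main(void)
{
	const char *s = get_link();

	/* s[0] = 'x'; would now fail to compile, as intended */
	printf("%s\n", s);
	return 0;
}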
58160diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
58161index c055d56e..a46f4f5 100644
58162--- a/fs/Kconfig.binfmt
58163+++ b/fs/Kconfig.binfmt
58164@@ -106,7 +106,7 @@ config HAVE_AOUT
58165
58166 config BINFMT_AOUT
58167 tristate "Kernel support for a.out and ECOFF binaries"
58168- depends on HAVE_AOUT
58169+ depends on HAVE_AOUT && BROKEN
58170 ---help---
58171 A.out (Assembler.OUTput) is a set of formats for libraries and
58172 executables used in the earliest versions of UNIX. Linux used
58173diff --git a/fs/afs/inode.c b/fs/afs/inode.c
58174index 8a1d38e..300a14e 100644
58175--- a/fs/afs/inode.c
58176+++ b/fs/afs/inode.c
58177@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
58178 struct afs_vnode *vnode;
58179 struct super_block *sb;
58180 struct inode *inode;
58181- static atomic_t afs_autocell_ino;
58182+ static atomic_unchecked_t afs_autocell_ino;
58183
58184 _enter("{%x:%u},%*.*s,",
58185 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
58186@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
58187 data.fid.unique = 0;
58188 data.fid.vnode = 0;
58189
58190- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
58191+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
58192 afs_iget5_autocell_test, afs_iget5_set,
58193 &data);
58194 if (!inode) {
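Throughout the patch, counters like afs_autocell_ino above are converted from atomic_t to atomic_unchecked_t. Under PAX_REFCOUNT, ordinary atomic_t increments trap on overflow to stop reference-count exploits; counters that are mere ID or sequence generators, where wrap-around is harmless, opt out via the _unchecked variants. A userspace analogue of the unchecked behaviour, illustrating the semantics rather than the kernel implementation:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Wrap-around is acceptable for an ID generator, so no overflow trap:
     * this mirrors atomic_inc_return_unchecked() semantics. */
    static _Atomic unsigned int autocell_ino;

    static unsigned int next_autocell_ino(void)
    {
            return atomic_fetch_add(&autocell_ino, 1) + 1;
    }

    int main(void)
    {
            printf("%u %u\n", next_autocell_ino(), next_autocell_ino());
            return 0;
    }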
58195diff --git a/fs/aio.c b/fs/aio.c
58196index ebd0e9b..c577c91 100644
58197--- a/fs/aio.c
58198+++ b/fs/aio.c
58199@@ -413,7 +413,7 @@ static int aio_setup_ring(struct kioctx *ctx)
58200 size += sizeof(struct io_event) * nr_events;
58201
58202 nr_pages = PFN_UP(size);
58203- if (nr_pages < 0)
58204+ if (nr_pages <= 0)
58205 return -EINVAL;
58206
58207 file = aio_private_file(ctx, nr_pages);
58208diff --git a/fs/attr.c b/fs/attr.c
58209index 6530ced..4a827e2 100644
58210--- a/fs/attr.c
58211+++ b/fs/attr.c
58212@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
58213 unsigned long limit;
58214
58215 limit = rlimit(RLIMIT_FSIZE);
58216+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
58217 if (limit != RLIM_INFINITY && offset > limit)
58218 goto out_sig;
58219 if (offset > inode->i_sb->s_maxbytes)
58220diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
58221index 116fd38..c04182da 100644
58222--- a/fs/autofs4/waitq.c
58223+++ b/fs/autofs4/waitq.c
58224@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
58225 {
58226 unsigned long sigpipe, flags;
58227 mm_segment_t fs;
58228- const char *data = (const char *)addr;
58229+ const char __user *data = (const char __force_user *)addr;
58230 ssize_t wr = 0;
58231
58232 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
58233@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
58234 return 1;
58235 }
58236
58237+#ifdef CONFIG_GRKERNSEC_HIDESYM
58238+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
58239+#endif
58240+
58241 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
58242 enum autofs_notify notify)
58243 {
58244@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
58245
58246 /* If this is a direct mount request create a dummy name */
58247 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
58248+#ifdef CONFIG_GRKERNSEC_HIDESYM
58249+ /* this name does get written to userland via autofs4_write() */
58250+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
58251+#else
58252 qstr.len = sprintf(name, "%p", dentry);
58253+#endif
58254 else {
58255 qstr.len = autofs4_getpath(sbi, dentry, &name);
58256 if (!qstr.len) {
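The autofs4 hunk replaces sprintf(name, "%p", dentry) — which writes a raw kernel pointer into a name that reaches userland via autofs4_write() — with an incrementing counter. Uniqueness is all the dummy name needs, so any monotonic source works. A sketch of the substitution, with userspace stand-ins for the kernel atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned int dummy_name_id;

    /* Produce a unique 8-hex-digit direct-mount name without leaking
     * an address; mirrors the CONFIG_GRKERNSEC_HIDESYM branch above. */
    static int make_dummy_name(char name[static 9])
    {
            return snprintf(name, 9, "%08x",
                            atomic_fetch_add(&dummy_name_id, 1) + 1);
    }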
58257diff --git a/fs/befs/endian.h b/fs/befs/endian.h
58258index 2722387..56059b5 100644
58259--- a/fs/befs/endian.h
58260+++ b/fs/befs/endian.h
58261@@ -11,7 +11,7 @@
58262
58263 #include <asm/byteorder.h>
58264
58265-static inline u64
58266+static inline u64 __intentional_overflow(-1)
58267 fs64_to_cpu(const struct super_block *sb, fs64 n)
58268 {
58269 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58270@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
58271 return (__force fs64)cpu_to_be64(n);
58272 }
58273
58274-static inline u32
58275+static inline u32 __intentional_overflow(-1)
58276 fs32_to_cpu(const struct super_block *sb, fs32 n)
58277 {
58278 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58279@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
58280 return (__force fs32)cpu_to_be32(n);
58281 }
58282
58283-static inline u16
58284+static inline u16 __intentional_overflow(-1)
58285 fs16_to_cpu(const struct super_block *sb, fs16 n)
58286 {
58287 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
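__intentional_overflow(-1), added to the befs byte-order helpers above, is an annotation consumed by the size_overflow GCC plugin: it waives overflow instrumentation for functions whose whole job is to reinterpret arbitrary bit patterns. A plain-C picture of the kind of helper being annotated (the attribute itself only exists under the plugin; the byte-swap builtin is GCC/Clang-specific):

    #include <stdint.h>

    /* A byte-swap must accept every 64-bit pattern; flagging an
     * "overflow" here would be a false positive, hence the waiver. */
    static inline uint64_t fs64_to_cpu_le(uint64_t n)
    {
            return __builtin_bswap64(n); /* LE on-disk value, BE host */
    }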
58288diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
58289index 4c55668..eeae150 100644
58290--- a/fs/binfmt_aout.c
58291+++ b/fs/binfmt_aout.c
58292@@ -16,6 +16,7 @@
58293 #include <linux/string.h>
58294 #include <linux/fs.h>
58295 #include <linux/file.h>
58296+#include <linux/security.h>
58297 #include <linux/stat.h>
58298 #include <linux/fcntl.h>
58299 #include <linux/ptrace.h>
58300@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
58301 #endif
58302 # define START_STACK(u) ((void __user *)u.start_stack)
58303
58304+ memset(&dump, 0, sizeof(dump));
58305+
58306 fs = get_fs();
58307 set_fs(KERNEL_DS);
58308 has_dumped = 1;
58309@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
58310
58311 /* If the size of the dump file exceeds the rlimit, then see what would happen
58312 if we wrote the stack, but not the data area. */
58313+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
58314 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
58315 dump.u_dsize = 0;
58316
58317 /* Make sure we have enough room to write the stack and data areas. */
58318+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
58319 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
58320 dump.u_ssize = 0;
58321
58322@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
58323 rlim = rlimit(RLIMIT_DATA);
58324 if (rlim >= RLIM_INFINITY)
58325 rlim = ~0;
58326+
58327+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
58328 if (ex.a_data + ex.a_bss > rlim)
58329 return -ENOMEM;
58330
58331@@ -261,6 +268,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
58332
58333 install_exec_creds(bprm);
58334
58335+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58336+ current->mm->pax_flags = 0UL;
58337+#endif
58338+
58339+#ifdef CONFIG_PAX_PAGEEXEC
58340+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
58341+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
58342+
58343+#ifdef CONFIG_PAX_EMUTRAMP
58344+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
58345+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
58346+#endif
58347+
58348+#ifdef CONFIG_PAX_MPROTECT
58349+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
58350+ current->mm->pax_flags |= MF_PAX_MPROTECT;
58351+#endif
58352+
58353+ }
58354+#endif
58355+
58356 if (N_MAGIC(ex) == OMAGIC) {
58357 unsigned long text_addr, map_size;
58358 loff_t pos;
58359@@ -312,7 +340,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
58360 return error;
58361
58362 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
58363- PROT_READ | PROT_WRITE | PROT_EXEC,
58364+ PROT_READ | PROT_WRITE,
58365 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
58366 fd_offset + ex.a_text);
58367 if (error != N_DATADDR(ex))
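The gr_learn_resource() calls inserted above (in fs/attr.c and the a.out loader) feed grsecurity's learning mode: each records the resource demand a process actually made, just before the rlimit check, so an RBAC policy with tight limits can later be generated. The hook always precedes enforcement. A hedged stand-in showing the call pattern — gr_learn_resource itself is defined elsewhere in this patch, and the body below is illustrative only:

    #include <stdio.h>

    static void learn_resource(int res, unsigned long demand)
    {
            /* the real hook logs (task, resource, demand) for the
             * policy learner; printing stands in for that here */
            fprintf(stderr, "learn: res=%d demand=%lu\n", res, demand);
    }

    static int check_rlimit(int res, unsigned long demand, unsigned long limit)
    {
            learn_resource(res, demand);    /* observe first... */
            return demand > limit ? -1 : 0; /* ...then enforce */
    }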
58368diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
58369index 995986b..dcc4ef2 100644
58370--- a/fs/binfmt_elf.c
58371+++ b/fs/binfmt_elf.c
58372@@ -34,6 +34,7 @@
58373 #include <linux/utsname.h>
58374 #include <linux/coredump.h>
58375 #include <linux/sched.h>
58376+#include <linux/xattr.h>
58377 #include <asm/uaccess.h>
58378 #include <asm/param.h>
58379 #include <asm/page.h>
58380@@ -47,7 +48,7 @@
58381
58382 static int load_elf_binary(struct linux_binprm *bprm);
58383 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
58384- int, int, unsigned long);
58385+ int, int, unsigned long) __intentional_overflow(-1);
58386
58387 #ifdef CONFIG_USELIB
58388 static int load_elf_library(struct file *);
58389@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
58390 #define elf_core_dump NULL
58391 #endif
58392
58393+#ifdef CONFIG_PAX_MPROTECT
58394+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
58395+#endif
58396+
58397+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58398+static void elf_handle_mmap(struct file *file);
58399+#endif
58400+
58401 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
58402 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
58403 #else
58404@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
58405 .load_binary = load_elf_binary,
58406 .load_shlib = load_elf_library,
58407 .core_dump = elf_core_dump,
58408+
58409+#ifdef CONFIG_PAX_MPROTECT
58410+ .handle_mprotect= elf_handle_mprotect,
58411+#endif
58412+
58413+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58414+ .handle_mmap = elf_handle_mmap,
58415+#endif
58416+
58417 .min_coredump = ELF_EXEC_PAGESIZE,
58418 };
58419
58420@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
58421
58422 static int set_brk(unsigned long start, unsigned long end)
58423 {
58424+ unsigned long e = end;
58425+
58426 start = ELF_PAGEALIGN(start);
58427 end = ELF_PAGEALIGN(end);
58428 if (end > start) {
58429@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
58430 if (BAD_ADDR(addr))
58431 return addr;
58432 }
58433- current->mm->start_brk = current->mm->brk = end;
58434+ current->mm->start_brk = current->mm->brk = e;
58435 return 0;
58436 }
58437
58438@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58439 elf_addr_t __user *u_rand_bytes;
58440 const char *k_platform = ELF_PLATFORM;
58441 const char *k_base_platform = ELF_BASE_PLATFORM;
58442- unsigned char k_rand_bytes[16];
58443+ u32 k_rand_bytes[4];
58444 int items;
58445 elf_addr_t *elf_info;
58446 int ei_index = 0;
58447 const struct cred *cred = current_cred();
58448 struct vm_area_struct *vma;
58449+ unsigned long saved_auxv[AT_VECTOR_SIZE];
58450
58451 /*
58452 * In some cases (e.g. Hyper-Threading), we want to avoid L1
58453@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58454 * Generate 16 random bytes for userspace PRNG seeding.
58455 */
58456 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
58457- u_rand_bytes = (elf_addr_t __user *)
58458- STACK_ALLOC(p, sizeof(k_rand_bytes));
58459+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
58460+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
58461+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
58462+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
58463+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
58464+ u_rand_bytes = (elf_addr_t __user *) p;
58465 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
58466 return -EFAULT;
58467
58468@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58469 return -EFAULT;
58470 current->mm->env_end = p;
58471
58472+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
58473+
58474 /* Put the elf_info on the stack in the right place. */
58475 sp = (elf_addr_t __user *)envp + 1;
58476- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
58477+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
58478 return -EFAULT;
58479 return 0;
58480 }
58481@@ -514,14 +541,14 @@ static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
58482 an ELF header */
58483
58484 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58485- struct file *interpreter, unsigned long *interp_map_addr,
58486+ struct file *interpreter,
58487 unsigned long no_base, struct elf_phdr *interp_elf_phdata)
58488 {
58489 struct elf_phdr *eppnt;
58490- unsigned long load_addr = 0;
58491+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
58492 int load_addr_set = 0;
58493 unsigned long last_bss = 0, elf_bss = 0;
58494- unsigned long error = ~0UL;
58495+ unsigned long error = -EINVAL;
58496 unsigned long total_size;
58497 int i;
58498
58499@@ -541,6 +568,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58500 goto out;
58501 }
58502
58503+#ifdef CONFIG_PAX_SEGMEXEC
58504+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
58505+ pax_task_size = SEGMEXEC_TASK_SIZE;
58506+#endif
58507+
58508 eppnt = interp_elf_phdata;
58509 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
58510 if (eppnt->p_type == PT_LOAD) {
58511@@ -564,8 +596,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58512 map_addr = elf_map(interpreter, load_addr + vaddr,
58513 eppnt, elf_prot, elf_type, total_size);
58514 total_size = 0;
58515- if (!*interp_map_addr)
58516- *interp_map_addr = map_addr;
58517 error = map_addr;
58518 if (BAD_ADDR(map_addr))
58519 goto out;
58520@@ -584,8 +614,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58521 k = load_addr + eppnt->p_vaddr;
58522 if (BAD_ADDR(k) ||
58523 eppnt->p_filesz > eppnt->p_memsz ||
58524- eppnt->p_memsz > TASK_SIZE ||
58525- TASK_SIZE - eppnt->p_memsz < k) {
58526+ eppnt->p_memsz > pax_task_size ||
58527+ pax_task_size - eppnt->p_memsz < k) {
58528 error = -ENOMEM;
58529 goto out;
58530 }
58531@@ -624,9 +654,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58532 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
58533
58534 /* Map the last of the bss segment */
58535- error = vm_brk(elf_bss, last_bss - elf_bss);
58536- if (BAD_ADDR(error))
58537- goto out;
58538+ if (last_bss > elf_bss) {
58539+ error = vm_brk(elf_bss, last_bss - elf_bss);
58540+ if (BAD_ADDR(error))
58541+ goto out;
58542+ }
58543 }
58544
58545 error = load_addr;
58546@@ -634,6 +666,336 @@ out:
58547 return error;
58548 }
58549
58550+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58551+#ifdef CONFIG_PAX_SOFTMODE
58552+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
58553+{
58554+ unsigned long pax_flags = 0UL;
58555+
58556+#ifdef CONFIG_PAX_PAGEEXEC
58557+ if (elf_phdata->p_flags & PF_PAGEEXEC)
58558+ pax_flags |= MF_PAX_PAGEEXEC;
58559+#endif
58560+
58561+#ifdef CONFIG_PAX_SEGMEXEC
58562+ if (elf_phdata->p_flags & PF_SEGMEXEC)
58563+ pax_flags |= MF_PAX_SEGMEXEC;
58564+#endif
58565+
58566+#ifdef CONFIG_PAX_EMUTRAMP
58567+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58568+ pax_flags |= MF_PAX_EMUTRAMP;
58569+#endif
58570+
58571+#ifdef CONFIG_PAX_MPROTECT
58572+ if (elf_phdata->p_flags & PF_MPROTECT)
58573+ pax_flags |= MF_PAX_MPROTECT;
58574+#endif
58575+
58576+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58577+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
58578+ pax_flags |= MF_PAX_RANDMMAP;
58579+#endif
58580+
58581+ return pax_flags;
58582+}
58583+#endif
58584+
58585+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
58586+{
58587+ unsigned long pax_flags = 0UL;
58588+
58589+#ifdef CONFIG_PAX_PAGEEXEC
58590+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
58591+ pax_flags |= MF_PAX_PAGEEXEC;
58592+#endif
58593+
58594+#ifdef CONFIG_PAX_SEGMEXEC
58595+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
58596+ pax_flags |= MF_PAX_SEGMEXEC;
58597+#endif
58598+
58599+#ifdef CONFIG_PAX_EMUTRAMP
58600+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
58601+ pax_flags |= MF_PAX_EMUTRAMP;
58602+#endif
58603+
58604+#ifdef CONFIG_PAX_MPROTECT
58605+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
58606+ pax_flags |= MF_PAX_MPROTECT;
58607+#endif
58608+
58609+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58610+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
58611+ pax_flags |= MF_PAX_RANDMMAP;
58612+#endif
58613+
58614+ return pax_flags;
58615+}
58616+#endif
58617+
58618+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
58619+#ifdef CONFIG_PAX_SOFTMODE
58620+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
58621+{
58622+ unsigned long pax_flags = 0UL;
58623+
58624+#ifdef CONFIG_PAX_PAGEEXEC
58625+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
58626+ pax_flags |= MF_PAX_PAGEEXEC;
58627+#endif
58628+
58629+#ifdef CONFIG_PAX_SEGMEXEC
58630+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
58631+ pax_flags |= MF_PAX_SEGMEXEC;
58632+#endif
58633+
58634+#ifdef CONFIG_PAX_EMUTRAMP
58635+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58636+ pax_flags |= MF_PAX_EMUTRAMP;
58637+#endif
58638+
58639+#ifdef CONFIG_PAX_MPROTECT
58640+ if (pax_flags_softmode & MF_PAX_MPROTECT)
58641+ pax_flags |= MF_PAX_MPROTECT;
58642+#endif
58643+
58644+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58645+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
58646+ pax_flags |= MF_PAX_RANDMMAP;
58647+#endif
58648+
58649+ return pax_flags;
58650+}
58651+#endif
58652+
58653+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
58654+{
58655+ unsigned long pax_flags = 0UL;
58656+
58657+#ifdef CONFIG_PAX_PAGEEXEC
58658+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
58659+ pax_flags |= MF_PAX_PAGEEXEC;
58660+#endif
58661+
58662+#ifdef CONFIG_PAX_SEGMEXEC
58663+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
58664+ pax_flags |= MF_PAX_SEGMEXEC;
58665+#endif
58666+
58667+#ifdef CONFIG_PAX_EMUTRAMP
58668+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
58669+ pax_flags |= MF_PAX_EMUTRAMP;
58670+#endif
58671+
58672+#ifdef CONFIG_PAX_MPROTECT
58673+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
58674+ pax_flags |= MF_PAX_MPROTECT;
58675+#endif
58676+
58677+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58678+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
58679+ pax_flags |= MF_PAX_RANDMMAP;
58680+#endif
58681+
58682+ return pax_flags;
58683+}
58684+#endif
58685+
58686+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58687+static unsigned long pax_parse_defaults(void)
58688+{
58689+ unsigned long pax_flags = 0UL;
58690+
58691+#ifdef CONFIG_PAX_SOFTMODE
58692+ if (pax_softmode)
58693+ return pax_flags;
58694+#endif
58695+
58696+#ifdef CONFIG_PAX_PAGEEXEC
58697+ pax_flags |= MF_PAX_PAGEEXEC;
58698+#endif
58699+
58700+#ifdef CONFIG_PAX_SEGMEXEC
58701+ pax_flags |= MF_PAX_SEGMEXEC;
58702+#endif
58703+
58704+#ifdef CONFIG_PAX_MPROTECT
58705+ pax_flags |= MF_PAX_MPROTECT;
58706+#endif
58707+
58708+#ifdef CONFIG_PAX_RANDMMAP
58709+ if (randomize_va_space)
58710+ pax_flags |= MF_PAX_RANDMMAP;
58711+#endif
58712+
58713+ return pax_flags;
58714+}
58715+
58716+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
58717+{
58718+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
58719+
58720+#ifdef CONFIG_PAX_EI_PAX
58721+
58722+#ifdef CONFIG_PAX_SOFTMODE
58723+ if (pax_softmode)
58724+ return pax_flags;
58725+#endif
58726+
58727+ pax_flags = 0UL;
58728+
58729+#ifdef CONFIG_PAX_PAGEEXEC
58730+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
58731+ pax_flags |= MF_PAX_PAGEEXEC;
58732+#endif
58733+
58734+#ifdef CONFIG_PAX_SEGMEXEC
58735+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
58736+ pax_flags |= MF_PAX_SEGMEXEC;
58737+#endif
58738+
58739+#ifdef CONFIG_PAX_EMUTRAMP
58740+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
58741+ pax_flags |= MF_PAX_EMUTRAMP;
58742+#endif
58743+
58744+#ifdef CONFIG_PAX_MPROTECT
58745+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
58746+ pax_flags |= MF_PAX_MPROTECT;
58747+#endif
58748+
58749+#ifdef CONFIG_PAX_ASLR
58750+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
58751+ pax_flags |= MF_PAX_RANDMMAP;
58752+#endif
58753+
58754+#endif
58755+
58756+ return pax_flags;
58757+
58758+}
58759+
58760+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
58761+{
58762+
58763+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58764+ unsigned long i;
58765+
58766+ for (i = 0UL; i < elf_ex->e_phnum; i++)
58767+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
58768+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
58769+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
58770+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
58771+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
58772+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
58773+ return PAX_PARSE_FLAGS_FALLBACK;
58774+
58775+#ifdef CONFIG_PAX_SOFTMODE
58776+ if (pax_softmode)
58777+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
58778+ else
58779+#endif
58780+
58781+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
58782+ break;
58783+ }
58784+#endif
58785+
58786+ return PAX_PARSE_FLAGS_FALLBACK;
58787+}
58788+
58789+static unsigned long pax_parse_xattr_pax(struct file * const file)
58790+{
58791+
58792+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
58793+ ssize_t xattr_size, i;
58794+ unsigned char xattr_value[sizeof("pemrs") - 1];
58795+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
58796+
58797+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
58798+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
58799+ return PAX_PARSE_FLAGS_FALLBACK;
58800+
58801+ for (i = 0; i < xattr_size; i++)
58802+ switch (xattr_value[i]) {
58803+ default:
58804+ return PAX_PARSE_FLAGS_FALLBACK;
58805+
58806+#define parse_flag(option1, option2, flag) \
58807+ case option1: \
58808+ if (pax_flags_hardmode & MF_PAX_##flag) \
58809+ return PAX_PARSE_FLAGS_FALLBACK;\
58810+ pax_flags_hardmode |= MF_PAX_##flag; \
58811+ break; \
58812+ case option2: \
58813+ if (pax_flags_softmode & MF_PAX_##flag) \
58814+ return PAX_PARSE_FLAGS_FALLBACK;\
58815+ pax_flags_softmode |= MF_PAX_##flag; \
58816+ break;
58817+
58818+ parse_flag('p', 'P', PAGEEXEC);
58819+ parse_flag('e', 'E', EMUTRAMP);
58820+ parse_flag('m', 'M', MPROTECT);
58821+ parse_flag('r', 'R', RANDMMAP);
58822+ parse_flag('s', 'S', SEGMEXEC);
58823+
58824+#undef parse_flag
58825+ }
58826+
58827+ if (pax_flags_hardmode & pax_flags_softmode)
58828+ return PAX_PARSE_FLAGS_FALLBACK;
58829+
58830+#ifdef CONFIG_PAX_SOFTMODE
58831+ if (pax_softmode)
58832+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
58833+ else
58834+#endif
58835+
58836+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
58837+#else
58838+ return PAX_PARSE_FLAGS_FALLBACK;
58839+#endif
58840+
58841+}
58842+
58843+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
58844+{
58845+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
58846+
58847+ pax_flags = pax_parse_defaults();
58848+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
58849+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
58850+ xattr_pax_flags = pax_parse_xattr_pax(file);
58851+
58852+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
58853+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
58854+ pt_pax_flags != xattr_pax_flags)
58855+ return -EINVAL;
58856+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58857+ pax_flags = xattr_pax_flags;
58858+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58859+ pax_flags = pt_pax_flags;
58860+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58861+ pax_flags = ei_pax_flags;
58862+
58863+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
58864+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58865+ if ((__supported_pte_mask & _PAGE_NX))
58866+ pax_flags &= ~MF_PAX_SEGMEXEC;
58867+ else
58868+ pax_flags &= ~MF_PAX_PAGEEXEC;
58869+ }
58870+#endif
58871+
58872+ if (0 > pax_check_flags(&pax_flags))
58873+ return -EINVAL;
58874+
58875+ current->mm->pax_flags = pax_flags;
58876+ return 0;
58877+}
58878+#endif
58879+
58880 /*
58881 * These are the functions used to load ELF style executables and shared
58882 * libraries. There is no binary dependent code anywhere else.
58883@@ -647,6 +1009,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
58884 {
58885 unsigned long random_variable = 0;
58886
58887+#ifdef CONFIG_PAX_RANDUSTACK
58888+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
58889+ return stack_top - current->mm->delta_stack;
58890+#endif
58891+
58892 if ((current->flags & PF_RANDOMIZE) &&
58893 !(current->personality & ADDR_NO_RANDOMIZE)) {
58894 random_variable = (unsigned long) get_random_int();
58895@@ -666,7 +1033,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
58896 unsigned long load_addr = 0, load_bias = 0;
58897 int load_addr_set = 0;
58898 char * elf_interpreter = NULL;
58899- unsigned long error;
58900+ unsigned long error = 0;
58901 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
58902 unsigned long elf_bss, elf_brk;
58903 int retval, i;
58904@@ -681,6 +1048,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
58905 struct elfhdr interp_elf_ex;
58906 } *loc;
58907 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
58908+ unsigned long pax_task_size;
58909
58910 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
58911 if (!loc) {
58912@@ -839,6 +1207,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
58913 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
58914 may depend on the personality. */
58915 SET_PERSONALITY2(loc->elf_ex, &arch_state);
58916+
58917+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58918+ current->mm->pax_flags = 0UL;
58919+#endif
58920+
58921+#ifdef CONFIG_PAX_DLRESOLVE
58922+ current->mm->call_dl_resolve = 0UL;
58923+#endif
58924+
58925+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
58926+ current->mm->call_syscall = 0UL;
58927+#endif
58928+
58929+#ifdef CONFIG_PAX_ASLR
58930+ current->mm->delta_mmap = 0UL;
58931+ current->mm->delta_stack = 0UL;
58932+#endif
58933+
58934+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58935+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
58936+ send_sig(SIGKILL, current, 0);
58937+ goto out_free_dentry;
58938+ }
58939+#endif
58940+
58941+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
58942+ pax_set_initial_flags(bprm);
58943+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
58944+ if (pax_set_initial_flags_func)
58945+ (pax_set_initial_flags_func)(bprm);
58946+#endif
58947+
58948+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
58949+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
58950+ current->mm->context.user_cs_limit = PAGE_SIZE;
58951+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
58952+ }
58953+#endif
58954+
58955+#ifdef CONFIG_PAX_SEGMEXEC
58956+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
58957+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
58958+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
58959+ pax_task_size = SEGMEXEC_TASK_SIZE;
58960+ current->mm->def_flags |= VM_NOHUGEPAGE;
58961+ } else
58962+#endif
58963+
58964+ pax_task_size = TASK_SIZE;
58965+
58966+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
58967+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58968+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
58969+ put_cpu();
58970+ }
58971+#endif
58972+
58973+#ifdef CONFIG_PAX_ASLR
58974+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
58975+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
58976+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
58977+ }
58978+#endif
58979+
58980+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
58981+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58982+ executable_stack = EXSTACK_DISABLE_X;
58983+ current->personality &= ~READ_IMPLIES_EXEC;
58984+ } else
58985+#endif
58986+
58987 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
58988 current->personality |= READ_IMPLIES_EXEC;
58989
58990@@ -924,6 +1363,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
58991 #else
58992 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
58993 #endif
58994+
58995+#ifdef CONFIG_PAX_RANDMMAP
58996+ /* PaX: randomize base address at the default exe base if requested */
58997+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
58998+#ifdef CONFIG_SPARC64
58999+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
59000+#else
59001+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
59002+#endif
59003+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
59004+ elf_flags |= MAP_FIXED;
59005+ }
59006+#endif
59007+
59008 }
59009
59010 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
59011@@ -955,9 +1408,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
59012 * allowed task size. Note that p_filesz must always be
59013 * <= p_memsz so it is only necessary to check p_memsz.
59014 */
59015- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
59016- elf_ppnt->p_memsz > TASK_SIZE ||
59017- TASK_SIZE - elf_ppnt->p_memsz < k) {
59018+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
59019+ elf_ppnt->p_memsz > pax_task_size ||
59020+ pax_task_size - elf_ppnt->p_memsz < k) {
59021 /* set_brk can never work. Avoid overflows. */
59022 retval = -EINVAL;
59023 goto out_free_dentry;
59024@@ -993,16 +1446,43 @@ static int load_elf_binary(struct linux_binprm *bprm)
59025 if (retval)
59026 goto out_free_dentry;
59027 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
59028- retval = -EFAULT; /* Nobody gets to see this, but.. */
59029- goto out_free_dentry;
59030+ /*
59031+ * This bss-zeroing can fail if the ELF
59032+ * file specifies odd protections. So
59033+ * we don't check the return value
59034+ */
59035 }
59036
59037+#ifdef CONFIG_PAX_RANDMMAP
59038+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
59039+ unsigned long start, size, flags;
59040+ vm_flags_t vm_flags;
59041+
59042+ start = ELF_PAGEALIGN(elf_brk);
59043+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
59044+ flags = MAP_FIXED | MAP_PRIVATE;
59045+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
59046+
59047+ down_write(&current->mm->mmap_sem);
59048+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
59049+ retval = -ENOMEM;
59050+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
59051+// if (current->personality & ADDR_NO_RANDOMIZE)
59052+// vm_flags |= VM_READ | VM_MAYREAD;
59053+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
59054+ retval = IS_ERR_VALUE(start) ? start : 0;
59055+ }
59056+ up_write(&current->mm->mmap_sem);
59057+ if (retval == 0)
59058+ retval = set_brk(start + size, start + size + PAGE_SIZE);
59059+ if (retval < 0)
59060+ goto out_free_dentry;
59061+ }
59062+#endif
59063+
59064 if (elf_interpreter) {
59065- unsigned long interp_map_addr = 0;
59066-
59067 elf_entry = load_elf_interp(&loc->interp_elf_ex,
59068 interpreter,
59069- &interp_map_addr,
59070 load_bias, interp_elf_phdata);
59071 if (!IS_ERR((void *)elf_entry)) {
59072 /*
59073@@ -1230,7 +1710,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
59074 * Decide what to dump of a segment, part, all or none.
59075 */
59076 static unsigned long vma_dump_size(struct vm_area_struct *vma,
59077- unsigned long mm_flags)
59078+ unsigned long mm_flags, long signr)
59079 {
59080 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
59081
59082@@ -1268,7 +1748,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
59083 if (vma->vm_file == NULL)
59084 return 0;
59085
59086- if (FILTER(MAPPED_PRIVATE))
59087+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
59088 goto whole;
59089
59090 /*
59091@@ -1475,9 +1955,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
59092 {
59093 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
59094 int i = 0;
59095- do
59096+ do {
59097 i += 2;
59098- while (auxv[i - 2] != AT_NULL);
59099+ } while (auxv[i - 2] != AT_NULL);
59100 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
59101 }
59102
59103@@ -1486,7 +1966,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
59104 {
59105 mm_segment_t old_fs = get_fs();
59106 set_fs(KERNEL_DS);
59107- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
59108+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
59109 set_fs(old_fs);
59110 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
59111 }
59112@@ -2206,7 +2686,7 @@ static int elf_core_dump(struct coredump_params *cprm)
59113 vma = next_vma(vma, gate_vma)) {
59114 unsigned long dump_size;
59115
59116- dump_size = vma_dump_size(vma, cprm->mm_flags);
59117+ dump_size = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
59118 vma_filesz[i++] = dump_size;
59119 vma_data_size += dump_size;
59120 }
59121@@ -2314,6 +2794,167 @@ out:
59122
59123 #endif /* CONFIG_ELF_CORE */
59124
59125+#ifdef CONFIG_PAX_MPROTECT
59126+/* PaX: non-PIC ELF libraries need relocations on their executable segments
59127+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
59128+ * we'll remove VM_MAYWRITE for good on RELRO segments.
59129+ *
59130+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
59131+ * basis because we want to allow the common case and not the special ones.
59132+ */
59133+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
59134+{
59135+ struct elfhdr elf_h;
59136+ struct elf_phdr elf_p;
59137+ unsigned long i;
59138+ unsigned long oldflags;
59139+ bool is_textrel_rw, is_textrel_rx, is_relro;
59140+
59141+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
59142+ return;
59143+
59144+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
59145+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
59146+
59147+#ifdef CONFIG_PAX_ELFRELOCS
59148+ /* possible TEXTREL */
59149+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
59150+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
59151+#else
59152+ is_textrel_rw = false;
59153+ is_textrel_rx = false;
59154+#endif
59155+
59156+ /* possible RELRO */
59157+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
59158+
59159+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
59160+ return;
59161+
59162+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
59163+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
59164+
59165+#ifdef CONFIG_PAX_ETEXECRELOCS
59166+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
59167+#else
59168+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
59169+#endif
59170+
59171+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
59172+ !elf_check_arch(&elf_h) ||
59173+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
59174+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
59175+ return;
59176+
59177+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59178+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59179+ return;
59180+ switch (elf_p.p_type) {
59181+ case PT_DYNAMIC:
59182+ if (!is_textrel_rw && !is_textrel_rx)
59183+ continue;
59184+ i = 0UL;
59185+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
59186+ elf_dyn dyn;
59187+
59188+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
59189+ break;
59190+ if (dyn.d_tag == DT_NULL)
59191+ break;
59192+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
59193+ gr_log_textrel(vma);
59194+ if (is_textrel_rw)
59195+ vma->vm_flags |= VM_MAYWRITE;
59196+ else
59197+ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
59198+ vma->vm_flags &= ~VM_MAYWRITE;
59199+ break;
59200+ }
59201+ i++;
59202+ }
59203+ is_textrel_rw = false;
59204+ is_textrel_rx = false;
59205+ continue;
59206+
59207+ case PT_GNU_RELRO:
59208+ if (!is_relro)
59209+ continue;
59210+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
59211+ vma->vm_flags &= ~VM_MAYWRITE;
59212+ is_relro = false;
59213+ continue;
59214+
59215+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59216+ case PT_PAX_FLAGS: {
59217+ const char *msg_mprotect = "", *msg_emutramp = "";
59218+ char *buffer_lib, *buffer_exe;
59219+
59220+ if (elf_p.p_flags & PF_NOMPROTECT)
59221+ msg_mprotect = "MPROTECT disabled";
59222+
59223+#ifdef CONFIG_PAX_EMUTRAMP
59224+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
59225+ msg_emutramp = "EMUTRAMP enabled";
59226+#endif
59227+
59228+ if (!msg_mprotect[0] && !msg_emutramp[0])
59229+ continue;
59230+
59231+ if (!printk_ratelimit())
59232+ continue;
59233+
59234+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
59235+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
59236+ if (buffer_lib && buffer_exe) {
59237+ char *path_lib, *path_exe;
59238+
59239+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
59240+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
59241+
59242+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
59243+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
59244+
59245+ }
59246+ free_page((unsigned long)buffer_exe);
59247+ free_page((unsigned long)buffer_lib);
59248+ continue;
59249+ }
59250+#endif
59251+
59252+ }
59253+ }
59254+}
59255+#endif
59256+
59257+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59258+
59259+extern int grsec_enable_log_rwxmaps;
59260+
59261+static void elf_handle_mmap(struct file *file)
59262+{
59263+ struct elfhdr elf_h;
59264+ struct elf_phdr elf_p;
59265+ unsigned long i;
59266+
59267+ if (!grsec_enable_log_rwxmaps)
59268+ return;
59269+
59270+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
59271+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
59272+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
59273+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
59274+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
59275+ return;
59276+
59277+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59278+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59279+ return;
59280+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
59281+ gr_log_ptgnustack(file);
59282+ }
59283+}
59284+#endif
59285+
59286 static int __init init_elf_binfmt(void)
59287 {
59288 register_binfmt(&elf_format);
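pax_parse_pax_flags() above merges four flag sources with a fixed precedence: built-in defaults, the legacy EI_PAX header bits, a PT_PAX_FLAGS program header, and the extended-attribute marking; an explicit PT marking and an explicit xattr marking must agree, and the xattr wins when both are present. A condensed sketch of that resolution order, where FALLBACK plays the role of PAX_PARSE_FLAGS_FALLBACK, i.e. "source absent":

    #define FALLBACK (~0UL)

    /* Returns -1 on a PT/xattr conflict, else selects the highest-
     * priority source that is actually present. */
    static long resolve_pax_flags(unsigned long defaults, unsigned long ei,
                                  unsigned long pt, unsigned long xattr,
                                  unsigned long *out)
    {
            if (pt != FALLBACK && xattr != FALLBACK && pt != xattr)
                    return -1;
            if (xattr != FALLBACK)
                    *out = xattr;
            else if (pt != FALLBACK)
                    *out = pt;
            else if (ei != FALLBACK)
                    *out = ei;
            else
                    *out = defaults;
            return 0;
    }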
59289diff --git a/fs/block_dev.c b/fs/block_dev.c
59290index b48c41b..e070416 100644
59291--- a/fs/block_dev.c
59292+++ b/fs/block_dev.c
59293@@ -703,7 +703,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
59294 else if (bdev->bd_contains == bdev)
59295 return true; /* is a whole device which isn't held */
59296
59297- else if (whole->bd_holder == bd_may_claim)
59298+ else if (whole->bd_holder == (void *)bd_may_claim)
59299 return true; /* is a partition of a device that is being partitioned */
59300 else if (whole->bd_holder != NULL)
59301 return false; /* is a partition of a held device */
59302diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
59303index f54511d..58acdec 100644
59304--- a/fs/btrfs/ctree.c
59305+++ b/fs/btrfs/ctree.c
59306@@ -1173,9 +1173,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
59307 free_extent_buffer(buf);
59308 add_root_to_dirty_list(root);
59309 } else {
59310- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
59311- parent_start = parent->start;
59312- else
59313+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
59314+ if (parent)
59315+ parent_start = parent->start;
59316+ else
59317+ parent_start = 0;
59318+ } else
59319 parent_start = 0;
59320
59321 WARN_ON(trans->transid != btrfs_header_generation(parent));
59322diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
59323index de4e70f..b41dc45 100644
59324--- a/fs/btrfs/delayed-inode.c
59325+++ b/fs/btrfs/delayed-inode.c
59326@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
59327
59328 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
59329 {
59330- int seq = atomic_inc_return(&delayed_root->items_seq);
59331+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
59332 if ((atomic_dec_return(&delayed_root->items) <
59333 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
59334 waitqueue_active(&delayed_root->wait))
59335@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
59336
59337 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
59338 {
59339- int val = atomic_read(&delayed_root->items_seq);
59340+ int val = atomic_read_unchecked(&delayed_root->items_seq);
59341
59342 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
59343 return 1;
59344@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
59345 int seq;
59346 int ret;
59347
59348- seq = atomic_read(&delayed_root->items_seq);
59349+ seq = atomic_read_unchecked(&delayed_root->items_seq);
59350
59351 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
59352 if (ret)
59353diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
59354index f70119f..ab5894d 100644
59355--- a/fs/btrfs/delayed-inode.h
59356+++ b/fs/btrfs/delayed-inode.h
59357@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
59358 */
59359 struct list_head prepare_list;
59360 atomic_t items; /* for delayed items */
59361- atomic_t items_seq; /* for delayed items */
59362+ atomic_unchecked_t items_seq; /* for delayed items */
59363 int nodes; /* for delayed nodes */
59364 wait_queue_head_t wait;
59365 };
59366@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
59367 struct btrfs_delayed_root *delayed_root)
59368 {
59369 atomic_set(&delayed_root->items, 0);
59370- atomic_set(&delayed_root->items_seq, 0);
59371+ atomic_set_unchecked(&delayed_root->items_seq, 0);
59372 delayed_root->nodes = 0;
59373 spin_lock_init(&delayed_root->lock);
59374 init_waitqueue_head(&delayed_root->wait);
59375diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
59376index 6f49b28..483410f 100644
59377--- a/fs/btrfs/super.c
59378+++ b/fs/btrfs/super.c
59379@@ -271,7 +271,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
59380 function, line, errstr);
59381 return;
59382 }
59383- ACCESS_ONCE(trans->transaction->aborted) = errno;
59384+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
59385 /* Wake up anybody who may be waiting on this transaction */
59386 wake_up(&root->fs_info->transaction_wait);
59387 wake_up(&root->fs_info->transaction_blocked_wait);
59388diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
59389index 92db3f6..898a561 100644
59390--- a/fs/btrfs/sysfs.c
59391+++ b/fs/btrfs/sysfs.c
59392@@ -472,7 +472,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
59393 for (set = 0; set < FEAT_MAX; set++) {
59394 int i;
59395 struct attribute *attrs[2];
59396- struct attribute_group agroup = {
59397+ attribute_group_no_const agroup = {
59398 .name = "features",
59399 .attrs = attrs,
59400 };
59401diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
59402index 2299bfd..4098e72 100644
59403--- a/fs/btrfs/tests/free-space-tests.c
59404+++ b/fs/btrfs/tests/free-space-tests.c
59405@@ -463,7 +463,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
59406 * extent entry.
59407 */
59408 use_bitmap_op = cache->free_space_ctl->op->use_bitmap;
59409- cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
59410+ pax_open_kernel();
59411+ *(void **)&cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
59412+ pax_close_kernel();
59413
59414 /*
59415 * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
59416@@ -870,7 +872,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
59417 if (ret)
59418 return ret;
59419
59420- cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
59421+ pax_open_kernel();
59422+ *(void **)&cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
59423+ pax_close_kernel();
59424 __btrfs_remove_free_space_cache(cache->free_space_ctl);
59425
59426 return 0;
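The free-space test hunk shows the standard workaround for the constify plugin: ops tables become read-only at runtime, so a deliberate override must cast away the compiler's const view and bracket the store with pax_open_kernel()/pax_close_kernel(), which temporarily lift write protection. The shape of the idiom, with the primitives shown as comments and illustrative struct and function names:

    struct free_space_op {
            int (*use_bitmap)(void *ctl);
    };

    static void swap_use_bitmap(struct free_space_op *op, int (*fn)(void *))
    {
            /* pax_open_kernel();   make the normally read-only page writable */
            *(void **)&op->use_bitmap = (void *)fn;
            /* pax_close_kernel();  seal it again immediately */
    }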
59427diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
59428index 154990c..d0cf699 100644
59429--- a/fs/btrfs/tree-log.h
59430+++ b/fs/btrfs/tree-log.h
59431@@ -43,7 +43,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
59432 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
59433 struct btrfs_trans_handle *trans)
59434 {
59435- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
59436+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
59437 }
59438
59439 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
59440diff --git a/fs/buffer.c b/fs/buffer.c
59441index 20805db..2e8fc69 100644
59442--- a/fs/buffer.c
59443+++ b/fs/buffer.c
59444@@ -3417,7 +3417,7 @@ void __init buffer_init(void)
59445 bh_cachep = kmem_cache_create("buffer_head",
59446 sizeof(struct buffer_head), 0,
59447 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
59448- SLAB_MEM_SPREAD),
59449+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
59450 NULL);
59451
59452 /*
59453diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
59454index fbb08e9..0fda764 100644
59455--- a/fs/cachefiles/bind.c
59456+++ b/fs/cachefiles/bind.c
59457@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
59458 args);
59459
59460 /* start by checking things over */
59461- ASSERT(cache->fstop_percent >= 0 &&
59462- cache->fstop_percent < cache->fcull_percent &&
59463+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
59464 cache->fcull_percent < cache->frun_percent &&
59465 cache->frun_percent < 100);
59466
59467- ASSERT(cache->bstop_percent >= 0 &&
59468- cache->bstop_percent < cache->bcull_percent &&
59469+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
59470 cache->bcull_percent < cache->brun_percent &&
59471 cache->brun_percent < 100);
59472
59473diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
59474index ce1b115..4a6852c 100644
59475--- a/fs/cachefiles/daemon.c
59476+++ b/fs/cachefiles/daemon.c
59477@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
59478 if (n > buflen)
59479 return -EMSGSIZE;
59480
59481- if (copy_to_user(_buffer, buffer, n) != 0)
59482+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
59483 return -EFAULT;
59484
59485 return n;
59486@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
59487 if (test_bit(CACHEFILES_DEAD, &cache->flags))
59488 return -EIO;
59489
59490- if (datalen < 0 || datalen > PAGE_SIZE - 1)
59491+ if (datalen > PAGE_SIZE - 1)
59492 return -EOPNOTSUPP;
59493
59494 /* drag the command string into the kernel so we can parse it */
59495@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
59496 if (args[0] != '%' || args[1] != '\0')
59497 return -EINVAL;
59498
59499- if (fstop < 0 || fstop >= cache->fcull_percent)
59500+ if (fstop >= cache->fcull_percent)
59501 return cachefiles_daemon_range_error(cache, args);
59502
59503 cache->fstop_percent = fstop;
59504@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
59505 if (args[0] != '%' || args[1] != '\0')
59506 return -EINVAL;
59507
59508- if (bstop < 0 || bstop >= cache->bcull_percent)
59509+ if (bstop >= cache->bcull_percent)
59510 return cachefiles_daemon_range_error(cache, args);
59511
59512 cache->bstop_percent = bstop;
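The cachefiles hunks drop comparisons that became tautological once the fields were unsigned (datalen < 0, fstop < 0, bstop_percent >= 0) and, in the read path, additionally clamp the copy to the size of the kernel-side buffer rather than trusting n alone. The read-side shape in a userspace sketch, with memcpy standing in for copy_to_user:

    #include <string.h>

    static long daemon_read(char *dst, unsigned long buflen,
                            const char *kbuf, unsigned long kbufsize,
                            unsigned long n)
    {
            if (n > buflen)
                    return -1;      /* -EMSGSIZE in the kernel */
            if (n > kbufsize)       /* the added sizeof(buffer) bound */
                    return -2;      /* rejected like a faulting copy */
            memcpy(dst, kbuf, n);
            return (long)n;
    }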
59513diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
59514index 8c52472..c4e3a69 100644
59515--- a/fs/cachefiles/internal.h
59516+++ b/fs/cachefiles/internal.h
59517@@ -66,7 +66,7 @@ struct cachefiles_cache {
59518 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
59519 struct rb_root active_nodes; /* active nodes (can't be culled) */
59520 rwlock_t active_lock; /* lock for active_nodes */
59521- atomic_t gravecounter; /* graveyard uniquifier */
59522+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
59523 unsigned frun_percent; /* when to stop culling (% files) */
59524 unsigned fcull_percent; /* when to start culling (% files) */
59525 unsigned fstop_percent; /* when to stop allocating (% files) */
59526@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
59527 * proc.c
59528 */
59529 #ifdef CONFIG_CACHEFILES_HISTOGRAM
59530-extern atomic_t cachefiles_lookup_histogram[HZ];
59531-extern atomic_t cachefiles_mkdir_histogram[HZ];
59532-extern atomic_t cachefiles_create_histogram[HZ];
59533+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59534+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59535+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
59536
59537 extern int __init cachefiles_proc_init(void);
59538 extern void cachefiles_proc_cleanup(void);
59539 static inline
59540-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
59541+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
59542 {
59543 unsigned long jif = jiffies - start_jif;
59544 if (jif >= HZ)
59545 jif = HZ - 1;
59546- atomic_inc(&histogram[jif]);
59547+ atomic_inc_unchecked(&histogram[jif]);
59548 }
59549
59550 #else
59551diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
59552index 7f8e83f..8951aa4 100644
59553--- a/fs/cachefiles/namei.c
59554+++ b/fs/cachefiles/namei.c
59555@@ -309,7 +309,7 @@ try_again:
59556 /* first step is to make up a grave dentry in the graveyard */
59557 sprintf(nbuffer, "%08x%08x",
59558 (uint32_t) get_seconds(),
59559- (uint32_t) atomic_inc_return(&cache->gravecounter));
59560+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
59561
59562 /* do the multiway lock magic */
59563 trap = lock_rename(cache->graveyard, dir);
59564diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
59565index eccd339..4c1d995 100644
59566--- a/fs/cachefiles/proc.c
59567+++ b/fs/cachefiles/proc.c
59568@@ -14,9 +14,9 @@
59569 #include <linux/seq_file.h>
59570 #include "internal.h"
59571
59572-atomic_t cachefiles_lookup_histogram[HZ];
59573-atomic_t cachefiles_mkdir_histogram[HZ];
59574-atomic_t cachefiles_create_histogram[HZ];
59575+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59576+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59577+atomic_unchecked_t cachefiles_create_histogram[HZ];
59578
59579 /*
59580 * display the latency histogram
59581@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
59582 return 0;
59583 default:
59584 index = (unsigned long) v - 3;
59585- x = atomic_read(&cachefiles_lookup_histogram[index]);
59586- y = atomic_read(&cachefiles_mkdir_histogram[index]);
59587- z = atomic_read(&cachefiles_create_histogram[index]);
59588+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
59589+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
59590+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
59591 if (x == 0 && y == 0 && z == 0)
59592 return 0;
59593
59594diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
59595index c241603..56bae60 100644
59596--- a/fs/ceph/dir.c
59597+++ b/fs/ceph/dir.c
59598@@ -129,6 +129,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
59599 struct dentry *dentry, *last;
59600 struct ceph_dentry_info *di;
59601 int err = 0;
59602+ char d_name[DNAME_INLINE_LEN];
59603+ const unsigned char *name;
59604
59605 /* claim ref on last dentry we returned */
59606 last = fi->dentry;
59607@@ -192,7 +194,12 @@ more:
59608
59609 dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
59610 dentry, dentry, dentry->d_inode);
59611- if (!dir_emit(ctx, dentry->d_name.name,
59612+ name = dentry->d_name.name;
59613+ if (name == dentry->d_iname) {
59614+ memcpy(d_name, name, dentry->d_name.len);
59615+ name = d_name;
59616+ }
59617+ if (!dir_emit(ctx, name,
59618 dentry->d_name.len,
59619 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
59620 dentry->d_inode->i_mode >> 12)) {
59621@@ -250,7 +257,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
59622 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
59623 struct ceph_mds_client *mdsc = fsc->mdsc;
59624 unsigned frag = fpos_frag(ctx->pos);
59625- int off = fpos_off(ctx->pos);
59626+ unsigned int off = fpos_off(ctx->pos);
59627 int err;
59628 u32 ftype;
59629 struct ceph_mds_reply_info_parsed *rinfo;
59630diff --git a/fs/ceph/super.c b/fs/ceph/super.c
59631index 50f06cd..c7eba3e 100644
59632--- a/fs/ceph/super.c
59633+++ b/fs/ceph/super.c
59634@@ -896,7 +896,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
59635 /*
59636 * construct our own bdi so we can control readahead, etc.
59637 */
59638-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
59639+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
59640
59641 static int ceph_register_bdi(struct super_block *sb,
59642 struct ceph_fs_client *fsc)
59643@@ -913,7 +913,7 @@ static int ceph_register_bdi(struct super_block *sb,
59644 default_backing_dev_info.ra_pages;
59645
59646 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
59647- atomic_long_inc_return(&bdi_seq));
59648+ atomic_long_inc_return_unchecked(&bdi_seq));
59649 if (!err)
59650 sb->s_bdi = &fsc->backing_dev_info;
59651 return err;
59652diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
59653index 7febcf2..62a5721 100644
59654--- a/fs/cifs/cifs_debug.c
59655+++ b/fs/cifs/cifs_debug.c
59656@@ -269,8 +269,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59657
59658 if (strtobool(&c, &bv) == 0) {
59659 #ifdef CONFIG_CIFS_STATS2
59660- atomic_set(&totBufAllocCount, 0);
59661- atomic_set(&totSmBufAllocCount, 0);
59662+ atomic_set_unchecked(&totBufAllocCount, 0);
59663+ atomic_set_unchecked(&totSmBufAllocCount, 0);
59664 #endif /* CONFIG_CIFS_STATS2 */
59665 spin_lock(&cifs_tcp_ses_lock);
59666 list_for_each(tmp1, &cifs_tcp_ses_list) {
59667@@ -283,7 +283,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59668 tcon = list_entry(tmp3,
59669 struct cifs_tcon,
59670 tcon_list);
59671- atomic_set(&tcon->num_smbs_sent, 0);
59672+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
59673 if (server->ops->clear_stats)
59674 server->ops->clear_stats(tcon);
59675 }
59676@@ -315,8 +315,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59677 smBufAllocCount.counter, cifs_min_small);
59678 #ifdef CONFIG_CIFS_STATS2
59679 seq_printf(m, "Total Large %d Small %d Allocations\n",
59680- atomic_read(&totBufAllocCount),
59681- atomic_read(&totSmBufAllocCount));
59682+ atomic_read_unchecked(&totBufAllocCount),
59683+ atomic_read_unchecked(&totSmBufAllocCount));
59684 #endif /* CONFIG_CIFS_STATS2 */
59685
59686 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
59687@@ -345,7 +345,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59688 if (tcon->need_reconnect)
59689 seq_puts(m, "\tDISCONNECTED ");
59690 seq_printf(m, "\nSMBs: %d",
59691- atomic_read(&tcon->num_smbs_sent));
59692+ atomic_read_unchecked(&tcon->num_smbs_sent));
59693 if (server->ops->print_stats)
59694 server->ops->print_stats(m, tcon);
59695 }
59696diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
59697index d72fe37..ded5511 100644
59698--- a/fs/cifs/cifsfs.c
59699+++ b/fs/cifs/cifsfs.c
59700@@ -1092,7 +1092,7 @@ cifs_init_request_bufs(void)
59701 */
59702 cifs_req_cachep = kmem_cache_create("cifs_request",
59703 CIFSMaxBufSize + max_hdr_size, 0,
59704- SLAB_HWCACHE_ALIGN, NULL);
59705+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
59706 if (cifs_req_cachep == NULL)
59707 return -ENOMEM;
59708
59709@@ -1119,7 +1119,7 @@ cifs_init_request_bufs(void)
59710 efficient to alloc 1 per page off the slab compared to 17K (5page)
59711 alloc of large cifs buffers even when page debugging is on */
59712 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
59713- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
59714+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
59715 NULL);
59716 if (cifs_sm_req_cachep == NULL) {
59717 mempool_destroy(cifs_req_poolp);
59718@@ -1204,8 +1204,8 @@ init_cifs(void)
59719 atomic_set(&bufAllocCount, 0);
59720 atomic_set(&smBufAllocCount, 0);
59721 #ifdef CONFIG_CIFS_STATS2
59722- atomic_set(&totBufAllocCount, 0);
59723- atomic_set(&totSmBufAllocCount, 0);
59724+ atomic_set_unchecked(&totBufAllocCount, 0);
59725+ atomic_set_unchecked(&totSmBufAllocCount, 0);
59726 #endif /* CONFIG_CIFS_STATS2 */
59727
59728 atomic_set(&midCount, 0);
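
Both kmem_cache_create() conversions above add SLAB_USERCOPY, the PAX_USERCOPY whitelist flag: with that hardening enabled, copy_to_user()/copy_from_user() on slab memory is refused unless the object's cache opted in, and the cifs request buffers legitimately carry user-bound data. A kernel-context sketch of opting a cache in; the cache name and size are illustrative, and this is a sketch rather than standalone code:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

static struct kmem_cache *example_rq_cachep;

static int __init example_bufs_init(void)
{
        /* SLAB_USERCOPY whitelists this cache for user copies. */
        example_rq_cachep = kmem_cache_create("example_rq", 4096, 0,
                                              SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
                                              NULL);
        return example_rq_cachep ? 0 : -ENOMEM;
}
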
59729diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
59730index 22b289a..bbbba08 100644
59731--- a/fs/cifs/cifsglob.h
59732+++ b/fs/cifs/cifsglob.h
59733@@ -823,35 +823,35 @@ struct cifs_tcon {
59734 __u16 Flags; /* optional support bits */
59735 enum statusEnum tidStatus;
59736 #ifdef CONFIG_CIFS_STATS
59737- atomic_t num_smbs_sent;
59738+ atomic_unchecked_t num_smbs_sent;
59739 union {
59740 struct {
59741- atomic_t num_writes;
59742- atomic_t num_reads;
59743- atomic_t num_flushes;
59744- atomic_t num_oplock_brks;
59745- atomic_t num_opens;
59746- atomic_t num_closes;
59747- atomic_t num_deletes;
59748- atomic_t num_mkdirs;
59749- atomic_t num_posixopens;
59750- atomic_t num_posixmkdirs;
59751- atomic_t num_rmdirs;
59752- atomic_t num_renames;
59753- atomic_t num_t2renames;
59754- atomic_t num_ffirst;
59755- atomic_t num_fnext;
59756- atomic_t num_fclose;
59757- atomic_t num_hardlinks;
59758- atomic_t num_symlinks;
59759- atomic_t num_locks;
59760- atomic_t num_acl_get;
59761- atomic_t num_acl_set;
59762+ atomic_unchecked_t num_writes;
59763+ atomic_unchecked_t num_reads;
59764+ atomic_unchecked_t num_flushes;
59765+ atomic_unchecked_t num_oplock_brks;
59766+ atomic_unchecked_t num_opens;
59767+ atomic_unchecked_t num_closes;
59768+ atomic_unchecked_t num_deletes;
59769+ atomic_unchecked_t num_mkdirs;
59770+ atomic_unchecked_t num_posixopens;
59771+ atomic_unchecked_t num_posixmkdirs;
59772+ atomic_unchecked_t num_rmdirs;
59773+ atomic_unchecked_t num_renames;
59774+ atomic_unchecked_t num_t2renames;
59775+ atomic_unchecked_t num_ffirst;
59776+ atomic_unchecked_t num_fnext;
59777+ atomic_unchecked_t num_fclose;
59778+ atomic_unchecked_t num_hardlinks;
59779+ atomic_unchecked_t num_symlinks;
59780+ atomic_unchecked_t num_locks;
59781+ atomic_unchecked_t num_acl_get;
59782+ atomic_unchecked_t num_acl_set;
59783 } cifs_stats;
59784 #ifdef CONFIG_CIFS_SMB2
59785 struct {
59786- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
59787- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
59788+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
59789+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
59790 } smb2_stats;
59791 #endif /* CONFIG_CIFS_SMB2 */
59792 } stats;
59793@@ -1198,7 +1198,7 @@ convert_delimiter(char *path, char delim)
59794 }
59795
59796 #ifdef CONFIG_CIFS_STATS
59797-#define cifs_stats_inc atomic_inc
59798+#define cifs_stats_inc atomic_inc_unchecked
59799
59800 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
59801 unsigned int bytes)
59802@@ -1565,8 +1565,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
59803 /* Various Debug counters */
59804 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
59805 #ifdef CONFIG_CIFS_STATS2
59806-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
59807-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
59808+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
59809+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
59810 #endif
59811 GLOBAL_EXTERN atomic_t smBufAllocCount;
59812 GLOBAL_EXTERN atomic_t midCount;
59813diff --git a/fs/cifs/file.c b/fs/cifs/file.c
59814index 3e30d92..2c9f066 100644
59815--- a/fs/cifs/file.c
59816+++ b/fs/cifs/file.c
59817@@ -2061,10 +2061,14 @@ static int cifs_writepages(struct address_space *mapping,
59818 index = mapping->writeback_index; /* Start from prev offset */
59819 end = -1;
59820 } else {
59821- index = wbc->range_start >> PAGE_CACHE_SHIFT;
59822- end = wbc->range_end >> PAGE_CACHE_SHIFT;
59823- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
59824+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
59825 range_whole = true;
59826+ index = 0;
59827+ end = ULONG_MAX;
59828+ } else {
59829+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
59830+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
59831+ }
59832 scanned = true;
59833 }
59834 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
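
The cifs_writepages() reordering above lets the whole-file case (range_start 0, range_end LLONG_MAX) assign index = 0 and end = ULONG_MAX directly, so LLONG_MAX is never shifted and narrowed into the unsigned long page index; presumably this keeps that conversion out of reach of the patch's size_overflow instrumentation. A userspace sketch of the narrowing being avoided, assuming the common 12-bit PAGE_CACHE_SHIFT:

#include <limits.h>
#include <stdio.h>

#define PAGE_CACHE_SHIFT 12     /* assumed: 4 KiB pages */

int main(void)
{
        long long range_end = LLONG_MAX;        /* "whole file" marker */
        unsigned long end = range_end >> PAGE_CACHE_SHIFT;

        /* 64-bit: 0x7ffffffffffff; 32-bit: silently truncated to 0xffffffff. */
        printf("narrowed end = %#lx, full value = %#llx\n",
               end, (unsigned long long)(range_end >> PAGE_CACHE_SHIFT));
        return 0;
}
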
59835diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
59836index 3379463..3af418a 100644
59837--- a/fs/cifs/misc.c
59838+++ b/fs/cifs/misc.c
59839@@ -170,7 +170,7 @@ cifs_buf_get(void)
59840 memset(ret_buf, 0, buf_size + 3);
59841 atomic_inc(&bufAllocCount);
59842 #ifdef CONFIG_CIFS_STATS2
59843- atomic_inc(&totBufAllocCount);
59844+ atomic_inc_unchecked(&totBufAllocCount);
59845 #endif /* CONFIG_CIFS_STATS2 */
59846 }
59847
59848@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
59849 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
59850 atomic_inc(&smBufAllocCount);
59851 #ifdef CONFIG_CIFS_STATS2
59852- atomic_inc(&totSmBufAllocCount);
59853+ atomic_inc_unchecked(&totSmBufAllocCount);
59854 #endif /* CONFIG_CIFS_STATS2 */
59855
59856 }
59857diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
59858index d297903..1cb7516 100644
59859--- a/fs/cifs/smb1ops.c
59860+++ b/fs/cifs/smb1ops.c
59861@@ -622,27 +622,27 @@ static void
59862 cifs_clear_stats(struct cifs_tcon *tcon)
59863 {
59864 #ifdef CONFIG_CIFS_STATS
59865- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
59866- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
59867- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
59868- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
59869- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
59870- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
59871- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
59872- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
59873- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
59874- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
59875- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
59876- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
59877- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
59878- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
59879- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
59880- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
59881- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
59882- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
59883- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
59884- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
59885- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
59886+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
59887+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
59888+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
59889+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
59890+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
59891+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
59892+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
59893+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
59894+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
59895+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
59896+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
59897+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
59898+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
59899+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
59900+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
59901+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
59902+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
59903+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
59904+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
59905+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
59906+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
59907 #endif
59908 }
59909
59910@@ -651,36 +651,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
59911 {
59912 #ifdef CONFIG_CIFS_STATS
59913 seq_printf(m, " Oplocks breaks: %d",
59914- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
59915+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
59916 seq_printf(m, "\nReads: %d Bytes: %llu",
59917- atomic_read(&tcon->stats.cifs_stats.num_reads),
59918+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
59919 (long long)(tcon->bytes_read));
59920 seq_printf(m, "\nWrites: %d Bytes: %llu",
59921- atomic_read(&tcon->stats.cifs_stats.num_writes),
59922+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
59923 (long long)(tcon->bytes_written));
59924 seq_printf(m, "\nFlushes: %d",
59925- atomic_read(&tcon->stats.cifs_stats.num_flushes));
59926+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
59927 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
59928- atomic_read(&tcon->stats.cifs_stats.num_locks),
59929- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
59930- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
59931+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
59932+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
59933+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
59934 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
59935- atomic_read(&tcon->stats.cifs_stats.num_opens),
59936- atomic_read(&tcon->stats.cifs_stats.num_closes),
59937- atomic_read(&tcon->stats.cifs_stats.num_deletes));
59938+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
59939+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
59940+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
59941 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
59942- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
59943- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
59944+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
59945+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
59946 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
59947- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
59948- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
59949+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
59950+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
59951 seq_printf(m, "\nRenames: %d T2 Renames %d",
59952- atomic_read(&tcon->stats.cifs_stats.num_renames),
59953- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
59954+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
59955+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
59956 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
59957- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
59958- atomic_read(&tcon->stats.cifs_stats.num_fnext),
59959- atomic_read(&tcon->stats.cifs_stats.num_fclose));
59960+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
59961+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
59962+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
59963 #endif
59964 }
59965
59966diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
59967index eab05e1..ffe5ea4 100644
59968--- a/fs/cifs/smb2ops.c
59969+++ b/fs/cifs/smb2ops.c
59970@@ -418,8 +418,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
59971 #ifdef CONFIG_CIFS_STATS
59972 int i;
59973 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
59974- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
59975- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
59976+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
59977+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
59978 }
59979 #endif
59980 }
59981@@ -459,65 +459,65 @@ static void
59982 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
59983 {
59984 #ifdef CONFIG_CIFS_STATS
59985- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
59986- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
59987+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
59988+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
59989 seq_printf(m, "\nNegotiates: %d sent %d failed",
59990- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
59991- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
59992+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
59993+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
59994 seq_printf(m, "\nSessionSetups: %d sent %d failed",
59995- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
59996- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
59997+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
59998+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
59999 seq_printf(m, "\nLogoffs: %d sent %d failed",
60000- atomic_read(&sent[SMB2_LOGOFF_HE]),
60001- atomic_read(&failed[SMB2_LOGOFF_HE]));
60002+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
60003+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
60004 seq_printf(m, "\nTreeConnects: %d sent %d failed",
60005- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
60006- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
60007+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
60008+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
60009 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
60010- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
60011- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
60012+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
60013+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
60014 seq_printf(m, "\nCreates: %d sent %d failed",
60015- atomic_read(&sent[SMB2_CREATE_HE]),
60016- atomic_read(&failed[SMB2_CREATE_HE]));
60017+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
60018+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
60019 seq_printf(m, "\nCloses: %d sent %d failed",
60020- atomic_read(&sent[SMB2_CLOSE_HE]),
60021- atomic_read(&failed[SMB2_CLOSE_HE]));
60022+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
60023+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
60024 seq_printf(m, "\nFlushes: %d sent %d failed",
60025- atomic_read(&sent[SMB2_FLUSH_HE]),
60026- atomic_read(&failed[SMB2_FLUSH_HE]));
60027+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
60028+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
60029 seq_printf(m, "\nReads: %d sent %d failed",
60030- atomic_read(&sent[SMB2_READ_HE]),
60031- atomic_read(&failed[SMB2_READ_HE]));
60032+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
60033+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
60034 seq_printf(m, "\nWrites: %d sent %d failed",
60035- atomic_read(&sent[SMB2_WRITE_HE]),
60036- atomic_read(&failed[SMB2_WRITE_HE]));
60037+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
60038+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
60039 seq_printf(m, "\nLocks: %d sent %d failed",
60040- atomic_read(&sent[SMB2_LOCK_HE]),
60041- atomic_read(&failed[SMB2_LOCK_HE]));
60042+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
60043+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
60044 seq_printf(m, "\nIOCTLs: %d sent %d failed",
60045- atomic_read(&sent[SMB2_IOCTL_HE]),
60046- atomic_read(&failed[SMB2_IOCTL_HE]));
60047+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
60048+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
60049 seq_printf(m, "\nCancels: %d sent %d failed",
60050- atomic_read(&sent[SMB2_CANCEL_HE]),
60051- atomic_read(&failed[SMB2_CANCEL_HE]));
60052+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
60053+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
60054 seq_printf(m, "\nEchos: %d sent %d failed",
60055- atomic_read(&sent[SMB2_ECHO_HE]),
60056- atomic_read(&failed[SMB2_ECHO_HE]));
60057+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
60058+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
60059 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
60060- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
60061- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
60062+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
60063+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
60064 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
60065- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
60066- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
60067+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
60068+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
60069 seq_printf(m, "\nQueryInfos: %d sent %d failed",
60070- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
60071- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
60072+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
60073+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
60074 seq_printf(m, "\nSetInfos: %d sent %d failed",
60075- atomic_read(&sent[SMB2_SET_INFO_HE]),
60076- atomic_read(&failed[SMB2_SET_INFO_HE]));
60077+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
60078+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
60079 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
60080- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
60081- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
60082+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
60083+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
60084 #endif
60085 }
60086
60087diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
60088index 3417340..b942390 100644
60089--- a/fs/cifs/smb2pdu.c
60090+++ b/fs/cifs/smb2pdu.c
60091@@ -2144,8 +2144,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
60092 default:
60093 cifs_dbg(VFS, "info level %u isn't supported\n",
60094 srch_inf->info_level);
60095- rc = -EINVAL;
60096- goto qdir_exit;
60097+ return -EINVAL;
60098 }
60099
60100 req->FileIndex = cpu_to_le32(index);
60101diff --git a/fs/coda/cache.c b/fs/coda/cache.c
60102index 46ee6f2..89a9e7f 100644
60103--- a/fs/coda/cache.c
60104+++ b/fs/coda/cache.c
60105@@ -24,7 +24,7 @@
60106 #include "coda_linux.h"
60107 #include "coda_cache.h"
60108
60109-static atomic_t permission_epoch = ATOMIC_INIT(0);
60110+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
60111
60112 /* replace or extend an acl cache hit */
60113 void coda_cache_enter(struct inode *inode, int mask)
60114@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
60115 struct coda_inode_info *cii = ITOC(inode);
60116
60117 spin_lock(&cii->c_lock);
60118- cii->c_cached_epoch = atomic_read(&permission_epoch);
60119+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
60120 if (!uid_eq(cii->c_uid, current_fsuid())) {
60121 cii->c_uid = current_fsuid();
60122 cii->c_cached_perm = mask;
60123@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
60124 {
60125 struct coda_inode_info *cii = ITOC(inode);
60126 spin_lock(&cii->c_lock);
60127- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
60128+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
60129 spin_unlock(&cii->c_lock);
60130 }
60131
60132 /* remove all acl caches */
60133 void coda_cache_clear_all(struct super_block *sb)
60134 {
60135- atomic_inc(&permission_epoch);
60136+ atomic_inc_unchecked(&permission_epoch);
60137 }
60138
60139
60140@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
60141 spin_lock(&cii->c_lock);
60142 hit = (mask & cii->c_cached_perm) == mask &&
60143 uid_eq(cii->c_uid, current_fsuid()) &&
60144- cii->c_cached_epoch == atomic_read(&permission_epoch);
60145+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
60146 spin_unlock(&cii->c_lock);
60147
60148 return hit;
60149diff --git a/fs/compat.c b/fs/compat.c
60150index 6fd272d..dd34ba2 100644
60151--- a/fs/compat.c
60152+++ b/fs/compat.c
60153@@ -54,7 +54,7 @@
60154 #include <asm/ioctls.h>
60155 #include "internal.h"
60156
60157-int compat_log = 1;
60158+int compat_log = 0;
60159
60160 int compat_printk(const char *fmt, ...)
60161 {
60162@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
60163
60164 set_fs(KERNEL_DS);
60165 /* The __user pointer cast is valid because of the set_fs() */
60166- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
60167+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
60168 set_fs(oldfs);
60169 /* truncating is ok because it's a user address */
60170 if (!ret)
60171@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
60172 goto out;
60173
60174 ret = -EINVAL;
60175- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
60176+ if (nr_segs > UIO_MAXIOV)
60177 goto out;
60178 if (nr_segs > fast_segs) {
60179 ret = -ENOMEM;
60180@@ -844,6 +844,7 @@ struct compat_old_linux_dirent {
60181 struct compat_readdir_callback {
60182 struct dir_context ctx;
60183 struct compat_old_linux_dirent __user *dirent;
60184+ struct file * file;
60185 int result;
60186 };
60187
60188@@ -863,6 +864,10 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
60189 buf->result = -EOVERFLOW;
60190 return -EOVERFLOW;
60191 }
60192+
60193+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60194+ return 0;
60195+
60196 buf->result++;
60197 dirent = buf->dirent;
60198 if (!access_ok(VERIFY_WRITE, dirent,
60199@@ -894,6 +899,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
60200 if (!f.file)
60201 return -EBADF;
60202
60203+ buf.file = f.file;
60204 error = iterate_dir(f.file, &buf.ctx);
60205 if (buf.result)
60206 error = buf.result;
60207@@ -913,6 +919,7 @@ struct compat_getdents_callback {
60208 struct dir_context ctx;
60209 struct compat_linux_dirent __user *current_dir;
60210 struct compat_linux_dirent __user *previous;
60211+ struct file * file;
60212 int count;
60213 int error;
60214 };
60215@@ -935,6 +942,10 @@ static int compat_filldir(struct dir_context *ctx, const char *name, int namlen,
60216 buf->error = -EOVERFLOW;
60217 return -EOVERFLOW;
60218 }
60219+
60220+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60221+ return 0;
60222+
60223 dirent = buf->previous;
60224 if (dirent) {
60225 if (__put_user(offset, &dirent->d_off))
60226@@ -980,6 +991,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
60227 if (!f.file)
60228 return -EBADF;
60229
60230+ buf.file = f.file;
60231 error = iterate_dir(f.file, &buf.ctx);
60232 if (error >= 0)
60233 error = buf.error;
60234@@ -1000,6 +1012,7 @@ struct compat_getdents_callback64 {
60235 struct dir_context ctx;
60236 struct linux_dirent64 __user *current_dir;
60237 struct linux_dirent64 __user *previous;
60238+ struct file * file;
60239 int count;
60240 int error;
60241 };
60242@@ -1018,6 +1031,10 @@ static int compat_filldir64(struct dir_context *ctx, const char *name,
60243 buf->error = -EINVAL; /* only used if we fail.. */
60244 if (reclen > buf->count)
60245 return -EINVAL;
60246+
60247+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60248+ return 0;
60249+
60250 dirent = buf->previous;
60251
60252 if (dirent) {
60253@@ -1067,6 +1084,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
60254 if (!f.file)
60255 return -EBADF;
60256
60257+ buf.file = f.file;
60258 error = iterate_dir(f.file, &buf.ctx);
60259 if (error >= 0)
60260 error = buf.error;
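
All three getdents-style callbacks above gain a struct file pointer in their buffer struct because the per-entry actor receives only the dir_context; the extra state needed by the gr_acl_handle_filldir() check rides along by embedding the context in a wrapper and recovering the wrapper inside the callback. A self-contained userspace sketch of that embedding pattern; every name here is illustrative rather than kernel API:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct dir_context {
        int (*actor)(struct dir_context *ctx, const char *name);
};

struct readdir_callback {
        struct dir_context ctx; /* handed to the iterator */
        void *file;             /* extra state smuggled to the actor */
        int result;
};

static int fillonedir(struct dir_context *ctx, const char *name)
{
        struct readdir_callback *buf =
                container_of(ctx, struct readdir_callback, ctx);

        if (buf->file == NULL)  /* stand-in for the ACL filter */
                return 0;       /* skip the entry, keep iterating */
        buf->result++;
        printf("%s\n", name);
        return 0;
}

int main(void)
{
        struct readdir_callback buf = {
                .ctx    = { .actor = fillonedir },
                .file   = &buf, /* any non-NULL handle */
                .result = 0,
        };

        buf.ctx.actor(&buf.ctx, "example");
        return buf.result == 1 ? 0 : 1;
}
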
60261diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
60262index 4d24d17..4f8c09e 100644
60263--- a/fs/compat_binfmt_elf.c
60264+++ b/fs/compat_binfmt_elf.c
60265@@ -30,11 +30,13 @@
60266 #undef elf_phdr
60267 #undef elf_shdr
60268 #undef elf_note
60269+#undef elf_dyn
60270 #undef elf_addr_t
60271 #define elfhdr elf32_hdr
60272 #define elf_phdr elf32_phdr
60273 #define elf_shdr elf32_shdr
60274 #define elf_note elf32_note
60275+#define elf_dyn Elf32_Dyn
60276 #define elf_addr_t Elf32_Addr
60277
60278 /*
60279diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
60280index afec645..9c65620 100644
60281--- a/fs/compat_ioctl.c
60282+++ b/fs/compat_ioctl.c
60283@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
60284 return -EFAULT;
60285 if (__get_user(udata, &ss32->iomem_base))
60286 return -EFAULT;
60287- ss.iomem_base = compat_ptr(udata);
60288+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
60289 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
60290 __get_user(ss.port_high, &ss32->port_high))
60291 return -EFAULT;
60292@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
60293 for (i = 0; i < nmsgs; i++) {
60294 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
60295 return -EFAULT;
60296- if (get_user(datap, &umsgs[i].buf) ||
60297- put_user(compat_ptr(datap), &tmsgs[i].buf))
60298+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
60299+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
60300 return -EFAULT;
60301 }
60302 return sys_ioctl(fd, cmd, (unsigned long)tdata);
60303@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
60304 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
60305 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
60306 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
60307- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
60308+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
60309 return -EFAULT;
60310
60311 return ioctl_preallocate(file, p);
60312@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
60313 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
60314 {
60315 unsigned int a, b;
60316- a = *(unsigned int *)p;
60317- b = *(unsigned int *)q;
60318+ a = *(const unsigned int *)p;
60319+ b = *(const unsigned int *)q;
60320 if (a > b)
60321 return 1;
60322 if (a < b)
60323diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
60324index c9c298b..544d100 100644
60325--- a/fs/configfs/dir.c
60326+++ b/fs/configfs/dir.c
60327@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60328 }
60329 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
60330 struct configfs_dirent *next;
60331- const char *name;
60332+ const unsigned char * name;
60333+ char d_name[sizeof(next->s_dentry->d_iname)];
60334 int len;
60335 struct inode *inode = NULL;
60336
60337@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60338 continue;
60339
60340 name = configfs_get_name(next);
60341- len = strlen(name);
60342+ if (next->s_dentry && name == next->s_dentry->d_iname) {
60343+ len = next->s_dentry->d_name.len;
60344+ memcpy(d_name, name, len);
60345+ name = d_name;
60346+ } else
60347+ len = strlen(name);
60348
60349 /*
60350 * We'll have a dentry and an inode for
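
The configfs_readdir() hunk above handles names stored inline in the dentry (d_iname), which a concurrent rename can rewrite mid-read: the length is taken from d_name.len, which is published together with the name, and the bytes are copied to a stack buffer rather than strlen()'d in place. A small userspace sketch of the snapshot idea; the types are illustrative and no locking is shown:

#include <string.h>

struct shared_name {
        const char *name;       /* may be rewritten by another thread */
        size_t len;             /* published together with the bytes */
};

/* Copy using the published length; never strlen() the live buffer. */
size_t name_snapshot(const struct shared_name *src, char *dst, size_t dstsz)
{
        size_t len = src->len;

        if (len >= dstsz)
                len = dstsz - 1;
        memcpy(dst, src->name, len);
        dst[len] = '\0';
        return len;
}
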
60351diff --git a/fs/coredump.c b/fs/coredump.c
60352index b5c86ff..0dac262 100644
60353--- a/fs/coredump.c
60354+++ b/fs/coredump.c
60355@@ -450,8 +450,8 @@ static void wait_for_dump_helpers(struct file *file)
60356 struct pipe_inode_info *pipe = file->private_data;
60357
60358 pipe_lock(pipe);
60359- pipe->readers++;
60360- pipe->writers--;
60361+ atomic_inc(&pipe->readers);
60362+ atomic_dec(&pipe->writers);
60363 wake_up_interruptible_sync(&pipe->wait);
60364 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
60365 pipe_unlock(pipe);
60366@@ -460,11 +460,11 @@ static void wait_for_dump_helpers(struct file *file)
60367 * We actually want wait_event_freezable() but then we need
60368 * to clear TIF_SIGPENDING and improve dump_interrupted().
60369 */
60370- wait_event_interruptible(pipe->wait, pipe->readers == 1);
60371+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
60372
60373 pipe_lock(pipe);
60374- pipe->readers--;
60375- pipe->writers++;
60376+ atomic_dec(&pipe->readers);
60377+ atomic_inc(&pipe->writers);
60378 pipe_unlock(pipe);
60379 }
60380
60381@@ -511,7 +511,9 @@ void do_coredump(const siginfo_t *siginfo)
60382 struct files_struct *displaced;
60383 bool need_nonrelative = false;
60384 bool core_dumped = false;
60385- static atomic_t core_dump_count = ATOMIC_INIT(0);
60386+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
60387+ long signr = siginfo->si_signo;
60388+ int dumpable;
60389 struct coredump_params cprm = {
60390 .siginfo = siginfo,
60391 .regs = signal_pt_regs(),
60392@@ -524,12 +526,17 @@ void do_coredump(const siginfo_t *siginfo)
60393 .mm_flags = mm->flags,
60394 };
60395
60396- audit_core_dumps(siginfo->si_signo);
60397+ audit_core_dumps(signr);
60398+
60399+ dumpable = __get_dumpable(cprm.mm_flags);
60400+
60401+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
60402+ gr_handle_brute_attach(dumpable);
60403
60404 binfmt = mm->binfmt;
60405 if (!binfmt || !binfmt->core_dump)
60406 goto fail;
60407- if (!__get_dumpable(cprm.mm_flags))
60408+ if (!dumpable)
60409 goto fail;
60410
60411 cred = prepare_creds();
60412@@ -548,7 +555,7 @@ void do_coredump(const siginfo_t *siginfo)
60413 need_nonrelative = true;
60414 }
60415
60416- retval = coredump_wait(siginfo->si_signo, &core_state);
60417+ retval = coredump_wait(signr, &core_state);
60418 if (retval < 0)
60419 goto fail_creds;
60420
60421@@ -591,7 +598,7 @@ void do_coredump(const siginfo_t *siginfo)
60422 }
60423 cprm.limit = RLIM_INFINITY;
60424
60425- dump_count = atomic_inc_return(&core_dump_count);
60426+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
60427 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
60428 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
60429 task_tgid_vnr(current), current->comm);
60430@@ -623,6 +630,8 @@ void do_coredump(const siginfo_t *siginfo)
60431 } else {
60432 struct inode *inode;
60433
60434+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
60435+
60436 if (cprm.limit < binfmt->min_coredump)
60437 goto fail_unlock;
60438
60439@@ -681,7 +690,7 @@ close_fail:
60440 filp_close(cprm.file, NULL);
60441 fail_dropcount:
60442 if (ispipe)
60443- atomic_dec(&core_dump_count);
60444+ atomic_dec_unchecked(&core_dump_count);
60445 fail_unlock:
60446 kfree(cn.corename);
60447 coredump_finish(mm, core_dumped);
60448@@ -702,6 +711,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
60449 struct file *file = cprm->file;
60450 loff_t pos = file->f_pos;
60451 ssize_t n;
60452+
60453+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
60454 if (cprm->written + nr > cprm->limit)
60455 return 0;
60456 while (nr) {
60457diff --git a/fs/dcache.c b/fs/dcache.c
60458index e368d4f..b40ba59 100644
60459--- a/fs/dcache.c
60460+++ b/fs/dcache.c
60461@@ -508,7 +508,7 @@ static void __dentry_kill(struct dentry *dentry)
60462 * dentry_iput drops the locks, at which point nobody (except
60463 * transient RCU lookups) can reach this dentry.
60464 */
60465- BUG_ON((int)dentry->d_lockref.count > 0);
60466+ BUG_ON((int)__lockref_read(&dentry->d_lockref) > 0);
60467 this_cpu_dec(nr_dentry);
60468 if (dentry->d_op && dentry->d_op->d_release)
60469 dentry->d_op->d_release(dentry);
60470@@ -561,7 +561,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
60471 struct dentry *parent = dentry->d_parent;
60472 if (IS_ROOT(dentry))
60473 return NULL;
60474- if (unlikely((int)dentry->d_lockref.count < 0))
60475+ if (unlikely((int)__lockref_read(&dentry->d_lockref) < 0))
60476 return NULL;
60477 if (likely(spin_trylock(&parent->d_lock)))
60478 return parent;
60479@@ -638,7 +638,7 @@ repeat:
60480 dentry->d_flags |= DCACHE_REFERENCED;
60481 dentry_lru_add(dentry);
60482
60483- dentry->d_lockref.count--;
60484+ __lockref_dec(&dentry->d_lockref);
60485 spin_unlock(&dentry->d_lock);
60486 return;
60487
60488@@ -653,7 +653,7 @@ EXPORT_SYMBOL(dput);
60489 /* This must be called with d_lock held */
60490 static inline void __dget_dlock(struct dentry *dentry)
60491 {
60492- dentry->d_lockref.count++;
60493+ __lockref_inc(&dentry->d_lockref);
60494 }
60495
60496 static inline void __dget(struct dentry *dentry)
60497@@ -694,8 +694,8 @@ repeat:
60498 goto repeat;
60499 }
60500 rcu_read_unlock();
60501- BUG_ON(!ret->d_lockref.count);
60502- ret->d_lockref.count++;
60503+ BUG_ON(!__lockref_read(&ret->d_lockref));
60504+ __lockref_inc(&ret->d_lockref);
60505 spin_unlock(&ret->d_lock);
60506 return ret;
60507 }
60508@@ -773,9 +773,9 @@ restart:
60509 spin_lock(&inode->i_lock);
60510 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
60511 spin_lock(&dentry->d_lock);
60512- if (!dentry->d_lockref.count) {
60513+ if (!__lockref_read(&dentry->d_lockref)) {
60514 struct dentry *parent = lock_parent(dentry);
60515- if (likely(!dentry->d_lockref.count)) {
60516+ if (likely(!__lockref_read(&dentry->d_lockref))) {
60517 __dentry_kill(dentry);
60518 dput(parent);
60519 goto restart;
60520@@ -810,7 +810,7 @@ static void shrink_dentry_list(struct list_head *list)
60521 * We found an inuse dentry which was not removed from
60522 * the LRU because of laziness during lookup. Do not free it.
60523 */
60524- if ((int)dentry->d_lockref.count > 0) {
60525+ if ((int)__lockref_read(&dentry->d_lockref) > 0) {
60526 spin_unlock(&dentry->d_lock);
60527 if (parent)
60528 spin_unlock(&parent->d_lock);
60529@@ -848,8 +848,8 @@ static void shrink_dentry_list(struct list_head *list)
60530 dentry = parent;
60531 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
60532 parent = lock_parent(dentry);
60533- if (dentry->d_lockref.count != 1) {
60534- dentry->d_lockref.count--;
60535+ if (__lockref_read(&dentry->d_lockref) != 1) {
60536+ __lockref_dec(&dentry->d_lockref);
60537 spin_unlock(&dentry->d_lock);
60538 if (parent)
60539 spin_unlock(&parent->d_lock);
60540@@ -889,7 +889,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
60541 * counts, just remove them from the LRU. Otherwise give them
60542 * another pass through the LRU.
60543 */
60544- if (dentry->d_lockref.count) {
60545+ if (__lockref_read(&dentry->d_lockref) > 0) {
60546 d_lru_isolate(dentry);
60547 spin_unlock(&dentry->d_lock);
60548 return LRU_REMOVED;
60549@@ -1225,7 +1225,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
60550 } else {
60551 if (dentry->d_flags & DCACHE_LRU_LIST)
60552 d_lru_del(dentry);
60553- if (!dentry->d_lockref.count) {
60554+ if (!__lockref_read(&dentry->d_lockref)) {
60555 d_shrink_add(dentry, &data->dispose);
60556 data->found++;
60557 }
60558@@ -1273,7 +1273,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60559 return D_WALK_CONTINUE;
60560
60561 /* root with refcount 1 is fine */
60562- if (dentry == _data && dentry->d_lockref.count == 1)
60563+ if (dentry == _data && __lockref_read(&dentry->d_lockref) == 1)
60564 return D_WALK_CONTINUE;
60565
60566 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
60567@@ -1282,7 +1282,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60568 dentry->d_inode ?
60569 dentry->d_inode->i_ino : 0UL,
60570 dentry,
60571- dentry->d_lockref.count,
60572+ __lockref_read(&dentry->d_lockref),
60573 dentry->d_sb->s_type->name,
60574 dentry->d_sb->s_id);
60575 WARN_ON(1);
60576@@ -1423,7 +1423,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60577 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
60578 if (name->len > DNAME_INLINE_LEN-1) {
60579 size_t size = offsetof(struct external_name, name[1]);
60580- struct external_name *p = kmalloc(size + name->len, GFP_KERNEL);
60581+ struct external_name *p = kmalloc(round_up(size + name->len, sizeof(unsigned long)), GFP_KERNEL);
60582 if (!p) {
60583 kmem_cache_free(dentry_cache, dentry);
60584 return NULL;
60585@@ -1443,7 +1443,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60586 smp_wmb();
60587 dentry->d_name.name = dname;
60588
60589- dentry->d_lockref.count = 1;
60590+ __lockref_set(&dentry->d_lockref, 1);
60591 dentry->d_flags = 0;
60592 spin_lock_init(&dentry->d_lock);
60593 seqcount_init(&dentry->d_seq);
60594@@ -1452,6 +1452,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60595 dentry->d_sb = sb;
60596 dentry->d_op = NULL;
60597 dentry->d_fsdata = NULL;
60598+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
60599+ atomic_set(&dentry->chroot_refcnt, 0);
60600+#endif
60601 INIT_HLIST_BL_NODE(&dentry->d_hash);
60602 INIT_LIST_HEAD(&dentry->d_lru);
60603 INIT_LIST_HEAD(&dentry->d_subdirs);
60604@@ -2151,7 +2154,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
60605 goto next;
60606 }
60607
60608- dentry->d_lockref.count++;
60609+ __lockref_inc(&dentry->d_lockref);
60610 found = dentry;
60611 spin_unlock(&dentry->d_lock);
60612 break;
60613@@ -2250,7 +2253,7 @@ again:
60614 spin_lock(&dentry->d_lock);
60615 inode = dentry->d_inode;
60616 isdir = S_ISDIR(inode->i_mode);
60617- if (dentry->d_lockref.count == 1) {
60618+ if (__lockref_read(&dentry->d_lockref) == 1) {
60619 if (!spin_trylock(&inode->i_lock)) {
60620 spin_unlock(&dentry->d_lock);
60621 cpu_relax();
60622@@ -3203,7 +3206,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
60623
60624 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
60625 dentry->d_flags |= DCACHE_GENOCIDE;
60626- dentry->d_lockref.count--;
60627+ __lockref_dec(&dentry->d_lockref);
60628 }
60629 }
60630 return D_WALK_CONTINUE;
60631@@ -3319,7 +3322,8 @@ void __init vfs_caches_init(unsigned long mempages)
60632 mempages -= reserve;
60633
60634 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
60635- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
60636+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
60637+ SLAB_NO_SANITIZE, NULL);
60638
60639 dcache_init();
60640 inode_init();
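
The dcache.c hunks above funnel every access to d_lockref.count through __lockref_read()/__lockref_set()/__lockref_inc()/__lockref_dec(), turning the count into an opaque quantity so a PAX_REFCOUNT build can substitute an overflow-checked atomic representation without revisiting the call sites. A sketch of the plain-int flavor of such accessors; these are illustrative stand-ins, not the grsecurity definitions:

/* Plain-int flavor; a hardened build would back these with a checked
 * atomic type instead, which is why call sites never touch .count. */
struct lockref_sketch {
        int count;
};

static inline int __lockref_read(const struct lockref_sketch *l)
{
        return l->count;
}

static inline void __lockref_set(struct lockref_sketch *l, int v)
{
        l->count = v;
}

static inline void __lockref_inc(struct lockref_sketch *l)
{
        l->count++;
}

static inline void __lockref_dec(struct lockref_sketch *l)
{
        l->count--;
}
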
60641diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
60642index 6f0ce53..92bba36 100644
60643--- a/fs/debugfs/inode.c
60644+++ b/fs/debugfs/inode.c
60645@@ -423,10 +423,20 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
60646 * If debugfs is not enabled in the kernel, the value -%ENODEV will be
60647 * returned.
60648 */
60649+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60650+extern int grsec_enable_sysfs_restrict;
60651+#endif
60652+
60653 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
60654 {
60655- return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
60656- parent, NULL, NULL);
60657+ umode_t mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
60658+
60659+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60660+ if (grsec_enable_sysfs_restrict)
60661+ mode = S_IFDIR | S_IRWXU;
60662+#endif
60663+
60664+ return __create_file(name, mode, parent, NULL, NULL);
60665 }
60666 EXPORT_SYMBOL_GPL(debugfs_create_dir);
60667
60668diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
60669index 1686dc2..9611c50 100644
60670--- a/fs/ecryptfs/inode.c
60671+++ b/fs/ecryptfs/inode.c
60672@@ -664,7 +664,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
60673 old_fs = get_fs();
60674 set_fs(get_ds());
60675 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
60676- (char __user *)lower_buf,
60677+ (char __force_user *)lower_buf,
60678 PATH_MAX);
60679 set_fs(old_fs);
60680 if (rc < 0)
60681diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
60682index e4141f2..d8263e8 100644
60683--- a/fs/ecryptfs/miscdev.c
60684+++ b/fs/ecryptfs/miscdev.c
60685@@ -304,7 +304,7 @@ check_list:
60686 goto out_unlock_msg_ctx;
60687 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
60688 if (msg_ctx->msg) {
60689- if (copy_to_user(&buf[i], packet_length, packet_length_size))
60690+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
60691 goto out_unlock_msg_ctx;
60692 i += packet_length_size;
60693 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
60694diff --git a/fs/exec.c b/fs/exec.c
60695index ad8798e..e3f50ec 100644
60696--- a/fs/exec.c
60697+++ b/fs/exec.c
60698@@ -56,8 +56,20 @@
60699 #include <linux/pipe_fs_i.h>
60700 #include <linux/oom.h>
60701 #include <linux/compat.h>
60702+#include <linux/random.h>
60703+#include <linux/seq_file.h>
60704+#include <linux/coredump.h>
60705+#include <linux/mman.h>
60706+
60707+#ifdef CONFIG_PAX_REFCOUNT
60708+#include <linux/kallsyms.h>
60709+#include <linux/kdebug.h>
60710+#endif
60711+
60712+#include <trace/events/fs.h>
60713
60714 #include <asm/uaccess.h>
60715+#include <asm/sections.h>
60716 #include <asm/mmu_context.h>
60717 #include <asm/tlb.h>
60718
60719@@ -66,19 +78,34 @@
60720
60721 #include <trace/events/sched.h>
60722
60723+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60724+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
60725+{
60726+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
60727+}
60728+#endif
60729+
60730+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
60731+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60732+EXPORT_SYMBOL(pax_set_initial_flags_func);
60733+#endif
60734+
60735 int suid_dumpable = 0;
60736
60737 static LIST_HEAD(formats);
60738 static DEFINE_RWLOCK(binfmt_lock);
60739
60740+extern int gr_process_kernel_exec_ban(void);
60741+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
60742+
60743 void __register_binfmt(struct linux_binfmt * fmt, int insert)
60744 {
60745 BUG_ON(!fmt);
60746 if (WARN_ON(!fmt->load_binary))
60747 return;
60748 write_lock(&binfmt_lock);
60749- insert ? list_add(&fmt->lh, &formats) :
60750- list_add_tail(&fmt->lh, &formats);
60751+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
60752+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
60753 write_unlock(&binfmt_lock);
60754 }
60755
60756@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
60757 void unregister_binfmt(struct linux_binfmt * fmt)
60758 {
60759 write_lock(&binfmt_lock);
60760- list_del(&fmt->lh);
60761+ pax_list_del((struct list_head *)&fmt->lh);
60762 write_unlock(&binfmt_lock);
60763 }
60764
60765@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
60766 int write)
60767 {
60768 struct page *page;
60769- int ret;
60770
60771-#ifdef CONFIG_STACK_GROWSUP
60772- if (write) {
60773- ret = expand_downwards(bprm->vma, pos);
60774- if (ret < 0)
60775- return NULL;
60776- }
60777-#endif
60778- ret = get_user_pages(current, bprm->mm, pos,
60779- 1, write, 1, &page, NULL);
60780- if (ret <= 0)
60781+ if (0 > expand_downwards(bprm->vma, pos))
60782+ return NULL;
60783+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
60784 return NULL;
60785
60786 if (write) {
60787@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
60788 if (size <= ARG_MAX)
60789 return page;
60790
60791+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60792+ // only allow 512KB for argv+env on suid/sgid binaries
60793+ // to prevent easy ASLR exhaustion
60794+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
60795+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
60796+ (size > (512 * 1024))) {
60797+ put_page(page);
60798+ return NULL;
60799+ }
60800+#endif
60801+
60802 /*
60803 * Limit to 1/4-th the stack size for the argv+env strings.
60804 * This ensures that:
60805@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
60806 vma->vm_end = STACK_TOP_MAX;
60807 vma->vm_start = vma->vm_end - PAGE_SIZE;
60808 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
60809+
60810+#ifdef CONFIG_PAX_SEGMEXEC
60811+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
60812+#endif
60813+
60814 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
60815 INIT_LIST_HEAD(&vma->anon_vma_chain);
60816
60817@@ -280,6 +315,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
60818 arch_bprm_mm_init(mm, vma);
60819 up_write(&mm->mmap_sem);
60820 bprm->p = vma->vm_end - sizeof(void *);
60821+
60822+#ifdef CONFIG_PAX_RANDUSTACK
60823+ if (randomize_va_space)
60824+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
60825+#endif
60826+
60827 return 0;
60828 err:
60829 up_write(&mm->mmap_sem);
60830@@ -396,7 +437,7 @@ struct user_arg_ptr {
60831 } ptr;
60832 };
60833
60834-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60835+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60836 {
60837 const char __user *native;
60838
60839@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60840 compat_uptr_t compat;
60841
60842 if (get_user(compat, argv.ptr.compat + nr))
60843- return ERR_PTR(-EFAULT);
60844+ return (const char __force_user *)ERR_PTR(-EFAULT);
60845
60846 return compat_ptr(compat);
60847 }
60848 #endif
60849
60850 if (get_user(native, argv.ptr.native + nr))
60851- return ERR_PTR(-EFAULT);
60852+ return (const char __force_user *)ERR_PTR(-EFAULT);
60853
60854 return native;
60855 }
60856@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
60857 if (!p)
60858 break;
60859
60860- if (IS_ERR(p))
60861+ if (IS_ERR((const char __force_kernel *)p))
60862 return -EFAULT;
60863
60864 if (i >= max)
60865@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
60866
60867 ret = -EFAULT;
60868 str = get_user_arg_ptr(argv, argc);
60869- if (IS_ERR(str))
60870+ if (IS_ERR((const char __force_kernel *)str))
60871 goto out;
60872
60873 len = strnlen_user(str, MAX_ARG_STRLEN);
60874@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
60875 int r;
60876 mm_segment_t oldfs = get_fs();
60877 struct user_arg_ptr argv = {
60878- .ptr.native = (const char __user *const __user *)__argv,
60879+ .ptr.native = (const char __user * const __force_user *)__argv,
60880 };
60881
60882 set_fs(KERNEL_DS);
60883@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
60884 unsigned long new_end = old_end - shift;
60885 struct mmu_gather tlb;
60886
60887- BUG_ON(new_start > new_end);
60888+ if (new_start >= new_end || new_start < mmap_min_addr)
60889+ return -ENOMEM;
60890
60891 /*
60892 * ensure there are no vmas between where we want to go
60893@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
60894 if (vma != find_vma(mm, new_start))
60895 return -EFAULT;
60896
60897+#ifdef CONFIG_PAX_SEGMEXEC
60898+ BUG_ON(pax_find_mirror_vma(vma));
60899+#endif
60900+
60901 /*
60902 * cover the whole range: [new_start, old_end)
60903 */
60904@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
60905 stack_top = arch_align_stack(stack_top);
60906 stack_top = PAGE_ALIGN(stack_top);
60907
60908- if (unlikely(stack_top < mmap_min_addr) ||
60909- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
60910- return -ENOMEM;
60911-
60912 stack_shift = vma->vm_end - stack_top;
60913
60914 bprm->p -= stack_shift;
60915@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
60916 bprm->exec -= stack_shift;
60917
60918 down_write(&mm->mmap_sem);
60919+
60920+ /* Move stack pages down in memory. */
60921+ if (stack_shift) {
60922+ ret = shift_arg_pages(vma, stack_shift);
60923+ if (ret)
60924+ goto out_unlock;
60925+ }
60926+
60927 vm_flags = VM_STACK_FLAGS;
60928
60929+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
60930+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60931+ vm_flags &= ~VM_EXEC;
60932+
60933+#ifdef CONFIG_PAX_MPROTECT
60934+ if (mm->pax_flags & MF_PAX_MPROTECT)
60935+ vm_flags &= ~VM_MAYEXEC;
60936+#endif
60937+
60938+ }
60939+#endif
60940+
60941 /*
60942 * Adjust stack execute permissions; explicitly enable for
60943 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
60944@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
60945 goto out_unlock;
60946 BUG_ON(prev != vma);
60947
60948- /* Move stack pages down in memory. */
60949- if (stack_shift) {
60950- ret = shift_arg_pages(vma, stack_shift);
60951- if (ret)
60952- goto out_unlock;
60953- }
60954-
60955 /* mprotect_fixup is overkill to remove the temporary stack flags */
60956 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
60957
60958@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
60959 #endif
60960 current->mm->start_stack = bprm->p;
60961 ret = expand_stack(vma, stack_base);
60962+
60963+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
60964+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
60965+ unsigned long size;
60966+ vm_flags_t vm_flags;
60967+
60968+ size = STACK_TOP - vma->vm_end;
60969+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
60970+
60971+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
60972+
60973+#ifdef CONFIG_X86
60974+ if (!ret) {
60975+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
60976+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
60977+ }
60978+#endif
60979+
60980+ }
60981+#endif
60982+
60983 if (ret)
60984 ret = -EFAULT;
60985
60986@@ -781,8 +857,10 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
60987 if (err)
60988 goto exit;
60989
60990- if (name->name[0] != '\0')
60991+ if (name->name[0] != '\0') {
60992 fsnotify_open(file);
60993+ trace_open_exec(name->name);
60994+ }
60995
60996 out:
60997 return file;
60998@@ -809,7 +887,7 @@ int kernel_read(struct file *file, loff_t offset,
60999 old_fs = get_fs();
61000 set_fs(get_ds());
61001 /* The cast to a user pointer is valid due to the set_fs() */
61002- result = vfs_read(file, (void __user *)addr, count, &pos);
61003+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
61004 set_fs(old_fs);
61005 return result;
61006 }
61007@@ -854,6 +932,7 @@ static int exec_mmap(struct mm_struct *mm)
61008 tsk->mm = mm;
61009 tsk->active_mm = mm;
61010 activate_mm(active_mm, mm);
61011+ populate_stack();
61012 tsk->mm->vmacache_seqnum = 0;
61013 vmacache_flush(tsk);
61014 task_unlock(tsk);
61015@@ -920,10 +999,14 @@ static int de_thread(struct task_struct *tsk)
61016 if (!thread_group_leader(tsk)) {
61017 struct task_struct *leader = tsk->group_leader;
61018
61019- sig->notify_count = -1; /* for exit_notify() */
61020 for (;;) {
61021 threadgroup_change_begin(tsk);
61022 write_lock_irq(&tasklist_lock);
61023+ /*
61024+ * Do this under tasklist_lock to ensure that
61025+ * exit_notify() can't miss ->group_exit_task
61026+ */
61027+ sig->notify_count = -1;
61028 if (likely(leader->exit_state))
61029 break;
61030 __set_current_state(TASK_KILLABLE);
61031@@ -1252,13 +1335,60 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
61032 }
61033 rcu_read_unlock();
61034
61035- if (p->fs->users > n_fs)
61036+ if (atomic_read(&p->fs->users) > n_fs)
61037 bprm->unsafe |= LSM_UNSAFE_SHARE;
61038 else
61039 p->fs->in_exec = 1;
61040 spin_unlock(&p->fs->lock);
61041 }
61042
61043+static void bprm_fill_uid(struct linux_binprm *bprm)
61044+{
61045+ struct inode *inode;
61046+ unsigned int mode;
61047+ kuid_t uid;
61048+ kgid_t gid;
61049+
61050+ /* clear any previous set[ug]id data from a previous binary */
61051+ bprm->cred->euid = current_euid();
61052+ bprm->cred->egid = current_egid();
61053+
61054+ if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
61055+ return;
61056+
61057+ if (task_no_new_privs(current))
61058+ return;
61059+
61060+ inode = file_inode(bprm->file);
61061+ mode = READ_ONCE(inode->i_mode);
61062+ if (!(mode & (S_ISUID|S_ISGID)))
61063+ return;
61064+
61065+ /* Be careful if suid/sgid is set */
61066+ mutex_lock(&inode->i_mutex);
61067+
61068+ /* reload atomically mode/uid/gid now that lock held */
61069+ mode = inode->i_mode;
61070+ uid = inode->i_uid;
61071+ gid = inode->i_gid;
61072+ mutex_unlock(&inode->i_mutex);
61073+
61074+ /* We ignore suid/sgid if there are no mappings for them in the ns */
61075+ if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
61076+ !kgid_has_mapping(bprm->cred->user_ns, gid))
61077+ return;
61078+
61079+ if (mode & S_ISUID) {
61080+ bprm->per_clear |= PER_CLEAR_ON_SETID;
61081+ bprm->cred->euid = uid;
61082+ }
61083+
61084+ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
61085+ bprm->per_clear |= PER_CLEAR_ON_SETID;
61086+ bprm->cred->egid = gid;
61087+ }
61088+}
61089+
61090 /*
61091 * Fill the binprm structure from the inode.
61092 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
61093@@ -1267,36 +1397,9 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
61094 */
61095 int prepare_binprm(struct linux_binprm *bprm)
61096 {
61097- struct inode *inode = file_inode(bprm->file);
61098- umode_t mode = inode->i_mode;
61099 int retval;
61100
61101-
61102- /* clear any previous set[ug]id data from a previous binary */
61103- bprm->cred->euid = current_euid();
61104- bprm->cred->egid = current_egid();
61105-
61106- if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
61107- !task_no_new_privs(current) &&
61108- kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
61109- kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
61110- /* Set-uid? */
61111- if (mode & S_ISUID) {
61112- bprm->per_clear |= PER_CLEAR_ON_SETID;
61113- bprm->cred->euid = inode->i_uid;
61114- }
61115-
61116- /* Set-gid? */
61117- /*
61118- * If setgid is set but no group execute bit then this
61119- * is a candidate for mandatory locking, not a setgid
61120- * executable.
61121- */
61122- if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
61123- bprm->per_clear |= PER_CLEAR_ON_SETID;
61124- bprm->cred->egid = inode->i_gid;
61125- }
61126- }
61127+ bprm_fill_uid(bprm);
61128
61129 /* fill in binprm security blob */
61130 retval = security_bprm_set_creds(bprm);
61131@@ -1433,6 +1536,31 @@ static int exec_binprm(struct linux_binprm *bprm)
61132 return ret;
61133 }
61134
61135+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61136+static DEFINE_PER_CPU(u64, exec_counter);
61137+static int __init init_exec_counters(void)
61138+{
61139+ unsigned int cpu;
61140+
61141+ for_each_possible_cpu(cpu) {
61142+ per_cpu(exec_counter, cpu) = (u64)cpu;
61143+ }
61144+
61145+ return 0;
61146+}
61147+early_initcall(init_exec_counters);
61148+static inline void increment_exec_counter(void)
61149+{
61150+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
61151+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
61152+}
61153+#else
61154+static inline void increment_exec_counter(void) {}
61155+#endif
61156+
61157+extern void gr_handle_exec_args(struct linux_binprm *bprm,
61158+ struct user_arg_ptr argv);
61159+
61160 /*
61161 * sys_execve() executes a new program.
61162 */
61163@@ -1441,6 +1569,11 @@ static int do_execveat_common(int fd, struct filename *filename,
61164 struct user_arg_ptr envp,
61165 int flags)
61166 {
61167+#ifdef CONFIG_GRKERNSEC
61168+ struct file *old_exec_file;
61169+ struct acl_subject_label *old_acl;
61170+ struct rlimit old_rlim[RLIM_NLIMITS];
61171+#endif
61172 char *pathbuf = NULL;
61173 struct linux_binprm *bprm;
61174 struct file *file;
61175@@ -1450,6 +1583,8 @@ static int do_execveat_common(int fd, struct filename *filename,
61176 if (IS_ERR(filename))
61177 return PTR_ERR(filename);
61178
61179+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
61180+
61181 /*
61182 * We move the actual failure in case of RLIMIT_NPROC excess from
61183 * set*uid() to execve() because too many poorly written programs
61184@@ -1487,6 +1622,11 @@ static int do_execveat_common(int fd, struct filename *filename,
61185 if (IS_ERR(file))
61186 goto out_unmark;
61187
61188+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
61189+ retval = -EPERM;
61190+ goto out_unmark;
61191+ }
61192+
61193 sched_exec();
61194
61195 bprm->file = file;
61196@@ -1513,6 +1653,11 @@ static int do_execveat_common(int fd, struct filename *filename,
61197 }
61198 bprm->interp = bprm->filename;
61199
61200+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
61201+ retval = -EACCES;
61202+ goto out_unmark;
61203+ }
61204+
61205 retval = bprm_mm_init(bprm);
61206 if (retval)
61207 goto out_unmark;
61208@@ -1529,24 +1674,70 @@ static int do_execveat_common(int fd, struct filename *filename,
61209 if (retval < 0)
61210 goto out;
61211
61212+#ifdef CONFIG_GRKERNSEC
61213+ old_acl = current->acl;
61214+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
61215+ old_exec_file = current->exec_file;
61216+ get_file(file);
61217+ current->exec_file = file;
61218+#endif
61219+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61220+ /* limit suid stack to 8MB
61221+ * we saved the old limits above and will restore them if this exec fails
61222+ */
61223+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
61224+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
61225+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
61226+#endif
61227+
61228+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
61229+ retval = -EPERM;
61230+ goto out_fail;
61231+ }
61232+
61233+ if (!gr_tpe_allow(file)) {
61234+ retval = -EACCES;
61235+ goto out_fail;
61236+ }
61237+
61238+ if (gr_check_crash_exec(file)) {
61239+ retval = -EACCES;
61240+ goto out_fail;
61241+ }
61242+
61243+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
61244+ bprm->unsafe);
61245+ if (retval < 0)
61246+ goto out_fail;
61247+
61248 retval = copy_strings_kernel(1, &bprm->filename, bprm);
61249 if (retval < 0)
61250- goto out;
61251+ goto out_fail;
61252
61253 bprm->exec = bprm->p;
61254 retval = copy_strings(bprm->envc, envp, bprm);
61255 if (retval < 0)
61256- goto out;
61257+ goto out_fail;
61258
61259 retval = copy_strings(bprm->argc, argv, bprm);
61260 if (retval < 0)
61261- goto out;
61262+ goto out_fail;
61263+
61264+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
61265+
61266+ gr_handle_exec_args(bprm, argv);
61267
61268 retval = exec_binprm(bprm);
61269 if (retval < 0)
61270- goto out;
61271+ goto out_fail;
61272+#ifdef CONFIG_GRKERNSEC
61273+ if (old_exec_file)
61274+ fput(old_exec_file);
61275+#endif
61276
61277 /* execve succeeded */
61278+
61279+ increment_exec_counter();
61280 current->fs->in_exec = 0;
61281 current->in_execve = 0;
61282 acct_update_integrals(current);
61283@@ -1558,6 +1749,14 @@ static int do_execveat_common(int fd, struct filename *filename,
61284 put_files_struct(displaced);
61285 return retval;
61286
61287+out_fail:
61288+#ifdef CONFIG_GRKERNSEC
61289+ current->acl = old_acl;
61290+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
61291+ fput(current->exec_file);
61292+ current->exec_file = old_exec_file;
61293+#endif
61294+
61295 out:
61296 if (bprm->mm) {
61297 acct_arg_size(bprm, 0);
61298@@ -1704,3 +1903,312 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
61299 argv, envp, flags);
61300 }
61301 #endif
61302+
61303+int pax_check_flags(unsigned long *flags)
61304+{
61305+ int retval = 0;
61306+
61307+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
61308+ if (*flags & MF_PAX_SEGMEXEC)
61309+ {
61310+ *flags &= ~MF_PAX_SEGMEXEC;
61311+ retval = -EINVAL;
61312+ }
61313+#endif
61314+
61315+ if ((*flags & MF_PAX_PAGEEXEC)
61316+
61317+#ifdef CONFIG_PAX_PAGEEXEC
61318+ && (*flags & MF_PAX_SEGMEXEC)
61319+#endif
61320+
61321+ )
61322+ {
61323+ *flags &= ~MF_PAX_PAGEEXEC;
61324+ retval = -EINVAL;
61325+ }
61326+
61327+ if ((*flags & MF_PAX_MPROTECT)
61328+
61329+#ifdef CONFIG_PAX_MPROTECT
61330+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61331+#endif
61332+
61333+ )
61334+ {
61335+ *flags &= ~MF_PAX_MPROTECT;
61336+ retval = -EINVAL;
61337+ }
61338+
61339+ if ((*flags & MF_PAX_EMUTRAMP)
61340+
61341+#ifdef CONFIG_PAX_EMUTRAMP
61342+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61343+#endif
61344+
61345+ )
61346+ {
61347+ *flags &= ~MF_PAX_EMUTRAMP;
61348+ retval = -EINVAL;
61349+ }
61350+
61351+ return retval;
61352+}
61353+
61354+EXPORT_SYMBOL(pax_check_flags);
61355+
61356+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
61357+char *pax_get_path(const struct path *path, char *buf, int buflen)
61358+{
61359+ char *pathname = d_path(path, buf, buflen);
61360+
61361+ if (IS_ERR(pathname))
61362+ goto toolong;
61363+
61364+ pathname = mangle_path(buf, pathname, "\t\n\\");
61365+ if (!pathname)
61366+ goto toolong;
61367+
61368+ *pathname = 0;
61369+ return buf;
61370+
61371+toolong:
61372+ return "<path too long>";
61373+}
61374+EXPORT_SYMBOL(pax_get_path);
61375+
61376+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
61377+{
61378+ struct task_struct *tsk = current;
61379+ struct mm_struct *mm = current->mm;
61380+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
61381+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
61382+ char *path_exec = NULL;
61383+ char *path_fault = NULL;
61384+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
61385+ siginfo_t info = { };
61386+
61387+ if (buffer_exec && buffer_fault) {
61388+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
61389+
61390+ down_read(&mm->mmap_sem);
61391+ vma = mm->mmap;
61392+ while (vma && (!vma_exec || !vma_fault)) {
61393+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
61394+ vma_exec = vma;
61395+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
61396+ vma_fault = vma;
61397+ vma = vma->vm_next;
61398+ }
61399+ if (vma_exec)
61400+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
61401+ if (vma_fault) {
61402+ start = vma_fault->vm_start;
61403+ end = vma_fault->vm_end;
61404+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
61405+ if (vma_fault->vm_file)
61406+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
61407+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
61408+ path_fault = "<heap>";
61409+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
61410+ path_fault = "<stack>";
61411+ else
61412+ path_fault = "<anonymous mapping>";
61413+ }
61414+ up_read(&mm->mmap_sem);
61415+ }
61416+ if (tsk->signal->curr_ip)
61417+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
61418+ else
61419+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
61420+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
61421+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
61422+ free_page((unsigned long)buffer_exec);
61423+ free_page((unsigned long)buffer_fault);
61424+ pax_report_insns(regs, pc, sp);
61425+ info.si_signo = SIGKILL;
61426+ info.si_errno = 0;
61427+ info.si_code = SI_KERNEL;
61428+ info.si_pid = 0;
61429+ info.si_uid = 0;
61430+ do_coredump(&info);
61431+}
61432+#endif
61433+
61434+#ifdef CONFIG_PAX_REFCOUNT
61435+void pax_report_refcount_overflow(struct pt_regs *regs)
61436+{
61437+ if (current->signal->curr_ip)
61438+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
61439+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
61440+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61441+ else
61442+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
61443+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61444+ print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
61445+ preempt_disable();
61446+ show_regs(regs);
61447+ preempt_enable();
61448+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
61449+}
61450+#endif
61451+
61452+#ifdef CONFIG_PAX_USERCOPY
61453+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
61454+static noinline int check_stack_object(const void *obj, unsigned long len)
61455+{
61456+ const void * const stack = task_stack_page(current);
61457+ const void * const stackend = stack + THREAD_SIZE;
61458+
61459+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61460+ const void *frame = NULL;
61461+ const void *oldframe;
61462+#endif
61463+
61464+ if (obj + len < obj)
61465+ return -1;
61466+
61467+ if (obj + len <= stack || stackend <= obj)
61468+ return 0;
61469+
61470+ if (obj < stack || stackend < obj + len)
61471+ return -1;
61472+
61473+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61474+ oldframe = __builtin_frame_address(1);
61475+ if (oldframe)
61476+ frame = __builtin_frame_address(2);
61477+ /*
61478+ low ----------------------------------------------> high
61479+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
61480+ ^----------------^
61481+ allow copies only within here
61482+ */
61483+ while (stack <= frame && frame < stackend) {
61484+ /* if obj + len extends past the last frame, this
61485+ check won't pass and the next frame will be 0,
61486+ causing us to bail out and correctly report
61487+ the copy as invalid
61488+ */
61489+ if (obj + len <= frame)
61490+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
61491+ oldframe = frame;
61492+ frame = *(const void * const *)frame;
61493+ }
61494+ return -1;
61495+#else
61496+ return 1;
61497+#endif
61498+}
61499+
61500+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
61501+{
61502+ if (current->signal->curr_ip)
61503+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61504+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61505+ else
61506+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61507+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61508+ dump_stack();
61509+ gr_handle_kernel_exploit();
61510+ do_group_exit(SIGKILL);
61511+}
61512+#endif
61513+
61514+#ifdef CONFIG_PAX_USERCOPY
61515+
61516+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
61517+{
61518+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
61519+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
61520+#ifdef CONFIG_MODULES
61521+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
61522+#else
61523+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
61524+#endif
61525+
61526+#else
61527+ unsigned long textlow = (unsigned long)_stext;
61528+ unsigned long texthigh = (unsigned long)_etext;
61529+
61530+#ifdef CONFIG_X86_64
61531+ /* check against linear mapping as well */
61532+ if (high > (unsigned long)__va(__pa(textlow)) &&
61533+ low < (unsigned long)__va(__pa(texthigh)))
61534+ return true;
61535+#endif
61536+
61537+#endif
61538+
61539+ if (high <= textlow || low >= texthigh)
61540+ return false;
61541+ else
61542+ return true;
61543+}
61544+#endif
61545+
61546+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
61547+{
61548+#ifdef CONFIG_PAX_USERCOPY
61549+ const char *type;
61550+#endif
61551+
61552+#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_X86_64)
61553+ unsigned long stackstart = (unsigned long)task_stack_page(current);
61554+ unsigned long currentsp = (unsigned long)&stackstart;
61555+ if (unlikely((currentsp < stackstart + 512 ||
61556+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
61557+ BUG();
61558+#endif
61559+
61560+#ifndef CONFIG_PAX_USERCOPY_DEBUG
61561+ if (const_size)
61562+ return;
61563+#endif
61564+
61565+#ifdef CONFIG_PAX_USERCOPY
61566+ if (!n)
61567+ return;
61568+
61569+ type = check_heap_object(ptr, n);
61570+ if (!type) {
61571+ int ret = check_stack_object(ptr, n);
61572+ if (ret == 1 || ret == 2)
61573+ return;
61574+ if (ret == 0) {
61575+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
61576+ type = "<kernel text>";
61577+ else
61578+ return;
61579+ } else
61580+ type = "<process stack>";
61581+ }
61582+
61583+ pax_report_usercopy(ptr, n, to_user, type);
61584+#endif
61585+
61586+}
61587+EXPORT_SYMBOL(__check_object_size);
61588+
61589+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
61590+void pax_track_stack(void)
61591+{
61592+ unsigned long sp = (unsigned long)&sp;
61593+ if (sp < current_thread_info()->lowest_stack &&
61594+ sp >= (unsigned long)task_stack_page(current) + 2 * sizeof(unsigned long))
61595+ current_thread_info()->lowest_stack = sp;
61596+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
61597+ BUG();
61598+}
61599+EXPORT_SYMBOL(pax_track_stack);
61600+#endif
61601+
61602+#ifdef CONFIG_PAX_SIZE_OVERFLOW
61603+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
61604+{
61605+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s\n", func, file, line, ssa_name);
61606+ dump_stack();
61607+ do_group_exit(SIGKILL);
61608+}
61609+EXPORT_SYMBOL(report_size_overflow);
61610+#endif
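
The fs/exec.c additions end here with the PAX_USERCOPY machinery. The core of check_stack_object() is a three-way interval test; a minimal user-space sketch of just that test follows, assuming only the 0/1/-1 return convention documented in its comment (the frame-walking path that yields 2 is omitted):

#include <stdio.h>

/* Classify a candidate copy [obj, obj+len) against [stack, stackend),
 * mirroring the first half of check_stack_object() above. */
static int classify(const char *obj, unsigned long len,
                    const char *stack, const char *stackend)
{
        if (obj + len < obj)
                return -1;                      /* pointer wraparound */
        if (obj + len <= stack || stackend <= obj)
                return 0;                       /* entirely outside */
        if (obj < stack || stackend < obj + len)
                return -1;                      /* straddles a boundary */
        return 1;                               /* fully inside */
}

int main(void)
{
        char region[256];

        printf("%d\n", classify(region + 16, 32, region + 8, region + 248)); /* 1 */
        printf("%d\n", classify(region,       8, region + 8, region + 248)); /* 0 */
        printf("%d\n", classify(region,      32, region + 8, region + 248)); /* -1 */
        return 0;
}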
61611diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
61612index 9f9992b..8b59411 100644
61613--- a/fs/ext2/balloc.c
61614+++ b/fs/ext2/balloc.c
61615@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
61616
61617 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61618 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61619- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61620+ if (free_blocks < root_blocks + 1 &&
61621 !uid_eq(sbi->s_resuid, current_fsuid()) &&
61622 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61623- !in_group_p (sbi->s_resgid))) {
61624+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61625 return 0;
61626 }
61627 return 1;
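
This reordering, repeated in the ext3 and ext4 hunks below, relies on && short-circuiting left to right: capable_nolog(), a grsecurity variant of capable() that skips audit logging, now runs only when every earlier, cheaper condition has already held, so the capability check's side effects fire as rarely as possible. A small sketch of the evaluation-order property (names invented for illustration):

#include <stdbool.h>
#include <stdio.h>

static bool expensive_check(void)
{
        puts("capability check ran");   /* side effect we want to minimize */
        return true;
}

int main(void)
{
        bool low_on_blocks = false;

        /* expensive_check() is never called: the left operand already
         * decides the result */
        if (low_on_blocks && expensive_check())
                puts("deny");
        return 0;
}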
61628diff --git a/fs/ext2/super.c b/fs/ext2/super.c
61629index ae55fdd..5e64c27 100644
61630--- a/fs/ext2/super.c
61631+++ b/fs/ext2/super.c
61632@@ -268,10 +268,8 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
61633 #ifdef CONFIG_EXT2_FS_XATTR
61634 if (test_opt(sb, XATTR_USER))
61635 seq_puts(seq, ",user_xattr");
61636- if (!test_opt(sb, XATTR_USER) &&
61637- (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
61638+ if (!test_opt(sb, XATTR_USER))
61639 seq_puts(seq, ",nouser_xattr");
61640- }
61641 #endif
61642
61643 #ifdef CONFIG_EXT2_FS_POSIX_ACL
61644@@ -850,8 +848,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
61645 if (def_mount_opts & EXT2_DEFM_UID16)
61646 set_opt(sbi->s_mount_opt, NO_UID32);
61647 #ifdef CONFIG_EXT2_FS_XATTR
61648- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
61649- set_opt(sbi->s_mount_opt, XATTR_USER);
61650+ /* always enable user xattrs */
61651+ set_opt(sbi->s_mount_opt, XATTR_USER);
61652 #endif
61653 #ifdef CONFIG_EXT2_FS_POSIX_ACL
61654 if (def_mount_opts & EXT2_DEFM_ACL)
61655diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
61656index 9142614..97484fa 100644
61657--- a/fs/ext2/xattr.c
61658+++ b/fs/ext2/xattr.c
61659@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
61660 struct buffer_head *bh = NULL;
61661 struct ext2_xattr_entry *entry;
61662 char *end;
61663- size_t rest = buffer_size;
61664+ size_t rest = buffer_size, total_size = 0;
61665 int error;
61666
61667 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
61668@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
61669 buffer += size;
61670 }
61671 rest -= size;
61672+ total_size += size;
61673 }
61674 }
61675- error = buffer_size - rest; /* total size */
61676+ error = total_size;
61677
61678 cleanup:
61679 brelse(bh);
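
These xattr list hunks, mirrored below for ext3 and ext4, swap the derived return value buffer_size - rest for an explicit total_size accumulator. The two agree numerically even when rest wraps, but during a size-only probe buffer_size is 0 and rest underflows on every entry; a plausible motivation is that the returned expression no longer depends on a wrapped intermediate, which suits the size-overflow instrumentation this patch applies elsewhere. A sketch of the equivalence, assuming standard size_t modular arithmetic:

#include <stdio.h>

int main(void)
{
        size_t buffer_size = 0;                 /* size-only listxattr probe */
        size_t rest = buffer_size, total_size = 0;
        size_t sizes[] = { 5, 12, 7 };

        for (int i = 0; i < 3; i++) {
                rest -= sizes[i];               /* wraps below zero */
                total_size += sizes[i];         /* never wraps here */
        }
        printf("buffer_size - rest = %zu\n", buffer_size - rest); /* 24, via wraparound */
        printf("total_size        = %zu\n", total_size);          /* 24, directly */
        return 0;
}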
61680diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
61681index 158b5d4..2432610 100644
61682--- a/fs/ext3/balloc.c
61683+++ b/fs/ext3/balloc.c
61684@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
61685
61686 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61687 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61688- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61689+ if (free_blocks < root_blocks + 1 &&
61690 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
61691 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61692- !in_group_p (sbi->s_resgid))) {
61693+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61694 return 0;
61695 }
61696 return 1;
61697diff --git a/fs/ext3/super.c b/fs/ext3/super.c
61698index 9b4e7d7..048d025 100644
61699--- a/fs/ext3/super.c
61700+++ b/fs/ext3/super.c
61701@@ -653,10 +653,8 @@ static int ext3_show_options(struct seq_file *seq, struct dentry *root)
61702 #ifdef CONFIG_EXT3_FS_XATTR
61703 if (test_opt(sb, XATTR_USER))
61704 seq_puts(seq, ",user_xattr");
61705- if (!test_opt(sb, XATTR_USER) &&
61706- (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
61707+ if (!test_opt(sb, XATTR_USER))
61708 seq_puts(seq, ",nouser_xattr");
61709- }
61710 #endif
61711 #ifdef CONFIG_EXT3_FS_POSIX_ACL
61712 if (test_opt(sb, POSIX_ACL))
61713@@ -1758,8 +1756,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
61714 if (def_mount_opts & EXT3_DEFM_UID16)
61715 set_opt(sbi->s_mount_opt, NO_UID32);
61716 #ifdef CONFIG_EXT3_FS_XATTR
61717- if (def_mount_opts & EXT3_DEFM_XATTR_USER)
61718- set_opt(sbi->s_mount_opt, XATTR_USER);
61719+ /* always enable user xattrs */
61720+ set_opt(sbi->s_mount_opt, XATTR_USER);
61721 #endif
61722 #ifdef CONFIG_EXT3_FS_POSIX_ACL
61723 if (def_mount_opts & EXT3_DEFM_ACL)
61724diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
61725index c6874be..f8a6ae8 100644
61726--- a/fs/ext3/xattr.c
61727+++ b/fs/ext3/xattr.c
61728@@ -330,7 +330,7 @@ static int
61729 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61730 char *buffer, size_t buffer_size)
61731 {
61732- size_t rest = buffer_size;
61733+ size_t rest = buffer_size, total_size = 0;
61734
61735 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
61736 const struct xattr_handler *handler =
61737@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61738 buffer += size;
61739 }
61740 rest -= size;
61741+ total_size += size;
61742 }
61743 }
61744- return buffer_size - rest;
61745+ return total_size;
61746 }
61747
61748 static int
61749diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
61750index 83a6f49..d4e4d03 100644
61751--- a/fs/ext4/balloc.c
61752+++ b/fs/ext4/balloc.c
61753@@ -557,8 +557,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
61754 /* Hm, nope. Are (enough) root reserved clusters available? */
61755 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
61756 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
61757- capable(CAP_SYS_RESOURCE) ||
61758- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
61759+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
61760+ capable_nolog(CAP_SYS_RESOURCE)) {
61761
61762 if (free_clusters >= (nclusters + dirty_clusters +
61763 resv_clusters))
61764diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
61765index a75fba6..8235fca 100644
61766--- a/fs/ext4/ext4.h
61767+++ b/fs/ext4/ext4.h
61768@@ -1274,19 +1274,19 @@ struct ext4_sb_info {
61769 unsigned long s_mb_last_start;
61770
61771 /* stats for buddy allocator */
61772- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
61773- atomic_t s_bal_success; /* we found long enough chunks */
61774- atomic_t s_bal_allocated; /* in blocks */
61775- atomic_t s_bal_ex_scanned; /* total extents scanned */
61776- atomic_t s_bal_goals; /* goal hits */
61777- atomic_t s_bal_breaks; /* too long searches */
61778- atomic_t s_bal_2orders; /* 2^order hits */
61779+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
61780+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
61781+ atomic_unchecked_t s_bal_allocated; /* in blocks */
61782+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
61783+ atomic_unchecked_t s_bal_goals; /* goal hits */
61784+ atomic_unchecked_t s_bal_breaks; /* too long searches */
61785+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
61786 spinlock_t s_bal_lock;
61787 unsigned long s_mb_buddies_generated;
61788 unsigned long long s_mb_generation_time;
61789- atomic_t s_mb_lost_chunks;
61790- atomic_t s_mb_preallocated;
61791- atomic_t s_mb_discarded;
61792+ atomic_unchecked_t s_mb_lost_chunks;
61793+ atomic_unchecked_t s_mb_preallocated;
61794+ atomic_unchecked_t s_mb_discarded;
61795 atomic_t s_lock_busy;
61796
61797 /* locality groups */
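
The s_bal_* and s_mb_* conversions above show a pattern that recurs throughout this patch: with the PaX REFCOUNT feature, atomic_t arithmetic is overflow-checked to catch reference-count bugs, so pure statistics counters, where wraparound is harmless, are moved to atomic_unchecked_t to avoid false-positive detections. A toy model of the opt-out, assuming only that the unchecked ops are ordinary wrapping atomics (the real type and helpers are defined in this patch's arch headers, not in this section):

#include <limits.h>
#include <stdio.h>

typedef struct { volatile int counter; } my_atomic_unchecked_t;

/* plain wrapping increment: no overflow trap, unlike a checked atomic_inc() */
static void my_atomic_inc_unchecked(my_atomic_unchecked_t *v)
{
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

int main(void)
{
        my_atomic_unchecked_t stat = { INT_MAX };

        my_atomic_inc_unchecked(&stat);         /* wraps to INT_MIN, tolerated */
        printf("%d\n", stat.counter);
        return 0;
}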
61798diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
61799index 8d1e602..abf497b 100644
61800--- a/fs/ext4/mballoc.c
61801+++ b/fs/ext4/mballoc.c
61802@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
61803 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
61804
61805 if (EXT4_SB(sb)->s_mb_stats)
61806- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
61807+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
61808
61809 break;
61810 }
61811@@ -2211,7 +2211,7 @@ repeat:
61812 ac->ac_status = AC_STATUS_CONTINUE;
61813 ac->ac_flags |= EXT4_MB_HINT_FIRST;
61814 cr = 3;
61815- atomic_inc(&sbi->s_mb_lost_chunks);
61816+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
61817 goto repeat;
61818 }
61819 }
61820@@ -2716,25 +2716,25 @@ int ext4_mb_release(struct super_block *sb)
61821 if (sbi->s_mb_stats) {
61822 ext4_msg(sb, KERN_INFO,
61823 "mballoc: %u blocks %u reqs (%u success)",
61824- atomic_read(&sbi->s_bal_allocated),
61825- atomic_read(&sbi->s_bal_reqs),
61826- atomic_read(&sbi->s_bal_success));
61827+ atomic_read_unchecked(&sbi->s_bal_allocated),
61828+ atomic_read_unchecked(&sbi->s_bal_reqs),
61829+ atomic_read_unchecked(&sbi->s_bal_success));
61830 ext4_msg(sb, KERN_INFO,
61831 "mballoc: %u extents scanned, %u goal hits, "
61832 "%u 2^N hits, %u breaks, %u lost",
61833- atomic_read(&sbi->s_bal_ex_scanned),
61834- atomic_read(&sbi->s_bal_goals),
61835- atomic_read(&sbi->s_bal_2orders),
61836- atomic_read(&sbi->s_bal_breaks),
61837- atomic_read(&sbi->s_mb_lost_chunks));
61838+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
61839+ atomic_read_unchecked(&sbi->s_bal_goals),
61840+ atomic_read_unchecked(&sbi->s_bal_2orders),
61841+ atomic_read_unchecked(&sbi->s_bal_breaks),
61842+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
61843 ext4_msg(sb, KERN_INFO,
61844 "mballoc: %lu generated and it took %Lu",
61845 sbi->s_mb_buddies_generated,
61846 sbi->s_mb_generation_time);
61847 ext4_msg(sb, KERN_INFO,
61848 "mballoc: %u preallocated, %u discarded",
61849- atomic_read(&sbi->s_mb_preallocated),
61850- atomic_read(&sbi->s_mb_discarded));
61851+ atomic_read_unchecked(&sbi->s_mb_preallocated),
61852+ atomic_read_unchecked(&sbi->s_mb_discarded));
61853 }
61854
61855 free_percpu(sbi->s_locality_groups);
61856@@ -3190,16 +3190,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
61857 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
61858
61859 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
61860- atomic_inc(&sbi->s_bal_reqs);
61861- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
61862+ atomic_inc_unchecked(&sbi->s_bal_reqs);
61863+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
61864 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
61865- atomic_inc(&sbi->s_bal_success);
61866- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
61867+ atomic_inc_unchecked(&sbi->s_bal_success);
61868+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
61869 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
61870 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
61871- atomic_inc(&sbi->s_bal_goals);
61872+ atomic_inc_unchecked(&sbi->s_bal_goals);
61873 if (ac->ac_found > sbi->s_mb_max_to_scan)
61874- atomic_inc(&sbi->s_bal_breaks);
61875+ atomic_inc_unchecked(&sbi->s_bal_breaks);
61876 }
61877
61878 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
61879@@ -3626,7 +3626,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
61880 trace_ext4_mb_new_inode_pa(ac, pa);
61881
61882 ext4_mb_use_inode_pa(ac, pa);
61883- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
61884+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
61885
61886 ei = EXT4_I(ac->ac_inode);
61887 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
61888@@ -3686,7 +3686,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
61889 trace_ext4_mb_new_group_pa(ac, pa);
61890
61891 ext4_mb_use_group_pa(ac, pa);
61892- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
61893+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
61894
61895 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
61896 lg = ac->ac_lg;
61897@@ -3775,7 +3775,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
61898 * from the bitmap and continue.
61899 */
61900 }
61901- atomic_add(free, &sbi->s_mb_discarded);
61902+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
61903
61904 return err;
61905 }
61906@@ -3793,7 +3793,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
61907 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
61908 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
61909 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
61910- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
61911+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
61912 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
61913
61914 return 0;
61915diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
61916index 8313ca3..8a37d08 100644
61917--- a/fs/ext4/mmp.c
61918+++ b/fs/ext4/mmp.c
61919@@ -111,7 +111,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
61920 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
61921 const char *function, unsigned int line, const char *msg)
61922 {
61923- __ext4_warning(sb, function, line, msg);
61924+ __ext4_warning(sb, function, line, "%s", msg);
61925 __ext4_warning(sb, function, line,
61926 "MMP failure info: last update time: %llu, last update "
61927 "node: %s, last update device: %s\n",
61928diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
61929index 8a8ec62..1b02de5 100644
61930--- a/fs/ext4/resize.c
61931+++ b/fs/ext4/resize.c
61932@@ -413,7 +413,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
61933
61934 ext4_debug("mark blocks [%llu/%u] used\n", block, count);
61935 for (count2 = count; count > 0; count -= count2, block += count2) {
61936- ext4_fsblk_t start;
61937+ ext4_fsblk_t start, diff;
61938 struct buffer_head *bh;
61939 ext4_group_t group;
61940 int err;
61941@@ -422,10 +422,6 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
61942 start = ext4_group_first_block_no(sb, group);
61943 group -= flex_gd->groups[0].group;
61944
61945- count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
61946- if (count2 > count)
61947- count2 = count;
61948-
61949 if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
61950 BUG_ON(flex_gd->count > 1);
61951 continue;
61952@@ -443,9 +439,15 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
61953 err = ext4_journal_get_write_access(handle, bh);
61954 if (err)
61955 return err;
61956+
61957+ diff = block - start;
61958+ count2 = EXT4_BLOCKS_PER_GROUP(sb) - diff;
61959+ if (count2 > count)
61960+ count2 = count;
61961+
61962 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
61963- block - start, count2);
61964- ext4_set_bits(bh->b_data, block - start, count2);
61965+ diff, count2);
61966+ ext4_set_bits(bh->b_data, diff, count2);
61967
61968 err = ext4_handle_dirty_metadata(handle, NULL, bh);
61969 if (unlikely(err))
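
The resize.c hunk defers computing the in-group offset and chunk length until just before they are used, naming the offset diff. A simplified model of that per-group chunking arithmetic; the group lookup here is plain division, standing in for ext4_get_group_no_and_offset():

#include <stdio.h>

int main(void)
{
        unsigned long block = 10, count = 50, blocks_per_group = 32;

        for (unsigned long count2 = count; count > 0;
             count -= count2, block += count2) {
                unsigned long start = (block / blocks_per_group) * blocks_per_group;
                unsigned long diff = block - start;     /* offset inside this group */

                count2 = blocks_per_group - diff;       /* room left in the group */
                if (count2 > count)
                        count2 = count;
                printf("group %lu: mark +%lu/%lu\n",
                       block / blocks_per_group, diff, count2);
        }
        return 0;
}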
61970diff --git a/fs/ext4/super.c b/fs/ext4/super.c
61971index fc29b2c..6c8b255 100644
61972--- a/fs/ext4/super.c
61973+++ b/fs/ext4/super.c
61974@@ -1252,7 +1252,7 @@ static ext4_fsblk_t get_sb_block(void **data)
61975 }
61976
61977 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
61978-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
61979+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
61980 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
61981
61982 #ifdef CONFIG_QUOTA
61983@@ -2440,7 +2440,7 @@ struct ext4_attr {
61984 int offset;
61985 int deprecated_val;
61986 } u;
61987-};
61988+} __do_const;
61989
61990 static int parse_strtoull(const char *buf,
61991 unsigned long long max, unsigned long long *value)
61992diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
61993index 1e09fc7..0400dd4 100644
61994--- a/fs/ext4/xattr.c
61995+++ b/fs/ext4/xattr.c
61996@@ -399,7 +399,7 @@ static int
61997 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
61998 char *buffer, size_t buffer_size)
61999 {
62000- size_t rest = buffer_size;
62001+ size_t rest = buffer_size, total_size = 0;
62002
62003 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
62004 const struct xattr_handler *handler =
62005@@ -416,9 +416,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
62006 buffer += size;
62007 }
62008 rest -= size;
62009+ total_size += size;
62010 }
62011 }
62012- return buffer_size - rest;
62013+ return total_size;
62014 }
62015
62016 static int
62017diff --git a/fs/fcntl.c b/fs/fcntl.c
62018index ee85cd4..9dd0d20 100644
62019--- a/fs/fcntl.c
62020+++ b/fs/fcntl.c
62021@@ -102,6 +102,10 @@ void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
62022 int force)
62023 {
62024 security_file_set_fowner(filp);
62025+ if (gr_handle_chroot_fowner(pid, type))
62026+ return;
62027+ if (gr_check_protected_task_fowner(pid, type))
62028+ return;
62029 f_modown(filp, pid, type, force);
62030 }
62031 EXPORT_SYMBOL(__f_setown);
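
The two gr_* hooks above have a shape used by many insertions in this patch: a policy predicate runs after the LSM hook and, when it denies, returns before the state change happens, here silently, with no error reported to the caller. A toy sketch of that veto shape (policy_denies() is invented for illustration):

#include <stdio.h>

static int policy_denies(int pid)
{
        return pid == 1;                        /* toy policy: protect pid 1 */
}

static void set_owner(int *owner, int pid)
{
        if (policy_denies(pid))
                return;                         /* veto: state left untouched */
        *owner = pid;
}

int main(void)
{
        int owner = 0;

        set_owner(&owner, 1);
        printf("owner=%d\n", owner);            /* still 0 */
        return 0;
}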
62032diff --git a/fs/fhandle.c b/fs/fhandle.c
62033index 999ff5c..2281df9 100644
62034--- a/fs/fhandle.c
62035+++ b/fs/fhandle.c
62036@@ -8,6 +8,7 @@
62037 #include <linux/fs_struct.h>
62038 #include <linux/fsnotify.h>
62039 #include <linux/personality.h>
62040+#include <linux/grsecurity.h>
62041 #include <asm/uaccess.h>
62042 #include "internal.h"
62043 #include "mount.h"
62044@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
62045 } else
62046 retval = 0;
62047 /* copy the mount id */
62048- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
62049- sizeof(*mnt_id)) ||
62050+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
62051 copy_to_user(ufh, handle,
62052 sizeof(struct file_handle) + handle_bytes))
62053 retval = -EFAULT;
62054@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
62055 * the directory. Ideally we would like CAP_DAC_SEARCH.
62056 * But we don't have that
62057 */
62058- if (!capable(CAP_DAC_READ_SEARCH)) {
62059+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
62060 retval = -EPERM;
62061 goto out_err;
62062 }
62063@@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
62064 goto out_err;
62065 }
62066 /* copy the full handle */
62067- if (copy_from_user(handle, ufh,
62068- sizeof(struct file_handle) +
62069+ *handle = f_handle;
62070+ if (copy_from_user(&handle->f_handle,
62071+ &ufh->f_handle,
62072 f_handle.handle_bytes)) {
62073 retval = -EFAULT;
62074 goto out_handle;
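
This hunk replaces a second full-struct copy_from_user() with *handle = f_handle, reusing the header that was fetched and validated earlier, and then copies only the payload bytes. A plausible reading is that it closes a double-fetch window: re-reading handle_bytes from user memory after validating it would let a concurrent writer enlarge it between the check and the use. A user-space sketch of the idiom, with invented names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct handle_hdr { unsigned handle_bytes; };

int main(void)
{
        /* fetch #1: snapshot the header and validate the length */
        struct handle_hdr hdr = { 8 };  /* stands in for the first copy_from_user() */
        if (hdr.handle_bytes > 128)
                return 1;

        unsigned char user_payload[8] = "ABCDEFG";
        unsigned char *buf = malloc(sizeof(hdr) + hdr.handle_bytes);
        if (!buf)
                return 1;

        /* fetch #2: copy only the payload, sized by the validated
         * snapshot, never by a re-read of the user-controlled header */
        memcpy(buf, &hdr, sizeof(hdr));
        memcpy(buf + sizeof(hdr), user_payload, hdr.handle_bytes);
        printf("copied %u payload bytes\n", hdr.handle_bytes);
        free(buf);
        return 0;
}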
62075diff --git a/fs/file.c b/fs/file.c
62076index ee738ea..f6c15629 100644
62077--- a/fs/file.c
62078+++ b/fs/file.c
62079@@ -16,6 +16,7 @@
62080 #include <linux/slab.h>
62081 #include <linux/vmalloc.h>
62082 #include <linux/file.h>
62083+#include <linux/security.h>
62084 #include <linux/fdtable.h>
62085 #include <linux/bitops.h>
62086 #include <linux/interrupt.h>
62087@@ -139,7 +140,7 @@ out:
62088 * Return <0 error code on error; 1 on successful completion.
62089 * The files->file_lock should be held on entry, and will be held on exit.
62090 */
62091-static int expand_fdtable(struct files_struct *files, int nr)
62092+static int expand_fdtable(struct files_struct *files, unsigned int nr)
62093 __releases(files->file_lock)
62094 __acquires(files->file_lock)
62095 {
62096@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
62097 * expanded and execution may have blocked.
62098 * The files->file_lock should be held on entry, and will be held on exit.
62099 */
62100-static int expand_files(struct files_struct *files, int nr)
62101+static int expand_files(struct files_struct *files, unsigned int nr)
62102 {
62103 struct fdtable *fdt;
62104
62105@@ -800,6 +801,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
62106 if (!file)
62107 return __close_fd(files, fd);
62108
62109+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
62110 if (fd >= rlimit(RLIMIT_NOFILE))
62111 return -EBADF;
62112
62113@@ -826,6 +828,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
62114 if (unlikely(oldfd == newfd))
62115 return -EINVAL;
62116
62117+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
62118 if (newfd >= rlimit(RLIMIT_NOFILE))
62119 return -EBADF;
62120
62121@@ -881,6 +884,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
62122 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
62123 {
62124 int err;
62125+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
62126 if (from >= rlimit(RLIMIT_NOFILE))
62127 return -EINVAL;
62128 err = alloc_fd(from, flags);
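
Alongside the gr_learn_resource() hooks, this file's hunks also widen expand_fdtable()/expand_files() to take unsigned int nr, keeping the descriptor count in an unsigned domain. A sketch of the signedness hazard that consistent unsigned comparisons avoid:

#include <stdio.h>

int main(void)
{
        int nr = -1;
        unsigned int limit = 1024;

        if ((unsigned int)nr >= limit)  /* -1 wraps to UINT_MAX: rejected */
                puts("rejected");
        if (nr < (int)limit)            /* a signed test would accept it */
                puts("a signed comparison would have accepted -1");
        return 0;
}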
62129diff --git a/fs/filesystems.c b/fs/filesystems.c
62130index 5797d45..7d7d79a 100644
62131--- a/fs/filesystems.c
62132+++ b/fs/filesystems.c
62133@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
62134 int len = dot ? dot - name : strlen(name);
62135
62136 fs = __get_fs_type(name, len);
62137+#ifdef CONFIG_GRKERNSEC_MODHARDEN
62138+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
62139+#else
62140 if (!fs && (request_module("fs-%.*s", len, name) == 0))
62141+#endif
62142 fs = __get_fs_type(name, len);
62143
62144 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
62145diff --git a/fs/fs_struct.c b/fs/fs_struct.c
62146index 7dca743..2f2786d 100644
62147--- a/fs/fs_struct.c
62148+++ b/fs/fs_struct.c
62149@@ -4,6 +4,7 @@
62150 #include <linux/path.h>
62151 #include <linux/slab.h>
62152 #include <linux/fs_struct.h>
62153+#include <linux/grsecurity.h>
62154 #include "internal.h"
62155
62156 /*
62157@@ -15,14 +16,18 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
62158 struct path old_root;
62159
62160 path_get(path);
62161+ gr_inc_chroot_refcnts(path->dentry, path->mnt);
62162 spin_lock(&fs->lock);
62163 write_seqcount_begin(&fs->seq);
62164 old_root = fs->root;
62165 fs->root = *path;
62166+ gr_set_chroot_entries(current, path);
62167 write_seqcount_end(&fs->seq);
62168 spin_unlock(&fs->lock);
62169- if (old_root.dentry)
62170+ if (old_root.dentry) {
62171+ gr_dec_chroot_refcnts(old_root.dentry, old_root.mnt);
62172 path_put(&old_root);
62173+ }
62174 }
62175
62176 /*
62177@@ -67,6 +72,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
62178 int hits = 0;
62179 spin_lock(&fs->lock);
62180 write_seqcount_begin(&fs->seq);
62181+ /* this root replacement is only done by pivot_root,
62182+ leave grsec's chroot tagging alone for this task
62183+ so that a pivoted root isn't treated as a chroot
62184+ */
62185 hits += replace_path(&fs->root, old_root, new_root);
62186 hits += replace_path(&fs->pwd, old_root, new_root);
62187 write_seqcount_end(&fs->seq);
62188@@ -85,6 +94,7 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
62189
62190 void free_fs_struct(struct fs_struct *fs)
62191 {
62192+ gr_dec_chroot_refcnts(fs->root.dentry, fs->root.mnt);
62193 path_put(&fs->root);
62194 path_put(&fs->pwd);
62195 kmem_cache_free(fs_cachep, fs);
62196@@ -99,7 +109,8 @@ void exit_fs(struct task_struct *tsk)
62197 task_lock(tsk);
62198 spin_lock(&fs->lock);
62199 tsk->fs = NULL;
62200- kill = !--fs->users;
62201+ gr_clear_chroot_entries(tsk);
62202+ kill = !atomic_dec_return(&fs->users);
62203 spin_unlock(&fs->lock);
62204 task_unlock(tsk);
62205 if (kill)
62206@@ -112,7 +123,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
62207 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
62208 /* We don't need to lock fs - think why ;-) */
62209 if (fs) {
62210- fs->users = 1;
62211+ atomic_set(&fs->users, 1);
62212 fs->in_exec = 0;
62213 spin_lock_init(&fs->lock);
62214 seqcount_init(&fs->seq);
62215@@ -121,6 +132,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
62216 spin_lock(&old->lock);
62217 fs->root = old->root;
62218 path_get(&fs->root);
62219+ /* instead of calling gr_set_chroot_entries here,
62220+ we call it from every caller of this function
62221+ */
62222 fs->pwd = old->pwd;
62223 path_get(&fs->pwd);
62224 spin_unlock(&old->lock);
62225@@ -139,8 +153,9 @@ int unshare_fs_struct(void)
62226
62227 task_lock(current);
62228 spin_lock(&fs->lock);
62229- kill = !--fs->users;
62230+ kill = !atomic_dec_return(&fs->users);
62231 current->fs = new_fs;
62232+ gr_set_chroot_entries(current, &new_fs->root);
62233 spin_unlock(&fs->lock);
62234 task_unlock(current);
62235
62236@@ -153,13 +168,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
62237
62238 int current_umask(void)
62239 {
62240- return current->fs->umask;
62241+ return current->fs->umask | gr_acl_umask();
62242 }
62243 EXPORT_SYMBOL(current_umask);
62244
62245 /* to be mentioned only in INIT_TASK */
62246 struct fs_struct init_fs = {
62247- .users = 1,
62248+ .users = ATOMIC_INIT(1),
62249 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
62250 .seq = SEQCNT_ZERO(init_fs.seq),
62251 .umask = 0022,
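
The fs_struct hunks convert fs->users from a plain int under fs->lock to an atomic_t, turning kill = !--fs->users into !atomic_dec_return(&fs->users). The release idiom is the standard one: whichever thread drops the count to zero becomes the unique owner of teardown. A C11 sketch:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
        atomic_int users = 2;

        for (int t = 0; t < 2; t++) {
                /* fetch_sub returns the old value; old == 1 means this
                 * caller took the count to zero */
                if (atomic_fetch_sub(&users, 1) == 1)
                        puts("last user: free the structure");
        }
        return 0;
}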
62252diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
62253index 89acec7..a575262 100644
62254--- a/fs/fscache/cookie.c
62255+++ b/fs/fscache/cookie.c
62256@@ -19,7 +19,7 @@
62257
62258 struct kmem_cache *fscache_cookie_jar;
62259
62260-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
62261+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
62262
62263 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
62264 static int fscache_alloc_object(struct fscache_cache *cache,
62265@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
62266 parent ? (char *) parent->def->name : "<no-parent>",
62267 def->name, netfs_data, enable);
62268
62269- fscache_stat(&fscache_n_acquires);
62270+ fscache_stat_unchecked(&fscache_n_acquires);
62271
62272 /* if there's no parent cookie, then we don't create one here either */
62273 if (!parent) {
62274- fscache_stat(&fscache_n_acquires_null);
62275+ fscache_stat_unchecked(&fscache_n_acquires_null);
62276 _leave(" [no parent]");
62277 return NULL;
62278 }
62279@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62280 /* allocate and initialise a cookie */
62281 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
62282 if (!cookie) {
62283- fscache_stat(&fscache_n_acquires_oom);
62284+ fscache_stat_unchecked(&fscache_n_acquires_oom);
62285 _leave(" [ENOMEM]");
62286 return NULL;
62287 }
62288@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
62289
62290 switch (cookie->def->type) {
62291 case FSCACHE_COOKIE_TYPE_INDEX:
62292- fscache_stat(&fscache_n_cookie_index);
62293+ fscache_stat_unchecked(&fscache_n_cookie_index);
62294 break;
62295 case FSCACHE_COOKIE_TYPE_DATAFILE:
62296- fscache_stat(&fscache_n_cookie_data);
62297+ fscache_stat_unchecked(&fscache_n_cookie_data);
62298 break;
62299 default:
62300- fscache_stat(&fscache_n_cookie_special);
62301+ fscache_stat_unchecked(&fscache_n_cookie_special);
62302 break;
62303 }
62304
62305@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62306 } else {
62307 atomic_dec(&parent->n_children);
62308 __fscache_cookie_put(cookie);
62309- fscache_stat(&fscache_n_acquires_nobufs);
62310+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
62311 _leave(" = NULL");
62312 return NULL;
62313 }
62314@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62315 }
62316 }
62317
62318- fscache_stat(&fscache_n_acquires_ok);
62319+ fscache_stat_unchecked(&fscache_n_acquires_ok);
62320 _leave(" = %p", cookie);
62321 return cookie;
62322 }
62323@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
62324 cache = fscache_select_cache_for_object(cookie->parent);
62325 if (!cache) {
62326 up_read(&fscache_addremove_sem);
62327- fscache_stat(&fscache_n_acquires_no_cache);
62328+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
62329 _leave(" = -ENOMEDIUM [no cache]");
62330 return -ENOMEDIUM;
62331 }
62332@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
62333 object = cache->ops->alloc_object(cache, cookie);
62334 fscache_stat_d(&fscache_n_cop_alloc_object);
62335 if (IS_ERR(object)) {
62336- fscache_stat(&fscache_n_object_no_alloc);
62337+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
62338 ret = PTR_ERR(object);
62339 goto error;
62340 }
62341
62342- fscache_stat(&fscache_n_object_alloc);
62343+ fscache_stat_unchecked(&fscache_n_object_alloc);
62344
62345- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
62346+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
62347
62348 _debug("ALLOC OBJ%x: %s {%lx}",
62349 object->debug_id, cookie->def->name, object->events);
62350@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
62351
62352 _enter("{%s}", cookie->def->name);
62353
62354- fscache_stat(&fscache_n_invalidates);
62355+ fscache_stat_unchecked(&fscache_n_invalidates);
62356
62357 /* Only permit invalidation of data files. Invalidating an index will
62358 * require the caller to release all its attachments to the tree rooted
62359@@ -476,10 +476,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
62360 {
62361 struct fscache_object *object;
62362
62363- fscache_stat(&fscache_n_updates);
62364+ fscache_stat_unchecked(&fscache_n_updates);
62365
62366 if (!cookie) {
62367- fscache_stat(&fscache_n_updates_null);
62368+ fscache_stat_unchecked(&fscache_n_updates_null);
62369 _leave(" [no cookie]");
62370 return;
62371 }
62372@@ -580,12 +580,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
62373 */
62374 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
62375 {
62376- fscache_stat(&fscache_n_relinquishes);
62377+ fscache_stat_unchecked(&fscache_n_relinquishes);
62378 if (retire)
62379- fscache_stat(&fscache_n_relinquishes_retire);
62380+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
62381
62382 if (!cookie) {
62383- fscache_stat(&fscache_n_relinquishes_null);
62384+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
62385 _leave(" [no cookie]");
62386 return;
62387 }
62388@@ -686,7 +686,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
62389 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
62390 goto inconsistent;
62391
62392- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
62393+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
62394
62395 __fscache_use_cookie(cookie);
62396 if (fscache_submit_op(object, op) < 0)
62397diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
62398index 7872a62..d91b19f 100644
62399--- a/fs/fscache/internal.h
62400+++ b/fs/fscache/internal.h
62401@@ -137,8 +137,8 @@ extern void fscache_operation_gc(struct work_struct *);
62402 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
62403 extern int fscache_wait_for_operation_activation(struct fscache_object *,
62404 struct fscache_operation *,
62405- atomic_t *,
62406- atomic_t *,
62407+ atomic_unchecked_t *,
62408+ atomic_unchecked_t *,
62409 void (*)(struct fscache_operation *));
62410 extern void fscache_invalidate_writes(struct fscache_cookie *);
62411
62412@@ -157,101 +157,101 @@ extern void fscache_proc_cleanup(void);
62413 * stats.c
62414 */
62415 #ifdef CONFIG_FSCACHE_STATS
62416-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62417-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62418+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62419+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62420
62421-extern atomic_t fscache_n_op_pend;
62422-extern atomic_t fscache_n_op_run;
62423-extern atomic_t fscache_n_op_enqueue;
62424-extern atomic_t fscache_n_op_deferred_release;
62425-extern atomic_t fscache_n_op_release;
62426-extern atomic_t fscache_n_op_gc;
62427-extern atomic_t fscache_n_op_cancelled;
62428-extern atomic_t fscache_n_op_rejected;
62429+extern atomic_unchecked_t fscache_n_op_pend;
62430+extern atomic_unchecked_t fscache_n_op_run;
62431+extern atomic_unchecked_t fscache_n_op_enqueue;
62432+extern atomic_unchecked_t fscache_n_op_deferred_release;
62433+extern atomic_unchecked_t fscache_n_op_release;
62434+extern atomic_unchecked_t fscache_n_op_gc;
62435+extern atomic_unchecked_t fscache_n_op_cancelled;
62436+extern atomic_unchecked_t fscache_n_op_rejected;
62437
62438-extern atomic_t fscache_n_attr_changed;
62439-extern atomic_t fscache_n_attr_changed_ok;
62440-extern atomic_t fscache_n_attr_changed_nobufs;
62441-extern atomic_t fscache_n_attr_changed_nomem;
62442-extern atomic_t fscache_n_attr_changed_calls;
62443+extern atomic_unchecked_t fscache_n_attr_changed;
62444+extern atomic_unchecked_t fscache_n_attr_changed_ok;
62445+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
62446+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
62447+extern atomic_unchecked_t fscache_n_attr_changed_calls;
62448
62449-extern atomic_t fscache_n_allocs;
62450-extern atomic_t fscache_n_allocs_ok;
62451-extern atomic_t fscache_n_allocs_wait;
62452-extern atomic_t fscache_n_allocs_nobufs;
62453-extern atomic_t fscache_n_allocs_intr;
62454-extern atomic_t fscache_n_allocs_object_dead;
62455-extern atomic_t fscache_n_alloc_ops;
62456-extern atomic_t fscache_n_alloc_op_waits;
62457+extern atomic_unchecked_t fscache_n_allocs;
62458+extern atomic_unchecked_t fscache_n_allocs_ok;
62459+extern atomic_unchecked_t fscache_n_allocs_wait;
62460+extern atomic_unchecked_t fscache_n_allocs_nobufs;
62461+extern atomic_unchecked_t fscache_n_allocs_intr;
62462+extern atomic_unchecked_t fscache_n_allocs_object_dead;
62463+extern atomic_unchecked_t fscache_n_alloc_ops;
62464+extern atomic_unchecked_t fscache_n_alloc_op_waits;
62465
62466-extern atomic_t fscache_n_retrievals;
62467-extern atomic_t fscache_n_retrievals_ok;
62468-extern atomic_t fscache_n_retrievals_wait;
62469-extern atomic_t fscache_n_retrievals_nodata;
62470-extern atomic_t fscache_n_retrievals_nobufs;
62471-extern atomic_t fscache_n_retrievals_intr;
62472-extern atomic_t fscache_n_retrievals_nomem;
62473-extern atomic_t fscache_n_retrievals_object_dead;
62474-extern atomic_t fscache_n_retrieval_ops;
62475-extern atomic_t fscache_n_retrieval_op_waits;
62476+extern atomic_unchecked_t fscache_n_retrievals;
62477+extern atomic_unchecked_t fscache_n_retrievals_ok;
62478+extern atomic_unchecked_t fscache_n_retrievals_wait;
62479+extern atomic_unchecked_t fscache_n_retrievals_nodata;
62480+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
62481+extern atomic_unchecked_t fscache_n_retrievals_intr;
62482+extern atomic_unchecked_t fscache_n_retrievals_nomem;
62483+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
62484+extern atomic_unchecked_t fscache_n_retrieval_ops;
62485+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
62486
62487-extern atomic_t fscache_n_stores;
62488-extern atomic_t fscache_n_stores_ok;
62489-extern atomic_t fscache_n_stores_again;
62490-extern atomic_t fscache_n_stores_nobufs;
62491-extern atomic_t fscache_n_stores_oom;
62492-extern atomic_t fscache_n_store_ops;
62493-extern atomic_t fscache_n_store_calls;
62494-extern atomic_t fscache_n_store_pages;
62495-extern atomic_t fscache_n_store_radix_deletes;
62496-extern atomic_t fscache_n_store_pages_over_limit;
62497+extern atomic_unchecked_t fscache_n_stores;
62498+extern atomic_unchecked_t fscache_n_stores_ok;
62499+extern atomic_unchecked_t fscache_n_stores_again;
62500+extern atomic_unchecked_t fscache_n_stores_nobufs;
62501+extern atomic_unchecked_t fscache_n_stores_oom;
62502+extern atomic_unchecked_t fscache_n_store_ops;
62503+extern atomic_unchecked_t fscache_n_store_calls;
62504+extern atomic_unchecked_t fscache_n_store_pages;
62505+extern atomic_unchecked_t fscache_n_store_radix_deletes;
62506+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
62507
62508-extern atomic_t fscache_n_store_vmscan_not_storing;
62509-extern atomic_t fscache_n_store_vmscan_gone;
62510-extern atomic_t fscache_n_store_vmscan_busy;
62511-extern atomic_t fscache_n_store_vmscan_cancelled;
62512-extern atomic_t fscache_n_store_vmscan_wait;
62513+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
62514+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
62515+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
62516+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
62517+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
62518
62519-extern atomic_t fscache_n_marks;
62520-extern atomic_t fscache_n_uncaches;
62521+extern atomic_unchecked_t fscache_n_marks;
62522+extern atomic_unchecked_t fscache_n_uncaches;
62523
62524-extern atomic_t fscache_n_acquires;
62525-extern atomic_t fscache_n_acquires_null;
62526-extern atomic_t fscache_n_acquires_no_cache;
62527-extern atomic_t fscache_n_acquires_ok;
62528-extern atomic_t fscache_n_acquires_nobufs;
62529-extern atomic_t fscache_n_acquires_oom;
62530+extern atomic_unchecked_t fscache_n_acquires;
62531+extern atomic_unchecked_t fscache_n_acquires_null;
62532+extern atomic_unchecked_t fscache_n_acquires_no_cache;
62533+extern atomic_unchecked_t fscache_n_acquires_ok;
62534+extern atomic_unchecked_t fscache_n_acquires_nobufs;
62535+extern atomic_unchecked_t fscache_n_acquires_oom;
62536
62537-extern atomic_t fscache_n_invalidates;
62538-extern atomic_t fscache_n_invalidates_run;
62539+extern atomic_unchecked_t fscache_n_invalidates;
62540+extern atomic_unchecked_t fscache_n_invalidates_run;
62541
62542-extern atomic_t fscache_n_updates;
62543-extern atomic_t fscache_n_updates_null;
62544-extern atomic_t fscache_n_updates_run;
62545+extern atomic_unchecked_t fscache_n_updates;
62546+extern atomic_unchecked_t fscache_n_updates_null;
62547+extern atomic_unchecked_t fscache_n_updates_run;
62548
62549-extern atomic_t fscache_n_relinquishes;
62550-extern atomic_t fscache_n_relinquishes_null;
62551-extern atomic_t fscache_n_relinquishes_waitcrt;
62552-extern atomic_t fscache_n_relinquishes_retire;
62553+extern atomic_unchecked_t fscache_n_relinquishes;
62554+extern atomic_unchecked_t fscache_n_relinquishes_null;
62555+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
62556+extern atomic_unchecked_t fscache_n_relinquishes_retire;
62557
62558-extern atomic_t fscache_n_cookie_index;
62559-extern atomic_t fscache_n_cookie_data;
62560-extern atomic_t fscache_n_cookie_special;
62561+extern atomic_unchecked_t fscache_n_cookie_index;
62562+extern atomic_unchecked_t fscache_n_cookie_data;
62563+extern atomic_unchecked_t fscache_n_cookie_special;
62564
62565-extern atomic_t fscache_n_object_alloc;
62566-extern atomic_t fscache_n_object_no_alloc;
62567-extern atomic_t fscache_n_object_lookups;
62568-extern atomic_t fscache_n_object_lookups_negative;
62569-extern atomic_t fscache_n_object_lookups_positive;
62570-extern atomic_t fscache_n_object_lookups_timed_out;
62571-extern atomic_t fscache_n_object_created;
62572-extern atomic_t fscache_n_object_avail;
62573-extern atomic_t fscache_n_object_dead;
62574+extern atomic_unchecked_t fscache_n_object_alloc;
62575+extern atomic_unchecked_t fscache_n_object_no_alloc;
62576+extern atomic_unchecked_t fscache_n_object_lookups;
62577+extern atomic_unchecked_t fscache_n_object_lookups_negative;
62578+extern atomic_unchecked_t fscache_n_object_lookups_positive;
62579+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
62580+extern atomic_unchecked_t fscache_n_object_created;
62581+extern atomic_unchecked_t fscache_n_object_avail;
62582+extern atomic_unchecked_t fscache_n_object_dead;
62583
62584-extern atomic_t fscache_n_checkaux_none;
62585-extern atomic_t fscache_n_checkaux_okay;
62586-extern atomic_t fscache_n_checkaux_update;
62587-extern atomic_t fscache_n_checkaux_obsolete;
62588+extern atomic_unchecked_t fscache_n_checkaux_none;
62589+extern atomic_unchecked_t fscache_n_checkaux_okay;
62590+extern atomic_unchecked_t fscache_n_checkaux_update;
62591+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
62592
62593 extern atomic_t fscache_n_cop_alloc_object;
62594 extern atomic_t fscache_n_cop_lookup_object;
62595@@ -276,6 +276,11 @@ static inline void fscache_stat(atomic_t *stat)
62596 atomic_inc(stat);
62597 }
62598
62599+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
62600+{
62601+ atomic_inc_unchecked(stat);
62602+}
62603+
62604 static inline void fscache_stat_d(atomic_t *stat)
62605 {
62606 atomic_dec(stat);
62607@@ -288,6 +293,7 @@ extern const struct file_operations fscache_stats_fops;
62608
62609 #define __fscache_stat(stat) (NULL)
62610 #define fscache_stat(stat) do {} while (0)
62611+#define fscache_stat_unchecked(stat) do {} while (0)
62612 #define fscache_stat_d(stat) do {} while (0)
62613 #endif
62614
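
The internal.h hunk extends fscache's existing pattern: when CONFIG_FSCACHE_STATS is set, fscache_stat_unchecked() is a real inline; otherwise a do-nothing macro keeps every call site unconditional. A toy reduction of that config-gated shape (MY_STATS and the my_* names are invented):

#include <stdio.h>

#define MY_STATS 1              /* flip to 0 for the no-op build */

static int n_acquires;          /* toy stand-in for atomic_unchecked_t */

#if MY_STATS
static inline void my_stat_unchecked(int *stat) { (*stat)++; }
#else
#define my_stat_unchecked(stat) do {} while (0)
#endif

int main(void)
{
        my_stat_unchecked(&n_acquires); /* identical call site either way */
        printf("%d\n", n_acquires);
        return 0;
}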
62615diff --git a/fs/fscache/object.c b/fs/fscache/object.c
62616index da032da..0076ce7 100644
62617--- a/fs/fscache/object.c
62618+++ b/fs/fscache/object.c
62619@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62620 _debug("LOOKUP \"%s\" in \"%s\"",
62621 cookie->def->name, object->cache->tag->name);
62622
62623- fscache_stat(&fscache_n_object_lookups);
62624+ fscache_stat_unchecked(&fscache_n_object_lookups);
62625 fscache_stat(&fscache_n_cop_lookup_object);
62626 ret = object->cache->ops->lookup_object(object);
62627 fscache_stat_d(&fscache_n_cop_lookup_object);
62628@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62629 if (ret == -ETIMEDOUT) {
62630 /* probably stuck behind another object, so move this one to
62631 * the back of the queue */
62632- fscache_stat(&fscache_n_object_lookups_timed_out);
62633+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
62634 _leave(" [timeout]");
62635 return NO_TRANSIT;
62636 }
62637@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
62638 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
62639
62640 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62641- fscache_stat(&fscache_n_object_lookups_negative);
62642+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
62643
62644 /* Allow write requests to begin stacking up and read requests to begin
62645 * returning ENODATA.
62646@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
62647 /* if we were still looking up, then we must have a positive lookup
62648 * result, in which case there may be data available */
62649 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62650- fscache_stat(&fscache_n_object_lookups_positive);
62651+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
62652
62653 /* We do (presumably) have data */
62654 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
62655@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
62656 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
62657 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
62658 } else {
62659- fscache_stat(&fscache_n_object_created);
62660+ fscache_stat_unchecked(&fscache_n_object_created);
62661 }
62662
62663 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
62664@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
62665 fscache_stat_d(&fscache_n_cop_lookup_complete);
62666
62667 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
62668- fscache_stat(&fscache_n_object_avail);
62669+ fscache_stat_unchecked(&fscache_n_object_avail);
62670
62671 _leave("");
62672 return transit_to(JUMPSTART_DEPS);
62673@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
62674
62675 /* this just shifts the object release to the work processor */
62676 fscache_put_object(object);
62677- fscache_stat(&fscache_n_object_dead);
62678+ fscache_stat_unchecked(&fscache_n_object_dead);
62679
62680 _leave("");
62681 return transit_to(OBJECT_DEAD);
62682@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62683 enum fscache_checkaux result;
62684
62685 if (!object->cookie->def->check_aux) {
62686- fscache_stat(&fscache_n_checkaux_none);
62687+ fscache_stat_unchecked(&fscache_n_checkaux_none);
62688 return FSCACHE_CHECKAUX_OKAY;
62689 }
62690
62691@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62692 switch (result) {
62693 /* entry okay as is */
62694 case FSCACHE_CHECKAUX_OKAY:
62695- fscache_stat(&fscache_n_checkaux_okay);
62696+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
62697 break;
62698
62699 /* entry requires update */
62700 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
62701- fscache_stat(&fscache_n_checkaux_update);
62702+ fscache_stat_unchecked(&fscache_n_checkaux_update);
62703 break;
62704
62705 /* entry requires deletion */
62706 case FSCACHE_CHECKAUX_OBSOLETE:
62707- fscache_stat(&fscache_n_checkaux_obsolete);
62708+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
62709 break;
62710
62711 default:
62712@@ -993,7 +993,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
62713 {
62714 const struct fscache_state *s;
62715
62716- fscache_stat(&fscache_n_invalidates_run);
62717+ fscache_stat_unchecked(&fscache_n_invalidates_run);
62718 fscache_stat(&fscache_n_cop_invalidate_object);
62719 s = _fscache_invalidate_object(object, event);
62720 fscache_stat_d(&fscache_n_cop_invalidate_object);
62721@@ -1008,7 +1008,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
62722 {
62723 _enter("{OBJ%x},%d", object->debug_id, event);
62724
62725- fscache_stat(&fscache_n_updates_run);
62726+ fscache_stat_unchecked(&fscache_n_updates_run);
62727 fscache_stat(&fscache_n_cop_update_object);
62728 object->cache->ops->update_object(object);
62729 fscache_stat_d(&fscache_n_cop_update_object);
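
Note on the fs/fscache/object.c hunks above: PaX's REFCOUNT hardening makes the
normal atomic_t operations trap on overflow so that a leaked reference count
cannot wrap around into a use-after-free. The fscache event counters are pure
statistics that may legitimately wrap, so the patch moves them to the
*_unchecked variants. The helper these hunks call is presumably defined next to
fscache_stat() in fs/fscache/internal.h (modified elsewhere in this patch),
roughly along these lines — a sketch, not the literal patch text:

        /* illustrative sketch of the helper assumed by the hunks above */
        static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
        {
                atomic_inc_unchecked(stat);     /* wrap silently, no trap */
        }

The fscache_n_cop_* counters, by contrast, stay plain atomic_t: they are
incremented and decremented in pairs (fscache_stat()/fscache_stat_d()) as
in-flight gauges, so overflow/underflow checking remains meaningful for them.
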
62730diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
62731index e7b87a0..a85d47a 100644
62732--- a/fs/fscache/operation.c
62733+++ b/fs/fscache/operation.c
62734@@ -17,7 +17,7 @@
62735 #include <linux/slab.h>
62736 #include "internal.h"
62737
62738-atomic_t fscache_op_debug_id;
62739+atomic_unchecked_t fscache_op_debug_id;
62740 EXPORT_SYMBOL(fscache_op_debug_id);
62741
62742 /**
62743@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
62744 ASSERTCMP(atomic_read(&op->usage), >, 0);
62745 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
62746
62747- fscache_stat(&fscache_n_op_enqueue);
62748+ fscache_stat_unchecked(&fscache_n_op_enqueue);
62749 switch (op->flags & FSCACHE_OP_TYPE) {
62750 case FSCACHE_OP_ASYNC:
62751 _debug("queue async");
62752@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
62753 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
62754 if (op->processor)
62755 fscache_enqueue_operation(op);
62756- fscache_stat(&fscache_n_op_run);
62757+ fscache_stat_unchecked(&fscache_n_op_run);
62758 }
62759
62760 /*
62761@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62762 if (object->n_in_progress > 0) {
62763 atomic_inc(&op->usage);
62764 list_add_tail(&op->pend_link, &object->pending_ops);
62765- fscache_stat(&fscache_n_op_pend);
62766+ fscache_stat_unchecked(&fscache_n_op_pend);
62767 } else if (!list_empty(&object->pending_ops)) {
62768 atomic_inc(&op->usage);
62769 list_add_tail(&op->pend_link, &object->pending_ops);
62770- fscache_stat(&fscache_n_op_pend);
62771+ fscache_stat_unchecked(&fscache_n_op_pend);
62772 fscache_start_operations(object);
62773 } else {
62774 ASSERTCMP(object->n_in_progress, ==, 0);
62775@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62776 object->n_exclusive++; /* reads and writes must wait */
62777 atomic_inc(&op->usage);
62778 list_add_tail(&op->pend_link, &object->pending_ops);
62779- fscache_stat(&fscache_n_op_pend);
62780+ fscache_stat_unchecked(&fscache_n_op_pend);
62781 ret = 0;
62782 } else {
62783 /* If we're in any other state, there must have been an I/O
62784@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
62785 if (object->n_exclusive > 0) {
62786 atomic_inc(&op->usage);
62787 list_add_tail(&op->pend_link, &object->pending_ops);
62788- fscache_stat(&fscache_n_op_pend);
62789+ fscache_stat_unchecked(&fscache_n_op_pend);
62790 } else if (!list_empty(&object->pending_ops)) {
62791 atomic_inc(&op->usage);
62792 list_add_tail(&op->pend_link, &object->pending_ops);
62793- fscache_stat(&fscache_n_op_pend);
62794+ fscache_stat_unchecked(&fscache_n_op_pend);
62795 fscache_start_operations(object);
62796 } else {
62797 ASSERTCMP(object->n_exclusive, ==, 0);
62798@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
62799 object->n_ops++;
62800 atomic_inc(&op->usage);
62801 list_add_tail(&op->pend_link, &object->pending_ops);
62802- fscache_stat(&fscache_n_op_pend);
62803+ fscache_stat_unchecked(&fscache_n_op_pend);
62804 ret = 0;
62805 } else if (fscache_object_is_dying(object)) {
62806- fscache_stat(&fscache_n_op_rejected);
62807+ fscache_stat_unchecked(&fscache_n_op_rejected);
62808 op->state = FSCACHE_OP_ST_CANCELLED;
62809 ret = -ENOBUFS;
62810 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
62811@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
62812 ret = -EBUSY;
62813 if (op->state == FSCACHE_OP_ST_PENDING) {
62814 ASSERT(!list_empty(&op->pend_link));
62815- fscache_stat(&fscache_n_op_cancelled);
62816+ fscache_stat_unchecked(&fscache_n_op_cancelled);
62817 list_del_init(&op->pend_link);
62818 if (do_cancel)
62819 do_cancel(op);
62820@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
62821 while (!list_empty(&object->pending_ops)) {
62822 op = list_entry(object->pending_ops.next,
62823 struct fscache_operation, pend_link);
62824- fscache_stat(&fscache_n_op_cancelled);
62825+ fscache_stat_unchecked(&fscache_n_op_cancelled);
62826 list_del_init(&op->pend_link);
62827
62828 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
62829@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
62830 op->state, ==, FSCACHE_OP_ST_CANCELLED);
62831 op->state = FSCACHE_OP_ST_DEAD;
62832
62833- fscache_stat(&fscache_n_op_release);
62834+ fscache_stat_unchecked(&fscache_n_op_release);
62835
62836 if (op->release) {
62837 op->release(op);
62838@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
62839 * lock, and defer it otherwise */
62840 if (!spin_trylock(&object->lock)) {
62841 _debug("defer put");
62842- fscache_stat(&fscache_n_op_deferred_release);
62843+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
62844
62845 cache = object->cache;
62846 spin_lock(&cache->op_gc_list_lock);
62847@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
62848
62849 _debug("GC DEFERRED REL OBJ%x OP%x",
62850 object->debug_id, op->debug_id);
62851- fscache_stat(&fscache_n_op_gc);
62852+ fscache_stat_unchecked(&fscache_n_op_gc);
62853
62854 ASSERTCMP(atomic_read(&op->usage), ==, 0);
62855 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
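
The fs/fscache/operation.c hunks above also convert fscache_op_debug_id, a pure
monotonically increasing ID allocator. The atomic_unchecked_t type itself is
introduced by this patch's include/linux/types.h and per-arch atomic.h changes
(not shown in this excerpt); conceptually it looks like the following sketch —
assume the real definitions differ in detail:

        /* illustrative only: the unchecked escape-hatch type */
        #ifdef CONFIG_PAX_REFCOUNT
        typedef struct {
                int counter;
        } atomic_unchecked_t;           /* ops on this skip the overflow trap */
        #else
        typedef atomic_t atomic_unchecked_t;    /* no hardening: same type */
        #endif
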
62856diff --git a/fs/fscache/page.c b/fs/fscache/page.c
62857index de33b3f..8be4d29 100644
62858--- a/fs/fscache/page.c
62859+++ b/fs/fscache/page.c
62860@@ -74,7 +74,7 @@ try_again:
62861 val = radix_tree_lookup(&cookie->stores, page->index);
62862 if (!val) {
62863 rcu_read_unlock();
62864- fscache_stat(&fscache_n_store_vmscan_not_storing);
62865+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
62866 __fscache_uncache_page(cookie, page);
62867 return true;
62868 }
62869@@ -104,11 +104,11 @@ try_again:
62870 spin_unlock(&cookie->stores_lock);
62871
62872 if (xpage) {
62873- fscache_stat(&fscache_n_store_vmscan_cancelled);
62874- fscache_stat(&fscache_n_store_radix_deletes);
62875+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
62876+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
62877 ASSERTCMP(xpage, ==, page);
62878 } else {
62879- fscache_stat(&fscache_n_store_vmscan_gone);
62880+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
62881 }
62882
62883 wake_up_bit(&cookie->flags, 0);
62884@@ -123,11 +123,11 @@ page_busy:
62885 * sleeping on memory allocation, so we may need to impose a timeout
62886 * too. */
62887 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
62888- fscache_stat(&fscache_n_store_vmscan_busy);
62889+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
62890 return false;
62891 }
62892
62893- fscache_stat(&fscache_n_store_vmscan_wait);
62894+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
62895 if (!release_page_wait_timeout(cookie, page))
62896 _debug("fscache writeout timeout page: %p{%lx}",
62897 page, page->index);
62898@@ -156,7 +156,7 @@ static void fscache_end_page_write(struct fscache_object *object,
62899 FSCACHE_COOKIE_STORING_TAG);
62900 if (!radix_tree_tag_get(&cookie->stores, page->index,
62901 FSCACHE_COOKIE_PENDING_TAG)) {
62902- fscache_stat(&fscache_n_store_radix_deletes);
62903+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
62904 xpage = radix_tree_delete(&cookie->stores, page->index);
62905 }
62906 spin_unlock(&cookie->stores_lock);
62907@@ -177,7 +177,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
62908
62909 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
62910
62911- fscache_stat(&fscache_n_attr_changed_calls);
62912+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
62913
62914 if (fscache_object_is_active(object)) {
62915 fscache_stat(&fscache_n_cop_attr_changed);
62916@@ -204,11 +204,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
62917
62918 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
62919
62920- fscache_stat(&fscache_n_attr_changed);
62921+ fscache_stat_unchecked(&fscache_n_attr_changed);
62922
62923 op = kzalloc(sizeof(*op), GFP_KERNEL);
62924 if (!op) {
62925- fscache_stat(&fscache_n_attr_changed_nomem);
62926+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
62927 _leave(" = -ENOMEM");
62928 return -ENOMEM;
62929 }
62930@@ -230,7 +230,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
62931 if (fscache_submit_exclusive_op(object, op) < 0)
62932 goto nobufs_dec;
62933 spin_unlock(&cookie->lock);
62934- fscache_stat(&fscache_n_attr_changed_ok);
62935+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
62936 fscache_put_operation(op);
62937 _leave(" = 0");
62938 return 0;
62939@@ -242,7 +242,7 @@ nobufs:
62940 kfree(op);
62941 if (wake_cookie)
62942 __fscache_wake_unused_cookie(cookie);
62943- fscache_stat(&fscache_n_attr_changed_nobufs);
62944+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
62945 _leave(" = %d", -ENOBUFS);
62946 return -ENOBUFS;
62947 }
62948@@ -281,7 +281,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
62949 /* allocate a retrieval operation and attempt to submit it */
62950 op = kzalloc(sizeof(*op), GFP_NOIO);
62951 if (!op) {
62952- fscache_stat(&fscache_n_retrievals_nomem);
62953+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
62954 return NULL;
62955 }
62956
62957@@ -311,12 +311,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
62958 return 0;
62959 }
62960
62961- fscache_stat(&fscache_n_retrievals_wait);
62962+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
62963
62964 jif = jiffies;
62965 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
62966 TASK_INTERRUPTIBLE) != 0) {
62967- fscache_stat(&fscache_n_retrievals_intr);
62968+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
62969 _leave(" = -ERESTARTSYS");
62970 return -ERESTARTSYS;
62971 }
62972@@ -345,8 +345,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
62973 */
62974 int fscache_wait_for_operation_activation(struct fscache_object *object,
62975 struct fscache_operation *op,
62976- atomic_t *stat_op_waits,
62977- atomic_t *stat_object_dead,
62978+ atomic_unchecked_t *stat_op_waits,
62979+ atomic_unchecked_t *stat_object_dead,
62980 void (*do_cancel)(struct fscache_operation *))
62981 {
62982 int ret;
62983@@ -356,7 +356,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
62984
62985 _debug(">>> WT");
62986 if (stat_op_waits)
62987- fscache_stat(stat_op_waits);
62988+ fscache_stat_unchecked(stat_op_waits);
62989 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
62990 TASK_INTERRUPTIBLE) != 0) {
62991 ret = fscache_cancel_op(op, do_cancel);
62992@@ -373,7 +373,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
62993 check_if_dead:
62994 if (op->state == FSCACHE_OP_ST_CANCELLED) {
62995 if (stat_object_dead)
62996- fscache_stat(stat_object_dead);
62997+ fscache_stat_unchecked(stat_object_dead);
62998 _leave(" = -ENOBUFS [cancelled]");
62999 return -ENOBUFS;
63000 }
63001@@ -381,7 +381,7 @@ check_if_dead:
63002 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
63003 fscache_cancel_op(op, do_cancel);
63004 if (stat_object_dead)
63005- fscache_stat(stat_object_dead);
63006+ fscache_stat_unchecked(stat_object_dead);
63007 return -ENOBUFS;
63008 }
63009 return 0;
63010@@ -409,7 +409,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63011
63012 _enter("%p,%p,,,", cookie, page);
63013
63014- fscache_stat(&fscache_n_retrievals);
63015+ fscache_stat_unchecked(&fscache_n_retrievals);
63016
63017 if (hlist_empty(&cookie->backing_objects))
63018 goto nobufs;
63019@@ -451,7 +451,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63020 goto nobufs_unlock_dec;
63021 spin_unlock(&cookie->lock);
63022
63023- fscache_stat(&fscache_n_retrieval_ops);
63024+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
63025
63026 /* pin the netfs read context in case we need to do the actual netfs
63027 * read because we've encountered a cache read failure */
63028@@ -482,15 +482,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63029
63030 error:
63031 if (ret == -ENOMEM)
63032- fscache_stat(&fscache_n_retrievals_nomem);
63033+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63034 else if (ret == -ERESTARTSYS)
63035- fscache_stat(&fscache_n_retrievals_intr);
63036+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63037 else if (ret == -ENODATA)
63038- fscache_stat(&fscache_n_retrievals_nodata);
63039+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
63040 else if (ret < 0)
63041- fscache_stat(&fscache_n_retrievals_nobufs);
63042+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63043 else
63044- fscache_stat(&fscache_n_retrievals_ok);
63045+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
63046
63047 fscache_put_retrieval(op);
63048 _leave(" = %d", ret);
63049@@ -505,7 +505,7 @@ nobufs_unlock:
63050 __fscache_wake_unused_cookie(cookie);
63051 kfree(op);
63052 nobufs:
63053- fscache_stat(&fscache_n_retrievals_nobufs);
63054+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63055 _leave(" = -ENOBUFS");
63056 return -ENOBUFS;
63057 }
63058@@ -544,7 +544,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63059
63060 _enter("%p,,%d,,,", cookie, *nr_pages);
63061
63062- fscache_stat(&fscache_n_retrievals);
63063+ fscache_stat_unchecked(&fscache_n_retrievals);
63064
63065 if (hlist_empty(&cookie->backing_objects))
63066 goto nobufs;
63067@@ -582,7 +582,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63068 goto nobufs_unlock_dec;
63069 spin_unlock(&cookie->lock);
63070
63071- fscache_stat(&fscache_n_retrieval_ops);
63072+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
63073
63074 /* pin the netfs read context in case we need to do the actual netfs
63075 * read because we've encountered a cache read failure */
63076@@ -613,15 +613,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63077
63078 error:
63079 if (ret == -ENOMEM)
63080- fscache_stat(&fscache_n_retrievals_nomem);
63081+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63082 else if (ret == -ERESTARTSYS)
63083- fscache_stat(&fscache_n_retrievals_intr);
63084+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63085 else if (ret == -ENODATA)
63086- fscache_stat(&fscache_n_retrievals_nodata);
63087+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
63088 else if (ret < 0)
63089- fscache_stat(&fscache_n_retrievals_nobufs);
63090+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63091 else
63092- fscache_stat(&fscache_n_retrievals_ok);
63093+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
63094
63095 fscache_put_retrieval(op);
63096 _leave(" = %d", ret);
63097@@ -636,7 +636,7 @@ nobufs_unlock:
63098 if (wake_cookie)
63099 __fscache_wake_unused_cookie(cookie);
63100 nobufs:
63101- fscache_stat(&fscache_n_retrievals_nobufs);
63102+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63103 _leave(" = -ENOBUFS");
63104 return -ENOBUFS;
63105 }
63106@@ -661,7 +661,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63107
63108 _enter("%p,%p,,,", cookie, page);
63109
63110- fscache_stat(&fscache_n_allocs);
63111+ fscache_stat_unchecked(&fscache_n_allocs);
63112
63113 if (hlist_empty(&cookie->backing_objects))
63114 goto nobufs;
63115@@ -695,7 +695,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63116 goto nobufs_unlock_dec;
63117 spin_unlock(&cookie->lock);
63118
63119- fscache_stat(&fscache_n_alloc_ops);
63120+ fscache_stat_unchecked(&fscache_n_alloc_ops);
63121
63122 ret = fscache_wait_for_operation_activation(
63123 object, &op->op,
63124@@ -712,11 +712,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63125
63126 error:
63127 if (ret == -ERESTARTSYS)
63128- fscache_stat(&fscache_n_allocs_intr);
63129+ fscache_stat_unchecked(&fscache_n_allocs_intr);
63130 else if (ret < 0)
63131- fscache_stat(&fscache_n_allocs_nobufs);
63132+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
63133 else
63134- fscache_stat(&fscache_n_allocs_ok);
63135+ fscache_stat_unchecked(&fscache_n_allocs_ok);
63136
63137 fscache_put_retrieval(op);
63138 _leave(" = %d", ret);
63139@@ -730,7 +730,7 @@ nobufs_unlock:
63140 if (wake_cookie)
63141 __fscache_wake_unused_cookie(cookie);
63142 nobufs:
63143- fscache_stat(&fscache_n_allocs_nobufs);
63144+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
63145 _leave(" = -ENOBUFS");
63146 return -ENOBUFS;
63147 }
63148@@ -806,7 +806,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63149
63150 spin_lock(&cookie->stores_lock);
63151
63152- fscache_stat(&fscache_n_store_calls);
63153+ fscache_stat_unchecked(&fscache_n_store_calls);
63154
63155 /* find a page to store */
63156 page = NULL;
63157@@ -817,7 +817,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63158 page = results[0];
63159 _debug("gang %d [%lx]", n, page->index);
63160 if (page->index > op->store_limit) {
63161- fscache_stat(&fscache_n_store_pages_over_limit);
63162+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
63163 goto superseded;
63164 }
63165
63166@@ -829,7 +829,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63167 spin_unlock(&cookie->stores_lock);
63168 spin_unlock(&object->lock);
63169
63170- fscache_stat(&fscache_n_store_pages);
63171+ fscache_stat_unchecked(&fscache_n_store_pages);
63172 fscache_stat(&fscache_n_cop_write_page);
63173 ret = object->cache->ops->write_page(op, page);
63174 fscache_stat_d(&fscache_n_cop_write_page);
63175@@ -933,7 +933,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63176 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63177 ASSERT(PageFsCache(page));
63178
63179- fscache_stat(&fscache_n_stores);
63180+ fscache_stat_unchecked(&fscache_n_stores);
63181
63182 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
63183 _leave(" = -ENOBUFS [invalidating]");
63184@@ -992,7 +992,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63185 spin_unlock(&cookie->stores_lock);
63186 spin_unlock(&object->lock);
63187
63188- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
63189+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
63190 op->store_limit = object->store_limit;
63191
63192 __fscache_use_cookie(cookie);
63193@@ -1001,8 +1001,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63194
63195 spin_unlock(&cookie->lock);
63196 radix_tree_preload_end();
63197- fscache_stat(&fscache_n_store_ops);
63198- fscache_stat(&fscache_n_stores_ok);
63199+ fscache_stat_unchecked(&fscache_n_store_ops);
63200+ fscache_stat_unchecked(&fscache_n_stores_ok);
63201
63202 /* the work queue now carries its own ref on the object */
63203 fscache_put_operation(&op->op);
63204@@ -1010,14 +1010,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63205 return 0;
63206
63207 already_queued:
63208- fscache_stat(&fscache_n_stores_again);
63209+ fscache_stat_unchecked(&fscache_n_stores_again);
63210 already_pending:
63211 spin_unlock(&cookie->stores_lock);
63212 spin_unlock(&object->lock);
63213 spin_unlock(&cookie->lock);
63214 radix_tree_preload_end();
63215 kfree(op);
63216- fscache_stat(&fscache_n_stores_ok);
63217+ fscache_stat_unchecked(&fscache_n_stores_ok);
63218 _leave(" = 0");
63219 return 0;
63220
63221@@ -1039,14 +1039,14 @@ nobufs:
63222 kfree(op);
63223 if (wake_cookie)
63224 __fscache_wake_unused_cookie(cookie);
63225- fscache_stat(&fscache_n_stores_nobufs);
63226+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
63227 _leave(" = -ENOBUFS");
63228 return -ENOBUFS;
63229
63230 nomem_free:
63231 kfree(op);
63232 nomem:
63233- fscache_stat(&fscache_n_stores_oom);
63234+ fscache_stat_unchecked(&fscache_n_stores_oom);
63235 _leave(" = -ENOMEM");
63236 return -ENOMEM;
63237 }
63238@@ -1064,7 +1064,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
63239 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63240 ASSERTCMP(page, !=, NULL);
63241
63242- fscache_stat(&fscache_n_uncaches);
63243+ fscache_stat_unchecked(&fscache_n_uncaches);
63244
63245 /* cache withdrawal may beat us to it */
63246 if (!PageFsCache(page))
63247@@ -1115,7 +1115,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
63248 struct fscache_cookie *cookie = op->op.object->cookie;
63249
63250 #ifdef CONFIG_FSCACHE_STATS
63251- atomic_inc(&fscache_n_marks);
63252+ atomic_inc_unchecked(&fscache_n_marks);
63253 #endif
63254
63255 _debug("- mark %p{%lx}", page, page->index);
63256diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
63257index 40d13c7..ddf52b9 100644
63258--- a/fs/fscache/stats.c
63259+++ b/fs/fscache/stats.c
63260@@ -18,99 +18,99 @@
63261 /*
63262 * operation counters
63263 */
63264-atomic_t fscache_n_op_pend;
63265-atomic_t fscache_n_op_run;
63266-atomic_t fscache_n_op_enqueue;
63267-atomic_t fscache_n_op_requeue;
63268-atomic_t fscache_n_op_deferred_release;
63269-atomic_t fscache_n_op_release;
63270-atomic_t fscache_n_op_gc;
63271-atomic_t fscache_n_op_cancelled;
63272-atomic_t fscache_n_op_rejected;
63273+atomic_unchecked_t fscache_n_op_pend;
63274+atomic_unchecked_t fscache_n_op_run;
63275+atomic_unchecked_t fscache_n_op_enqueue;
63276+atomic_unchecked_t fscache_n_op_requeue;
63277+atomic_unchecked_t fscache_n_op_deferred_release;
63278+atomic_unchecked_t fscache_n_op_release;
63279+atomic_unchecked_t fscache_n_op_gc;
63280+atomic_unchecked_t fscache_n_op_cancelled;
63281+atomic_unchecked_t fscache_n_op_rejected;
63282
63283-atomic_t fscache_n_attr_changed;
63284-atomic_t fscache_n_attr_changed_ok;
63285-atomic_t fscache_n_attr_changed_nobufs;
63286-atomic_t fscache_n_attr_changed_nomem;
63287-atomic_t fscache_n_attr_changed_calls;
63288+atomic_unchecked_t fscache_n_attr_changed;
63289+atomic_unchecked_t fscache_n_attr_changed_ok;
63290+atomic_unchecked_t fscache_n_attr_changed_nobufs;
63291+atomic_unchecked_t fscache_n_attr_changed_nomem;
63292+atomic_unchecked_t fscache_n_attr_changed_calls;
63293
63294-atomic_t fscache_n_allocs;
63295-atomic_t fscache_n_allocs_ok;
63296-atomic_t fscache_n_allocs_wait;
63297-atomic_t fscache_n_allocs_nobufs;
63298-atomic_t fscache_n_allocs_intr;
63299-atomic_t fscache_n_allocs_object_dead;
63300-atomic_t fscache_n_alloc_ops;
63301-atomic_t fscache_n_alloc_op_waits;
63302+atomic_unchecked_t fscache_n_allocs;
63303+atomic_unchecked_t fscache_n_allocs_ok;
63304+atomic_unchecked_t fscache_n_allocs_wait;
63305+atomic_unchecked_t fscache_n_allocs_nobufs;
63306+atomic_unchecked_t fscache_n_allocs_intr;
63307+atomic_unchecked_t fscache_n_allocs_object_dead;
63308+atomic_unchecked_t fscache_n_alloc_ops;
63309+atomic_unchecked_t fscache_n_alloc_op_waits;
63310
63311-atomic_t fscache_n_retrievals;
63312-atomic_t fscache_n_retrievals_ok;
63313-atomic_t fscache_n_retrievals_wait;
63314-atomic_t fscache_n_retrievals_nodata;
63315-atomic_t fscache_n_retrievals_nobufs;
63316-atomic_t fscache_n_retrievals_intr;
63317-atomic_t fscache_n_retrievals_nomem;
63318-atomic_t fscache_n_retrievals_object_dead;
63319-atomic_t fscache_n_retrieval_ops;
63320-atomic_t fscache_n_retrieval_op_waits;
63321+atomic_unchecked_t fscache_n_retrievals;
63322+atomic_unchecked_t fscache_n_retrievals_ok;
63323+atomic_unchecked_t fscache_n_retrievals_wait;
63324+atomic_unchecked_t fscache_n_retrievals_nodata;
63325+atomic_unchecked_t fscache_n_retrievals_nobufs;
63326+atomic_unchecked_t fscache_n_retrievals_intr;
63327+atomic_unchecked_t fscache_n_retrievals_nomem;
63328+atomic_unchecked_t fscache_n_retrievals_object_dead;
63329+atomic_unchecked_t fscache_n_retrieval_ops;
63330+atomic_unchecked_t fscache_n_retrieval_op_waits;
63331
63332-atomic_t fscache_n_stores;
63333-atomic_t fscache_n_stores_ok;
63334-atomic_t fscache_n_stores_again;
63335-atomic_t fscache_n_stores_nobufs;
63336-atomic_t fscache_n_stores_oom;
63337-atomic_t fscache_n_store_ops;
63338-atomic_t fscache_n_store_calls;
63339-atomic_t fscache_n_store_pages;
63340-atomic_t fscache_n_store_radix_deletes;
63341-atomic_t fscache_n_store_pages_over_limit;
63342+atomic_unchecked_t fscache_n_stores;
63343+atomic_unchecked_t fscache_n_stores_ok;
63344+atomic_unchecked_t fscache_n_stores_again;
63345+atomic_unchecked_t fscache_n_stores_nobufs;
63346+atomic_unchecked_t fscache_n_stores_oom;
63347+atomic_unchecked_t fscache_n_store_ops;
63348+atomic_unchecked_t fscache_n_store_calls;
63349+atomic_unchecked_t fscache_n_store_pages;
63350+atomic_unchecked_t fscache_n_store_radix_deletes;
63351+atomic_unchecked_t fscache_n_store_pages_over_limit;
63352
63353-atomic_t fscache_n_store_vmscan_not_storing;
63354-atomic_t fscache_n_store_vmscan_gone;
63355-atomic_t fscache_n_store_vmscan_busy;
63356-atomic_t fscache_n_store_vmscan_cancelled;
63357-atomic_t fscache_n_store_vmscan_wait;
63358+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
63359+atomic_unchecked_t fscache_n_store_vmscan_gone;
63360+atomic_unchecked_t fscache_n_store_vmscan_busy;
63361+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
63362+atomic_unchecked_t fscache_n_store_vmscan_wait;
63363
63364-atomic_t fscache_n_marks;
63365-atomic_t fscache_n_uncaches;
63366+atomic_unchecked_t fscache_n_marks;
63367+atomic_unchecked_t fscache_n_uncaches;
63368
63369-atomic_t fscache_n_acquires;
63370-atomic_t fscache_n_acquires_null;
63371-atomic_t fscache_n_acquires_no_cache;
63372-atomic_t fscache_n_acquires_ok;
63373-atomic_t fscache_n_acquires_nobufs;
63374-atomic_t fscache_n_acquires_oom;
63375+atomic_unchecked_t fscache_n_acquires;
63376+atomic_unchecked_t fscache_n_acquires_null;
63377+atomic_unchecked_t fscache_n_acquires_no_cache;
63378+atomic_unchecked_t fscache_n_acquires_ok;
63379+atomic_unchecked_t fscache_n_acquires_nobufs;
63380+atomic_unchecked_t fscache_n_acquires_oom;
63381
63382-atomic_t fscache_n_invalidates;
63383-atomic_t fscache_n_invalidates_run;
63384+atomic_unchecked_t fscache_n_invalidates;
63385+atomic_unchecked_t fscache_n_invalidates_run;
63386
63387-atomic_t fscache_n_updates;
63388-atomic_t fscache_n_updates_null;
63389-atomic_t fscache_n_updates_run;
63390+atomic_unchecked_t fscache_n_updates;
63391+atomic_unchecked_t fscache_n_updates_null;
63392+atomic_unchecked_t fscache_n_updates_run;
63393
63394-atomic_t fscache_n_relinquishes;
63395-atomic_t fscache_n_relinquishes_null;
63396-atomic_t fscache_n_relinquishes_waitcrt;
63397-atomic_t fscache_n_relinquishes_retire;
63398+atomic_unchecked_t fscache_n_relinquishes;
63399+atomic_unchecked_t fscache_n_relinquishes_null;
63400+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
63401+atomic_unchecked_t fscache_n_relinquishes_retire;
63402
63403-atomic_t fscache_n_cookie_index;
63404-atomic_t fscache_n_cookie_data;
63405-atomic_t fscache_n_cookie_special;
63406+atomic_unchecked_t fscache_n_cookie_index;
63407+atomic_unchecked_t fscache_n_cookie_data;
63408+atomic_unchecked_t fscache_n_cookie_special;
63409
63410-atomic_t fscache_n_object_alloc;
63411-atomic_t fscache_n_object_no_alloc;
63412-atomic_t fscache_n_object_lookups;
63413-atomic_t fscache_n_object_lookups_negative;
63414-atomic_t fscache_n_object_lookups_positive;
63415-atomic_t fscache_n_object_lookups_timed_out;
63416-atomic_t fscache_n_object_created;
63417-atomic_t fscache_n_object_avail;
63418-atomic_t fscache_n_object_dead;
63419+atomic_unchecked_t fscache_n_object_alloc;
63420+atomic_unchecked_t fscache_n_object_no_alloc;
63421+atomic_unchecked_t fscache_n_object_lookups;
63422+atomic_unchecked_t fscache_n_object_lookups_negative;
63423+atomic_unchecked_t fscache_n_object_lookups_positive;
63424+atomic_unchecked_t fscache_n_object_lookups_timed_out;
63425+atomic_unchecked_t fscache_n_object_created;
63426+atomic_unchecked_t fscache_n_object_avail;
63427+atomic_unchecked_t fscache_n_object_dead;
63428
63429-atomic_t fscache_n_checkaux_none;
63430-atomic_t fscache_n_checkaux_okay;
63431-atomic_t fscache_n_checkaux_update;
63432-atomic_t fscache_n_checkaux_obsolete;
63433+atomic_unchecked_t fscache_n_checkaux_none;
63434+atomic_unchecked_t fscache_n_checkaux_okay;
63435+atomic_unchecked_t fscache_n_checkaux_update;
63436+atomic_unchecked_t fscache_n_checkaux_obsolete;
63437
63438 atomic_t fscache_n_cop_alloc_object;
63439 atomic_t fscache_n_cop_lookup_object;
63440@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
63441 seq_puts(m, "FS-Cache statistics\n");
63442
63443 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
63444- atomic_read(&fscache_n_cookie_index),
63445- atomic_read(&fscache_n_cookie_data),
63446- atomic_read(&fscache_n_cookie_special));
63447+ atomic_read_unchecked(&fscache_n_cookie_index),
63448+ atomic_read_unchecked(&fscache_n_cookie_data),
63449+ atomic_read_unchecked(&fscache_n_cookie_special));
63450
63451 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
63452- atomic_read(&fscache_n_object_alloc),
63453- atomic_read(&fscache_n_object_no_alloc),
63454- atomic_read(&fscache_n_object_avail),
63455- atomic_read(&fscache_n_object_dead));
63456+ atomic_read_unchecked(&fscache_n_object_alloc),
63457+ atomic_read_unchecked(&fscache_n_object_no_alloc),
63458+ atomic_read_unchecked(&fscache_n_object_avail),
63459+ atomic_read_unchecked(&fscache_n_object_dead));
63460 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
63461- atomic_read(&fscache_n_checkaux_none),
63462- atomic_read(&fscache_n_checkaux_okay),
63463- atomic_read(&fscache_n_checkaux_update),
63464- atomic_read(&fscache_n_checkaux_obsolete));
63465+ atomic_read_unchecked(&fscache_n_checkaux_none),
63466+ atomic_read_unchecked(&fscache_n_checkaux_okay),
63467+ atomic_read_unchecked(&fscache_n_checkaux_update),
63468+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
63469
63470 seq_printf(m, "Pages : mrk=%u unc=%u\n",
63471- atomic_read(&fscache_n_marks),
63472- atomic_read(&fscache_n_uncaches));
63473+ atomic_read_unchecked(&fscache_n_marks),
63474+ atomic_read_unchecked(&fscache_n_uncaches));
63475
63476 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
63477 " oom=%u\n",
63478- atomic_read(&fscache_n_acquires),
63479- atomic_read(&fscache_n_acquires_null),
63480- atomic_read(&fscache_n_acquires_no_cache),
63481- atomic_read(&fscache_n_acquires_ok),
63482- atomic_read(&fscache_n_acquires_nobufs),
63483- atomic_read(&fscache_n_acquires_oom));
63484+ atomic_read_unchecked(&fscache_n_acquires),
63485+ atomic_read_unchecked(&fscache_n_acquires_null),
63486+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
63487+ atomic_read_unchecked(&fscache_n_acquires_ok),
63488+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
63489+ atomic_read_unchecked(&fscache_n_acquires_oom));
63490
63491 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
63492- atomic_read(&fscache_n_object_lookups),
63493- atomic_read(&fscache_n_object_lookups_negative),
63494- atomic_read(&fscache_n_object_lookups_positive),
63495- atomic_read(&fscache_n_object_created),
63496- atomic_read(&fscache_n_object_lookups_timed_out));
63497+ atomic_read_unchecked(&fscache_n_object_lookups),
63498+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
63499+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
63500+ atomic_read_unchecked(&fscache_n_object_created),
63501+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
63502
63503 seq_printf(m, "Invals : n=%u run=%u\n",
63504- atomic_read(&fscache_n_invalidates),
63505- atomic_read(&fscache_n_invalidates_run));
63506+ atomic_read_unchecked(&fscache_n_invalidates),
63507+ atomic_read_unchecked(&fscache_n_invalidates_run));
63508
63509 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
63510- atomic_read(&fscache_n_updates),
63511- atomic_read(&fscache_n_updates_null),
63512- atomic_read(&fscache_n_updates_run));
63513+ atomic_read_unchecked(&fscache_n_updates),
63514+ atomic_read_unchecked(&fscache_n_updates_null),
63515+ atomic_read_unchecked(&fscache_n_updates_run));
63516
63517 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
63518- atomic_read(&fscache_n_relinquishes),
63519- atomic_read(&fscache_n_relinquishes_null),
63520- atomic_read(&fscache_n_relinquishes_waitcrt),
63521- atomic_read(&fscache_n_relinquishes_retire));
63522+ atomic_read_unchecked(&fscache_n_relinquishes),
63523+ atomic_read_unchecked(&fscache_n_relinquishes_null),
63524+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
63525+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
63526
63527 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
63528- atomic_read(&fscache_n_attr_changed),
63529- atomic_read(&fscache_n_attr_changed_ok),
63530- atomic_read(&fscache_n_attr_changed_nobufs),
63531- atomic_read(&fscache_n_attr_changed_nomem),
63532- atomic_read(&fscache_n_attr_changed_calls));
63533+ atomic_read_unchecked(&fscache_n_attr_changed),
63534+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
63535+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
63536+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
63537+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
63538
63539 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
63540- atomic_read(&fscache_n_allocs),
63541- atomic_read(&fscache_n_allocs_ok),
63542- atomic_read(&fscache_n_allocs_wait),
63543- atomic_read(&fscache_n_allocs_nobufs),
63544- atomic_read(&fscache_n_allocs_intr));
63545+ atomic_read_unchecked(&fscache_n_allocs),
63546+ atomic_read_unchecked(&fscache_n_allocs_ok),
63547+ atomic_read_unchecked(&fscache_n_allocs_wait),
63548+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
63549+ atomic_read_unchecked(&fscache_n_allocs_intr));
63550 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
63551- atomic_read(&fscache_n_alloc_ops),
63552- atomic_read(&fscache_n_alloc_op_waits),
63553- atomic_read(&fscache_n_allocs_object_dead));
63554+ atomic_read_unchecked(&fscache_n_alloc_ops),
63555+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
63556+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
63557
63558 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
63559 " int=%u oom=%u\n",
63560- atomic_read(&fscache_n_retrievals),
63561- atomic_read(&fscache_n_retrievals_ok),
63562- atomic_read(&fscache_n_retrievals_wait),
63563- atomic_read(&fscache_n_retrievals_nodata),
63564- atomic_read(&fscache_n_retrievals_nobufs),
63565- atomic_read(&fscache_n_retrievals_intr),
63566- atomic_read(&fscache_n_retrievals_nomem));
63567+ atomic_read_unchecked(&fscache_n_retrievals),
63568+ atomic_read_unchecked(&fscache_n_retrievals_ok),
63569+ atomic_read_unchecked(&fscache_n_retrievals_wait),
63570+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
63571+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
63572+ atomic_read_unchecked(&fscache_n_retrievals_intr),
63573+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
63574 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
63575- atomic_read(&fscache_n_retrieval_ops),
63576- atomic_read(&fscache_n_retrieval_op_waits),
63577- atomic_read(&fscache_n_retrievals_object_dead));
63578+ atomic_read_unchecked(&fscache_n_retrieval_ops),
63579+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
63580+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
63581
63582 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
63583- atomic_read(&fscache_n_stores),
63584- atomic_read(&fscache_n_stores_ok),
63585- atomic_read(&fscache_n_stores_again),
63586- atomic_read(&fscache_n_stores_nobufs),
63587- atomic_read(&fscache_n_stores_oom));
63588+ atomic_read_unchecked(&fscache_n_stores),
63589+ atomic_read_unchecked(&fscache_n_stores_ok),
63590+ atomic_read_unchecked(&fscache_n_stores_again),
63591+ atomic_read_unchecked(&fscache_n_stores_nobufs),
63592+ atomic_read_unchecked(&fscache_n_stores_oom));
63593 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
63594- atomic_read(&fscache_n_store_ops),
63595- atomic_read(&fscache_n_store_calls),
63596- atomic_read(&fscache_n_store_pages),
63597- atomic_read(&fscache_n_store_radix_deletes),
63598- atomic_read(&fscache_n_store_pages_over_limit));
63599+ atomic_read_unchecked(&fscache_n_store_ops),
63600+ atomic_read_unchecked(&fscache_n_store_calls),
63601+ atomic_read_unchecked(&fscache_n_store_pages),
63602+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
63603+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
63604
63605 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
63606- atomic_read(&fscache_n_store_vmscan_not_storing),
63607- atomic_read(&fscache_n_store_vmscan_gone),
63608- atomic_read(&fscache_n_store_vmscan_busy),
63609- atomic_read(&fscache_n_store_vmscan_cancelled),
63610- atomic_read(&fscache_n_store_vmscan_wait));
63611+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
63612+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
63613+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
63614+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
63615+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
63616
63617 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
63618- atomic_read(&fscache_n_op_pend),
63619- atomic_read(&fscache_n_op_run),
63620- atomic_read(&fscache_n_op_enqueue),
63621- atomic_read(&fscache_n_op_cancelled),
63622- atomic_read(&fscache_n_op_rejected));
63623+ atomic_read_unchecked(&fscache_n_op_pend),
63624+ atomic_read_unchecked(&fscache_n_op_run),
63625+ atomic_read_unchecked(&fscache_n_op_enqueue),
63626+ atomic_read_unchecked(&fscache_n_op_cancelled),
63627+ atomic_read_unchecked(&fscache_n_op_rejected));
63628 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
63629- atomic_read(&fscache_n_op_deferred_release),
63630- atomic_read(&fscache_n_op_release),
63631- atomic_read(&fscache_n_op_gc));
63632+ atomic_read_unchecked(&fscache_n_op_deferred_release),
63633+ atomic_read_unchecked(&fscache_n_op_release),
63634+ atomic_read_unchecked(&fscache_n_op_gc));
63635
63636 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
63637 atomic_read(&fscache_n_cop_alloc_object),
63638diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
63639index 28d0c7a..04816b7 100644
63640--- a/fs/fuse/cuse.c
63641+++ b/fs/fuse/cuse.c
63642@@ -611,10 +611,12 @@ static int __init cuse_init(void)
63643 INIT_LIST_HEAD(&cuse_conntbl[i]);
63644
63645 /* inherit and extend fuse_dev_operations */
63646- cuse_channel_fops = fuse_dev_operations;
63647- cuse_channel_fops.owner = THIS_MODULE;
63648- cuse_channel_fops.open = cuse_channel_open;
63649- cuse_channel_fops.release = cuse_channel_release;
63650+ pax_open_kernel();
63651+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
63652+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
63653+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
63654+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
63655+ pax_close_kernel();
63656
63657 cuse_class = class_create(THIS_MODULE, "cuse");
63658 if (IS_ERR(cuse_class))
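
Note on the cuse.c hunk above: the patch's constification changes make file
operations structures like cuse_channel_fops read-only after boot, so the
original plain member assignments would fault. The pax_open_kernel() /
pax_close_kernel() pair briefly opens a write window for this one-time init,
and the casts through (void **) sidestep the const qualifier. Conceptually, on
x86 the window toggles the CR0 write-protect bit — a rough sketch only, not the
actual PaX implementation (which also handles preemption and other arches):

        /* conceptual x86 sketch of the write window */
        static inline unsigned long pax_open_kernel(void)
        {
                unsigned long cr0 = read_cr0();
                write_cr0(cr0 & ~X86_CR0_WP);   /* allow writes to RO pages */
                return cr0;
        }

        static inline unsigned long pax_close_kernel(void)
        {
                unsigned long cr0 = read_cr0() | X86_CR0_WP;
                write_cr0(cr0);                 /* re-arm write protection */
                return cr0;
        }
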
63659diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
63660index 71c4619..6a9f6d4 100644
63661--- a/fs/fuse/dev.c
63662+++ b/fs/fuse/dev.c
63663@@ -1394,7 +1394,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63664 ret = 0;
63665 pipe_lock(pipe);
63666
63667- if (!pipe->readers) {
63668+ if (!atomic_read(&pipe->readers)) {
63669 send_sig(SIGPIPE, current, 0);
63670 if (!ret)
63671 ret = -EPIPE;
63672@@ -1423,7 +1423,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63673 page_nr++;
63674 ret += buf->len;
63675
63676- if (pipe->files)
63677+ if (atomic_read(&pipe->files))
63678 do_wakeup = 1;
63679 }
63680
63681diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
63682index 08e7b1a..d91c6ee 100644
63683--- a/fs/fuse/dir.c
63684+++ b/fs/fuse/dir.c
63685@@ -1394,7 +1394,7 @@ static char *read_link(struct dentry *dentry)
63686 return link;
63687 }
63688
63689-static void free_link(char *link)
63690+static void free_link(const char *link)
63691 {
63692 if (!IS_ERR(link))
63693 free_page((unsigned long) link);
63694diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
63695index fd62cae..3494dfa 100644
63696--- a/fs/hostfs/hostfs_kern.c
63697+++ b/fs/hostfs/hostfs_kern.c
63698@@ -908,7 +908,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
63699
63700 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
63701 {
63702- char *s = nd_get_link(nd);
63703+ const char *s = nd_get_link(nd);
63704 if (!IS_ERR(s))
63705 __putname(s);
63706 }
63707diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
63708index 5eba47f..d353c22 100644
63709--- a/fs/hugetlbfs/inode.c
63710+++ b/fs/hugetlbfs/inode.c
63711@@ -154,6 +154,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
63712 struct mm_struct *mm = current->mm;
63713 struct vm_area_struct *vma;
63714 struct hstate *h = hstate_file(file);
63715+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
63716 struct vm_unmapped_area_info info;
63717
63718 if (len & ~huge_page_mask(h))
63719@@ -167,17 +168,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
63720 return addr;
63721 }
63722
63723+#ifdef CONFIG_PAX_RANDMMAP
63724+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
63725+#endif
63726+
63727 if (addr) {
63728 addr = ALIGN(addr, huge_page_size(h));
63729 vma = find_vma(mm, addr);
63730- if (TASK_SIZE - len >= addr &&
63731- (!vma || addr + len <= vma->vm_start))
63732+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
63733 return addr;
63734 }
63735
63736 info.flags = 0;
63737 info.length = len;
63738 info.low_limit = TASK_UNMAPPED_BASE;
63739+
63740+#ifdef CONFIG_PAX_RANDMMAP
63741+ if (mm->pax_flags & MF_PAX_RANDMMAP)
63742+ info.low_limit += mm->delta_mmap;
63743+#endif
63744+
63745 info.high_limit = TASK_SIZE;
63746 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
63747 info.align_offset = 0;
63748@@ -919,7 +929,7 @@ static struct file_system_type hugetlbfs_fs_type = {
63749 };
63750 MODULE_ALIAS_FS("hugetlbfs");
63751
63752-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
63753+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
63754
63755 static int can_do_hugetlb_shm(void)
63756 {
63757diff --git a/fs/inode.c b/fs/inode.c
63758index aa149e7..46f1f65 100644
63759--- a/fs/inode.c
63760+++ b/fs/inode.c
63761@@ -842,16 +842,20 @@ unsigned int get_next_ino(void)
63762 unsigned int *p = &get_cpu_var(last_ino);
63763 unsigned int res = *p;
63764
63765+start:
63766+
63767 #ifdef CONFIG_SMP
63768 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
63769- static atomic_t shared_last_ino;
63770- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
63771+ static atomic_unchecked_t shared_last_ino;
63772+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
63773
63774 res = next - LAST_INO_BATCH;
63775 }
63776 #endif
63777
63778- *p = ++res;
63779+ if (unlikely(!++res))
63780+ goto start; /* never zero */
63781+ *p = res;
63782 put_cpu_var(last_ino);
63783 return res;
63784 }
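
The fs/inode.c hunk above does two things: shared_last_ino becomes an unchecked
atomic (it is a wrapping ID allocator, not a reference count), and the restart
label guarantees that 0 is never handed out as an inode number after the
counter wraps. Simplified model of the new behaviour, with the per-CPU batching
omitted — a sketch under that assumption, not the patched function itself:

        /* demo: wrapping allocator that never returns 0 */
        static unsigned int next_ino_demo(unsigned int *last)
        {
                unsigned int res = *last;
                do {
                        res++;
                } while (!res);         /* skip 0 on wrap-around */
                *last = res;
                return res;
        }
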
63785diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
63786index 4a6cf28..d3a29d3 100644
63787--- a/fs/jffs2/erase.c
63788+++ b/fs/jffs2/erase.c
63789@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
63790 struct jffs2_unknown_node marker = {
63791 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
63792 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
63793- .totlen = cpu_to_je32(c->cleanmarker_size)
63794+ .totlen = cpu_to_je32(c->cleanmarker_size),
63795+ .hdr_crc = cpu_to_je32(0)
63796 };
63797
63798 jffs2_prealloc_raw_node_refs(c, jeb, 1);
63799diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
63800index 09ed551..45684f8 100644
63801--- a/fs/jffs2/wbuf.c
63802+++ b/fs/jffs2/wbuf.c
63803@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
63804 {
63805 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
63806 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
63807- .totlen = constant_cpu_to_je32(8)
63808+ .totlen = constant_cpu_to_je32(8),
63809+ .hdr_crc = constant_cpu_to_je32(0)
63810 };
63811
63812 /*
63813diff --git a/fs/jfs/super.c b/fs/jfs/super.c
63814index 16c3a95..e9cb75d 100644
63815--- a/fs/jfs/super.c
63816+++ b/fs/jfs/super.c
63817@@ -902,7 +902,7 @@ static int __init init_jfs_fs(void)
63818
63819 jfs_inode_cachep =
63820 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
63821- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
63822+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
63823 init_once);
63824 if (jfs_inode_cachep == NULL)
63825 return -ENOMEM;
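
Note on the jfs hunk above: under PAX_USERCOPY only slab caches explicitly
created with SLAB_USERCOPY may be copied to or from userspace; jfs_inode_info
carries inline data (such as short symlink bodies) that reaches copy_to_user(),
hence the whitelist flag. A rough idea of the enforcement — not the actual
PAX_USERCOPY code, and usercopy_check_demo is a hypothetical name:

        /* demo: reject user copies from non-whitelisted caches */
        static const char *usercopy_check_demo(struct kmem_cache *s)
        {
                if (s && !(s->flags & SLAB_USERCOPY))
                        return s->name; /* report the offending cache */
                return NULL;            /* NULL == copy permitted */
        }
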
63826diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
63827index 2d881b3..fe1ac77 100644
63828--- a/fs/kernfs/dir.c
63829+++ b/fs/kernfs/dir.c
63830@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
63831 *
63832 * Returns 31 bit hash of ns + name (so it fits in an off_t )
63833 */
63834-static unsigned int kernfs_name_hash(const char *name, const void *ns)
63835+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
63836 {
63837 unsigned long hash = init_name_hash();
63838 unsigned int len = strlen(name);
63839@@ -833,6 +833,12 @@ static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
63840 ret = scops->mkdir(parent, dentry->d_name.name, mode);
63841
63842 kernfs_put_active(parent);
63843+
63844+ if (!ret) {
63845+ struct dentry *dentry_ret = kernfs_iop_lookup(dir, dentry, 0);
63846+ ret = PTR_ERR_OR_ZERO(dentry_ret);
63847+ }
63848+
63849 return ret;
63850 }
63851
63852diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
63853index ddc9f96..4e450ad 100644
63854--- a/fs/kernfs/file.c
63855+++ b/fs/kernfs/file.c
63856@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
63857
63858 struct kernfs_open_node {
63859 atomic_t refcnt;
63860- atomic_t event;
63861+ atomic_unchecked_t event;
63862 wait_queue_head_t poll;
63863 struct list_head files; /* goes through kernfs_open_file.list */
63864 };
63865@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
63866 {
63867 struct kernfs_open_file *of = sf->private;
63868
63869- of->event = atomic_read(&of->kn->attr.open->event);
63870+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
63871
63872 return of->kn->attr.ops->seq_show(sf, v);
63873 }
63874@@ -271,7 +271,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
63875 {
63876 struct kernfs_open_file *of = kernfs_of(file);
63877 const struct kernfs_ops *ops;
63878- size_t len;
63879+ ssize_t len;
63880 char *buf;
63881
63882 if (of->atomic_write_len) {
63883@@ -384,12 +384,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
63884 return ret;
63885 }
63886
63887-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
63888- void *buf, int len, int write)
63889+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
63890+ void *buf, size_t len, int write)
63891 {
63892 struct file *file = vma->vm_file;
63893 struct kernfs_open_file *of = kernfs_of(file);
63894- int ret;
63895+ ssize_t ret;
63896
63897 if (!of->vm_ops)
63898 return -EINVAL;
63899@@ -568,7 +568,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
63900 return -ENOMEM;
63901
63902 atomic_set(&new_on->refcnt, 0);
63903- atomic_set(&new_on->event, 1);
63904+ atomic_set_unchecked(&new_on->event, 1);
63905 init_waitqueue_head(&new_on->poll);
63906 INIT_LIST_HEAD(&new_on->files);
63907 goto retry;
63908@@ -792,7 +792,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
63909
63910 kernfs_put_active(kn);
63911
63912- if (of->event != atomic_read(&on->event))
63913+ if (of->event != atomic_read_unchecked(&on->event))
63914 goto trigger;
63915
63916 return DEFAULT_POLLMASK;
63917@@ -823,7 +823,7 @@ repeat:
63918
63919 on = kn->attr.open;
63920 if (on) {
63921- atomic_inc(&on->event);
63922+ atomic_inc_unchecked(&on->event);
63923 wake_up_interruptible(&on->poll);
63924 }
63925
63926diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
63927index 8a19889..4c3069a 100644
63928--- a/fs/kernfs/symlink.c
63929+++ b/fs/kernfs/symlink.c
63930@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
63931 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
63932 void *cookie)
63933 {
63934- char *page = nd_get_link(nd);
63935+ const char *page = nd_get_link(nd);
63936 if (!IS_ERR(page))
63937 free_page((unsigned long)page);
63938 }
63939diff --git a/fs/libfs.c b/fs/libfs.c
63940index 005843c..06c4191 100644
63941--- a/fs/libfs.c
63942+++ b/fs/libfs.c
63943@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
63944
63945 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
63946 struct dentry *next = list_entry(p, struct dentry, d_child);
63947+ char d_name[sizeof(next->d_iname)];
63948+ const unsigned char *name;
63949+
63950 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
63951 if (!simple_positive(next)) {
63952 spin_unlock(&next->d_lock);
63953@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
63954
63955 spin_unlock(&next->d_lock);
63956 spin_unlock(&dentry->d_lock);
63957- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
63958+ name = next->d_name.name;
63959+ if (name == next->d_iname) {
63960+ memcpy(d_name, name, next->d_name.len);
63961+ name = d_name;
63962+ }
63963+ if (!dir_emit(ctx, name, next->d_name.len,
63964 next->d_inode->i_ino, dt_type(next->d_inode)))
63965 return 0;
63966 spin_lock(&dentry->d_lock);
63967@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
63968 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
63969 void *cookie)
63970 {
63971- char *s = nd_get_link(nd);
63972+ const char *s = nd_get_link(nd);
63973 if (!IS_ERR(s))
63974 kfree(s);
63975 }
63976diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
63977index acd3947..1f896e2 100644
63978--- a/fs/lockd/clntproc.c
63979+++ b/fs/lockd/clntproc.c
63980@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
63981 /*
63982 * Cookie counter for NLM requests
63983 */
63984-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
63985+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
63986
63987 void nlmclnt_next_cookie(struct nlm_cookie *c)
63988 {
63989- u32 cookie = atomic_inc_return(&nlm_cookie);
63990+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
63991
63992 memcpy(c->data, &cookie, 4);
63993 c->len=4;
63994diff --git a/fs/locks.c b/fs/locks.c
63995index 59e2f90..bd69071 100644
63996--- a/fs/locks.c
63997+++ b/fs/locks.c
63998@@ -2374,7 +2374,7 @@ void locks_remove_file(struct file *filp)
63999 locks_remove_posix(filp, filp);
64000
64001 if (filp->f_op->flock) {
64002- struct file_lock fl = {
64003+ struct file_lock flock = {
64004 .fl_owner = filp,
64005 .fl_pid = current->tgid,
64006 .fl_file = filp,
64007@@ -2382,9 +2382,9 @@ void locks_remove_file(struct file *filp)
64008 .fl_type = F_UNLCK,
64009 .fl_end = OFFSET_MAX,
64010 };
64011- filp->f_op->flock(filp, F_SETLKW, &fl);
64012- if (fl.fl_ops && fl.fl_ops->fl_release_private)
64013- fl.fl_ops->fl_release_private(&fl);
64014+ filp->f_op->flock(filp, F_SETLKW, &flock);
64015+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
64016+ flock.fl_ops->fl_release_private(&flock);
64017 }
64018
64019 spin_lock(&inode->i_lock);
64020diff --git a/fs/mount.h b/fs/mount.h
64021index 0ad6f76..a04c146 100644
64022--- a/fs/mount.h
64023+++ b/fs/mount.h
64024@@ -12,7 +12,7 @@ struct mnt_namespace {
64025 u64 seq; /* Sequence number to prevent loops */
64026 wait_queue_head_t poll;
64027 u64 event;
64028-};
64029+} __randomize_layout;
64030
64031 struct mnt_pcp {
64032 int mnt_count;
64033@@ -63,7 +63,7 @@ struct mount {
64034 int mnt_expiry_mark; /* true if marked for expiry */
64035 struct hlist_head mnt_pins;
64036 struct path mnt_ex_mountpoint;
64037-};
64038+} __randomize_layout;
64039
64040 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
64041
64042diff --git a/fs/namei.c b/fs/namei.c
64043index bc35b02..7ed1f1d 100644
64044--- a/fs/namei.c
64045+++ b/fs/namei.c
64046@@ -331,17 +331,32 @@ int generic_permission(struct inode *inode, int mask)
64047 if (ret != -EACCES)
64048 return ret;
64049
64050+#ifdef CONFIG_GRKERNSEC
64051+ /* we'll block if we have to log due to a denied capability use */
64052+ if (mask & MAY_NOT_BLOCK)
64053+ return -ECHILD;
64054+#endif
64055+
64056 if (S_ISDIR(inode->i_mode)) {
64057 /* DACs are overridable for directories */
64058- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
64059- return 0;
64060 if (!(mask & MAY_WRITE))
64061- if (capable_wrt_inode_uidgid(inode,
64062- CAP_DAC_READ_SEARCH))
64063+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
64064+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
64065 return 0;
64066+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
64067+ return 0;
64068 return -EACCES;
64069 }
64070 /*
64071+ * Searching includes executable on directories, else just read.
64072+ */
64073+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
64074+ if (mask == MAY_READ)
64075+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
64076+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
64077+ return 0;
64078+
64079+ /*
64080 * Read/write DACs are always overridable.
64081 * Executable DACs are overridable when there is
64082 * at least one exec bit set.
64083@@ -350,14 +365,6 @@ int generic_permission(struct inode *inode, int mask)
64084 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
64085 return 0;
64086
64087- /*
64088- * Searching includes executable on directories, else just read.
64089- */
64090- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
64091- if (mask == MAY_READ)
64092- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
64093- return 0;
64094-
64095 return -EACCES;
64096 }
64097 EXPORT_SYMBOL(generic_permission);
64098@@ -497,7 +504,7 @@ struct nameidata {
64099 int last_type;
64100 unsigned depth;
64101 struct file *base;
64102- char *saved_names[MAX_NESTED_LINKS + 1];
64103+ const char *saved_names[MAX_NESTED_LINKS + 1];
64104 };
64105
64106 /*
64107@@ -708,13 +715,13 @@ void nd_jump_link(struct nameidata *nd, struct path *path)
64108 nd->flags |= LOOKUP_JUMPED;
64109 }
64110
64111-void nd_set_link(struct nameidata *nd, char *path)
64112+void nd_set_link(struct nameidata *nd, const char *path)
64113 {
64114 nd->saved_names[nd->depth] = path;
64115 }
64116 EXPORT_SYMBOL(nd_set_link);
64117
64118-char *nd_get_link(struct nameidata *nd)
64119+const char *nd_get_link(const struct nameidata *nd)
64120 {
64121 return nd->saved_names[nd->depth];
64122 }
64123@@ -849,7 +856,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
64124 {
64125 struct dentry *dentry = link->dentry;
64126 int error;
64127- char *s;
64128+ const char *s;
64129
64130 BUG_ON(nd->flags & LOOKUP_RCU);
64131
64132@@ -870,6 +877,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
64133 if (error)
64134 goto out_put_nd_path;
64135
64136+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
64137+ dentry->d_inode, dentry, nd->path.mnt)) {
64138+ error = -EACCES;
64139+ goto out_put_nd_path;
64140+ }
64141+
64142 nd->last_type = LAST_BIND;
64143 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
64144 error = PTR_ERR(*p);
64145@@ -1633,6 +1646,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
64146 if (res)
64147 break;
64148 res = walk_component(nd, path, LOOKUP_FOLLOW);
64149+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
64150+ res = -EACCES;
64151 put_link(nd, &link, cookie);
64152 } while (res > 0);
64153
64154@@ -1705,7 +1720,7 @@ EXPORT_SYMBOL(full_name_hash);
64155 static inline u64 hash_name(const char *name)
64156 {
64157 unsigned long a, b, adata, bdata, mask, hash, len;
64158- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
64159+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
64160
64161 hash = a = 0;
64162 len = -sizeof(unsigned long);
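
Making the WORD_AT_A_TIME_CONSTANTS table "static const" gives every call of hash_name() one shared read-only instance instead of rebuilding the struct on the stack per invocation. A byte-at-a-time stand-in for the real word-at-a-time loop, showing only the static-const pattern (struct layout and mixing step are toys):

#include <stdio.h>

struct word_at_a_time { unsigned long one_bits, high_bits; };

static unsigned long hash_name(const char *name)
{
	/* one shared read-only instance; a plain "const" local would be
	 * materialized on the stack at every call */
	static const struct word_at_a_time constants = {
		0x01010101UL, 0x80808080UL
	};
	unsigned long hash = 0;

	while (*name && *name != '/')
		hash = (hash + (unsigned char)*name++) * constants.one_bits
		       ^ constants.high_bits;
	return hash;
}

int main(void)
{
	printf("%lx\n", hash_name("usr/bin"));
	return 0;
}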
64163@@ -2000,6 +2015,8 @@ static int path_lookupat(int dfd, const char *name,
64164 if (err)
64165 break;
64166 err = lookup_last(nd, &path);
64167+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
64168+ err = -EACCES;
64169 put_link(nd, &link, cookie);
64170 }
64171 }
64172@@ -2007,6 +2024,13 @@ static int path_lookupat(int dfd, const char *name,
64173 if (!err)
64174 err = complete_walk(nd);
64175
64176+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
64177+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
64178+ path_put(&nd->path);
64179+ err = -ENOENT;
64180+ }
64181+ }
64182+
64183 if (!err && nd->flags & LOOKUP_DIRECTORY) {
64184 if (!d_can_lookup(nd->path.dentry)) {
64185 path_put(&nd->path);
64186@@ -2028,8 +2052,15 @@ static int filename_lookup(int dfd, struct filename *name,
64187 retval = path_lookupat(dfd, name->name,
64188 flags | LOOKUP_REVAL, nd);
64189
64190- if (likely(!retval))
64191+ if (likely(!retval)) {
64192 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
64193+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
64194+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
64195+ path_put(&nd->path);
64196+ return -ENOENT;
64197+ }
64198+ }
64199+ }
64200 return retval;
64201 }
64202
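
The filename_lookup() hunk above re-validates every successful relative lookup against the process's chroot: if gr_chroot_fchdir() says the resolved dentry is outside the jail, the result is dropped and -ENOENT returned. A coarse userspace analogue of the shape of that check, using realpath() and a prefix test (the real enforcement walks the dentry tree; this only illustrates the containment test):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* return 1 if "path" resolves to somewhere under "root" */
static int under_root(const char *root, const char *path)
{
	char rroot[PATH_MAX], rpath[PATH_MAX];
	size_t n;

	if (!realpath(root, rroot) || !realpath(path, rpath))
		return 0;
	n = strlen(rroot);
	return strncmp(rroot, rpath, n) == 0 &&
	       (rpath[n] == '/' || rpath[n] == '\0');
}

int main(void)
{
	printf("%d\n", under_root("/tmp", "/tmp/../etc/passwd")); /* 0 */
	printf("%d\n", under_root("/tmp", "/tmp"));               /* 1 */
	return 0;
}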
64203@@ -2595,6 +2626,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
64204 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
64205 return -EPERM;
64206
64207+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
64208+ return -EPERM;
64209+ if (gr_handle_rawio(inode))
64210+ return -EPERM;
64211+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
64212+ return -EACCES;
64213+
64214 return 0;
64215 }
64216
64217@@ -2826,7 +2864,7 @@ looked_up:
64218 * cleared otherwise prior to returning.
64219 */
64220 static int lookup_open(struct nameidata *nd, struct path *path,
64221- struct file *file,
64222+ struct path *link, struct file *file,
64223 const struct open_flags *op,
64224 bool got_write, int *opened)
64225 {
64226@@ -2861,6 +2899,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
64227 /* Negative dentry, just create the file */
64228 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
64229 umode_t mode = op->mode;
64230+
64231+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
64232+ error = -EACCES;
64233+ goto out_dput;
64234+ }
64235+
64236+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
64237+ error = -EACCES;
64238+ goto out_dput;
64239+ }
64240+
64241 if (!IS_POSIXACL(dir->d_inode))
64242 mode &= ~current_umask();
64243 /*
64244@@ -2882,6 +2931,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
64245 nd->flags & LOOKUP_EXCL);
64246 if (error)
64247 goto out_dput;
64248+ else
64249+ gr_handle_create(dentry, nd->path.mnt);
64250 }
64251 out_no_open:
64252 path->dentry = dentry;
64253@@ -2896,7 +2947,7 @@ out_dput:
64254 /*
64255 * Handle the last step of open()
64256 */
64257-static int do_last(struct nameidata *nd, struct path *path,
64258+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
64259 struct file *file, const struct open_flags *op,
64260 int *opened, struct filename *name)
64261 {
64262@@ -2946,6 +2997,15 @@ static int do_last(struct nameidata *nd, struct path *path,
64263 if (error)
64264 return error;
64265
64266+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
64267+ error = -ENOENT;
64268+ goto out;
64269+ }
64270+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
64271+ error = -EACCES;
64272+ goto out;
64273+ }
64274+
64275 audit_inode(name, dir, LOOKUP_PARENT);
64276 error = -EISDIR;
64277 /* trailing slashes? */
64278@@ -2965,7 +3025,7 @@ retry_lookup:
64279 */
64280 }
64281 mutex_lock(&dir->d_inode->i_mutex);
64282- error = lookup_open(nd, path, file, op, got_write, opened);
64283+ error = lookup_open(nd, path, link, file, op, got_write, opened);
64284 mutex_unlock(&dir->d_inode->i_mutex);
64285
64286 if (error <= 0) {
64287@@ -2989,11 +3049,28 @@ retry_lookup:
64288 goto finish_open_created;
64289 }
64290
64291+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
64292+ error = -ENOENT;
64293+ goto exit_dput;
64294+ }
64295+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
64296+ error = -EACCES;
64297+ goto exit_dput;
64298+ }
64299+
64300 /*
64301 * create/update audit record if it already exists.
64302 */
64303- if (d_is_positive(path->dentry))
64304+ if (d_is_positive(path->dentry)) {
64305+	/* only check if O_CREAT is specified; all other checks need to go
64306+	   into may_open */
64307+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
64308+ error = -EACCES;
64309+ goto exit_dput;
64310+ }
64311+
64312 audit_inode(name, path->dentry, 0);
64313+ }
64314
64315 /*
64316 * If atomic_open() acquired write access it is dropped now due to
64317@@ -3034,6 +3111,11 @@ finish_lookup:
64318 }
64319 }
64320 BUG_ON(inode != path->dentry->d_inode);
64321+ /* if we're resolving a symlink to another symlink */
64322+ if (link && gr_handle_symlink_owner(link, inode)) {
64323+ error = -EACCES;
64324+ goto out;
64325+ }
64326 return 1;
64327 }
64328
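
gr_handle_symlink_owner() is invoked after every step that traverses a trailing symlink, so do_last() rejects the walk when the link's owner fails the policy check against the inode it resolves to. A rough userspace analogue of the ownership comparison, using lstat() on the link and stat() on its target (the real check also considers fsuid and the enabled grsecurity options):

#include <stdio.h>
#include <sys/stat.h>

/* return 0 if following "link" would be allowed under an
 * "owner must match target" policy, -1 otherwise */
static int symlink_owner_ok(const char *link)
{
	struct stat lst, tst;

	if (lstat(link, &lst) || !S_ISLNK(lst.st_mode))
		return 0;		/* not a symlink: nothing to check */
	if (stat(link, &tst))
		return -1;		/* dangling link: refuse */
	return lst.st_uid == tst.st_uid ? 0 : -1;
}

int main(int argc, char **argv)
{
	if (argc > 1)
		printf("%s: %s\n", argv[1],
		       symlink_owner_ok(argv[1]) ? "refused" : "ok");
	return 0;
}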
64329@@ -3053,7 +3135,18 @@ finish_open:
64330 path_put(&save_parent);
64331 return error;
64332 }
64333+
64334+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
64335+ error = -ENOENT;
64336+ goto out;
64337+ }
64338+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
64339+ error = -EACCES;
64340+ goto out;
64341+ }
64342+
64343 audit_inode(name, nd->path.dentry, 0);
64344+
64345 error = -EISDIR;
64346 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
64347 goto out;
64348@@ -3214,7 +3307,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64349 if (unlikely(error))
64350 goto out;
64351
64352- error = do_last(nd, &path, file, op, &opened, pathname);
64353+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
64354 while (unlikely(error > 0)) { /* trailing symlink */
64355 struct path link = path;
64356 void *cookie;
64357@@ -3232,7 +3325,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64358 error = follow_link(&link, nd, &cookie);
64359 if (unlikely(error))
64360 break;
64361- error = do_last(nd, &path, file, op, &opened, pathname);
64362+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
64363 put_link(nd, &link, cookie);
64364 }
64365 out:
64366@@ -3329,9 +3422,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
64367 goto unlock;
64368
64369 error = -EEXIST;
64370- if (d_is_positive(dentry))
64371+ if (d_is_positive(dentry)) {
64372+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
64373+ error = -ENOENT;
64374 goto fail;
64375-
64376+ }
64377 /*
64378 * Special case - lookup gave negative, but... we had foo/bar/
64379 * From the vfs_mknod() POV we just have a negative dentry -
64380@@ -3383,6 +3478,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
64381 }
64382 EXPORT_SYMBOL(user_path_create);
64383
64384+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
64385+{
64386+ struct filename *tmp = getname(pathname);
64387+ struct dentry *res;
64388+ if (IS_ERR(tmp))
64389+ return ERR_CAST(tmp);
64390+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
64391+ if (IS_ERR(res))
64392+ putname(tmp);
64393+ else
64394+ *to = tmp;
64395+ return res;
64396+}
64397+
64398 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
64399 {
64400 int error = may_create(dir, dentry);
64401@@ -3446,6 +3555,17 @@ retry:
64402
64403 if (!IS_POSIXACL(path.dentry->d_inode))
64404 mode &= ~current_umask();
64405+
64406+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
64407+ error = -EPERM;
64408+ goto out;
64409+ }
64410+
64411+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
64412+ error = -EACCES;
64413+ goto out;
64414+ }
64415+
64416 error = security_path_mknod(&path, dentry, mode, dev);
64417 if (error)
64418 goto out;
64419@@ -3461,6 +3581,8 @@ retry:
64420 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
64421 break;
64422 }
64423+ if (!error)
64424+ gr_handle_create(dentry, path.mnt);
64425 out:
64426 done_path_create(&path, dentry);
64427 if (retry_estale(error, lookup_flags)) {
64428@@ -3515,9 +3637,16 @@ retry:
64429
64430 if (!IS_POSIXACL(path.dentry->d_inode))
64431 mode &= ~current_umask();
64432+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
64433+ error = -EACCES;
64434+ goto out;
64435+ }
64436 error = security_path_mkdir(&path, dentry, mode);
64437 if (!error)
64438 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
64439+ if (!error)
64440+ gr_handle_create(dentry, path.mnt);
64441+out:
64442 done_path_create(&path, dentry);
64443 if (retry_estale(error, lookup_flags)) {
64444 lookup_flags |= LOOKUP_REVAL;
64445@@ -3601,6 +3730,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
64446 struct filename *name;
64447 struct dentry *dentry;
64448 struct nameidata nd;
64449+ u64 saved_ino = 0;
64450+ dev_t saved_dev = 0;
64451 unsigned int lookup_flags = 0;
64452 retry:
64453 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64454@@ -3633,10 +3764,21 @@ retry:
64455 error = -ENOENT;
64456 goto exit3;
64457 }
64458+
64459+ saved_ino = gr_get_ino_from_dentry(dentry);
64460+ saved_dev = gr_get_dev_from_dentry(dentry);
64461+
64462+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
64463+ error = -EACCES;
64464+ goto exit3;
64465+ }
64466+
64467 error = security_path_rmdir(&nd.path, dentry);
64468 if (error)
64469 goto exit3;
64470 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
64471+ if (!error && (saved_dev || saved_ino))
64472+ gr_handle_delete(saved_ino, saved_dev);
64473 exit3:
64474 dput(dentry);
64475 exit2:
64476@@ -3729,6 +3871,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
64477 struct nameidata nd;
64478 struct inode *inode = NULL;
64479 struct inode *delegated_inode = NULL;
64480+ u64 saved_ino = 0;
64481+ dev_t saved_dev = 0;
64482 unsigned int lookup_flags = 0;
64483 retry:
64484 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64485@@ -3755,10 +3899,22 @@ retry_deleg:
64486 if (d_is_negative(dentry))
64487 goto slashes;
64488 ihold(inode);
64489+
64490+ if (inode->i_nlink <= 1) {
64491+ saved_ino = gr_get_ino_from_dentry(dentry);
64492+ saved_dev = gr_get_dev_from_dentry(dentry);
64493+ }
64494+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
64495+ error = -EACCES;
64496+ goto exit2;
64497+ }
64498+
64499 error = security_path_unlink(&nd.path, dentry);
64500 if (error)
64501 goto exit2;
64502 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
64503+ if (!error && (saved_ino || saved_dev))
64504+ gr_handle_delete(saved_ino, saved_dev);
64505 exit2:
64506 dput(dentry);
64507 }
64508@@ -3847,9 +4003,17 @@ retry:
64509 if (IS_ERR(dentry))
64510 goto out_putname;
64511
64512+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
64513+ error = -EACCES;
64514+ goto out;
64515+ }
64516+
64517 error = security_path_symlink(&path, dentry, from->name);
64518 if (!error)
64519 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
64520+ if (!error)
64521+ gr_handle_create(dentry, path.mnt);
64522+out:
64523 done_path_create(&path, dentry);
64524 if (retry_estale(error, lookup_flags)) {
64525 lookup_flags |= LOOKUP_REVAL;
64526@@ -3953,6 +4117,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
64527 struct dentry *new_dentry;
64528 struct path old_path, new_path;
64529 struct inode *delegated_inode = NULL;
64530+ struct filename *to = NULL;
64531 int how = 0;
64532 int error;
64533
64534@@ -3976,7 +4141,7 @@ retry:
64535 if (error)
64536 return error;
64537
64538- new_dentry = user_path_create(newdfd, newname, &new_path,
64539+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
64540 (how & LOOKUP_REVAL));
64541 error = PTR_ERR(new_dentry);
64542 if (IS_ERR(new_dentry))
64543@@ -3988,11 +4153,28 @@ retry:
64544 error = may_linkat(&old_path);
64545 if (unlikely(error))
64546 goto out_dput;
64547+
64548+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
64549+ old_path.dentry->d_inode,
64550+ old_path.dentry->d_inode->i_mode, to)) {
64551+ error = -EACCES;
64552+ goto out_dput;
64553+ }
64554+
64555+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
64556+ old_path.dentry, old_path.mnt, to)) {
64557+ error = -EACCES;
64558+ goto out_dput;
64559+ }
64560+
64561 error = security_path_link(old_path.dentry, &new_path, new_dentry);
64562 if (error)
64563 goto out_dput;
64564 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
64565+ if (!error)
64566+ gr_handle_create(new_dentry, new_path.mnt);
64567 out_dput:
64568+ putname(to);
64569 done_path_create(&new_path, new_dentry);
64570 if (delegated_inode) {
64571 error = break_deleg_wait(&delegated_inode);
64572@@ -4308,6 +4490,20 @@ retry_deleg:
64573 if (new_dentry == trap)
64574 goto exit5;
64575
64576+ if (gr_bad_chroot_rename(old_dentry, oldnd.path.mnt, new_dentry, newnd.path.mnt)) {
64577+ /* use EXDEV error to cause 'mv' to switch to an alternative
64578+ * method for usability
64579+ */
64580+ error = -EXDEV;
64581+ goto exit5;
64582+ }
64583+
64584+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
64585+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
64586+ to, flags);
64587+ if (error)
64588+ goto exit5;
64589+
64590 error = security_path_rename(&oldnd.path, old_dentry,
64591 &newnd.path, new_dentry, flags);
64592 if (error)
64593@@ -4315,6 +4511,9 @@ retry_deleg:
64594 error = vfs_rename(old_dir->d_inode, old_dentry,
64595 new_dir->d_inode, new_dentry,
64596 &delegated_inode, flags);
64597+ if (!error)
64598+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
64599+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
64600 exit5:
64601 dput(new_dentry);
64602 exit4:
64603@@ -4371,14 +4570,24 @@ EXPORT_SYMBOL(vfs_whiteout);
64604
64605 int readlink_copy(char __user *buffer, int buflen, const char *link)
64606 {
64607+ char tmpbuf[64];
64608+ const char *newlink;
64609 int len = PTR_ERR(link);
64610+
64611 if (IS_ERR(link))
64612 goto out;
64613
64614 len = strlen(link);
64615 if (len > (unsigned) buflen)
64616 len = buflen;
64617- if (copy_to_user(buffer, link, len))
64618+
64619+ if (len < sizeof(tmpbuf)) {
64620+ memcpy(tmpbuf, link, len);
64621+ newlink = tmpbuf;
64622+ } else
64623+ newlink = link;
64624+
64625+ if (copy_to_user(buffer, newlink, len))
64626 len = -EFAULT;
64627 out:
64628 return len;
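
The rewritten readlink_copy() above bounces short link bodies through a 64-byte stack buffer before copy_to_user(), so the common case no longer copies to userspace directly out of the shared kernel string. The same pattern in plain C, with memcpy() standing in for copy_to_user():

#include <stdio.h>
#include <string.h>

/* copy up to buflen bytes of "link" into buffer, preferring a
 * private stack copy for short strings (sketch of the pattern) */
static int bounce_copy(char *buffer, int buflen, const char *link)
{
	char tmpbuf[64];
	const char *src = link;
	int len = (int)strlen(link);

	if (len > buflen)
		len = buflen;
	if (len < (int)sizeof(tmpbuf)) {
		memcpy(tmpbuf, link, len);
		src = tmpbuf;
	}
	memcpy(buffer, src, len);	/* copy_to_user() in the kernel */
	return len;
}

int main(void)
{
	char out[16];
	int n = bounce_copy(out, sizeof(out) - 1, "/target/path");

	out[n] = '\0';
	printf("%d: %s\n", n, out);
	return 0;
}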
64629diff --git a/fs/namespace.c b/fs/namespace.c
64630index cd1e968..e64ff16 100644
64631--- a/fs/namespace.c
64632+++ b/fs/namespace.c
64633@@ -1448,6 +1448,9 @@ static int do_umount(struct mount *mnt, int flags)
64634 if (!(sb->s_flags & MS_RDONLY))
64635 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
64636 up_write(&sb->s_umount);
64637+
64638+ gr_log_remount(mnt->mnt_devname, retval);
64639+
64640 return retval;
64641 }
64642
64643@@ -1470,6 +1473,9 @@ static int do_umount(struct mount *mnt, int flags)
64644 }
64645 unlock_mount_hash();
64646 namespace_unlock();
64647+
64648+ gr_log_unmount(mnt->mnt_devname, retval);
64649+
64650 return retval;
64651 }
64652
64653@@ -1520,7 +1526,7 @@ static inline bool may_mount(void)
64654 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
64655 */
64656
64657-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
64658+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
64659 {
64660 struct path path;
64661 struct mount *mnt;
64662@@ -1565,7 +1571,7 @@ out:
64663 /*
64664 * The 2.0 compatible umount. No flags.
64665 */
64666-SYSCALL_DEFINE1(oldumount, char __user *, name)
64667+SYSCALL_DEFINE1(oldumount, const char __user *, name)
64668 {
64669 return sys_umount(name, 0);
64670 }
64671@@ -2631,6 +2637,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
64672 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
64673 MS_STRICTATIME);
64674
64675+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
64676+ retval = -EPERM;
64677+ goto dput_out;
64678+ }
64679+
64680+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
64681+ retval = -EPERM;
64682+ goto dput_out;
64683+ }
64684+
64685 if (flags & MS_REMOUNT)
64686 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
64687 data_page);
64688@@ -2644,7 +2660,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
64689 retval = do_new_mount(&path, type_page, flags, mnt_flags,
64690 dev_name, data_page);
64691 dput_out:
64692+ gr_log_mount(dev_name, &path, retval);
64693+
64694 path_put(&path);
64695+
64696 return retval;
64697 }
64698
64699@@ -2662,7 +2681,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
64700 * number incrementing at 10Ghz will take 12,427 years to wrap which
64701 * is effectively never, so we can ignore the possibility.
64702 */
64703-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
64704+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
64705
64706 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64707 {
64708@@ -2678,7 +2697,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64709 return ERR_PTR(ret);
64710 }
64711 new_ns->ns.ops = &mntns_operations;
64712- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
64713+ new_ns->seq = atomic64_add_return_unchecked(1, &mnt_ns_seq);
64714 atomic_set(&new_ns->count, 1);
64715 new_ns->root = NULL;
64716 INIT_LIST_HEAD(&new_ns->list);
64717@@ -2688,7 +2707,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64718 return new_ns;
64719 }
64720
64721-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
64722+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
64723 struct user_namespace *user_ns, struct fs_struct *new_fs)
64724 {
64725 struct mnt_namespace *new_ns;
64726@@ -2809,8 +2828,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
64727 }
64728 EXPORT_SYMBOL(mount_subtree);
64729
64730-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
64731- char __user *, type, unsigned long, flags, void __user *, data)
64732+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
64733+ const char __user *, type, unsigned long, flags, void __user *, data)
64734 {
64735 int ret;
64736 char *kernel_type;
64737@@ -2916,6 +2935,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
64738 if (error)
64739 goto out2;
64740
64741+ if (gr_handle_chroot_pivot()) {
64742+ error = -EPERM;
64743+ goto out2;
64744+ }
64745+
64746 get_fs_root(current->fs, &root);
64747 old_mp = lock_mount(&old);
64748 error = PTR_ERR(old_mp);
64749@@ -3190,7 +3214,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
64750 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
64751 return -EPERM;
64752
64753- if (fs->users != 1)
64754+ if (atomic_read(&fs->users) != 1)
64755 return -EINVAL;
64756
64757 get_mnt_ns(mnt_ns);
64758diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
64759index 02f8d09..a5c25d1 100644
64760--- a/fs/nfs/callback_xdr.c
64761+++ b/fs/nfs/callback_xdr.c
64762@@ -51,7 +51,7 @@ struct callback_op {
64763 callback_decode_arg_t decode_args;
64764 callback_encode_res_t encode_res;
64765 long res_maxsize;
64766-};
64767+} __do_const;
64768
64769 static struct callback_op callback_ops[];
64770
64771diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
64772index 2211f6b..30d0950 100644
64773--- a/fs/nfs/inode.c
64774+++ b/fs/nfs/inode.c
64775@@ -1234,16 +1234,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
64776 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
64777 }
64778
64779-static atomic_long_t nfs_attr_generation_counter;
64780+static atomic_long_unchecked_t nfs_attr_generation_counter;
64781
64782 static unsigned long nfs_read_attr_generation_counter(void)
64783 {
64784- return atomic_long_read(&nfs_attr_generation_counter);
64785+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
64786 }
64787
64788 unsigned long nfs_inc_attr_generation_counter(void)
64789 {
64790- return atomic_long_inc_return(&nfs_attr_generation_counter);
64791+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
64792 }
64793
64794 void nfs_fattr_init(struct nfs_fattr *fattr)
64795diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
64796index ac71d13..a2e590a 100644
64797--- a/fs/nfsd/nfs4proc.c
64798+++ b/fs/nfsd/nfs4proc.c
64799@@ -1237,7 +1237,7 @@ struct nfsd4_operation {
64800 nfsd4op_rsize op_rsize_bop;
64801 stateid_getter op_get_currentstateid;
64802 stateid_setter op_set_currentstateid;
64803-};
64804+} __do_const;
64805
64806 static struct nfsd4_operation nfsd4_ops[];
64807
64808diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
64809index 15f7b73..00e230b 100644
64810--- a/fs/nfsd/nfs4xdr.c
64811+++ b/fs/nfsd/nfs4xdr.c
64812@@ -1560,7 +1560,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
64813
64814 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
64815
64816-static nfsd4_dec nfsd4_dec_ops[] = {
64817+static const nfsd4_dec nfsd4_dec_ops[] = {
64818 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
64819 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
64820 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
64821diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
64822index 83a9694..6b7f928 100644
64823--- a/fs/nfsd/nfscache.c
64824+++ b/fs/nfsd/nfscache.c
64825@@ -537,7 +537,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64826 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
64827 u32 hash;
64828 struct nfsd_drc_bucket *b;
64829- int len;
64830+ long len;
64831 size_t bufsize = 0;
64832
64833 if (!rp)
64834@@ -546,11 +546,14 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64835 hash = nfsd_cache_hash(rp->c_xid);
64836 b = &drc_hashtbl[hash];
64837
64838- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
64839- len >>= 2;
64840+ if (statp) {
64841+ len = (char*)statp - (char*)resv->iov_base;
64842+ len = resv->iov_len - len;
64843+ len >>= 2;
64844+ }
64845
64846 /* Don't cache excessive amounts of data and XDR failures */
64847- if (!statp || len > (256 >> 2)) {
64848+ if (!statp || len > (256 >> 2) || len < 0) {
64849 nfsd_reply_cache_free(b, rp);
64850 return;
64851 }
64852@@ -558,7 +561,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64853 switch (cachetype) {
64854 case RC_REPLSTAT:
64855 if (len != 1)
64856- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
64857+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
64858 rp->c_replstat = *statp;
64859 break;
64860 case RC_REPLBUFF:
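
The nfsd_cache_update() change widens len to long and adds a "len < 0" test: with the old int arithmetic, a statp lying outside the reply iovec produced a negative length that sailed past the upper-bound-only check. A self-contained demonstration of why the extra test matters:

#include <stdio.h>

int main(void)
{
	char buf[8192];
	char *iov_base = buf;
	long  iov_len  = 128;

	/* pathological: statp points past the reply iovec */
	char *statp = buf + 4096;

	long len = iov_len - (statp - iov_base);	/* 128 - 4096 = -3968 */
	len >>= 2;	/* stays negative (arithmetic shift on common ABIs) */

	/* upper bound alone, as before the fix: negative len slips by */
	printf("old check rejects: %d\n", len > (256 >> 2));
	/* the fixed check also rejects negative lengths */
	printf("new check rejects: %d\n", len > (256 >> 2) || len < 0);
	return 0;
}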
64861diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
64862index 5685c67..73029ef 100644
64863--- a/fs/nfsd/vfs.c
64864+++ b/fs/nfsd/vfs.c
64865@@ -893,7 +893,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
64866
64867 oldfs = get_fs();
64868 set_fs(KERNEL_DS);
64869- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
64870+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
64871 set_fs(oldfs);
64872 return nfsd_finish_read(file, count, host_err);
64873 }
64874@@ -980,7 +980,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
64875
64876 /* Write the data. */
64877 oldfs = get_fs(); set_fs(KERNEL_DS);
64878- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
64879+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
64880 set_fs(oldfs);
64881 if (host_err < 0)
64882 goto out_nfserr;
64883@@ -1525,7 +1525,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
64884 */
64885
64886 oldfs = get_fs(); set_fs(KERNEL_DS);
64887- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
64888+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
64889 set_fs(oldfs);
64890
64891 if (host_err < 0)
64892diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
64893index 52ccd34..7a6b202 100644
64894--- a/fs/nls/nls_base.c
64895+++ b/fs/nls/nls_base.c
64896@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
64897
64898 int __register_nls(struct nls_table *nls, struct module *owner)
64899 {
64900- struct nls_table ** tmp = &tables;
64901+ struct nls_table *tmp = tables;
64902
64903 if (nls->next)
64904 return -EBUSY;
64905
64906- nls->owner = owner;
64907+ pax_open_kernel();
64908+ *(void **)&nls->owner = owner;
64909+ pax_close_kernel();
64910 spin_lock(&nls_lock);
64911- while (*tmp) {
64912- if (nls == *tmp) {
64913+ while (tmp) {
64914+ if (nls == tmp) {
64915 spin_unlock(&nls_lock);
64916 return -EBUSY;
64917 }
64918- tmp = &(*tmp)->next;
64919+ tmp = tmp->next;
64920 }
64921- nls->next = tables;
64922+ pax_open_kernel();
64923+ *(struct nls_table **)&nls->next = tables;
64924+ pax_close_kernel();
64925 tables = nls;
64926 spin_unlock(&nls_lock);
64927 return 0;
64928@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
64929
64930 int unregister_nls(struct nls_table * nls)
64931 {
64932- struct nls_table ** tmp = &tables;
64933+ struct nls_table * const * tmp = &tables;
64934
64935 spin_lock(&nls_lock);
64936 while (*tmp) {
64937 if (nls == *tmp) {
64938- *tmp = nls->next;
64939+ pax_open_kernel();
64940+ *(struct nls_table **)tmp = nls->next;
64941+ pax_close_kernel();
64942 spin_unlock(&nls_lock);
64943 return 0;
64944 }
64945@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
64946 return -EINVAL;
64947 }
64948
64949-static struct nls_table *find_nls(char *charset)
64950+static struct nls_table *find_nls(const char *charset)
64951 {
64952 struct nls_table *nls;
64953 spin_lock(&nls_lock);
64954@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
64955 return nls;
64956 }
64957
64958-struct nls_table *load_nls(char *charset)
64959+struct nls_table *load_nls(const char *charset)
64960 {
64961 return try_then_request_module(find_nls(charset), "nls_%s", charset);
64962 }
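
With the NLS table fields moved into read-only memory, registration writes go through pax_open_kernel()/pax_close_kernel(), which briefly lift write protection around the single assignment. A userspace analogue of the same open-write-close discipline, using mprotect() on a page-sized object (the helper names mirror the PaX calls but are purely illustrative):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static long pagesz;
static char *table;	/* normally read-only */

static void open_rw(void)  { mprotect(table, pagesz, PROT_READ | PROT_WRITE); }
static void close_rw(void) { mprotect(table, pagesz, PROT_READ); }

int main(void)
{
	pagesz = sysconf(_SC_PAGESIZE);
	table = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (table == MAP_FAILED)
		return 1;
	strcpy(table, "initial");
	mprotect(table, pagesz, PROT_READ);	/* seal it */

	open_rw();				/* pax_open_kernel() analogue */
	strcpy(table, "updated");
	close_rw();				/* pax_close_kernel() analogue */

	printf("%s\n", table);
	return 0;
}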
64963diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
64964index 162b3f1..6076a7c 100644
64965--- a/fs/nls/nls_euc-jp.c
64966+++ b/fs/nls/nls_euc-jp.c
64967@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
64968 p_nls = load_nls("cp932");
64969
64970 if (p_nls) {
64971- table.charset2upper = p_nls->charset2upper;
64972- table.charset2lower = p_nls->charset2lower;
64973+ pax_open_kernel();
64974+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
64975+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
64976+ pax_close_kernel();
64977 return register_nls(&table);
64978 }
64979
64980diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
64981index a80a741..7b96e1b 100644
64982--- a/fs/nls/nls_koi8-ru.c
64983+++ b/fs/nls/nls_koi8-ru.c
64984@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
64985 p_nls = load_nls("koi8-u");
64986
64987 if (p_nls) {
64988- table.charset2upper = p_nls->charset2upper;
64989- table.charset2lower = p_nls->charset2lower;
64990+ pax_open_kernel();
64991+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
64992+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
64993+ pax_close_kernel();
64994 return register_nls(&table);
64995 }
64996
64997diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
64998index bff8567..83281c6 100644
64999--- a/fs/notify/fanotify/fanotify_user.c
65000+++ b/fs/notify/fanotify/fanotify_user.c
65001@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
65002
65003 fd = fanotify_event_metadata.fd;
65004 ret = -EFAULT;
65005- if (copy_to_user(buf, &fanotify_event_metadata,
65006- fanotify_event_metadata.event_len))
65007+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
65008+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
65009 goto out_close_fd;
65010
65011 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
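
The added bound above rejects any event whose self-declared event_len exceeds the structure actually sitting on the kernel stack, so a corrupted length can no longer make copy_to_user() leak adjacent stack memory. The defensive shape in miniature (struct layout is a stand-in for the fanotify metadata):

#include <stdio.h>
#include <string.h>

struct event_metadata {
	unsigned int event_len;		/* self-describing length */
	int fd;
	char payload[24];
};

/* copy at most the bytes we actually have; reject liars */
static int copy_event(char *ubuf, size_t ulen,
		      const struct event_metadata *ev)
{
	if (ev->event_len > sizeof(*ev) || ev->event_len > ulen)
		return -1;			/* -EFAULT in the kernel path */
	memcpy(ubuf, ev, ev->event_len);	/* copy_to_user() analogue */
	return (int)ev->event_len;
}

int main(void)
{
	struct event_metadata ev = { sizeof(ev), 3, "hello" };
	char buf[64];

	printf("ok: %d\n", copy_event(buf, sizeof(buf), &ev));
	ev.event_len = 4096;			/* corrupted length */
	printf("rejected: %d\n", copy_event(buf, sizeof(buf), &ev));
	return 0;
}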
65012diff --git a/fs/notify/notification.c b/fs/notify/notification.c
65013index a95d8e0..a91a5fd 100644
65014--- a/fs/notify/notification.c
65015+++ b/fs/notify/notification.c
65016@@ -48,7 +48,7 @@
65017 #include <linux/fsnotify_backend.h>
65018 #include "fsnotify.h"
65019
65020-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65021+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65022
65023 /**
65024 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
65025@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65026 */
65027 u32 fsnotify_get_cookie(void)
65028 {
65029- return atomic_inc_return(&fsnotify_sync_cookie);
65030+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
65031 }
65032 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
65033
65034diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
65035index 9e38daf..5727cae 100644
65036--- a/fs/ntfs/dir.c
65037+++ b/fs/ntfs/dir.c
65038@@ -1310,7 +1310,7 @@ find_next_index_buffer:
65039 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
65040 ~(s64)(ndir->itype.index.block_size - 1)));
65041 /* Bounds checks. */
65042- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
65043+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
65044 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
65045 "inode 0x%lx or driver bug.", vdir->i_ino);
65046 goto err_out;
65047diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
65048index 643faa4..ef9027e 100644
65049--- a/fs/ntfs/file.c
65050+++ b/fs/ntfs/file.c
65051@@ -1280,7 +1280,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
65052 char *addr;
65053 size_t total = 0;
65054 unsigned len;
65055- int left;
65056+ unsigned left;
65057
65058 do {
65059 len = PAGE_CACHE_SIZE - ofs;
65060diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
65061index 9e1e112..241a52a 100644
65062--- a/fs/ntfs/super.c
65063+++ b/fs/ntfs/super.c
65064@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65065 if (!silent)
65066 ntfs_error(sb, "Primary boot sector is invalid.");
65067 } else if (!silent)
65068- ntfs_error(sb, read_err_str, "primary");
65069+ ntfs_error(sb, read_err_str, "%s", "primary");
65070 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
65071 if (bh_primary)
65072 brelse(bh_primary);
65073@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65074 goto hotfix_primary_boot_sector;
65075 brelse(bh_backup);
65076 } else if (!silent)
65077- ntfs_error(sb, read_err_str, "backup");
65078+ ntfs_error(sb, read_err_str, "%s", "backup");
65079 /* Try to read NT3.51- backup boot sector. */
65080 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
65081 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
65082@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65083 "sector.");
65084 brelse(bh_backup);
65085 } else if (!silent)
65086- ntfs_error(sb, read_err_str, "backup");
65087+ ntfs_error(sb, read_err_str, "%s", "backup");
65088 /* We failed. Cleanup and return. */
65089 if (bh_primary)
65090 brelse(bh_primary);
65091diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
65092index 0440134..d52c93a 100644
65093--- a/fs/ocfs2/localalloc.c
65094+++ b/fs/ocfs2/localalloc.c
65095@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
65096 goto bail;
65097 }
65098
65099- atomic_inc(&osb->alloc_stats.moves);
65100+ atomic_inc_unchecked(&osb->alloc_stats.moves);
65101
65102 bail:
65103 if (handle)
65104diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
65105index 7d6b7d0..5fb529a 100644
65106--- a/fs/ocfs2/ocfs2.h
65107+++ b/fs/ocfs2/ocfs2.h
65108@@ -242,11 +242,11 @@ enum ocfs2_vol_state
65109
65110 struct ocfs2_alloc_stats
65111 {
65112- atomic_t moves;
65113- atomic_t local_data;
65114- atomic_t bitmap_data;
65115- atomic_t bg_allocs;
65116- atomic_t bg_extends;
65117+ atomic_unchecked_t moves;
65118+ atomic_unchecked_t local_data;
65119+ atomic_unchecked_t bitmap_data;
65120+ atomic_unchecked_t bg_allocs;
65121+ atomic_unchecked_t bg_extends;
65122 };
65123
65124 enum ocfs2_local_alloc_state
65125diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
65126index d81f6e2..e794c38 100644
65127--- a/fs/ocfs2/refcounttree.c
65128+++ b/fs/ocfs2/refcounttree.c
65129@@ -4278,7 +4278,7 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
65130 error = posix_acl_create(dir, &mode, &default_acl, &acl);
65131 if (error) {
65132 mlog_errno(error);
65133- goto out;
65134+ return error;
65135 }
65136
65137 error = ocfs2_create_inode_in_orphan(dir, mode,
65138diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
65139index 0cb889a..6a26b24 100644
65140--- a/fs/ocfs2/suballoc.c
65141+++ b/fs/ocfs2/suballoc.c
65142@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
65143 mlog_errno(status);
65144 goto bail;
65145 }
65146- atomic_inc(&osb->alloc_stats.bg_extends);
65147+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
65148
65149 /* You should never ask for this much metadata */
65150 BUG_ON(bits_wanted >
65151@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
65152 mlog_errno(status);
65153 goto bail;
65154 }
65155- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65156+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65157
65158 *suballoc_loc = res.sr_bg_blkno;
65159 *suballoc_bit_start = res.sr_bit_offset;
65160@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
65161 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
65162 res->sr_bits);
65163
65164- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65165+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65166
65167 BUG_ON(res->sr_bits != 1);
65168
65169@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
65170 mlog_errno(status);
65171 goto bail;
65172 }
65173- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65174+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65175
65176 BUG_ON(res.sr_bits != 1);
65177
65178@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
65179 cluster_start,
65180 num_clusters);
65181 if (!status)
65182- atomic_inc(&osb->alloc_stats.local_data);
65183+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
65184 } else {
65185 if (min_clusters > (osb->bitmap_cpg - 1)) {
65186 /* The only paths asking for contiguousness
65187@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
65188 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
65189 res.sr_bg_blkno,
65190 res.sr_bit_offset);
65191- atomic_inc(&osb->alloc_stats.bitmap_data);
65192+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
65193 *num_clusters = res.sr_bits;
65194 }
65195 }
65196diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
65197index 8372317..ec86e79 100644
65198--- a/fs/ocfs2/super.c
65199+++ b/fs/ocfs2/super.c
65200@@ -306,11 +306,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
65201 "%10s => GlobalAllocs: %d LocalAllocs: %d "
65202 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
65203 "Stats",
65204- atomic_read(&osb->alloc_stats.bitmap_data),
65205- atomic_read(&osb->alloc_stats.local_data),
65206- atomic_read(&osb->alloc_stats.bg_allocs),
65207- atomic_read(&osb->alloc_stats.moves),
65208- atomic_read(&osb->alloc_stats.bg_extends));
65209+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
65210+ atomic_read_unchecked(&osb->alloc_stats.local_data),
65211+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
65212+ atomic_read_unchecked(&osb->alloc_stats.moves),
65213+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
65214
65215 out += snprintf(buf + out, len - out,
65216 "%10s => State: %u Descriptor: %llu Size: %u bits "
65217@@ -2113,11 +2113,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
65218
65219 mutex_init(&osb->system_file_mutex);
65220
65221- atomic_set(&osb->alloc_stats.moves, 0);
65222- atomic_set(&osb->alloc_stats.local_data, 0);
65223- atomic_set(&osb->alloc_stats.bitmap_data, 0);
65224- atomic_set(&osb->alloc_stats.bg_allocs, 0);
65225- atomic_set(&osb->alloc_stats.bg_extends, 0);
65226+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
65227+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
65228+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
65229+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
65230+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
65231
65232 /* Copy the blockcheck stats from the superblock probe */
65233 osb->osb_ecc_stats = *stats;
65234diff --git a/fs/open.c b/fs/open.c
65235index 813be03..781941d 100644
65236--- a/fs/open.c
65237+++ b/fs/open.c
65238@@ -32,6 +32,8 @@
65239 #include <linux/dnotify.h>
65240 #include <linux/compat.h>
65241
65242+#define CREATE_TRACE_POINTS
65243+#include <trace/events/fs.h>
65244 #include "internal.h"
65245
65246 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
65247@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
65248 error = locks_verify_truncate(inode, NULL, length);
65249 if (!error)
65250 error = security_path_truncate(path);
65251+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
65252+ error = -EACCES;
65253 if (!error)
65254 error = do_truncate(path->dentry, length, 0, NULL);
65255
65256@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
65257 error = locks_verify_truncate(inode, f.file, length);
65258 if (!error)
65259 error = security_path_truncate(&f.file->f_path);
65260+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
65261+ error = -EACCES;
65262 if (!error)
65263 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
65264 sb_end_write(inode->i_sb);
65265@@ -392,6 +398,9 @@ retry:
65266 if (__mnt_is_readonly(path.mnt))
65267 res = -EROFS;
65268
65269+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
65270+ res = -EACCES;
65271+
65272 out_path_release:
65273 path_put(&path);
65274 if (retry_estale(res, lookup_flags)) {
65275@@ -423,6 +432,8 @@ retry:
65276 if (error)
65277 goto dput_and_out;
65278
65279+ gr_log_chdir(path.dentry, path.mnt);
65280+
65281 set_fs_pwd(current->fs, &path);
65282
65283 dput_and_out:
65284@@ -452,6 +463,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
65285 goto out_putf;
65286
65287 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
65288+
65289+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
65290+ error = -EPERM;
65291+
65292+ if (!error)
65293+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
65294+
65295 if (!error)
65296 set_fs_pwd(current->fs, &f.file->f_path);
65297 out_putf:
65298@@ -481,7 +499,13 @@ retry:
65299 if (error)
65300 goto dput_and_out;
65301
65302+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
65303+ goto dput_and_out;
65304+
65305 set_fs_root(current->fs, &path);
65306+
65307+ gr_handle_chroot_chdir(&path);
65308+
65309 error = 0;
65310 dput_and_out:
65311 path_put(&path);
65312@@ -505,6 +529,16 @@ static int chmod_common(struct path *path, umode_t mode)
65313 return error;
65314 retry_deleg:
65315 mutex_lock(&inode->i_mutex);
65316+
65317+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
65318+ error = -EACCES;
65319+ goto out_unlock;
65320+ }
65321+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
65322+ error = -EACCES;
65323+ goto out_unlock;
65324+ }
65325+
65326 error = security_path_chmod(path, mode);
65327 if (error)
65328 goto out_unlock;
65329@@ -570,6 +604,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
65330 uid = make_kuid(current_user_ns(), user);
65331 gid = make_kgid(current_user_ns(), group);
65332
65333+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
65334+ return -EACCES;
65335+
65336 newattrs.ia_valid = ATTR_CTIME;
65337 if (user != (uid_t) -1) {
65338 if (!uid_valid(uid))
65339@@ -1014,6 +1051,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
65340 } else {
65341 fsnotify_open(f);
65342 fd_install(fd, f);
65343+ trace_do_sys_open(tmp->name, flags, mode);
65344 }
65345 }
65346 putname(tmp);
65347diff --git a/fs/pipe.c b/fs/pipe.c
65348index 21981e5..3d5f55c 100644
65349--- a/fs/pipe.c
65350+++ b/fs/pipe.c
65351@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
65352
65353 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
65354 {
65355- if (pipe->files)
65356+ if (atomic_read(&pipe->files))
65357 mutex_lock_nested(&pipe->mutex, subclass);
65358 }
65359
65360@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
65361
65362 void pipe_unlock(struct pipe_inode_info *pipe)
65363 {
65364- if (pipe->files)
65365+ if (atomic_read(&pipe->files))
65366 mutex_unlock(&pipe->mutex);
65367 }
65368 EXPORT_SYMBOL(pipe_unlock);
65369@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
65370 }
65371 if (bufs) /* More to do? */
65372 continue;
65373- if (!pipe->writers)
65374+ if (!atomic_read(&pipe->writers))
65375 break;
65376- if (!pipe->waiting_writers) {
65377+ if (!atomic_read(&pipe->waiting_writers)) {
65378 /* syscall merging: Usually we must not sleep
65379 * if O_NONBLOCK is set, or if we got some data.
65380 * But if a writer sleeps in kernel space, then
65381@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65382
65383 __pipe_lock(pipe);
65384
65385- if (!pipe->readers) {
65386+ if (!atomic_read(&pipe->readers)) {
65387 send_sig(SIGPIPE, current, 0);
65388 ret = -EPIPE;
65389 goto out;
65390@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65391 for (;;) {
65392 int bufs;
65393
65394- if (!pipe->readers) {
65395+ if (!atomic_read(&pipe->readers)) {
65396 send_sig(SIGPIPE, current, 0);
65397 if (!ret)
65398 ret = -EPIPE;
65399@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65400 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65401 do_wakeup = 0;
65402 }
65403- pipe->waiting_writers++;
65404+ atomic_inc(&pipe->waiting_writers);
65405 pipe_wait(pipe);
65406- pipe->waiting_writers--;
65407+ atomic_dec(&pipe->waiting_writers);
65408 }
65409 out:
65410 __pipe_unlock(pipe);
65411@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65412 mask = 0;
65413 if (filp->f_mode & FMODE_READ) {
65414 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
65415- if (!pipe->writers && filp->f_version != pipe->w_counter)
65416+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
65417 mask |= POLLHUP;
65418 }
65419
65420@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65421 * Most Unices do not set POLLERR for FIFOs but on Linux they
65422 * behave exactly like pipes for poll().
65423 */
65424- if (!pipe->readers)
65425+ if (!atomic_read(&pipe->readers))
65426 mask |= POLLERR;
65427 }
65428
65429@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
65430 int kill = 0;
65431
65432 spin_lock(&inode->i_lock);
65433- if (!--pipe->files) {
65434+ if (atomic_dec_and_test(&pipe->files)) {
65435 inode->i_pipe = NULL;
65436 kill = 1;
65437 }
65438@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
65439
65440 __pipe_lock(pipe);
65441 if (file->f_mode & FMODE_READ)
65442- pipe->readers--;
65443+ atomic_dec(&pipe->readers);
65444 if (file->f_mode & FMODE_WRITE)
65445- pipe->writers--;
65446+ atomic_dec(&pipe->writers);
65447
65448- if (pipe->readers || pipe->writers) {
65449+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
65450 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
65451 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65452 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
65453@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
65454 kfree(pipe);
65455 }
65456
65457-static struct vfsmount *pipe_mnt __read_mostly;
65458+struct vfsmount *pipe_mnt __read_mostly;
65459
65460 /*
65461 * pipefs_dname() is called from d_path().
65462@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
65463 goto fail_iput;
65464
65465 inode->i_pipe = pipe;
65466- pipe->files = 2;
65467- pipe->readers = pipe->writers = 1;
65468+ atomic_set(&pipe->files, 2);
65469+ atomic_set(&pipe->readers, 1);
65470+ atomic_set(&pipe->writers, 1);
65471 inode->i_fop = &pipefifo_fops;
65472
65473 /*
65474@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
65475 spin_lock(&inode->i_lock);
65476 if (inode->i_pipe) {
65477 pipe = inode->i_pipe;
65478- pipe->files++;
65479+ atomic_inc(&pipe->files);
65480 spin_unlock(&inode->i_lock);
65481 } else {
65482 spin_unlock(&inode->i_lock);
65483 pipe = alloc_pipe_info();
65484 if (!pipe)
65485 return -ENOMEM;
65486- pipe->files = 1;
65487+ atomic_set(&pipe->files, 1);
65488 spin_lock(&inode->i_lock);
65489 if (unlikely(inode->i_pipe)) {
65490- inode->i_pipe->files++;
65491+ atomic_inc(&inode->i_pipe->files);
65492 spin_unlock(&inode->i_lock);
65493 free_pipe_info(pipe);
65494 pipe = inode->i_pipe;
65495@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
65496 * opened, even when there is no process writing the FIFO.
65497 */
65498 pipe->r_counter++;
65499- if (pipe->readers++ == 0)
65500+ if (atomic_inc_return(&pipe->readers) == 1)
65501 wake_up_partner(pipe);
65502
65503- if (!is_pipe && !pipe->writers) {
65504+ if (!is_pipe && !atomic_read(&pipe->writers)) {
65505 if ((filp->f_flags & O_NONBLOCK)) {
65506 /* suppress POLLHUP until we have
65507 * seen a writer */
65508@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
65509 * errno=ENXIO when there is no process reading the FIFO.
65510 */
65511 ret = -ENXIO;
65512- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
65513+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
65514 goto err;
65515
65516 pipe->w_counter++;
65517- if (!pipe->writers++)
65518+ if (atomic_inc_return(&pipe->writers) == 1)
65519 wake_up_partner(pipe);
65520
65521- if (!is_pipe && !pipe->readers) {
65522+ if (!is_pipe && !atomic_read(&pipe->readers)) {
65523 if (wait_for_partner(pipe, &pipe->r_counter))
65524 goto err_wr;
65525 }
65526@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
65527 * the process can at least talk to itself.
65528 */
65529
65530- pipe->readers++;
65531- pipe->writers++;
65532+ atomic_inc(&pipe->readers);
65533+ atomic_inc(&pipe->writers);
65534 pipe->r_counter++;
65535 pipe->w_counter++;
65536- if (pipe->readers == 1 || pipe->writers == 1)
65537+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
65538 wake_up_partner(pipe);
65539 break;
65540
65541@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
65542 return 0;
65543
65544 err_rd:
65545- if (!--pipe->readers)
65546+ if (atomic_dec_and_test(&pipe->readers))
65547 wake_up_interruptible(&pipe->wait);
65548 ret = -ERESTARTSYS;
65549 goto err;
65550
65551 err_wr:
65552- if (!--pipe->writers)
65553+ if (atomic_dec_and_test(&pipe->writers))
65554 wake_up_interruptible(&pipe->wait);
65555 ret = -ERESTARTSYS;
65556 goto err;
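
Converting the pipe readers/writers/files/waiting_writers counters to atomic_t pairs naturally with PaX's reference-count hardening and lets the open/release paths use inc-and-test idioms (atomic_inc_return(...) == 1, atomic_dec_and_test(...)) instead of bare ++/--. The same idioms expressed in portable C11, as a sketch of the fifo_open()/pipe_release() pattern:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int readers;

static void open_reader(void)
{
	/* first opener wakes the partner, as in fifo_open() */
	if (atomic_fetch_add(&readers, 1) + 1 == 1)
		printf("first reader: wake partner\n");
}

static void close_reader(void)
{
	/* last closer tears down, as in pipe_release() */
	if (atomic_fetch_sub(&readers, 1) - 1 == 0)
		printf("last reader: wake waiters\n");
}

int main(void)
{
	open_reader();
	open_reader();
	close_reader();
	close_reader();
	return 0;
}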
65557diff --git a/fs/posix_acl.c b/fs/posix_acl.c
65558index 0855f77..6787d50 100644
65559--- a/fs/posix_acl.c
65560+++ b/fs/posix_acl.c
65561@@ -20,6 +20,7 @@
65562 #include <linux/xattr.h>
65563 #include <linux/export.h>
65564 #include <linux/user_namespace.h>
65565+#include <linux/grsecurity.h>
65566
65567 struct posix_acl **acl_by_type(struct inode *inode, int type)
65568 {
65569@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
65570 }
65571 }
65572 if (mode_p)
65573- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65574+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65575 return not_equiv;
65576 }
65577 EXPORT_SYMBOL(posix_acl_equiv_mode);
65578@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
65579 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
65580 }
65581
65582- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65583+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65584 return not_equiv;
65585 }
65586
65587@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
65588 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
65589 int err = -ENOMEM;
65590 if (clone) {
65591+ *mode_p &= ~gr_acl_umask();
65592+
65593 err = posix_acl_create_masq(clone, mode_p);
65594 if (err < 0) {
65595 posix_acl_release(clone);
65596@@ -659,11 +662,12 @@ struct posix_acl *
65597 posix_acl_from_xattr(struct user_namespace *user_ns,
65598 const void *value, size_t size)
65599 {
65600- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
65601- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
65602+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
65603+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
65604 int count;
65605 struct posix_acl *acl;
65606 struct posix_acl_entry *acl_e;
65607+ umode_t umask = gr_acl_umask();
65608
65609 if (!value)
65610 return NULL;
65611@@ -689,12 +693,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65612
65613 switch(acl_e->e_tag) {
65614 case ACL_USER_OBJ:
65615+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65616+ break;
65617 case ACL_GROUP_OBJ:
65618 case ACL_MASK:
65619+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65620+ break;
65621 case ACL_OTHER:
65622+ acl_e->e_perm &= ~(umask & S_IRWXO);
65623 break;
65624
65625 case ACL_USER:
65626+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65627 acl_e->e_uid =
65628 make_kuid(user_ns,
65629 le32_to_cpu(entry->e_id));
65630@@ -702,6 +712,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65631 goto fail;
65632 break;
65633 case ACL_GROUP:
65634+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65635 acl_e->e_gid =
65636 make_kgid(user_ns,
65637 le32_to_cpu(entry->e_id));
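
The posix_acl changes fold the grsecurity role umask into every ACL entry as it is parsed: user entries lose (umask & S_IRWXU) >> 6, group entries (umask & S_IRWXG) >> 3, and other entries umask & S_IRWXO, i.e. each class is masked by its own umask bits shifted down into the low three permission bits. The arithmetic, worked through in C:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	unsigned int umask_bits = 027;	/* deny group w, deny other rwx */
	unsigned int perm = 07;		/* rwx, as stored per ACL entry */

	unsigned int user  = perm & ~((umask_bits & S_IRWXU) >> 6);
	unsigned int group = perm & ~((umask_bits & S_IRWXG) >> 3);
	unsigned int other = perm & ~(umask_bits & S_IRWXO);

	/* 027: user keeps rwx, group drops to r-x, other drops everything */
	printf("user=%o group=%o other=%o\n", user, group, other);
	return 0;
}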
65638diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
65639index 2183fcf..3c32a98 100644
65640--- a/fs/proc/Kconfig
65641+++ b/fs/proc/Kconfig
65642@@ -30,7 +30,7 @@ config PROC_FS
65643
65644 config PROC_KCORE
65645 bool "/proc/kcore support" if !ARM
65646- depends on PROC_FS && MMU
65647+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
65648 help
65649 Provides a virtual ELF core file of the live kernel. This can
65650 be read with gdb and other ELF tools. No modifications can be
65651@@ -38,8 +38,8 @@ config PROC_KCORE
65652
65653 config PROC_VMCORE
65654 bool "/proc/vmcore support"
65655- depends on PROC_FS && CRASH_DUMP
65656- default y
65657+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
65658+ default n
65659 help
65660 Exports the dump image of crashed kernel in ELF format.
65661
65662@@ -63,8 +63,8 @@ config PROC_SYSCTL
65663 limited in memory.
65664
65665 config PROC_PAGE_MONITOR
65666- default y
65667- depends on PROC_FS && MMU
65668+ default n
65669+ depends on PROC_FS && MMU && !GRKERNSEC
65670 bool "Enable /proc page monitoring" if EXPERT
65671 help
65672 Various /proc files exist to monitor process memory utilization:
65673diff --git a/fs/proc/array.c b/fs/proc/array.c
65674index bd117d0..e6872d7 100644
65675--- a/fs/proc/array.c
65676+++ b/fs/proc/array.c
65677@@ -60,6 +60,7 @@
65678 #include <linux/tty.h>
65679 #include <linux/string.h>
65680 #include <linux/mman.h>
65681+#include <linux/grsecurity.h>
65682 #include <linux/proc_fs.h>
65683 #include <linux/ioport.h>
65684 #include <linux/uaccess.h>
65685@@ -344,6 +345,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
65686 seq_putc(m, '\n');
65687 }
65688
65689+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65690+static inline void task_pax(struct seq_file *m, struct task_struct *p)
65691+{
65692+ if (p->mm)
65693+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
65694+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
65695+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
65696+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
65697+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
65698+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
65699+ else
65700+ seq_printf(m, "PaX:\t-----\n");
65701+}
65702+#endif
65703+
65704 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
65705 struct pid *pid, struct task_struct *task)
65706 {
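
task_pax() renders the five per-mm PaX flags as a fixed five-character field, uppercase when the flag is set and lowercase when clear. The encoding in isolation (the bit values below are placeholders, not the kernel's MF_PAX_* constants):

#include <stdio.h>

#define MF_PAX_PAGEEXEC 0x01	/* placeholder bit values */
#define MF_PAX_EMUTRAMP 0x02
#define MF_PAX_MPROTECT 0x04
#define MF_PAX_RANDMMAP 0x08
#define MF_PAX_SEGMEXEC 0x10

static void print_pax(unsigned long flags)
{
	printf("PaX:\t%c%c%c%c%c\n",
	       flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
	       flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
	       flags & MF_PAX_MPROTECT ? 'M' : 'm',
	       flags & MF_PAX_RANDMMAP ? 'R' : 'r',
	       flags & MF_PAX_SEGMEXEC ? 'S' : 's');
}

int main(void)
{
	print_pax(MF_PAX_PAGEEXEC | MF_PAX_MPROTECT | MF_PAX_RANDMMAP);
	return 0;
}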
65707@@ -362,9 +378,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
65708 task_cpus_allowed(m, task);
65709 cpuset_task_status_allowed(m, task);
65710 task_context_switch_counts(m, task);
65711+
65712+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65713+ task_pax(m, task);
65714+#endif
65715+
65716+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
65717+ task_grsec_rbac(m, task);
65718+#endif
65719+
65720 return 0;
65721 }
65722
65723+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65724+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
65725+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
65726+ _mm->pax_flags & MF_PAX_SEGMEXEC))
65727+#endif
65728+
65729 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65730 struct pid *pid, struct task_struct *task, int whole)
65731 {
65732@@ -386,6 +417,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65733 char tcomm[sizeof(task->comm)];
65734 unsigned long flags;
65735
65736+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65737+ if (current->exec_id != m->exec_id) {
65738+ gr_log_badprocpid("stat");
65739+ return 0;
65740+ }
65741+#endif
65742+
65743 state = *get_task_state(task);
65744 vsize = eip = esp = 0;
65745 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
65746@@ -456,6 +494,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65747 gtime = task_gtime(task);
65748 }
65749
65750+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65751+ if (PAX_RAND_FLAGS(mm)) {
65752+ eip = 0;
65753+ esp = 0;
65754+ wchan = 0;
65755+ }
65756+#endif
65757+#ifdef CONFIG_GRKERNSEC_HIDESYM
65758+ wchan = 0;
65759+	eip = 0;
65760+	esp = 0;
65761+#endif
65762+
65763 /* scale priority and nice values from timeslices to -20..20 */
65764 /* to make it look like a "normal" Unix priority/nice value */
65765 priority = task_prio(task);
65766@@ -487,9 +538,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65767 seq_put_decimal_ull(m, ' ', vsize);
65768 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
65769 seq_put_decimal_ull(m, ' ', rsslim);
65770+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65771+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
65772+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
65773+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
65774+#else
65775 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
65776 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
65777 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
65778+#endif
65779 seq_put_decimal_ull(m, ' ', esp);
65780 seq_put_decimal_ull(m, ' ', eip);
65781 /* The signal information here is obsolete.
65782@@ -511,7 +568,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65783 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
65784 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
65785
65786- if (mm && permitted) {
65787+ if (mm && permitted
65788+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65789+ && !PAX_RAND_FLAGS(mm)
65790+#endif
65791+ ) {
65792 seq_put_decimal_ull(m, ' ', mm->start_data);
65793 seq_put_decimal_ull(m, ' ', mm->end_data);
65794 seq_put_decimal_ull(m, ' ', mm->start_brk);
65795@@ -549,8 +610,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
65796 struct pid *pid, struct task_struct *task)
65797 {
65798 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
65799- struct mm_struct *mm = get_task_mm(task);
65800+ struct mm_struct *mm;
65801
65802+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65803+ if (current->exec_id != m->exec_id) {
65804+ gr_log_badprocpid("statm");
65805+ return 0;
65806+ }
65807+#endif
65808+ mm = get_task_mm(task);
65809 if (mm) {
65810 size = task_statm(mm, &shared, &text, &data, &resident);
65811 mmput(mm);
65812@@ -573,6 +641,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
65813 return 0;
65814 }
65815
65816+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
65817+int proc_pid_ipaddr(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
65818+{
65819+ unsigned long flags;
65820+ u32 curr_ip = 0;
65821+
65822+ if (lock_task_sighand(task, &flags)) {
65823+ curr_ip = task->signal->curr_ip;
65824+ unlock_task_sighand(task, &flags);
65825+ }
65826+ return seq_printf(m, "%pI4\n", &curr_ip);
65827+}
65828+#endif
65829+
65830 #ifdef CONFIG_CHECKPOINT_RESTORE
65831 static struct pid *
65832 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
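
The GRKERNSEC_PROC_MEMMAP hunks above all hang off one pattern: an exec counter latched into the seq_file at open() time is compared against the opener's current counter on every read, so a /proc handle held across execve() reads back as empty instead of leaking into the new program image. A minimal sketch of the idea with illustrative struct layouts (in the patch itself the exec_id fields live in task_struct and seq_file):

/* Sketch only: the field placement below is illustrative, not the
 * kernel's. exec_id is assumed to be bumped on every successful
 * execve(). */
struct sketch_task { unsigned long exec_id; };
struct sketch_seq  { unsigned long exec_id; /* latched at open() */ };

static int guarded_show(const struct sketch_task *curr,
                        const struct sketch_seq *m)
{
        if (curr->exec_id != m->exec_id) {
                /* the opener execve()'d since open(); log it (the
                 * patch calls gr_log_badprocpid()) and report EOF
                 * rather than an error so readers terminate cleanly */
                return 0;
        }
        /* ... emit the real contents here ... */
        return 0;
}
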
65833diff --git a/fs/proc/base.c b/fs/proc/base.c
65834index 3f3d7ae..68de109 100644
65835--- a/fs/proc/base.c
65836+++ b/fs/proc/base.c
65837@@ -113,6 +113,14 @@ struct pid_entry {
65838 union proc_op op;
65839 };
65840
65841+struct getdents_callback {
65842+ struct linux_dirent __user * current_dir;
65843+ struct linux_dirent __user * previous;
65844+ struct file * file;
65845+ int count;
65846+ int error;
65847+};
65848+
65849 #define NOD(NAME, MODE, IOP, FOP, OP) { \
65850 .name = (NAME), \
65851 .len = sizeof(NAME) - 1, \
65852@@ -208,12 +216,28 @@ static int proc_pid_cmdline(struct seq_file *m, struct pid_namespace *ns,
65853 return 0;
65854 }
65855
65856+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65857+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
65858+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
65859+ _mm->pax_flags & MF_PAX_SEGMEXEC))
65860+#endif
65861+
65862 static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
65863 struct pid *pid, struct task_struct *task)
65864 {
65865 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
65866 if (mm && !IS_ERR(mm)) {
65867 unsigned int nwords = 0;
65868+
65869+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65870+ /* allow if we're currently ptracing this task */
65871+ if (PAX_RAND_FLAGS(mm) &&
65872+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
65873+ mmput(mm);
65874+ return 0;
65875+ }
65876+#endif
65877+
65878 do {
65879 nwords += 2;
65880 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
65881@@ -225,7 +249,7 @@ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
65882 }
65883
65884
65885-#ifdef CONFIG_KALLSYMS
65886+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65887 /*
65888 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
65889 * Returns the resolved symbol. If that fails, simply return the address.
65890@@ -265,7 +289,7 @@ static void unlock_trace(struct task_struct *task)
65891 mutex_unlock(&task->signal->cred_guard_mutex);
65892 }
65893
65894-#ifdef CONFIG_STACKTRACE
65895+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65896
65897 #define MAX_STACK_TRACE_DEPTH 64
65898
65899@@ -456,7 +480,7 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
65900 return 0;
65901 }
65902
65903-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
65904+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
65905 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
65906 struct pid *pid, struct task_struct *task)
65907 {
65908@@ -486,7 +510,7 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
65909 /************************************************************************/
65910
65911 /* permission checks */
65912-static int proc_fd_access_allowed(struct inode *inode)
65913+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
65914 {
65915 struct task_struct *task;
65916 int allowed = 0;
65917@@ -496,7 +520,10 @@ static int proc_fd_access_allowed(struct inode *inode)
65918 */
65919 task = get_proc_task(inode);
65920 if (task) {
65921- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
65922+ if (log)
65923+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
65924+ else
65925+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
65926 put_task_struct(task);
65927 }
65928 return allowed;
65929@@ -527,10 +554,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
65930 struct task_struct *task,
65931 int hide_pid_min)
65932 {
65933+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65934+ return false;
65935+
65936+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65937+ rcu_read_lock();
65938+ {
65939+ const struct cred *tmpcred = current_cred();
65940+ const struct cred *cred = __task_cred(task);
65941+
65942+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
65943+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65944+ || in_group_p(grsec_proc_gid)
65945+#endif
65946+ ) {
65947+ rcu_read_unlock();
65948+ return true;
65949+ }
65950+ }
65951+ rcu_read_unlock();
65952+
65953+ if (!pid->hide_pid)
65954+ return false;
65955+#endif
65956+
65957 if (pid->hide_pid < hide_pid_min)
65958 return true;
65959 if (in_group_p(pid->pid_gid))
65960 return true;
65961+
65962 return ptrace_may_access(task, PTRACE_MODE_READ);
65963 }
65964
65965@@ -548,7 +600,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
65966 put_task_struct(task);
65967
65968 if (!has_perms) {
65969+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65970+ {
65971+#else
65972 if (pid->hide_pid == 2) {
65973+#endif
65974 /*
65975 * Let's make getdents(), stat(), and open()
65976 * consistent with each other. If a process
65977@@ -609,6 +665,10 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
65978
65979 if (task) {
65980 mm = mm_access(task, mode);
65981+ if (!IS_ERR_OR_NULL(mm) && gr_acl_handle_procpidmem(task)) {
65982+ mmput(mm);
65983+ mm = ERR_PTR(-EPERM);
65984+ }
65985 put_task_struct(task);
65986
65987 if (!IS_ERR_OR_NULL(mm)) {
65988@@ -630,6 +690,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
65989 return PTR_ERR(mm);
65990
65991 file->private_data = mm;
65992+
65993+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65994+ file->f_version = current->exec_id;
65995+#endif
65996+
65997 return 0;
65998 }
65999
66000@@ -651,6 +716,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
66001 ssize_t copied;
66002 char *page;
66003
66004+#ifdef CONFIG_GRKERNSEC
66005+ if (write)
66006+ return -EPERM;
66007+#endif
66008+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66009+ if (file->f_version != current->exec_id) {
66010+ gr_log_badprocpid("mem");
66011+ return 0;
66012+ }
66013+#endif
66014+
66015 if (!mm)
66016 return 0;
66017
66018@@ -663,7 +739,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
66019 goto free;
66020
66021 while (count > 0) {
66022- int this_len = min_t(int, count, PAGE_SIZE);
66023+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
66024
66025 if (write && copy_from_user(page, buf, this_len)) {
66026 copied = -EFAULT;
66027@@ -755,6 +831,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
66028 if (!mm)
66029 return 0;
66030
66031+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66032+ if (file->f_version != current->exec_id) {
66033+ gr_log_badprocpid("environ");
66034+ return 0;
66035+ }
66036+#endif
66037+
66038 page = (char *)__get_free_page(GFP_TEMPORARY);
66039 if (!page)
66040 return -ENOMEM;
66041@@ -764,7 +847,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
66042 goto free;
66043 while (count > 0) {
66044 size_t this_len, max_len;
66045- int retval;
66046+ ssize_t retval;
66047
66048 if (src >= (mm->env_end - mm->env_start))
66049 break;
66050@@ -1378,7 +1461,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
66051 int error = -EACCES;
66052
66053 /* Are we allowed to snoop on the tasks file descriptors? */
66054- if (!proc_fd_access_allowed(inode))
66055+ if (!proc_fd_access_allowed(inode, 0))
66056 goto out;
66057
66058 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
66059@@ -1422,8 +1505,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
66060 struct path path;
66061
66062 /* Are we allowed to snoop on the tasks file descriptors? */
66063- if (!proc_fd_access_allowed(inode))
66064- goto out;
66065+	/* Logging is needed for learning mode to work properly with Chromium,
66066+	   but we don't want to flood the logs from 'ps', which does a readlink
66067+	   on /proc/<pid>/fd/2 for each task in the listing, nor do we want 'ps'
66068+	   to learn CAP_SYS_PTRACE, as it's not necessary for its basic functionality
66069+ */
66070+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
66071+		if (!proc_fd_access_allowed(inode, 0))
66072+ goto out;
66073+ } else {
66074+		if (!proc_fd_access_allowed(inode, 1))
66075+ goto out;
66076+ }
66077
66078 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
66079 if (error)
66080@@ -1473,7 +1566,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
66081 rcu_read_lock();
66082 cred = __task_cred(task);
66083 inode->i_uid = cred->euid;
66084+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66085+ inode->i_gid = grsec_proc_gid;
66086+#else
66087 inode->i_gid = cred->egid;
66088+#endif
66089 rcu_read_unlock();
66090 }
66091 security_task_to_inode(task, inode);
66092@@ -1509,10 +1606,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
66093 return -ENOENT;
66094 }
66095 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
66096+#ifdef CONFIG_GRKERNSEC_PROC_USER
66097+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
66098+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66099+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
66100+#endif
66101 task_dumpable(task)) {
66102 cred = __task_cred(task);
66103 stat->uid = cred->euid;
66104+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66105+ stat->gid = grsec_proc_gid;
66106+#else
66107 stat->gid = cred->egid;
66108+#endif
66109 }
66110 }
66111 rcu_read_unlock();
66112@@ -1550,11 +1656,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
66113
66114 if (task) {
66115 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
66116+#ifdef CONFIG_GRKERNSEC_PROC_USER
66117+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
66118+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66119+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
66120+#endif
66121 task_dumpable(task)) {
66122 rcu_read_lock();
66123 cred = __task_cred(task);
66124 inode->i_uid = cred->euid;
66125+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66126+ inode->i_gid = grsec_proc_gid;
66127+#else
66128 inode->i_gid = cred->egid;
66129+#endif
66130 rcu_read_unlock();
66131 } else {
66132 inode->i_uid = GLOBAL_ROOT_UID;
66133@@ -2085,6 +2200,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
66134 if (!task)
66135 goto out_no_task;
66136
66137+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66138+ goto out;
66139+
66140 /*
66141 * Yes, it does not scale. And it should not. Don't add
66142 * new entries into /proc/<tgid>/ without very good reasons.
66143@@ -2115,6 +2233,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
66144 if (!task)
66145 return -ENOENT;
66146
66147+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66148+ goto out;
66149+
66150 if (!dir_emit_dots(file, ctx))
66151 goto out;
66152
66153@@ -2557,7 +2678,7 @@ static const struct pid_entry tgid_base_stuff[] = {
66154 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
66155 #endif
66156 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
66157-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
66158+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66159 ONE("syscall", S_IRUSR, proc_pid_syscall),
66160 #endif
66161 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
66162@@ -2582,10 +2703,10 @@ static const struct pid_entry tgid_base_stuff[] = {
66163 #ifdef CONFIG_SECURITY
66164 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
66165 #endif
66166-#ifdef CONFIG_KALLSYMS
66167+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66168 ONE("wchan", S_IRUGO, proc_pid_wchan),
66169 #endif
66170-#ifdef CONFIG_STACKTRACE
66171+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66172 ONE("stack", S_IRUSR, proc_pid_stack),
66173 #endif
66174 #ifdef CONFIG_SCHEDSTATS
66175@@ -2619,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
66176 #ifdef CONFIG_HARDWALL
66177 ONE("hardwall", S_IRUGO, proc_pid_hardwall),
66178 #endif
66179+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66180+ ONE("ipaddr", S_IRUSR, proc_pid_ipaddr),
66181+#endif
66182 #ifdef CONFIG_USER_NS
66183 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
66184 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
66185@@ -2751,7 +2875,14 @@ static int proc_pid_instantiate(struct inode *dir,
66186 if (!inode)
66187 goto out;
66188
66189+#ifdef CONFIG_GRKERNSEC_PROC_USER
66190+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
66191+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66192+ inode->i_gid = grsec_proc_gid;
66193+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
66194+#else
66195 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
66196+#endif
66197 inode->i_op = &proc_tgid_base_inode_operations;
66198 inode->i_fop = &proc_tgid_base_operations;
66199 inode->i_flags|=S_IMMUTABLE;
66200@@ -2789,7 +2920,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
66201 if (!task)
66202 goto out;
66203
66204+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66205+ goto out_put_task;
66206+
66207 result = proc_pid_instantiate(dir, dentry, task, NULL);
66208+out_put_task:
66209 put_task_struct(task);
66210 out:
66211 return ERR_PTR(result);
66212@@ -2903,7 +3038,7 @@ static const struct pid_entry tid_base_stuff[] = {
66213 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
66214 #endif
66215 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
66216-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
66217+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66218 ONE("syscall", S_IRUSR, proc_pid_syscall),
66219 #endif
66220 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
66221@@ -2930,10 +3065,10 @@ static const struct pid_entry tid_base_stuff[] = {
66222 #ifdef CONFIG_SECURITY
66223 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
66224 #endif
66225-#ifdef CONFIG_KALLSYMS
66226+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66227 ONE("wchan", S_IRUGO, proc_pid_wchan),
66228 #endif
66229-#ifdef CONFIG_STACKTRACE
66230+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66231 ONE("stack", S_IRUSR, proc_pid_stack),
66232 #endif
66233 #ifdef CONFIG_SCHEDSTATS
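
The proc_fd_access_allowed() change above threads a log flag down to the ptrace check so the same access decision can be made either loudly (denials feed the RBAC learning logs) or quietly via PTRACE_MODE_NOAUDIT. A condensed sketch of the caller-side policy from proc_pid_readlink(), with the surrounding plumbing trimmed:

/* Sketch: audit everything except the one name 'ps' hammers.
 * proc_fd_access_allowed() is the patched helper above; a zero
 * second argument selects PTRACE_MODE_NOAUDIT. */
static int fd_link_allowed(struct dentry *dentry, struct inode *inode)
{
        /* "fd/2" is readlink'd by 'ps' for every listed task; check
         * it without audit so ps neither floods the logs nor teaches
         * learning mode that it wants CAP_SYS_PTRACE */
        if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0')
                return proc_fd_access_allowed(inode, 0);
        return proc_fd_access_allowed(inode, 1);
}
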
66234diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
66235index cbd82df..c0407d2 100644
66236--- a/fs/proc/cmdline.c
66237+++ b/fs/proc/cmdline.c
66238@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
66239
66240 static int __init proc_cmdline_init(void)
66241 {
66242+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66243+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
66244+#else
66245 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
66246+#endif
66247 return 0;
66248 }
66249 fs_initcall(proc_cmdline_init);
66250diff --git a/fs/proc/devices.c b/fs/proc/devices.c
66251index 50493ed..248166b 100644
66252--- a/fs/proc/devices.c
66253+++ b/fs/proc/devices.c
66254@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
66255
66256 static int __init proc_devices_init(void)
66257 {
66258+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66259+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
66260+#else
66261 proc_create("devices", 0, NULL, &proc_devinfo_operations);
66262+#endif
66263 return 0;
66264 }
66265 fs_initcall(proc_devices_init);
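
proc_create_grsec(), substituted here and again in interrupts.c below, is defined outside this section. A plausible shape, assuming it behaves like proc_create() with ownership tightened to match the GRKERNSEC_PROC_USER/USERGROUP split used throughout the patch (hypothetical body, not the patch's actual definition):

static inline struct proc_dir_entry *
proc_create_grsec_sketch(const char *name, umode_t mode,
                         struct proc_dir_entry *parent,
                         const struct file_operations *fops)
{
        struct proc_dir_entry *pde;

#ifdef CONFIG_GRKERNSEC_PROC_USER
        pde = proc_create(name, S_IRUSR, parent, fops);           /* root only */
#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
        pde = proc_create(name, S_IRUSR | S_IRGRP, parent, fops); /* root + grsec group */
        if (pde)
                proc_set_user(pde, GLOBAL_ROOT_UID, grsec_proc_gid);
#else
        pde = proc_create(name, mode, parent, fops);
#endif
        return pde;
}
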
66266diff --git a/fs/proc/fd.c b/fs/proc/fd.c
66267index 8e5ad83..1f07a8c 100644
66268--- a/fs/proc/fd.c
66269+++ b/fs/proc/fd.c
66270@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
66271 if (!task)
66272 return -ENOENT;
66273
66274- files = get_files_struct(task);
66275+ if (!gr_acl_handle_procpidmem(task))
66276+ files = get_files_struct(task);
66277 put_task_struct(task);
66278
66279 if (files) {
66280@@ -284,11 +285,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
66281 */
66282 int proc_fd_permission(struct inode *inode, int mask)
66283 {
66284+ struct task_struct *task;
66285 int rv = generic_permission(inode, mask);
66286- if (rv == 0)
66287- return 0;
66288+
66289 if (task_tgid(current) == proc_pid(inode))
66290 rv = 0;
66291+
66292+ task = get_proc_task(inode);
66293+ if (task == NULL)
66294+ return rv;
66295+
66296+ if (gr_acl_handle_procpidmem(task))
66297+ rv = -EACCES;
66298+
66299+ put_task_struct(task);
66300+
66301 return rv;
66302 }
66303
66304diff --git a/fs/proc/generic.c b/fs/proc/generic.c
66305index b502bba..849e216 100644
66306--- a/fs/proc/generic.c
66307+++ b/fs/proc/generic.c
66308@@ -22,6 +22,7 @@
66309 #include <linux/bitops.h>
66310 #include <linux/spinlock.h>
66311 #include <linux/completion.h>
66312+#include <linux/grsecurity.h>
66313 #include <asm/uaccess.h>
66314
66315 #include "internal.h"
66316@@ -253,6 +254,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
66317 return proc_lookup_de(PDE(dir), dir, dentry);
66318 }
66319
66320+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
66321+ unsigned int flags)
66322+{
66323+ if (gr_proc_is_restricted())
66324+ return ERR_PTR(-EACCES);
66325+
66326+ return proc_lookup_de(PDE(dir), dir, dentry);
66327+}
66328+
66329 /*
66330 * This returns non-zero if at EOF, so that the /proc
66331 * root directory can use this and check if it should
66332@@ -310,6 +320,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
66333 return proc_readdir_de(PDE(inode), file, ctx);
66334 }
66335
66336+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
66337+{
66338+ struct inode *inode = file_inode(file);
66339+
66340+ if (gr_proc_is_restricted())
66341+ return -EACCES;
66342+
66343+ return proc_readdir_de(PDE(inode), file, ctx);
66344+}
66345+
66346 /*
66347 * These are the generic /proc directory operations. They
66348 * use the in-memory "struct proc_dir_entry" tree to parse
66349@@ -321,6 +341,12 @@ static const struct file_operations proc_dir_operations = {
66350 .iterate = proc_readdir,
66351 };
66352
66353+static const struct file_operations proc_dir_restricted_operations = {
66354+ .llseek = generic_file_llseek,
66355+ .read = generic_read_dir,
66356+ .iterate = proc_readdir_restrict,
66357+};
66358+
66359 /*
66360 * proc directories can do almost nothing..
66361 */
66362@@ -330,6 +356,12 @@ static const struct inode_operations proc_dir_inode_operations = {
66363 .setattr = proc_notify_change,
66364 };
66365
66366+static const struct inode_operations proc_dir_restricted_inode_operations = {
66367+ .lookup = proc_lookup_restrict,
66368+ .getattr = proc_getattr,
66369+ .setattr = proc_notify_change,
66370+};
66371+
66372 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
66373 {
66374 int ret;
66375@@ -339,8 +371,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
66376 return ret;
66377
66378 if (S_ISDIR(dp->mode)) {
66379- dp->proc_fops = &proc_dir_operations;
66380- dp->proc_iops = &proc_dir_inode_operations;
66381+ if (dp->restricted) {
66382+ dp->proc_fops = &proc_dir_restricted_operations;
66383+ dp->proc_iops = &proc_dir_restricted_inode_operations;
66384+ } else {
66385+ dp->proc_fops = &proc_dir_operations;
66386+ dp->proc_iops = &proc_dir_inode_operations;
66387+ }
66388 dir->nlink++;
66389 } else if (S_ISLNK(dp->mode)) {
66390 dp->proc_iops = &proc_link_inode_operations;
66391@@ -453,6 +490,27 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
66392 }
66393 EXPORT_SYMBOL_GPL(proc_mkdir_data);
66394
66395+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
66396+ struct proc_dir_entry *parent, void *data)
66397+{
66398+ struct proc_dir_entry *ent;
66399+
66400+ if (mode == 0)
66401+ mode = S_IRUGO | S_IXUGO;
66402+
66403+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
66404+ if (ent) {
66405+ ent->data = data;
66406+ ent->restricted = 1;
66407+ if (proc_register(parent, ent) < 0) {
66408+ kfree(ent);
66409+ ent = NULL;
66410+ }
66411+ }
66412+ return ent;
66413+}
66414+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
66415+
66416 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
66417 struct proc_dir_entry *parent)
66418 {
66419@@ -467,6 +525,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
66420 }
66421 EXPORT_SYMBOL(proc_mkdir);
66422
66423+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
66424+ struct proc_dir_entry *parent)
66425+{
66426+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
66427+}
66428+EXPORT_SYMBOL(proc_mkdir_restrict);
66429+
66430 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
66431 struct proc_dir_entry *parent,
66432 const struct file_operations *proc_fops,
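
The restricted-directory plumbing above (the restricted flag, the *_restrict lookup/readdir pair, and proc_mkdir_restrict()) is consumed by /proc/net creation paths elsewhere in the patch. A usage sketch, with an illustrative directory name:

/* Usage sketch: entries under this directory answer lookup() and
 * iterate() with -EACCES whenever gr_proc_is_restricted() says the
 * caller may not browse /proc/net. */
static struct proc_dir_entry *example_dir;

static int __init example_proc_init(void)
{
        example_dir = proc_mkdir_restrict("example_stats", NULL);
        return example_dir ? 0 : -ENOMEM;
}
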
66433diff --git a/fs/proc/inode.c b/fs/proc/inode.c
66434index 3b0f838..a0e0f63e 100644
66435--- a/fs/proc/inode.c
66436+++ b/fs/proc/inode.c
66437@@ -24,11 +24,17 @@
66438 #include <linux/mount.h>
66439 #include <linux/magic.h>
66440 #include <linux/namei.h>
66441+#include <linux/grsecurity.h>
66442
66443 #include <asm/uaccess.h>
66444
66445 #include "internal.h"
66446
66447+#ifdef CONFIG_PROC_SYSCTL
66448+extern const struct inode_operations proc_sys_inode_operations;
66449+extern const struct inode_operations proc_sys_dir_operations;
66450+#endif
66451+
66452 static void proc_evict_inode(struct inode *inode)
66453 {
66454 struct proc_dir_entry *de;
66455@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
66456 RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
66457 sysctl_head_put(head);
66458 }
66459+
66460+#ifdef CONFIG_PROC_SYSCTL
66461+ if (inode->i_op == &proc_sys_inode_operations ||
66462+ inode->i_op == &proc_sys_dir_operations)
66463+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
66464+#endif
66465+
66466 }
66467
66468 static struct kmem_cache * proc_inode_cachep;
66469@@ -426,7 +439,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
66470 if (de->mode) {
66471 inode->i_mode = de->mode;
66472 inode->i_uid = de->uid;
66473+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66474+ inode->i_gid = grsec_proc_gid;
66475+#else
66476 inode->i_gid = de->gid;
66477+#endif
66478 }
66479 if (de->size)
66480 inode->i_size = de->size;
66481diff --git a/fs/proc/internal.h b/fs/proc/internal.h
66482index c835b94..c9e01a3 100644
66483--- a/fs/proc/internal.h
66484+++ b/fs/proc/internal.h
66485@@ -47,9 +47,10 @@ struct proc_dir_entry {
66486 struct completion *pde_unload_completion;
66487 struct list_head pde_openers; /* who did ->open, but not ->release */
66488 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
66489+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
66490 u8 namelen;
66491 char name[];
66492-};
66493+} __randomize_layout;
66494
66495 union proc_op {
66496 int (*proc_get_link)(struct dentry *, struct path *);
66497@@ -67,7 +68,7 @@ struct proc_inode {
66498 struct ctl_table *sysctl_entry;
66499 const struct proc_ns_operations *ns_ops;
66500 struct inode vfs_inode;
66501-};
66502+} __randomize_layout;
66503
66504 /*
66505 * General functions
66506@@ -155,6 +156,10 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
66507 struct pid *, struct task_struct *);
66508 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
66509 struct pid *, struct task_struct *);
66510+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66511+extern int proc_pid_ipaddr(struct seq_file *, struct pid_namespace *,
66512+ struct pid *, struct task_struct *);
66513+#endif
66514
66515 /*
66516 * base.c
66517@@ -179,9 +184,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
66518 * generic.c
66519 */
66520 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
66521+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
66522 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
66523 struct dentry *);
66524 extern int proc_readdir(struct file *, struct dir_context *);
66525+extern int proc_readdir_restrict(struct file *, struct dir_context *);
66526 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
66527
66528 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
66529diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
66530index a352d57..cb94a5c 100644
66531--- a/fs/proc/interrupts.c
66532+++ b/fs/proc/interrupts.c
66533@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
66534
66535 static int __init proc_interrupts_init(void)
66536 {
66537+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66538+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
66539+#else
66540 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
66541+#endif
66542 return 0;
66543 }
66544 fs_initcall(proc_interrupts_init);
66545diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
66546index 91a4e64..cb007c0 100644
66547--- a/fs/proc/kcore.c
66548+++ b/fs/proc/kcore.c
66549@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66550 * the addresses in the elf_phdr on our list.
66551 */
66552 start = kc_offset_to_vaddr(*fpos - elf_buflen);
66553- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
66554+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
66555+ if (tsz > buflen)
66556 tsz = buflen;
66557-
66558+
66559 while (buflen) {
66560 struct kcore_list *m;
66561
66562@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66563 kfree(elf_buf);
66564 } else {
66565 if (kern_addr_valid(start)) {
66566- unsigned long n;
66567+ char *elf_buf;
66568+ mm_segment_t oldfs;
66569
66570- n = copy_to_user(buffer, (char *)start, tsz);
66571- /*
66572- * We cannot distinguish between fault on source
66573- * and fault on destination. When this happens
66574- * we clear too and hope it will trigger the
66575- * EFAULT again.
66576- */
66577- if (n) {
66578- if (clear_user(buffer + tsz - n,
66579- n))
66580+ elf_buf = kmalloc(tsz, GFP_KERNEL);
66581+ if (!elf_buf)
66582+ return -ENOMEM;
66583+ oldfs = get_fs();
66584+ set_fs(KERNEL_DS);
66585+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
66586+ set_fs(oldfs);
66587+ if (copy_to_user(buffer, elf_buf, tsz)) {
66588+ kfree(elf_buf);
66589 return -EFAULT;
66590+ }
66591 }
66592+ set_fs(oldfs);
66593+ kfree(elf_buf);
66594 } else {
66595 if (clear_user(buffer, tsz))
66596 return -EFAULT;
66597@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66598
66599 static int open_kcore(struct inode *inode, struct file *filp)
66600 {
66601+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
66602+ return -EPERM;
66603+#endif
66604 if (!capable(CAP_SYS_RAWIO))
66605 return -EPERM;
66606 if (kcore_need_update)
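
The read_kcore() rewrite above replaces a direct copy_to_user() from a raw kernel address with a two-step copy through a kmalloc'd bounce buffer, gaining exactly what the deleted comment admits the old code could not do: telling a fault on the kernel source apart from a fault on the user destination. The pattern in isolation, with the error policy simplified relative to the hunk:

static long copy_kernel_range_to_user(char __user *dst,
                                      unsigned long src, size_t len)
{
        mm_segment_t oldfs;
        char *bounce;
        long ret = 0;

        bounce = kmalloc(len, GFP_KERNEL);
        if (!bounce)
                return -ENOMEM;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        /* a fault here means the *kernel* range was bad */
        if (__copy_from_user(bounce, (const void __user *)src, len))
                ret = -EFAULT;
        set_fs(oldfs);

        /* a fault here means the *user* buffer was bad */
        if (!ret && copy_to_user(dst, bounce, len))
                ret = -EFAULT;

        kfree(bounce);
        return ret;
}
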
66607diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
66608index d3ebf2e..6ad42d1 100644
66609--- a/fs/proc/meminfo.c
66610+++ b/fs/proc/meminfo.c
66611@@ -194,7 +194,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
66612 vmi.used >> 10,
66613 vmi.largest_chunk >> 10
66614 #ifdef CONFIG_MEMORY_FAILURE
66615- , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66616+ , atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66617 #endif
66618 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
66619 , K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
66620diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
66621index d4a3574..b421ce9 100644
66622--- a/fs/proc/nommu.c
66623+++ b/fs/proc/nommu.c
66624@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
66625
66626 if (file) {
66627 seq_pad(m, ' ');
66628- seq_path(m, &file->f_path, "");
66629+ seq_path(m, &file->f_path, "\n\\");
66630 }
66631
66632 seq_putc(m, '\n');
66633diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
66634index 1bde894..22ac7eb 100644
66635--- a/fs/proc/proc_net.c
66636+++ b/fs/proc/proc_net.c
66637@@ -23,9 +23,27 @@
66638 #include <linux/nsproxy.h>
66639 #include <net/net_namespace.h>
66640 #include <linux/seq_file.h>
66641+#include <linux/grsecurity.h>
66642
66643 #include "internal.h"
66644
66645+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
66646+static struct seq_operations *ipv6_seq_ops_addr;
66647+
66648+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
66649+{
66650+ ipv6_seq_ops_addr = addr;
66651+}
66652+
66653+void unregister_ipv6_seq_ops_addr(void)
66654+{
66655+ ipv6_seq_ops_addr = NULL;
66656+}
66657+
66658+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
66659+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
66660+#endif
66661+
66662 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
66663 {
66664 return pde->parent->data;
66665@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
66666 return maybe_get_net(PDE_NET(PDE(inode)));
66667 }
66668
66669+extern const struct seq_operations dev_seq_ops;
66670+
66671 int seq_open_net(struct inode *ino, struct file *f,
66672 const struct seq_operations *ops, int size)
66673 {
66674@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
66675
66676 BUG_ON(size < sizeof(*p));
66677
66678+ /* only permit access to /proc/net/dev */
66679+ if (
66680+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
66681+ ops != ipv6_seq_ops_addr &&
66682+#endif
66683+ ops != &dev_seq_ops && gr_proc_is_restricted())
66684+ return -EACCES;
66685+
66686 net = get_proc_net(ino);
66687 if (net == NULL)
66688 return -ENXIO;
66689@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
66690 int err;
66691 struct net *net;
66692
66693+ if (gr_proc_is_restricted())
66694+ return -EACCES;
66695+
66696 err = -ENXIO;
66697 net = get_proc_net(inode);
66698 if (net == NULL)
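
seq_open_net() above compares the caller's seq_operations pointer against a tiny allow-list: dev_seq_ops plus whatever the ipv6 code registered. From the module side the registration would look roughly like this (the if6_seq_ops name and its constness are assumed from net/ipv6/addrconf.c, which is outside this section):

static int __init ipv6_proc_hook_init(void)
{
        /* keep /proc/net/if_inet6 readable under GRKERNSEC_PROC; the
         * cast is only needed if the ops table is declared const */
        register_ipv6_seq_ops_addr((struct seq_operations *)&if6_seq_ops);
        return 0;
}

static void __exit ipv6_proc_hook_exit(void)
{
        unregister_ipv6_seq_ops_addr();
}
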
66699diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
66700index f92d5dd..26398ac 100644
66701--- a/fs/proc/proc_sysctl.c
66702+++ b/fs/proc/proc_sysctl.c
66703@@ -11,13 +11,21 @@
66704 #include <linux/namei.h>
66705 #include <linux/mm.h>
66706 #include <linux/module.h>
66707+#include <linux/nsproxy.h>
66708+#ifdef CONFIG_GRKERNSEC
66709+#include <net/net_namespace.h>
66710+#endif
66711 #include "internal.h"
66712
66713+extern int gr_handle_chroot_sysctl(const int op);
66714+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66715+ const int op);
66716+
66717 static const struct dentry_operations proc_sys_dentry_operations;
66718 static const struct file_operations proc_sys_file_operations;
66719-static const struct inode_operations proc_sys_inode_operations;
66720+const struct inode_operations proc_sys_inode_operations;
66721 static const struct file_operations proc_sys_dir_file_operations;
66722-static const struct inode_operations proc_sys_dir_operations;
66723+const struct inode_operations proc_sys_dir_operations;
66724
66725 void proc_sys_poll_notify(struct ctl_table_poll *poll)
66726 {
66727@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
66728
66729 err = NULL;
66730 d_set_d_op(dentry, &proc_sys_dentry_operations);
66731+
66732+ gr_handle_proc_create(dentry, inode);
66733+
66734 d_add(dentry, inode);
66735
66736 out:
66737@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66738 struct inode *inode = file_inode(filp);
66739 struct ctl_table_header *head = grab_header(inode);
66740 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
66741+ int op = write ? MAY_WRITE : MAY_READ;
66742 ssize_t error;
66743 size_t res;
66744
66745@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66746 * and won't be until we finish.
66747 */
66748 error = -EPERM;
66749- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
66750+ if (sysctl_perm(head, table, op))
66751 goto out;
66752
66753 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
66754@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66755 if (!table->proc_handler)
66756 goto out;
66757
66758+#ifdef CONFIG_GRKERNSEC
66759+ error = -EPERM;
66760+ if (gr_handle_chroot_sysctl(op))
66761+ goto out;
66762+ dget(filp->f_path.dentry);
66763+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
66764+ dput(filp->f_path.dentry);
66765+ goto out;
66766+ }
66767+ dput(filp->f_path.dentry);
66768+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
66769+ goto out;
66770+ if (write) {
66771+ if (current->nsproxy->net_ns != table->extra2) {
66772+ if (!capable(CAP_SYS_ADMIN))
66773+ goto out;
66774+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
66775+ goto out;
66776+ }
66777+#endif
66778+
66779 /* careful: calling conventions are nasty here */
66780 res = count;
66781 error = table->proc_handler(table, write, buf, &res, ppos);
66782@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
66783 return false;
66784 } else {
66785 d_set_d_op(child, &proc_sys_dentry_operations);
66786+
66787+ gr_handle_proc_create(child, inode);
66788+
66789 d_add(child, inode);
66790 }
66791 } else {
66792@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, struct ctl_table *table,
66793 if ((*pos)++ < ctx->pos)
66794 return true;
66795
66796+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
66797+ return 0;
66798+
66799 if (unlikely(S_ISLNK(table->mode)))
66800 res = proc_sys_link_fill_cache(file, ctx, head, table);
66801 else
66802@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
66803 if (IS_ERR(head))
66804 return PTR_ERR(head);
66805
66806+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
66807+ return -ENOENT;
66808+
66809 generic_fillattr(inode, stat);
66810 if (table)
66811 stat->mode = (stat->mode & S_IFMT) | table->mode;
66812@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
66813 .llseek = generic_file_llseek,
66814 };
66815
66816-static const struct inode_operations proc_sys_inode_operations = {
66817+const struct inode_operations proc_sys_inode_operations = {
66818 .permission = proc_sys_permission,
66819 .setattr = proc_sys_setattr,
66820 .getattr = proc_sys_getattr,
66821 };
66822
66823-static const struct inode_operations proc_sys_dir_operations = {
66824+const struct inode_operations proc_sys_dir_operations = {
66825 .lookup = proc_sys_lookup,
66826 .permission = proc_sys_permission,
66827 .setattr = proc_sys_setattr,
66828@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
66829 static struct ctl_dir *new_dir(struct ctl_table_set *set,
66830 const char *name, int namelen)
66831 {
66832- struct ctl_table *table;
66833+ ctl_table_no_const *table;
66834 struct ctl_dir *new;
66835 struct ctl_node *node;
66836 char *new_name;
66837@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
66838 return NULL;
66839
66840 node = (struct ctl_node *)(new + 1);
66841- table = (struct ctl_table *)(node + 1);
66842+ table = (ctl_table_no_const *)(node + 1);
66843 new_name = (char *)(table + 2);
66844 memcpy(new_name, name, namelen);
66845 new_name[namelen] = '\0';
66846@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
66847 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
66848 struct ctl_table_root *link_root)
66849 {
66850- struct ctl_table *link_table, *entry, *link;
66851+ ctl_table_no_const *link_table, *link;
66852+ struct ctl_table *entry;
66853 struct ctl_table_header *links;
66854 struct ctl_node *node;
66855 char *link_name;
66856@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
66857 return NULL;
66858
66859 node = (struct ctl_node *)(links + 1);
66860- link_table = (struct ctl_table *)(node + nr_entries);
66861+ link_table = (ctl_table_no_const *)(node + nr_entries);
66862 link_name = (char *)&link_table[nr_entries + 1];
66863
66864 for (link = link_table, entry = table; entry->procname; link++, entry++) {
66865@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66866 struct ctl_table_header ***subheader, struct ctl_table_set *set,
66867 struct ctl_table *table)
66868 {
66869- struct ctl_table *ctl_table_arg = NULL;
66870- struct ctl_table *entry, *files;
66871+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
66872+ struct ctl_table *entry;
66873 int nr_files = 0;
66874 int nr_dirs = 0;
66875 int err = -ENOMEM;
66876@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66877 nr_files++;
66878 }
66879
66880- files = table;
66881 /* If there are mixed files and directories we need a new table */
66882 if (nr_dirs && nr_files) {
66883- struct ctl_table *new;
66884+ ctl_table_no_const *new;
66885 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
66886 GFP_KERNEL);
66887 if (!files)
66888@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66889 /* Register everything except a directory full of subdirectories */
66890 if (nr_files || !nr_dirs) {
66891 struct ctl_table_header *header;
66892- header = __register_sysctl_table(set, path, files);
66893+ header = __register_sysctl_table(set, path, files ? files : table);
66894 if (!header) {
66895 kfree(ctl_table_arg);
66896 goto out;
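
The CONFIG_GRKERNSEC block added to proc_sys_call_handler() layers four checks on top of sysctl_perm(): a chroot policy, a per-name modification policy, an RBAC open check, and a namespace-aware capability rule for writes. That last rule in isolation (table->extra2 is assumed to carry the owning net namespace, as the hunk's comparison implies):

static int sysctl_write_allowed(struct ctl_table *table)
{
        /* writing another namespace's sysctl demands global
         * CAP_SYS_ADMIN; writing your own only needs CAP_NET_ADMIN
         * inside that namespace's user-ns */
        if (current->nsproxy->net_ns != table->extra2)
                return capable(CAP_SYS_ADMIN);
        return ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN);
}
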
66897diff --git a/fs/proc/root.c b/fs/proc/root.c
66898index e74ac9f..35e89f4 100644
66899--- a/fs/proc/root.c
66900+++ b/fs/proc/root.c
66901@@ -188,7 +188,15 @@ void __init proc_root_init(void)
66902 proc_mkdir("openprom", NULL);
66903 #endif
66904 proc_tty_init();
66905+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66906+#ifdef CONFIG_GRKERNSEC_PROC_USER
66907+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
66908+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66909+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
66910+#endif
66911+#else
66912 proc_mkdir("bus", NULL);
66913+#endif
66914 proc_sys_init();
66915 }
66916
66917diff --git a/fs/proc/stat.c b/fs/proc/stat.c
66918index 510413eb..34d9a8c 100644
66919--- a/fs/proc/stat.c
66920+++ b/fs/proc/stat.c
66921@@ -11,6 +11,7 @@
66922 #include <linux/irqnr.h>
66923 #include <linux/cputime.h>
66924 #include <linux/tick.h>
66925+#include <linux/grsecurity.h>
66926
66927 #ifndef arch_irq_stat_cpu
66928 #define arch_irq_stat_cpu(cpu) 0
66929@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
66930 u64 sum_softirq = 0;
66931 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
66932 struct timespec boottime;
66933+ int unrestricted = 1;
66934+
66935+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66936+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66937+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
66938+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66939+ && !in_group_p(grsec_proc_gid)
66940+#endif
66941+ )
66942+ unrestricted = 0;
66943+#endif
66944+#endif
66945
66946 user = nice = system = idle = iowait =
66947 irq = softirq = steal = 0;
66948@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
66949 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
66950 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
66951 idle += get_idle_time(i);
66952- iowait += get_iowait_time(i);
66953- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66954- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66955- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66956- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66957- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66958- sum += kstat_cpu_irqs_sum(i);
66959- sum += arch_irq_stat_cpu(i);
66960+ if (unrestricted) {
66961+ iowait += get_iowait_time(i);
66962+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66963+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66964+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66965+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66966+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66967+ sum += kstat_cpu_irqs_sum(i);
66968+ sum += arch_irq_stat_cpu(i);
66969+ for (j = 0; j < NR_SOFTIRQS; j++) {
66970+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
66971
66972- for (j = 0; j < NR_SOFTIRQS; j++) {
66973- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
66974-
66975- per_softirq_sums[j] += softirq_stat;
66976- sum_softirq += softirq_stat;
66977+ per_softirq_sums[j] += softirq_stat;
66978+ sum_softirq += softirq_stat;
66979+ }
66980 }
66981 }
66982- sum += arch_irq_stat();
66983+ if (unrestricted)
66984+ sum += arch_irq_stat();
66985
66986 seq_puts(p, "cpu ");
66987 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
66988@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
66989 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
66990 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
66991 idle = get_idle_time(i);
66992- iowait = get_iowait_time(i);
66993- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66994- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66995- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66996- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66997- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66998+ if (unrestricted) {
66999+ iowait = get_iowait_time(i);
67000+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67001+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67002+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67003+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67004+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67005+ }
67006 seq_printf(p, "cpu%d", i);
67007 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
67008 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
67009@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
67010
67011 /* sum again ? it could be updated? */
67012 for_each_irq_nr(j)
67013- seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
67014+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs_usr(j) : 0ULL);
67015
67016 seq_printf(p,
67017 "\nctxt %llu\n"
67018@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
67019 "processes %lu\n"
67020 "procs_running %lu\n"
67021 "procs_blocked %lu\n",
67022- nr_context_switches(),
67023+ unrestricted ? nr_context_switches() : 0ULL,
67024 (unsigned long)jif,
67025- total_forks,
67026- nr_running(),
67027- nr_iowait());
67028+ unrestricted ? total_forks : 0UL,
67029+ unrestricted ? nr_running() : 0UL,
67030+ unrestricted ? nr_iowait() : 0UL);
67031
67032 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
67033
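
show_stat() above computes one 'unrestricted' flag per read and then either aggregates the sensitive counters or prints zeros in their place, so the column layout of /proc/stat stays stable for parsers while unprivileged readers learn nothing from it. The flag computation, extracted as a helper for clarity:

static int stat_viewer_unrestricted(void)
{
#if defined(CONFIG_GRKERNSEC_PROC_ADD) && \
    (defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP))
        if (uid_eq(current_uid(), GLOBAL_ROOT_UID))
                return 1;
#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
        if (in_group_p(grsec_proc_gid))
                return 1;
#endif
        return 0;
#else
        return 1;       /* no /proc additions restricted */
#endif
}
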
67034diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
67035index f86e549..3a88fcd 100644
67036--- a/fs/proc/task_mmu.c
67037+++ b/fs/proc/task_mmu.c
67038@@ -13,12 +13,19 @@
67039 #include <linux/swap.h>
67040 #include <linux/swapops.h>
67041 #include <linux/mmu_notifier.h>
67042+#include <linux/grsecurity.h>
67043
67044 #include <asm/elf.h>
67045 #include <asm/uaccess.h>
67046 #include <asm/tlbflush.h>
67047 #include "internal.h"
67048
67049+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67050+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
67051+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
67052+ _mm->pax_flags & MF_PAX_SEGMEXEC))
67053+#endif
67054+
67055 void task_mem(struct seq_file *m, struct mm_struct *mm)
67056 {
67057 unsigned long data, text, lib, swap;
67058@@ -54,8 +61,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67059 "VmExe:\t%8lu kB\n"
67060 "VmLib:\t%8lu kB\n"
67061 "VmPTE:\t%8lu kB\n"
67062- "VmSwap:\t%8lu kB\n",
67063- hiwater_vm << (PAGE_SHIFT-10),
67064+ "VmSwap:\t%8lu kB\n"
67065+
67066+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67067+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
67068+#endif
67069+
67070+ ,hiwater_vm << (PAGE_SHIFT-10),
67071 total_vm << (PAGE_SHIFT-10),
67072 mm->locked_vm << (PAGE_SHIFT-10),
67073 mm->pinned_vm << (PAGE_SHIFT-10),
67074@@ -65,7 +77,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67075 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
67076 (PTRS_PER_PTE * sizeof(pte_t) *
67077 atomic_long_read(&mm->nr_ptes)) >> 10,
67078- swap << (PAGE_SHIFT-10));
67079+ swap << (PAGE_SHIFT-10)
67080+
67081+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67082+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67083+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
67084+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
67085+#else
67086+ , mm->context.user_cs_base
67087+ , mm->context.user_cs_limit
67088+#endif
67089+#endif
67090+
67091+ );
67092 }
67093
67094 unsigned long task_vsize(struct mm_struct *mm)
67095@@ -282,13 +306,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67096 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
67097 }
67098
67099- /* We don't show the stack guard page in /proc/maps */
67100+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67101+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
67102+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
67103+#else
67104 start = vma->vm_start;
67105- if (stack_guard_page_start(vma, start))
67106- start += PAGE_SIZE;
67107 end = vma->vm_end;
67108- if (stack_guard_page_end(vma, end))
67109- end -= PAGE_SIZE;
67110+#endif
67111
67112 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
67113 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
67114@@ -298,7 +322,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67115 flags & VM_WRITE ? 'w' : '-',
67116 flags & VM_EXEC ? 'x' : '-',
67117 flags & VM_MAYSHARE ? 's' : 'p',
67118+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67119+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
67120+#else
67121 pgoff,
67122+#endif
67123 MAJOR(dev), MINOR(dev), ino);
67124
67125 /*
67126@@ -307,7 +335,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67127 */
67128 if (file) {
67129 seq_pad(m, ' ');
67130- seq_path(m, &file->f_path, "\n");
67131+ seq_path(m, &file->f_path, "\n\\");
67132 goto done;
67133 }
67134
67135@@ -338,8 +366,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67136 * Thread stack in /proc/PID/task/TID/maps or
67137 * the main process stack.
67138 */
67139- if (!is_pid || (vma->vm_start <= mm->start_stack &&
67140- vma->vm_end >= mm->start_stack)) {
67141+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
67142+ (vma->vm_start <= mm->start_stack &&
67143+ vma->vm_end >= mm->start_stack)) {
67144 name = "[stack]";
67145 } else {
67146 /* Thread stack in /proc/PID/maps */
67147@@ -359,6 +388,12 @@ done:
67148
67149 static int show_map(struct seq_file *m, void *v, int is_pid)
67150 {
67151+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67152+ if (current->exec_id != m->exec_id) {
67153+ gr_log_badprocpid("maps");
67154+ return 0;
67155+ }
67156+#endif
67157 show_map_vma(m, v, is_pid);
67158 m_cache_vma(m, v);
67159 return 0;
67160@@ -629,12 +664,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
67161 .private = &mss,
67162 };
67163
67164+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67165+ if (current->exec_id != m->exec_id) {
67166+ gr_log_badprocpid("smaps");
67167+ return 0;
67168+ }
67169+#endif
67170 memset(&mss, 0, sizeof mss);
67171- mss.vma = vma;
67172- /* mmap_sem is held in m_start */
67173- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
67174- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
67175-
67176+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67177+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
67178+#endif
67179+ mss.vma = vma;
67180+ /* mmap_sem is held in m_start */
67181+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
67182+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
67183+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67184+ }
67185+#endif
67186 show_map_vma(m, vma, is_pid);
67187
67188 seq_printf(m,
67189@@ -652,7 +698,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
67190 "KernelPageSize: %8lu kB\n"
67191 "MMUPageSize: %8lu kB\n"
67192 "Locked: %8lu kB\n",
67193+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67194+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
67195+#else
67196 (vma->vm_end - vma->vm_start) >> 10,
67197+#endif
67198 mss.resident >> 10,
67199 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
67200 mss.shared_clean >> 10,
67201@@ -1489,6 +1539,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
67202 char buffer[64];
67203 int nid;
67204
67205+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67206+ if (current->exec_id != m->exec_id) {
67207+ gr_log_badprocpid("numa_maps");
67208+ return 0;
67209+ }
67210+#endif
67211+
67212 if (!mm)
67213 return 0;
67214
67215@@ -1510,11 +1567,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
67216 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
67217 }
67218
67219+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67220+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
67221+#else
67222 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
67223+#endif
67224
67225 if (file) {
67226 seq_puts(m, " file=");
67227- seq_path(m, &file->f_path, "\n\t= ");
67228+ seq_path(m, &file->f_path, "\n\t\\= ");
67229 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
67230 seq_puts(m, " heap");
67231 } else {
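
Several hunks in this file (and in nommu.c above) widen seq_path()'s escape set from "\n" to "\n\\". Both characters then render as octal escapes, so a mapping whose file name contains a literal newline cannot inject a forged extra record into /proc/<pid>/maps, and a literal backslash cannot masquerade as the escape itself:

/* A file named "evil\n08048000-08049000 r-xp ... /bin/sh" now prints
 * as a single line containing "\012" rather than two parseable map
 * records; a '\\' in a name prints as "\134". */
seq_path(m, &file->f_path, "\n\\");
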
67232diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
67233index 599ec2e..f1413ae 100644
67234--- a/fs/proc/task_nommu.c
67235+++ b/fs/proc/task_nommu.c
67236@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67237 else
67238 bytes += kobjsize(mm);
67239
67240- if (current->fs && current->fs->users > 1)
67241+ if (current->fs && atomic_read(&current->fs->users) > 1)
67242 sbytes += kobjsize(current->fs);
67243 else
67244 bytes += kobjsize(current->fs);
67245@@ -180,7 +180,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
67246
67247 if (file) {
67248 seq_pad(m, ' ');
67249- seq_path(m, &file->f_path, "");
67250+ seq_path(m, &file->f_path, "\n\\");
67251 } else if (mm) {
67252 pid_t tid = pid_of_stack(priv, vma, is_pid);
67253
67254diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
67255index a90d6d35..d08047c 100644
67256--- a/fs/proc/vmcore.c
67257+++ b/fs/proc/vmcore.c
67258@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
67259 nr_bytes = count;
67260
67261 /* If pfn is not ram, return zeros for sparse dump files */
67262- if (pfn_is_ram(pfn) == 0)
67263- memset(buf, 0, nr_bytes);
67264- else {
67265+ if (pfn_is_ram(pfn) == 0) {
67266+ if (userbuf) {
67267+ if (clear_user((char __force_user *)buf, nr_bytes))
67268+ return -EFAULT;
67269+ } else
67270+ memset(buf, 0, nr_bytes);
67271+ } else {
67272 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
67273 offset, userbuf);
67274 if (tmp < 0)
67275@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
67276 static int copy_to(void *target, void *src, size_t size, int userbuf)
67277 {
67278 if (userbuf) {
67279- if (copy_to_user((char __user *) target, src, size))
67280+ if (copy_to_user((char __force_user *) target, src, size))
67281 return -EFAULT;
67282 } else {
67283 memcpy(target, src, size);
67284@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
67285 if (*fpos < m->offset + m->size) {
67286 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
67287 start = m->paddr + *fpos - m->offset;
67288- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
67289+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
67290 if (tmp < 0)
67291 return tmp;
67292 buflen -= tsz;
67293@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
67294 static ssize_t read_vmcore(struct file *file, char __user *buffer,
67295 size_t buflen, loff_t *fpos)
67296 {
67297- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
67298+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
67299 }
67300
67301 /*
67302diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
67303index d3fb2b6..43a8140 100644
67304--- a/fs/qnx6/qnx6.h
67305+++ b/fs/qnx6/qnx6.h
67306@@ -74,7 +74,7 @@ enum {
67307 BYTESEX_BE,
67308 };
67309
67310-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67311+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67312 {
67313 if (sbi->s_bytesex == BYTESEX_LE)
67314 return le64_to_cpu((__force __le64)n);
67315@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
67316 return (__force __fs64)cpu_to_be64(n);
67317 }
67318
67319-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67320+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67321 {
67322 if (sbi->s_bytesex == BYTESEX_LE)
67323 return le32_to_cpu((__force __le32)n);
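
__intentional_overflow(-1), added to the qnx6 byte-order helpers, is an annotation consumed by the size_overflow GCC plugin: it marks functions whose arithmetic may wrap by design so the plugin does not instrument them. A plausible no-plugin fallback so annotated code still compiles (an assumption; the patch's real definition lives in compiler.h, outside this section):

#ifndef __intentional_overflow
#define __intentional_overflow(...)     /* expands to nothing without the plugin */
#endif

static inline __u32 __intentional_overflow(-1) wrapping_add(__u32 a, __u32 b)
{
        return a + b;   /* may wrap; the plugin is told not to check */
}
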
67324diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
67325index bb2869f..d34ada8 100644
67326--- a/fs/quota/netlink.c
67327+++ b/fs/quota/netlink.c
67328@@ -44,7 +44,7 @@ static struct genl_family quota_genl_family = {
67329 void quota_send_warning(struct kqid qid, dev_t dev,
67330 const char warntype)
67331 {
67332- static atomic_t seq;
67333+ static atomic_unchecked_t seq;
67334 struct sk_buff *skb;
67335 void *msg_head;
67336 int ret;
67337@@ -60,7 +60,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
67338 "VFS: Not enough memory to send quota warning.\n");
67339 return;
67340 }
67341- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
67342+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
67343 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
67344 if (!msg_head) {
67345 printk(KERN_ERR
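
The quota netlink sequence counter is switched to atomic_unchecked_t, the escape hatch for the PaX REFCOUNT hardening elsewhere in this patch: atomic_t overflows are trapped as refcount bugs, so counters that are allowed to wrap, like a netlink sequence number, must use the _unchecked type and operation variants. The convention side by side:

static atomic_t objects = ATOMIC_INIT(1);        /* refcount: overflow traps */
static atomic_unchecked_t seq = ATOMIC_INIT(0);  /* sequence: wrapping is fine */

static inline int next_msg_seq(void)
{
        return atomic_add_return_unchecked(1, &seq);
}
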
67346diff --git a/fs/read_write.c b/fs/read_write.c
67347index c0805c93..d39f2eb 100644
67348--- a/fs/read_write.c
67349+++ b/fs/read_write.c
67350@@ -507,7 +507,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
67351
67352 old_fs = get_fs();
67353 set_fs(get_ds());
67354- p = (__force const char __user *)buf;
67355+ p = (const char __force_user *)buf;
67356 if (count > MAX_RW_COUNT)
67357 count = MAX_RW_COUNT;
67358 if (file->f_op->write)
67359diff --git a/fs/readdir.c b/fs/readdir.c
67360index ced6791..936687b 100644
67361--- a/fs/readdir.c
67362+++ b/fs/readdir.c
67363@@ -18,6 +18,7 @@
67364 #include <linux/security.h>
67365 #include <linux/syscalls.h>
67366 #include <linux/unistd.h>
67367+#include <linux/namei.h>
67368
67369 #include <asm/uaccess.h>
67370
67371@@ -71,6 +72,7 @@ struct old_linux_dirent {
67372 struct readdir_callback {
67373 struct dir_context ctx;
67374 struct old_linux_dirent __user * dirent;
67375+ struct file * file;
67376 int result;
67377 };
67378
67379@@ -89,6 +91,10 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
67380 buf->result = -EOVERFLOW;
67381 return -EOVERFLOW;
67382 }
67383+
67384+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67385+ return 0;
67386+
67387 buf->result++;
67388 dirent = buf->dirent;
67389 if (!access_ok(VERIFY_WRITE, dirent,
67390@@ -120,6 +126,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
67391 if (!f.file)
67392 return -EBADF;
67393
67394+ buf.file = f.file;
67395 error = iterate_dir(f.file, &buf.ctx);
67396 if (buf.result)
67397 error = buf.result;
67398@@ -145,6 +152,7 @@ struct getdents_callback {
67399 struct dir_context ctx;
67400 struct linux_dirent __user * current_dir;
67401 struct linux_dirent __user * previous;
67402+ struct file * file;
67403 int count;
67404 int error;
67405 };
67406@@ -167,6 +175,10 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
67407 buf->error = -EOVERFLOW;
67408 return -EOVERFLOW;
67409 }
67410+
67411+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67412+ return 0;
67413+
67414 dirent = buf->previous;
67415 if (dirent) {
67416 if (__put_user(offset, &dirent->d_off))
67417@@ -212,6 +224,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
67418 if (!f.file)
67419 return -EBADF;
67420
67421+ buf.file = f.file;
67422 error = iterate_dir(f.file, &buf.ctx);
67423 if (error >= 0)
67424 error = buf.error;
67425@@ -230,6 +243,7 @@ struct getdents_callback64 {
67426 struct dir_context ctx;
67427 struct linux_dirent64 __user * current_dir;
67428 struct linux_dirent64 __user * previous;
67429+ struct file *file;
67430 int count;
67431 int error;
67432 };
67433@@ -246,6 +260,10 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
67434 buf->error = -EINVAL; /* only used if we fail.. */
67435 if (reclen > buf->count)
67436 return -EINVAL;
67437+
67438+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67439+ return 0;
67440+
67441 dirent = buf->previous;
67442 if (dirent) {
67443 if (__put_user(offset, &dirent->d_off))
67444@@ -293,6 +311,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
67445 if (!f.file)
67446 return -EBADF;
67447
67448+ buf.file = f.file;
67449 error = iterate_dir(f.file, &buf.ctx);
67450 if (error >= 0)
67451 error = buf.error;
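
All three getdents variants above apply the same technique: a struct file pointer is stashed alongside the embedded dir_context so the fill callback can consult gr_acl_handle_filldir() and silently skip entries the RBAC policy hides. A condensed sketch of the pattern (callback names hypothetical, hooks from the patch):

    struct filtered_callback {
            struct dir_context ctx;  /* must stay first for container_of() */
            struct file *file;       /* added so the callback can see the policy */
    };

    static int filtered_fill(struct dir_context *ctx, const char *name,
                             int namlen, loff_t offset, u64 ino,
                             unsigned int d_type)
    {
            struct filtered_callback *buf =
                    container_of(ctx, struct filtered_callback, ctx);

            /* Returning 0 skips this entry without aborting the walk. */
            if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
                    return 0;

            /* ... emit the entry exactly as the stock filldir would ... */
            return 0;
    }
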
67452diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
67453index 9c02d96..6562c10 100644
67454--- a/fs/reiserfs/do_balan.c
67455+++ b/fs/reiserfs/do_balan.c
67456@@ -1887,7 +1887,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
67457 return;
67458 }
67459
67460- atomic_inc(&fs_generation(tb->tb_sb));
67461+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
67462 do_balance_starts(tb);
67463
67464 /*
67465diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
67466index aca73dd..e3c558d 100644
67467--- a/fs/reiserfs/item_ops.c
67468+++ b/fs/reiserfs/item_ops.c
67469@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
67470 }
67471
67472 static struct item_operations errcatch_ops = {
67473- errcatch_bytes_number,
67474- errcatch_decrement_key,
67475- errcatch_is_left_mergeable,
67476- errcatch_print_item,
67477- errcatch_check_item,
67478+ .bytes_number = errcatch_bytes_number,
67479+ .decrement_key = errcatch_decrement_key,
67480+ .is_left_mergeable = errcatch_is_left_mergeable,
67481+ .print_item = errcatch_print_item,
67482+ .check_item = errcatch_check_item,
67483
67484- errcatch_create_vi,
67485- errcatch_check_left,
67486- errcatch_check_right,
67487- errcatch_part_size,
67488- errcatch_unit_num,
67489- errcatch_print_vi
67490+ .create_vi = errcatch_create_vi,
67491+ .check_left = errcatch_check_left,
67492+ .check_right = errcatch_check_right,
67493+ .part_size = errcatch_part_size,
67494+ .unit_num = errcatch_unit_num,
67495+ .print_vi = errcatch_print_vi
67496 };
67497
67498 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
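
The item_ops hunk is purely mechanical: positional initializers become C99 designated initializers so the structure-layout plugins (constify/randstruct) can reorder struct item_operations without the table silently binding callbacks to the wrong slots. The difference in miniature, with a hypothetical ops struct:

    static int my_open(void)  { return 0; }
    static int my_close(void) { return 0; }

    struct ops {
            int (*open)(void);
            int (*close)(void);
    };

    /* Positional: depends on field order, which layout randomization breaks. */
    static struct ops fragile = { my_open, my_close };

    /* Designated: bound by field name, immune to reordering. */
    static struct ops robust  = { .open = my_open, .close = my_close };
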
67499diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
67500index 621b9f3..af527fd 100644
67501--- a/fs/reiserfs/procfs.c
67502+++ b/fs/reiserfs/procfs.c
67503@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
67504 "SMALL_TAILS " : "NO_TAILS ",
67505 replay_only(sb) ? "REPLAY_ONLY " : "",
67506 convert_reiserfs(sb) ? "CONV " : "",
67507- atomic_read(&r->s_generation_counter),
67508+ atomic_read_unchecked(&r->s_generation_counter),
67509 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
67510 SF(s_do_balance), SF(s_unneeded_left_neighbor),
67511 SF(s_good_search_by_key_reada), SF(s_bmaps),
67512diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
67513index bb79cdd..fcf49ef 100644
67514--- a/fs/reiserfs/reiserfs.h
67515+++ b/fs/reiserfs/reiserfs.h
67516@@ -580,7 +580,7 @@ struct reiserfs_sb_info {
67517 /* Comment? -Hans */
67518 wait_queue_head_t s_wait;
67519 /* increased by one every time the tree gets re-balanced */
67520- atomic_t s_generation_counter;
67521+ atomic_unchecked_t s_generation_counter;
67522
67523 /* File system properties. Currently holds on-disk FS format */
67524 unsigned long s_properties;
67525@@ -2301,7 +2301,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
67526 #define REISERFS_USER_MEM 1 /* user memory mode */
67527
67528 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
67529-#define get_generation(s) atomic_read (&fs_generation(s))
67530+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
67531 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
67532 #define __fs_changed(gen,s) (gen != get_generation (s))
67533 #define fs_changed(gen,s) \
67534diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
67535index 71fbbe3..eff29ba 100644
67536--- a/fs/reiserfs/super.c
67537+++ b/fs/reiserfs/super.c
67538@@ -1868,6 +1868,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
67539 sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
67540 sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
67541 sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
67542+#ifdef CONFIG_REISERFS_FS_XATTR
67543+ /* turn on user xattrs by default */
67544+ sbi->s_mount_opt |= (1 << REISERFS_XATTRS_USER);
67545+#endif
67546 /* no preallocation minimum, be smart in reiserfs_file_write instead */
67547 sbi->s_alloc_options.preallocmin = 0;
67548 /* Preallocate by 16 blocks (17-1) at once */
67549diff --git a/fs/select.c b/fs/select.c
67550index 467bb1c..cf9d65a 100644
67551--- a/fs/select.c
67552+++ b/fs/select.c
67553@@ -20,6 +20,7 @@
67554 #include <linux/export.h>
67555 #include <linux/slab.h>
67556 #include <linux/poll.h>
67557+#include <linux/security.h>
67558 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
67559 #include <linux/file.h>
67560 #include <linux/fdtable.h>
67561@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
67562 struct poll_list *walk = head;
67563 unsigned long todo = nfds;
67564
67565+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
67566 if (nfds > rlimit(RLIMIT_NOFILE))
67567 return -EINVAL;
67568
67569diff --git a/fs/seq_file.c b/fs/seq_file.c
67570index dbf3a59..daf023f 100644
67571--- a/fs/seq_file.c
67572+++ b/fs/seq_file.c
67573@@ -12,6 +12,8 @@
67574 #include <linux/slab.h>
67575 #include <linux/cred.h>
67576 #include <linux/mm.h>
67577+#include <linux/sched.h>
67578+#include <linux/grsecurity.h>
67579
67580 #include <asm/uaccess.h>
67581 #include <asm/page.h>
67582@@ -23,16 +25,7 @@ static void seq_set_overflow(struct seq_file *m)
67583
67584 static void *seq_buf_alloc(unsigned long size)
67585 {
67586- void *buf;
67587-
67588- /*
67589- * __GFP_NORETRY to avoid oom-killings with high-order allocations -
67590- * it's better to fall back to vmalloc() than to kill things.
67591- */
67592- buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
67593- if (!buf && size > PAGE_SIZE)
67594- buf = vmalloc(size);
67595- return buf;
67596+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
67597 }
67598
67599 /**
67600@@ -65,6 +58,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
67601 #ifdef CONFIG_USER_NS
67602 p->user_ns = file->f_cred->user_ns;
67603 #endif
67604+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67605+ p->exec_id = current->exec_id;
67606+#endif
67607
67608 /*
67609 * Wrappers around seq_open(e.g. swaps_open) need to be
67610@@ -87,6 +83,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
67611 }
67612 EXPORT_SYMBOL(seq_open);
67613
67614+
67615+int seq_open_restrict(struct file *file, const struct seq_operations *op)
67616+{
67617+ if (gr_proc_is_restricted())
67618+ return -EACCES;
67619+
67620+ return seq_open(file, op);
67621+}
67622+EXPORT_SYMBOL(seq_open_restrict);
67623+
67624 static int traverse(struct seq_file *m, loff_t offset)
67625 {
67626 loff_t pos = 0, index;
67627@@ -158,7 +164,7 @@ Eoverflow:
67628 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
67629 {
67630 struct seq_file *m = file->private_data;
67631- size_t copied = 0;
67632+ ssize_t copied = 0;
67633 loff_t pos;
67634 size_t n;
67635 void *p;
67636@@ -589,7 +595,7 @@ static void single_stop(struct seq_file *p, void *v)
67637 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
67638 void *data)
67639 {
67640- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
67641+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
67642 int res = -ENOMEM;
67643
67644 if (op) {
67645@@ -625,6 +631,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
67646 }
67647 EXPORT_SYMBOL(single_open_size);
67648
67649+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
67650+ void *data)
67651+{
67652+ if (gr_proc_is_restricted())
67653+ return -EACCES;
67654+
67655+ return single_open(file, show, data);
67656+}
67657+EXPORT_SYMBOL(single_open_restrict);
67658+
67659+
67660 int single_release(struct inode *inode, struct file *file)
67661 {
67662 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
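
The new seq_open_restrict()/single_open_restrict() helpers let a /proc file opt into the GRKERNSEC_PROC restrictions with a one-line change in its open handler. A hypothetical user, assuming an existing example_seq_ops table:

    static int example_proc_open(struct inode *inode, struct file *file)
    {
            /* Fails with -EACCES for users covered by the proc restrictions;
             * otherwise identical to seq_open(). */
            return seq_open_restrict(file, &example_seq_ops);
    }

    static const struct file_operations example_proc_fops = {
            .open    = example_proc_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = seq_release,
    };
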
67663diff --git a/fs/splice.c b/fs/splice.c
67664index 75c6058..770d40c 100644
67665--- a/fs/splice.c
67666+++ b/fs/splice.c
67667@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67668 pipe_lock(pipe);
67669
67670 for (;;) {
67671- if (!pipe->readers) {
67672+ if (!atomic_read(&pipe->readers)) {
67673 send_sig(SIGPIPE, current, 0);
67674 if (!ret)
67675 ret = -EPIPE;
67676@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67677 page_nr++;
67678 ret += buf->len;
67679
67680- if (pipe->files)
67681+ if (atomic_read(&pipe->files))
67682 do_wakeup = 1;
67683
67684 if (!--spd->nr_pages)
67685@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67686 do_wakeup = 0;
67687 }
67688
67689- pipe->waiting_writers++;
67690+ atomic_inc(&pipe->waiting_writers);
67691 pipe_wait(pipe);
67692- pipe->waiting_writers--;
67693+ atomic_dec(&pipe->waiting_writers);
67694 }
67695
67696 pipe_unlock(pipe);
67697@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
67698 old_fs = get_fs();
67699 set_fs(get_ds());
67700 /* The cast to a user pointer is valid due to the set_fs() */
67701- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
67702+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
67703 set_fs(old_fs);
67704
67705 return res;
67706@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
67707 old_fs = get_fs();
67708 set_fs(get_ds());
67709 /* The cast to a user pointer is valid due to the set_fs() */
67710- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
67711+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
67712 set_fs(old_fs);
67713
67714 return res;
67715@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
67716 goto err;
67717
67718 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
67719- vec[i].iov_base = (void __user *) page_address(page);
67720+ vec[i].iov_base = (void __force_user *) page_address(page);
67721 vec[i].iov_len = this_len;
67722 spd.pages[i] = page;
67723 spd.nr_pages++;
67724@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
67725 ops->release(pipe, buf);
67726 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
67727 pipe->nrbufs--;
67728- if (pipe->files)
67729+ if (atomic_read(&pipe->files))
67730 sd->need_wakeup = true;
67731 }
67732
67733@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
67734 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
67735 {
67736 while (!pipe->nrbufs) {
67737- if (!pipe->writers)
67738+ if (!atomic_read(&pipe->writers))
67739 return 0;
67740
67741- if (!pipe->waiting_writers && sd->num_spliced)
67742+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
67743 return 0;
67744
67745 if (sd->flags & SPLICE_F_NONBLOCK)
67746@@ -1040,7 +1040,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
67747 ops->release(pipe, buf);
67748 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
67749 pipe->nrbufs--;
67750- if (pipe->files)
67751+ if (atomic_read(&pipe->files))
67752 sd.need_wakeup = true;
67753 } else {
67754 buf->offset += ret;
67755@@ -1200,7 +1200,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
67756 * out of the pipe right after the splice_to_pipe(). So set
67757 * PIPE_READERS appropriately.
67758 */
67759- pipe->readers = 1;
67760+ atomic_set(&pipe->readers, 1);
67761
67762 current->splice_pipe = pipe;
67763 }
67764@@ -1497,6 +1497,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
67765
67766 partial[buffers].offset = off;
67767 partial[buffers].len = plen;
67768+ partial[buffers].private = 0;
67769
67770 off = 0;
67771 len -= plen;
67772@@ -1733,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67773 ret = -ERESTARTSYS;
67774 break;
67775 }
67776- if (!pipe->writers)
67777+ if (!atomic_read(&pipe->writers))
67778 break;
67779- if (!pipe->waiting_writers) {
67780+ if (!atomic_read(&pipe->waiting_writers)) {
67781 if (flags & SPLICE_F_NONBLOCK) {
67782 ret = -EAGAIN;
67783 break;
67784@@ -1767,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67785 pipe_lock(pipe);
67786
67787 while (pipe->nrbufs >= pipe->buffers) {
67788- if (!pipe->readers) {
67789+ if (!atomic_read(&pipe->readers)) {
67790 send_sig(SIGPIPE, current, 0);
67791 ret = -EPIPE;
67792 break;
67793@@ -1780,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67794 ret = -ERESTARTSYS;
67795 break;
67796 }
67797- pipe->waiting_writers++;
67798+ atomic_inc(&pipe->waiting_writers);
67799 pipe_wait(pipe);
67800- pipe->waiting_writers--;
67801+ atomic_dec(&pipe->waiting_writers);
67802 }
67803
67804 pipe_unlock(pipe);
67805@@ -1818,14 +1819,14 @@ retry:
67806 pipe_double_lock(ipipe, opipe);
67807
67808 do {
67809- if (!opipe->readers) {
67810+ if (!atomic_read(&opipe->readers)) {
67811 send_sig(SIGPIPE, current, 0);
67812 if (!ret)
67813 ret = -EPIPE;
67814 break;
67815 }
67816
67817- if (!ipipe->nrbufs && !ipipe->writers)
67818+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
67819 break;
67820
67821 /*
67822@@ -1922,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
67823 pipe_double_lock(ipipe, opipe);
67824
67825 do {
67826- if (!opipe->readers) {
67827+ if (!atomic_read(&opipe->readers)) {
67828 send_sig(SIGPIPE, current, 0);
67829 if (!ret)
67830 ret = -EPIPE;
67831@@ -1967,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
67832 * return EAGAIN if we have the potential of some data in the
67833 * future, otherwise just return 0
67834 */
67835- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
67836+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
67837 ret = -EAGAIN;
67838
67839 pipe_unlock(ipipe);
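
The readers/writers/waiting_writers/files counters of struct pipe_inode_info are reference counts, so the patch converts them from plain ints to atomic_t (the struct change lives in include/linux/pipe_fs_i.h, outside this excerpt) and rewrites every access in fs/splice.c through the atomic API. The resulting idiom, sketched as a hypothetical helper modeled on opipe_prep():

    /* Sketch: the converted wait loop; the counters are now atomic_t and
     * therefore covered by the REFCOUNT overflow protection. */
    static void wait_for_room(struct pipe_inode_info *pipe)
    {
            while (pipe->nrbufs >= pipe->buffers) {
                    if (!atomic_read(&pipe->readers))
                            return;        /* no readers left: writing would SIGPIPE */
                    atomic_inc(&pipe->waiting_writers);
                    pipe_wait(pipe);       /* sleep until a reader frees a slot */
                    atomic_dec(&pipe->waiting_writers);
            }
    }
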
67840diff --git a/fs/stat.c b/fs/stat.c
67841index ae0c3ce..9ee641c 100644
67842--- a/fs/stat.c
67843+++ b/fs/stat.c
67844@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
67845 stat->gid = inode->i_gid;
67846 stat->rdev = inode->i_rdev;
67847 stat->size = i_size_read(inode);
67848- stat->atime = inode->i_atime;
67849- stat->mtime = inode->i_mtime;
67850+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
67851+ stat->atime = inode->i_ctime;
67852+ stat->mtime = inode->i_ctime;
67853+ } else {
67854+ stat->atime = inode->i_atime;
67855+ stat->mtime = inode->i_mtime;
67856+ }
67857 stat->ctime = inode->i_ctime;
67858 stat->blksize = (1 << inode->i_blkbits);
67859 stat->blocks = inode->i_blocks;
67860@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
67861 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
67862 {
67863 struct inode *inode = path->dentry->d_inode;
67864+ int retval;
67865
67866- if (inode->i_op->getattr)
67867- return inode->i_op->getattr(path->mnt, path->dentry, stat);
67868+ if (inode->i_op->getattr) {
67869+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
67870+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
67871+ stat->atime = stat->ctime;
67872+ stat->mtime = stat->ctime;
67873+ }
67874+ return retval;
67875+ }
67876
67877 generic_fillattr(inode, stat);
67878 return 0;
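
Both the generic_fillattr() and vfs_getattr_nosec() paths apply the same masking rule: for an inode flagged as a timing side channel, an unprivileged stat() sees atime and mtime pinned to ctime, so watching a device such as /dev/ptmx no longer reveals when it was last used. Factored into a hypothetical helper (is_sidechannel_device() and capable_nolog() are the patch's own hooks):

    static void mask_sidechannel_times(struct inode *inode, struct kstat *stat)
    {
            if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
                    stat->atime = stat->ctime;  /* hide last-read time */
                    stat->mtime = stat->ctime;  /* hide last-write time */
            }
    }
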
67879diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
67880index 0b45ff4..edf9d3a 100644
67881--- a/fs/sysfs/dir.c
67882+++ b/fs/sysfs/dir.c
67883@@ -33,6 +33,10 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
67884 kfree(buf);
67885 }
67886
67887+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67888+extern int grsec_enable_sysfs_restrict;
67889+#endif
67890+
67891 /**
67892 * sysfs_create_dir_ns - create a directory for an object with a namespace tag
67893 * @kobj: object we're creating directory for
67894@@ -41,9 +45,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
67895 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
67896 {
67897 struct kernfs_node *parent, *kn;
67898+ const char *name;
67899+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
67900+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67901+ const char *parent_name;
67902+#endif
67903
67904 BUG_ON(!kobj);
67905
67906+ name = kobject_name(kobj);
67907+
67908 if (kobj->parent)
67909 parent = kobj->parent->sd;
67910 else
67911@@ -52,11 +63,24 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
67912 if (!parent)
67913 return -ENOENT;
67914
67915- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
67916- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
67917+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67918+ parent_name = parent->name;
67919+ mode = S_IRWXU;
67920+
67921+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
67922+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
67923+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
67924+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
67925+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
67926+ if (!grsec_enable_sysfs_restrict)
67927+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
67928+#endif
67929+
67930+ kn = kernfs_create_dir_ns(parent, name,
67931+ mode, kobj, ns);
67932 if (IS_ERR(kn)) {
67933 if (PTR_ERR(kn) == -EEXIST)
67934- sysfs_warn_dup(parent, kobject_name(kobj));
67935+ sysfs_warn_dup(parent, name);
67936 return PTR_ERR(kn);
67937 }
67938
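
The sysfs logic reduces to: default every new directory under /sys to 0700, then re-open the handful of paths userspace broadly depends on (and everything, when grsec_enable_sysfs_restrict is off). The whitelist above, factored into a hypothetical predicate:

    /* Sketch: non-root traversal is kept only for /sys/devices,
     * /sys/devices/system, /sys/devices/system/cpu and
     * /sys/fs/{selinux,fuse,ecryptfs}. */
    static bool sysfs_dir_is_whitelisted(const char *parent, const char *name)
    {
            if (!strcmp(parent, ""))
                    return !strcmp(name, "devices") || !strcmp(name, "fs");
            if (!strcmp(parent, "devices"))
                    return !strcmp(name, "system");
            if (!strcmp(parent, "fs"))
                    return !strcmp(name, "selinux") || !strcmp(name, "fuse") ||
                           !strcmp(name, "ecryptfs");
            if (!strcmp(parent, "system"))
                    return !strcmp(name, "cpu");
            return false;
    }
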
67939diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
67940index 69d4889..a810bd4 100644
67941--- a/fs/sysv/sysv.h
67942+++ b/fs/sysv/sysv.h
67943@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
67944 #endif
67945 }
67946
67947-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
67948+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
67949 {
67950 if (sbi->s_bytesex == BYTESEX_PDP)
67951 return PDP_swab((__force __u32)n);
67952diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
67953index fb08b0c..65fcc7e 100644
67954--- a/fs/ubifs/io.c
67955+++ b/fs/ubifs/io.c
67956@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
67957 return err;
67958 }
67959
67960-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
67961+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
67962 {
67963 int err;
67964
67965diff --git a/fs/udf/misc.c b/fs/udf/misc.c
67966index c175b4d..8f36a16 100644
67967--- a/fs/udf/misc.c
67968+++ b/fs/udf/misc.c
67969@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
67970
67971 u8 udf_tag_checksum(const struct tag *t)
67972 {
67973- u8 *data = (u8 *)t;
67974+ const u8 *data = (const u8 *)t;
67975 u8 checksum = 0;
67976 int i;
67977 for (i = 0; i < sizeof(struct tag); ++i)
67978diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
67979index 8d974c4..b82f6ec 100644
67980--- a/fs/ufs/swab.h
67981+++ b/fs/ufs/swab.h
67982@@ -22,7 +22,7 @@ enum {
67983 BYTESEX_BE
67984 };
67985
67986-static inline u64
67987+static inline u64 __intentional_overflow(-1)
67988 fs64_to_cpu(struct super_block *sbp, __fs64 n)
67989 {
67990 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
67991@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
67992 return (__force __fs64)cpu_to_be64(n);
67993 }
67994
67995-static inline u32
67996+static inline u32 __intentional_overflow(-1)
67997 fs32_to_cpu(struct super_block *sbp, __fs32 n)
67998 {
67999 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
68000diff --git a/fs/utimes.c b/fs/utimes.c
68001index aa138d6..5f3a811 100644
68002--- a/fs/utimes.c
68003+++ b/fs/utimes.c
68004@@ -1,6 +1,7 @@
68005 #include <linux/compiler.h>
68006 #include <linux/file.h>
68007 #include <linux/fs.h>
68008+#include <linux/security.h>
68009 #include <linux/linkage.h>
68010 #include <linux/mount.h>
68011 #include <linux/namei.h>
68012@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
68013 }
68014 }
68015 retry_deleg:
68016+
68017+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
68018+ error = -EACCES;
68019+ goto mnt_drop_write_and_out;
68020+ }
68021+
68022 mutex_lock(&inode->i_mutex);
68023 error = notify_change(path->dentry, &newattrs, &delegated_inode);
68024 mutex_unlock(&inode->i_mutex);
68025diff --git a/fs/xattr.c b/fs/xattr.c
68026index 4ef6985..a6cd6567 100644
68027--- a/fs/xattr.c
68028+++ b/fs/xattr.c
68029@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
68030 return rc;
68031 }
68032
68033+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
68034+ssize_t
68035+pax_getxattr(struct dentry *dentry, void *value, size_t size)
68036+{
68037+ struct inode *inode = dentry->d_inode;
68038+ ssize_t error;
68039+
68040+ error = inode_permission(inode, MAY_EXEC);
68041+ if (error)
68042+ return error;
68043+
68044+ if (inode->i_op->getxattr)
68045+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
68046+ else
68047+ error = -EOPNOTSUPP;
68048+
68049+ return error;
68050+}
68051+EXPORT_SYMBOL(pax_getxattr);
68052+#endif
68053+
68054 ssize_t
68055 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
68056 {
68057@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
68058 * Extended attribute SET operations
68059 */
68060 static long
68061-setxattr(struct dentry *d, const char __user *name, const void __user *value,
68062+setxattr(struct path *path, const char __user *name, const void __user *value,
68063 size_t size, int flags)
68064 {
68065 int error;
68066@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
68067 posix_acl_fix_xattr_from_user(kvalue, size);
68068 }
68069
68070- error = vfs_setxattr(d, kname, kvalue, size, flags);
68071+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
68072+ error = -EACCES;
68073+ goto out;
68074+ }
68075+
68076+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
68077 out:
68078 if (vvalue)
68079 vfree(vvalue);
68080@@ -376,7 +402,7 @@ retry:
68081 return error;
68082 error = mnt_want_write(path.mnt);
68083 if (!error) {
68084- error = setxattr(path.dentry, name, value, size, flags);
68085+ error = setxattr(&path, name, value, size, flags);
68086 mnt_drop_write(path.mnt);
68087 }
68088 path_put(&path);
68089@@ -412,7 +438,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
68090 audit_file(f.file);
68091 error = mnt_want_write_file(f.file);
68092 if (!error) {
68093- error = setxattr(f.file->f_path.dentry, name, value, size, flags);
68094+ error = setxattr(&f.file->f_path, name, value, size, flags);
68095 mnt_drop_write_file(f.file);
68096 }
68097 fdput(f);
68098@@ -598,7 +624,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
68099 * Extended attribute REMOVE operations
68100 */
68101 static long
68102-removexattr(struct dentry *d, const char __user *name)
68103+removexattr(struct path *path, const char __user *name)
68104 {
68105 int error;
68106 char kname[XATTR_NAME_MAX + 1];
68107@@ -609,7 +635,10 @@ removexattr(struct dentry *d, const char __user *name)
68108 if (error < 0)
68109 return error;
68110
68111- return vfs_removexattr(d, kname);
68112+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
68113+ return -EACCES;
68114+
68115+ return vfs_removexattr(path->dentry, kname);
68116 }
68117
68118 static int path_removexattr(const char __user *pathname,
68119@@ -623,7 +652,7 @@ retry:
68120 return error;
68121 error = mnt_want_write(path.mnt);
68122 if (!error) {
68123- error = removexattr(path.dentry, name);
68124+ error = removexattr(&path, name);
68125 mnt_drop_write(path.mnt);
68126 }
68127 path_put(&path);
68128@@ -649,14 +678,16 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
68129 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
68130 {
68131 struct fd f = fdget(fd);
68132+ struct path *path;
68133 int error = -EBADF;
68134
68135 if (!f.file)
68136 return error;
68137+ path = &f.file->f_path;
68138 audit_file(f.file);
68139 error = mnt_want_write_file(f.file);
68140 if (!error) {
68141- error = removexattr(f.file->f_path.dentry, name);
68142+ error = removexattr(path, name);
68143 mnt_drop_write_file(f.file);
68144 }
68145 fdput(f);
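
setxattr() and removexattr() are widened from a bare dentry to a struct path because the gr_acl_handle_*xattr() hooks match RBAC objects by pathname, which requires the vfsmount as well as the dentry; the callers then simply pass &path or &f.file->f_path. The essence of the change, condensed (wrapper name hypothetical):

    /* Sketch: a path-based wrapper threads the mount into the RBAC hook. */
    static long removexattr_checked(struct path *path, const char *kname)
    {
            if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
                    return -EACCES;    /* denied by RBAC policy */

            return vfs_removexattr(path->dentry, kname);
    }
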
68146diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
68147index 4e20fe7..6d1a55a 100644
68148--- a/fs/xfs/libxfs/xfs_bmap.c
68149+++ b/fs/xfs/libxfs/xfs_bmap.c
68150@@ -580,7 +580,7 @@ xfs_bmap_validate_ret(
68151
68152 #else
68153 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
68154-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
68155+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
68156 #endif /* DEBUG */
68157
68158 /*
68159diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
68160index 098cd78..724d3f8 100644
68161--- a/fs/xfs/xfs_dir2_readdir.c
68162+++ b/fs/xfs/xfs_dir2_readdir.c
68163@@ -140,7 +140,12 @@ xfs_dir2_sf_getdents(
68164 ino = dp->d_ops->sf_get_ino(sfp, sfep);
68165 filetype = dp->d_ops->sf_get_ftype(sfep);
68166 ctx->pos = off & 0x7fffffff;
68167- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
68168+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
68169+ char name[sfep->namelen];
68170+ memcpy(name, sfep->name, sfep->namelen);
68171+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
68172+ return 0;
68173+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
68174 xfs_dir3_get_dtype(dp->i_mount, filetype)))
68175 return 0;
68176 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
68177diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
68178index a183198..6b52f52 100644
68179--- a/fs/xfs/xfs_ioctl.c
68180+++ b/fs/xfs/xfs_ioctl.c
68181@@ -119,7 +119,7 @@ xfs_find_handle(
68182 }
68183
68184 error = -EFAULT;
68185- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
68186+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
68187 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
68188 goto out_put;
68189
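
The xfs_find_handle() fix caps hsize at sizeof(handle) before the copy_to_user(), so a caller-influenced length can never overread the on-stack handle. The defensive pattern in isolation (names hypothetical):

    /* Sketch: never let a caller-derived length exceed the source buffer. */
    static int copy_handle_out(void __user *dst, const void *src,
                               size_t src_size, size_t len)
    {
            if (len > src_size)
                    return -EFAULT;    /* would leak adjacent kernel stack */
            return copy_to_user(dst, src, len) ? -EFAULT : 0;
    }
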
68190diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
68191index c31d2c2..6ec8f62 100644
68192--- a/fs/xfs/xfs_linux.h
68193+++ b/fs/xfs/xfs_linux.h
68194@@ -234,7 +234,7 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
68195 * of the compiler which do not like us using do_div in the middle
68196 * of large functions.
68197 */
68198-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
68199+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
68200 {
68201 __u32 mod;
68202
68203@@ -290,7 +290,7 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
68204 return 0;
68205 }
68206 #else
68207-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
68208+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
68209 {
68210 __u32 mod;
68211
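
The recurring __intentional_overflow(-1) annotation (applied above to fs64_to_cpu(), fs32_to_cpu(), ubifs_leb_unmap() and xfs_do_div()) tells the size_overflow gcc plugin that the marked function's result may legitimately wrap (-1 designates the return value), suppressing instrumentation that would otherwise flag the arithmetic. Roughly, and with a hypothetical guard macro, its definition looks like:

    #ifdef SIZE_OVERFLOW_PLUGIN   /* hypothetical guard name */
    #define __intentional_overflow(...) \
            __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    #define __intentional_overflow(...)   /* expands to nothing */
    #endif
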
68212diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
68213new file mode 100644
68214index 0000000..31f8fe4
68215--- /dev/null
68216+++ b/grsecurity/Kconfig
68217@@ -0,0 +1,1182 @@
68218+#
68219+# grsecurity configuration
68220+#
68221+menu "Memory Protections"
68222+depends on GRKERNSEC
68223+
68224+config GRKERNSEC_KMEM
68225+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
68226+ default y if GRKERNSEC_CONFIG_AUTO
68227+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
68228+ help
68229+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
68230+ be written to or read from to modify or leak the contents of the running
68231+ kernel. /dev/port will also not be allowed to be opened, writing to
68232+ /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
68233+ If you have module support disabled, enabling this will close up several
68234+ ways that are currently used to insert malicious code into the running
68235+ kernel.
68236+
68237+ Even with this feature enabled, we still highly recommend that
68238+ you use the RBAC system, as it is still possible for an attacker to
68239+ modify the running kernel through other more obscure methods.
68240+
68241+ It is highly recommended that you say Y here if you meet all the
68242+ conditions above.
68243+
68244+config GRKERNSEC_VM86
68245+ bool "Restrict VM86 mode"
68246+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68247+ depends on X86_32
68248+
68249+ help
68250+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
68251+ make use of a special execution mode on 32-bit x86 processors called
68252+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
68253+ video cards and will still work with this option enabled. The purpose
68254+ of the option is to prevent exploitation of emulation errors in
68255+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
68256+ Nearly all users should be able to enable this option.
68257+
68258+config GRKERNSEC_IO
68259+ bool "Disable privileged I/O"
68260+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68261+ depends on X86
68262+ select RTC_CLASS
68263+ select RTC_INTF_DEV
68264+ select RTC_DRV_CMOS
68265+
68266+ help
68267+ If you say Y here, all ioperm and iopl calls will return an error.
68268+ Ioperm and iopl can be used to modify the running kernel.
68269+ Unfortunately, some programs need this access to operate properly;
68270+ the most notable are XFree86 and hwclock. The hwclock case can be
68271+ remedied by having RTC support in the kernel, so real-time
68272+ clock support is enabled whenever this option is enabled, to ensure
68273+ that hwclock operates correctly. If hwclock still does not work,
68274+ either update udev or symlink /dev/rtc to /dev/rtc0.
68275+
68276+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
68277+ you may not be able to boot into a graphical environment with this
68278+ option enabled. In this case, you should use the RBAC system instead.
68279+
68280+config GRKERNSEC_BPF_HARDEN
68281+ bool "Harden BPF interpreter"
68282+ default y if GRKERNSEC_CONFIG_AUTO
68283+ help
68284+ Unlike previous versions of grsecurity that hardened both the BPF
68285+ interpreted code against corruption at rest as well as the JIT code
68286+ against JIT-spray attacks and attacker-controlled immediate values
68287+ for ROP, this feature will enforce disabling of the new eBPF JIT engine
68288+ and will ensure the interpreted code is read-only at rest. This feature
68289+ may be removed at a later time, once eBPF stabilizes, to revert
68290+ entirely to the more secure pre-3.16 BPF interpreter/JIT.
68291+
68292+ If you're using KERNEXEC, it's recommended that you enable this option
68293+ to supplement the hardening of the kernel.
68294+
68295+config GRKERNSEC_PERF_HARDEN
68296+ bool "Disable unprivileged PERF_EVENTS usage by default"
68297+ default y if GRKERNSEC_CONFIG_AUTO
68298+ depends on PERF_EVENTS
68299+ help
68300+ If you say Y here, the range of acceptable values for the
68301+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
68302+ default to a new value: 3. When the sysctl is set to this value, no
68303+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
68304+
68305+ Though PERF_EVENTS can be used legitimately for performance monitoring
68306+ and low-level application profiling, it is forced on regardless of
68307+ configuration, has been at fault for several vulnerabilities, and
68308+ creates new opportunities for side channels and other information leaks.
68309+
68310+ This feature puts PERF_EVENTS into a secure default state and permits
68311+ the administrator to change out of it temporarily if unprivileged
68312+ application profiling is needed.
68313+
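
In practice the administrator toggles this at runtime through /proc/sys/kernel/perf_event_paranoid: 3 (the hardened default here) blocks all unprivileged perf_event_open() use, and lower values progressively re-allow it. A hypothetical userspace helper:

    #include <stdio.h>

    /* Sketch: write kernel.perf_event_paranoid; requires root. */
    static int set_perf_paranoid(int level)
    {
            FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "w");

            if (!f)
                    return -1;
            fprintf(f, "%d\n", level);
            return fclose(f);
    }
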
68314+config GRKERNSEC_RAND_THREADSTACK
68315+ bool "Insert random gaps between thread stacks"
68316+ default y if GRKERNSEC_CONFIG_AUTO
68317+ depends on PAX_RANDMMAP && !PPC
68318+ help
68319+ If you say Y here, a random-sized gap will be enforced between allocated
68320+ thread stacks. Glibc's NPTL and other threading libraries that
68321+ pass MAP_STACK to the kernel for thread stack allocation are supported.
68322+ The implementation currently provides 8 bits of entropy for the gap.
68323+
68324+ Many distributions do not compile threaded remote services with the
68325+ -fstack-check argument to GCC, causing the variable-sized stack-based
68326+ allocator, alloca(), to not probe the stack on allocation. This
68327+ permits an unbounded alloca() to skip over any guard page and potentially
68328+ modify another thread's stack reliably. An enforced random gap
68329+ reduces the reliability of such an attack and increases the chance
68330+ that such a read/write to another thread's stack instead lands in
68331+ an unmapped area, causing a crash and triggering grsecurity's
68332+ anti-bruteforcing logic.
68333+
68334+config GRKERNSEC_PROC_MEMMAP
68335+ bool "Harden ASLR against information leaks and entropy reduction"
68336+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
68337+ depends on PAX_NOEXEC || PAX_ASLR
68338+ help
68339+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
68340+ give no information about the addresses of its mappings if
68341+ PaX features that rely on random addresses are enabled on the task.
68342+ In addition to sanitizing this information and disabling other
68343+ dangerous sources of information, this option denies reads of sensitive
68344+ /proc/<pid> entries where the file descriptor was opened in a different
68345+ task than the one performing the read; such attempts are logged.
68346+ This option also limits argv/env strings for suid/sgid binaries
68347+ to 512KB to prevent a complete exhaustion of the stack entropy provided
68348+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
68349+ binaries to prevent alternative mmap layouts from being abused.
68350+
68351+ If you use PaX it is essential that you say Y here as it closes up
68352+ several holes that make full ASLR useless locally.
68353+
68354+
68355+config GRKERNSEC_KSTACKOVERFLOW
68356+ bool "Prevent kernel stack overflows"
68357+ default y if GRKERNSEC_CONFIG_AUTO
68358+ depends on !IA64 && 64BIT
68359+ help
68360+ If you say Y here, the kernel's process stacks will be allocated
68361+ with vmalloc instead of the kernel's default allocator. This
68362+ introduces guard pages that in combination with the alloca checking
68363+ of the STACKLEAK feature prevents all forms of kernel process stack
68364+ overflow abuse. Note that this is different from kernel stack
68365+ buffer overflows.
68366+
68367+config GRKERNSEC_BRUTE
68368+ bool "Deter exploit bruteforcing"
68369+ default y if GRKERNSEC_CONFIG_AUTO
68370+ help
68371+ If you say Y here, attempts to bruteforce exploits against forking
68372+ daemons such as apache or sshd, as well as against suid/sgid binaries
68373+ will be deterred. When a child of a forking daemon is killed by PaX
68374+ or crashes due to an illegal instruction or other suspicious signal,
68375+ the parent process will be delayed 30 seconds upon every subsequent
68376+ fork until the administrator is able to assess the situation and
68377+ restart the daemon.
68378+ In the suid/sgid case, the attempt is logged, the user has all their
68379+ existing instances of the suid/sgid binary terminated and will
68380+ be unable to execute any suid/sgid binaries for 15 minutes.
68381+
68382+ It is recommended that you also enable signal logging in the auditing
68383+ section so that logs are generated when a process triggers a suspicious
68384+ signal.
68385+ If the sysctl option is enabled, a sysctl option with name
68386+ "deter_bruteforce" is created.
68387+
68388+config GRKERNSEC_MODHARDEN
68389+ bool "Harden module auto-loading"
68390+ default y if GRKERNSEC_CONFIG_AUTO
68391+ depends on MODULES
68392+ help
68393+ If you say Y here, module auto-loading in response to use of some
68394+ feature implemented by an unloaded module will be restricted to
68395+ root users. Enabling this option helps defend against attacks
68396+ by unprivileged users who abuse the auto-loading behavior to
68397+ cause a vulnerable module to load that is then exploited.
68398+
68399+ If this option prevents a legitimate use of auto-loading for a
68400+ non-root user, the administrator can execute modprobe manually
68401+ with the exact name of the module mentioned in the alert log.
68402+ Alternatively, the administrator can add the module to the list
68403+ of modules loaded at boot by modifying init scripts.
68404+
68405+ Modification of init scripts will most likely be needed on
68406+ Ubuntu servers with encrypted home directory support enabled,
68407+ as the first non-root user logging in will cause the ecb(aes),
68408+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
68409+
68410+config GRKERNSEC_HIDESYM
68411+ bool "Hide kernel symbols"
68412+ default y if GRKERNSEC_CONFIG_AUTO
68413+ select PAX_USERCOPY_SLABS
68414+ help
68415+ If you say Y here, getting information on loaded modules, and
68416+ displaying all kernel symbols through a syscall will be restricted
68417+ to users with CAP_SYS_MODULE. For software compatibility reasons,
68418+ /proc/kallsyms will be restricted to the root user. The RBAC
68419+ system can hide that entry even from root.
68420+
68421+ This option also prevents leaking of kernel addresses through
68422+ several /proc entries.
68423+
68424+ Note that this option is only effective provided the following
68425+ conditions are met:
68426+ 1) The kernel using grsecurity is not precompiled by some distribution
68427+ 2) You have also enabled GRKERNSEC_DMESG
68428+ 3) You are using the RBAC system and hiding other files such as your
68429+ kernel image and System.map. Alternatively, enabling this option
68430+ causes the permissions on /boot, /lib/modules, and the kernel
68431+ source directory to change at compile time to prevent
68432+ reading by non-root users.
68433+ If the above conditions are met, this option will aid in providing a
68434+ useful protection against local kernel exploitation of overflows
68435+ and arbitrary read/write vulnerabilities.
68436+
68437+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
68438+ in addition to this feature.
68439+
68440+config GRKERNSEC_RANDSTRUCT
68441+ bool "Randomize layout of sensitive kernel structures"
68442+ default y if GRKERNSEC_CONFIG_AUTO
68443+ select GRKERNSEC_HIDESYM
68444+ select MODVERSIONS if MODULES
68445+ help
68446+ If you say Y here, the layouts of a number of sensitive kernel
68447+ structures (task, fs, cred, etc) and all structures composed entirely
68448+ of function pointers (aka "ops" structs) will be randomized at compile-time.
68449+ This can introduce the requirement of an additional infoleak
68450+ vulnerability for exploits targeting these structure types.
68451+
68452+ Enabling this feature will introduce some performance impact, slightly
68453+ increase memory usage, and prevent the use of forensic tools like
68454+ Volatility against the system (unless the kernel source tree isn't
68455+ cleaned after kernel installation).
68456+
68457+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
68458+ It remains after a make clean to allow for external modules to be compiled
68459+ with the existing seed and will be removed by a make mrproper or
68460+ make distclean.
68461+
68462+ Note that the implementation requires gcc 4.6.4 or newer. You may need
68463+ to install the supporting headers explicitly in addition to the normal
68464+ gcc package.
68465+
68466+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
68467+ bool "Use cacheline-aware structure randomization"
68468+ depends on GRKERNSEC_RANDSTRUCT
68469+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
68470+ help
68471+ If you say Y here, the RANDSTRUCT randomization will make a best effort
68472+ at restricting randomization to cacheline-sized groups of elements. It
68473+ will further not randomize bitfields in structures. This reduces the
68474+ performance hit of RANDSTRUCT at the cost of weakened randomization.
68475+
68476+config GRKERNSEC_KERN_LOCKOUT
68477+ bool "Active kernel exploit response"
68478+ default y if GRKERNSEC_CONFIG_AUTO
68479+ depends on X86 || ARM || PPC || SPARC
68480+ help
68481+ If you say Y here, when a PaX alert is triggered due to suspicious
68482+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
68483+ or an OOPS occurs due to bad memory accesses, instead of just
68484+ terminating the offending process (and potentially allowing
68485+ a subsequent exploit from the same user), we will take one of two
68486+ actions:
68487+ If the user was root, we will panic the system
68488+ If the user was non-root, we will log the attempt, terminate
68489+ all processes owned by the user, then prevent them from creating
68490+ any new processes until the system is restarted
68491+ This deters repeated kernel exploitation/bruteforcing attempts
68492+ and is useful for later forensics.
68493+
68494+config GRKERNSEC_OLD_ARM_USERLAND
68495+ bool "Old ARM userland compatibility"
68496+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
68497+ help
68498+ If you say Y here, stubs of executable code to perform such operations
68499+ as "compare-exchange" will be placed at fixed locations in the ARM vector
68500+ table. This is unfortunately needed for old ARM userland meant to run
68501+ across a wide range of processors. Without this option enabled,
68502+ the get_tls and data memory barrier stubs will be emulated by the kernel,
68503+ which is enough for Linaro userlands or other userlands designed for v6
68504+ and newer ARM CPUs. It's recommended that you try without this option enabled
68505+ first, and only enable it if your userland does not boot (it will likely fail
68506+ at init time).
68507+
68508+endmenu
68509+menu "Role Based Access Control Options"
68510+depends on GRKERNSEC
68511+
68512+config GRKERNSEC_RBAC_DEBUG
68513+ bool
68514+
68515+config GRKERNSEC_NO_RBAC
68516+ bool "Disable RBAC system"
68517+ help
68518+ If you say Y here, the /dev/grsec device will be removed from the kernel,
68519+ preventing the RBAC system from being enabled. You should only say Y
68520+ here if you have no intention of using the RBAC system, so as to prevent
68521+ an attacker with root access from misusing the RBAC system to hide files
68522+ and processes when loadable module support and /dev/[k]mem have been
68523+ locked down.
68524+
68525+config GRKERNSEC_ACL_HIDEKERN
68526+ bool "Hide kernel processes"
68527+ help
68528+ If you say Y here, all kernel threads will be hidden from all
68529+ processes but those whose subject has the "view hidden processes"
68530+ flag.
68531+
68532+config GRKERNSEC_ACL_MAXTRIES
68533+ int "Maximum tries before password lockout"
68534+ default 3
68535+ help
68536+ This option enforces the maximum number of times a user can attempt
68537+ to authorize themselves with the grsecurity RBAC system before being
68538+ denied the ability to attempt authorization again for a specified time.
68539+ The lower the number, the harder it will be to brute-force a password.
68540+
68541+config GRKERNSEC_ACL_TIMEOUT
68542+ int "Time to wait after max password tries, in seconds"
68543+ default 30
68544+ help
68545+ This option specifies the time the user must wait after attempting to
68546+ authorize to the RBAC system with the maximum number of invalid
68547+ passwords. The higher the number, the harder it will be to brute-force
68548+ a password.
68549+
68550+endmenu
68551+menu "Filesystem Protections"
68552+depends on GRKERNSEC
68553+
68554+config GRKERNSEC_PROC
68555+ bool "Proc restrictions"
68556+ default y if GRKERNSEC_CONFIG_AUTO
68557+ help
68558+ If you say Y here, the permissions of the /proc filesystem
68559+ will be altered to enhance system security and privacy. You MUST
68560+ choose either a user only restriction or a user and group restriction.
68561+ Depending upon the option you choose, you can either restrict users to
68562+ see only the processes they themselves run (the "restrict to user only"
68563+ option), or choose a group whose members can view all processes and
68564+ files normally restricted to root. NOTE: If you're running identd or
68565+ ntpd as a non-root user, you will have to run it as the group you
68566+ specify here.
68567+
68568+config GRKERNSEC_PROC_USER
68569+ bool "Restrict /proc to user only"
68570+ depends on GRKERNSEC_PROC
68571+ help
68572+ If you say Y here, non-root users will only be able to view their own
68573+ processes, and will be restricted from viewing network-related
68574+ information as well as kernel symbol and module information.
68575+
68576+config GRKERNSEC_PROC_USERGROUP
68577+ bool "Allow special group"
68578+ default y if GRKERNSEC_CONFIG_AUTO
68579+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
68580+ help
68581+ If you say Y here, you will be able to select a group that will be
68582+ able to view all processes and network-related information. If you've
68583+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
68584+ remain hidden. This option is useful if you want to run identd as
68585+ a non-root user. The group you select may also be chosen at boot time
68586+ via "grsec_proc_gid=" on the kernel commandline.
68587+
68588+config GRKERNSEC_PROC_GID
68589+ int "GID for special group"
68590+ depends on GRKERNSEC_PROC_USERGROUP
68591+ default 1001
68592+
68593+config GRKERNSEC_PROC_ADD
68594+ bool "Additional restrictions"
68595+ default y if GRKERNSEC_CONFIG_AUTO
68596+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
68597+ help
68598+ If you say Y here, additional restrictions will be placed on
68599+ /proc that keep normal users from viewing device information and
68600+ slabinfo information that could be useful for exploits.
68601+
68602+config GRKERNSEC_LINK
68603+ bool "Linking restrictions"
68604+ default y if GRKERNSEC_CONFIG_AUTO
68605+ help
68606+ If you say Y here, /tmp race exploits will be prevented, since users
68607+ will no longer be able to follow symlinks owned by other users in
68608+ world-writable +t directories (e.g. /tmp), unless the owner of the
68609+ symlink is the owner of the directory. Users will also not be
68610+ able to hardlink to files they do not own. If the sysctl option is
68611+ enabled, a sysctl option with name "linking_restrictions" is created.
68612+
68613+config GRKERNSEC_SYMLINKOWN
68614+ bool "Kernel-enforced SymlinksIfOwnerMatch"
68615+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
68616+ help
68617+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
68618+ that prevents it from being used as a security feature. As Apache
68619+ verifies the symlink by performing a stat() against the target of
68620+ the symlink before it is followed, an attacker can set up a symlink
68621+ to point to a same-owned file, then replace the symlink with one
68622+ that targets another user's file just after Apache "validates" the
68623+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
68624+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
68625+ will be in place for the group you specify. If the sysctl option
68626+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
68627+ created.
68628+
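
The race described above is easy to reproduce in miniature: the checker stat()s the link target, then the attacker repoints the symlink before the open(). A hypothetical userspace sketch of the vulnerable check:

    #include <sys/stat.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* Sketch of a SymlinksIfOwnerMatch-style check: the target can be
     * swapped between the stat() and the open(), a TOCTOU window the
     * kernel-enforced variant closes. */
    static int open_if_owner_matches(const char *link, uid_t owner)
    {
            struct stat st;

            if (stat(link, &st) < 0 || st.st_uid != owner)
                    return -1;
            /* window: attacker repoints the symlink here */
            return open(link, O_RDONLY);
    }
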
68629+config GRKERNSEC_SYMLINKOWN_GID
68630+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
68631+ depends on GRKERNSEC_SYMLINKOWN
68632+ default 1006
68633+ help
68634+ Setting this GID determines what group kernel-enforced
68635+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
68636+ is enabled, a sysctl option with name "symlinkown_gid" is created.
68637+
68638+config GRKERNSEC_FIFO
68639+ bool "FIFO restrictions"
68640+ default y if GRKERNSEC_CONFIG_AUTO
68641+ help
68642+ If you say Y here, users will not be able to write to FIFOs they don't
68643+ own in world-writable +t directories (e.g. /tmp), unless the owner of
68644+ the FIFO is also the owner of the directory it's held in. If the sysctl
68645+ option is enabled, a sysctl option with name "fifo_restrictions" is
68646+ created.
68647+
68648+config GRKERNSEC_SYSFS_RESTRICT
68649+ bool "Sysfs/debugfs restriction"
68650+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68651+ depends on SYSFS
68652+ help
68653+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
68654+ any filesystem normally mounted under it (e.g. debugfs) will be
68655+ mostly accessible only by root. These filesystems generally provide access
68656+ to hardware and debug information that isn't appropriate for unprivileged
68657+ users of the system. Sysfs and debugfs have also become a large source
68658+ of new vulnerabilities, ranging from infoleaks to local compromise.
68659+ There has been very little oversight with an eye toward security involved
68660+ in adding new exporters of information to these filesystems, so their
68661+ use is discouraged.
68662+ For reasons of compatibility, a few directories have been whitelisted
68663+ for access by non-root users:
68664+ /sys/fs/selinux
68665+ /sys/fs/fuse
68666+ /sys/devices/system/cpu
68667+
68668+config GRKERNSEC_ROFS
68669+ bool "Runtime read-only mount protection"
68670+ depends on SYSCTL
68671+ help
68672+ If you say Y here, a sysctl option with name "romount_protect" will
68673+ be created. By setting this option to 1 at runtime, filesystems
68674+ will be protected in the following ways:
68675+ * No new writable mounts will be allowed
68676+ * Existing read-only mounts won't be able to be remounted read/write
68677+ * Write operations will be denied on all block devices
68678+ This option acts independently of grsec_lock: once it is set to 1,
68679+ it cannot be turned off. Therefore, please be mindful of the resulting
68680+ behavior if this option is enabled in an init script on a read-only
68681+ filesystem.
68682+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
68683+ and GRKERNSEC_IO should be enabled and module loading disabled via
68684+ config or at runtime.
68685+ This feature is mainly intended for secure embedded systems.
68686+
68687+
68688+config GRKERNSEC_DEVICE_SIDECHANNEL
68689+ bool "Eliminate stat/notify-based device sidechannels"
68690+ default y if GRKERNSEC_CONFIG_AUTO
68691+ help
68692+ If you say Y here, timing analyses on block or character
68693+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
68694+ will be thwarted for unprivileged users. If a process without
68695+ CAP_MKNOD stats such a device, the last access and last modify times
68696+ will match the device's create time. No access or modify events
68697+ will be triggered through inotify/dnotify/fanotify for such devices.
68698+ This feature will prevent attacks that may at a minimum
68699+ allow an attacker to determine the administrator's password length.
68700+
68701+config GRKERNSEC_CHROOT
68702+ bool "Chroot jail restrictions"
68703+ default y if GRKERNSEC_CONFIG_AUTO
68704+ help
68705+ If you say Y here, you will be able to choose several options that will
68706+ make breaking out of a chrooted jail much more difficult. If you
68707+ encounter no software incompatibilities with the following options, it
68708+ is recommended that you enable each one.
68709+
68710+ Note that the chroot restrictions are not intended to apply to "chroots"
68711+ to directories that are simple bind mounts of the global root filesystem.
68712+ For several other reasons, a user shouldn't expect any significant
68713+ security by performing such a chroot.
68714+
68715+config GRKERNSEC_CHROOT_MOUNT
68716+ bool "Deny mounts"
68717+ default y if GRKERNSEC_CONFIG_AUTO
68718+ depends on GRKERNSEC_CHROOT
68719+ help
68720+ If you say Y here, processes inside a chroot will not be able to
68721+ mount or remount filesystems. If the sysctl option is enabled, a
68722+ sysctl option with name "chroot_deny_mount" is created.
68723+
68724+config GRKERNSEC_CHROOT_DOUBLE
68725+ bool "Deny double-chroots"
68726+ default y if GRKERNSEC_CONFIG_AUTO
68727+ depends on GRKERNSEC_CHROOT
68728+ help
68729+ If you say Y here, processes inside a chroot will not be able to chroot
68730+ again outside the chroot. This is a widely used method of breaking
68731+ out of a chroot jail and should not be allowed. If the sysctl
68732+ option is enabled, a sysctl option with name
68733+ "chroot_deny_chroot" is created.
68734+
68735+config GRKERNSEC_CHROOT_PIVOT
68736+ bool "Deny pivot_root in chroot"
68737+ default y if GRKERNSEC_CONFIG_AUTO
68738+ depends on GRKERNSEC_CHROOT
68739+ help
68740+ If you say Y here, processes inside a chroot will not be able to use
68741+ a function called pivot_root() that was introduced in Linux 2.3.41. It
68742+ works similar to chroot in that it changes the root filesystem. This
68743+ function could be misused in a chrooted process to attempt to break out
68744+ of the chroot, and therefore should not be allowed. If the sysctl
68745+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
68746+ created.
68747+
68748+config GRKERNSEC_CHROOT_CHDIR
68749+ bool "Enforce chdir(\"/\") on all chroots"
68750+ default y if GRKERNSEC_CONFIG_AUTO
68751+ depends on GRKERNSEC_CHROOT
68752+ help
68753+ If you say Y here, the current working directory of all newly-chrooted
68754+ applications will be set to the root directory of the chroot.
68755+ The man page on chroot(2) states:
68756+ Note that this call does not change the current working
68757+ directory, so that `.' can be outside the tree rooted at
68758+ `/'. In particular, the super-user can escape from a
68759+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
68760+
68761+ It is recommended that you say Y here, since it's not known to break
68762+ any software. If the sysctl option is enabled, a sysctl option with
68763+ name "chroot_enforce_chdir" is created.
68764+
68765+config GRKERNSEC_CHROOT_CHMOD
68766+ bool "Deny (f)chmod +s"
68767+ default y if GRKERNSEC_CONFIG_AUTO
68768+ depends on GRKERNSEC_CHROOT
68769+ help
68770+ If you say Y here, processes inside a chroot will not be able to chmod
68771+ or fchmod files to make them have suid or sgid bits. This protects
68772+ against another published method of breaking a chroot. If the sysctl
68773+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
68774+ created.
68775+
68776+config GRKERNSEC_CHROOT_FCHDIR
68777+ bool "Deny fchdir and fhandle out of chroot"
68778+ default y if GRKERNSEC_CONFIG_AUTO
68779+ depends on GRKERNSEC_CHROOT
68780+ help
68781+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
68782+ to a file descriptor of the chrooting process that points to a directory
68783+ outside the filesystem will be stopped. Additionally, this option prevents
68784+ use of the recently-created syscall for opening files by a guessable "file
68785+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
68786+ with name "chroot_deny_fchdir" is created.
68787+
68788+config GRKERNSEC_CHROOT_MKNOD
68789+ bool "Deny mknod"
68790+ default y if GRKERNSEC_CONFIG_AUTO
68791+ depends on GRKERNSEC_CHROOT
68792+ help
68793+ If you say Y here, processes inside a chroot will not be allowed to
68794+ mknod. The problem with using mknod inside a chroot is that it
68795+ would allow an attacker to create a device entry that is the same
68796+ as one on the physical root of your system, which could be anything
68797+ from the console device to a device for your hard drive (which they
68798+ could then use to wipe the drive or steal data). It is recommended
68799+ that you say Y here, unless you run into software incompatibilities.
68800+ If the sysctl option is enabled, a sysctl option with name
68801+ "chroot_deny_mknod" is created.
68802+
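 The device-node attack described above is a one-liner. As an
 illustration (not part of the patch; 8,0 is the conventional
 major/minor numbering for the first SCSI/SATA disk and is used only
 as an example):

	#include <sys/stat.h>
	#include <sys/sysmacros.h>

	int main(void)
	{
		/* Recreate the host's first disk inside the jail; the
		 * jail's root can then read or overwrite it directly. */
		return mknod("sda", S_IFBLK | 0600, makedev(8, 0));
	}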
68803+config GRKERNSEC_CHROOT_SHMAT
68804+ bool "Deny shmat() out of chroot"
68805+ default y if GRKERNSEC_CONFIG_AUTO
68806+ depends on GRKERNSEC_CHROOT
68807+ help
68808+ If you say Y here, processes inside a chroot will not be able to attach
68809+ to shared memory segments that were created outside of the chroot jail.
68810+ It is recommended that you say Y here. If the sysctl option is enabled,
68811+ a sysctl option with name "chroot_deny_shmat" is created.
68812+
68813+config GRKERNSEC_CHROOT_UNIX
68814+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
68815+ default y if GRKERNSEC_CONFIG_AUTO
68816+ depends on GRKERNSEC_CHROOT
68817+ help
68818+ If you say Y here, processes inside a chroot will not be able to
68819+ connect to abstract (meaning not belonging to a filesystem) Unix
68820+ domain sockets that were bound outside of a chroot. It is recommended
68821+ that you say Y here. If the sysctl option is enabled, a sysctl option
68822+ with name "chroot_deny_unix" is created.
68823+
68824+config GRKERNSEC_CHROOT_FINDTASK
68825+ bool "Protect outside processes"
68826+ default y if GRKERNSEC_CONFIG_AUTO
68827+ depends on GRKERNSEC_CHROOT
68828+ help
68829+ If you say Y here, processes inside a chroot will not be able to
68830+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
68831+ getsid, or view any process outside of the chroot. If the sysctl
68832+ option is enabled, a sysctl option with name "chroot_findtask" is
68833+ created.
68834+
68835+config GRKERNSEC_CHROOT_NICE
68836+ bool "Restrict priority changes"
68837+ default y if GRKERNSEC_CONFIG_AUTO
68838+ depends on GRKERNSEC_CHROOT
68839+ help
68840+ If you say Y here, processes inside a chroot will not be able to raise
68841+ the priority of processes in the chroot, or alter the priority of
68842+ processes outside the chroot. This provides more security than simply
68843+ removing CAP_SYS_NICE from the process' capability set. If the
68844+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
68845+ is created.
68846+
68847+config GRKERNSEC_CHROOT_SYSCTL
68848+ bool "Deny sysctl writes"
68849+ default y if GRKERNSEC_CONFIG_AUTO
68850+ depends on GRKERNSEC_CHROOT
68851+ help
68852+ If you say Y here, an attacker in a chroot will not be able to
68853+ write to sysctl entries, either by sysctl(2) or through a /proc
68854+ interface. It is strongly recommended that you say Y here. If the
68855+ sysctl option is enabled, a sysctl option with name
68856+ "chroot_deny_sysctl" is created.
68857+
68858+config GRKERNSEC_CHROOT_RENAME
68859+ bool "Deny bad renames"
68860+ default y if GRKERNSEC_CONFIG_AUTO
68861+ depends on GRKERNSEC_CHROOT
68862+ help
68863+ If you say Y here, an attacker in a chroot will not be able to
68864+ abuse the ability to create double chroots to break out of the
68865+ chroot by exploiting a race condition between a rename of a directory
68866+ within a chroot and an open of a symlink with relative path
68867+ components. This feature will likewise prevent an accomplice outside
68868+ a chroot from enabling a user inside the chroot to break out and make
68869+ use of their credentials on the global filesystem. Enabling this
68870+ feature is essential to prevent root users from breaking out of a
68871+ chroot. If the sysctl option is enabled, a sysctl option with name
68872+ "chroot_deny_bad_rename" is created.
68873+
68874+config GRKERNSEC_CHROOT_CAPS
68875+ bool "Capability restrictions"
68876+ default y if GRKERNSEC_CONFIG_AUTO
68877+ depends on GRKERNSEC_CHROOT
68878+ help
68879+ If you say Y here, the capabilities on all processes within a
68880+ chroot jail will be lowered to stop module insertion, raw i/o,
68881+ system and net admin tasks, rebooting the system, modifying immutable
68882+ files, modifying IPC owned by another, and changing the system time.
68883+ This is left as an option because it can break some apps. Disable this
68884+ if your chrooted apps are having problems performing those kinds of
68885+ tasks. If the sysctl option is enabled, a sysctl option with
68886+ name "chroot_caps" is created.
68887+
68888+config GRKERNSEC_CHROOT_INITRD
68889+ bool "Exempt initrd tasks from restrictions"
68890+ default y if GRKERNSEC_CONFIG_AUTO
68891+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
68892+ help
68893+ If you say Y here, tasks started prior to init will be exempted from
68894+ grsecurity's chroot restrictions. This option is mainly meant to
68895+ resolve Plymouth's performing privileged operations unnecessarily
68896+ in a chroot.
68897+
68898+endmenu
68899+menu "Kernel Auditing"
68900+depends on GRKERNSEC
68901+
68902+config GRKERNSEC_AUDIT_GROUP
68903+ bool "Single group for auditing"
68904+ help
68905+ If you say Y here, the exec and chdir logging features will only operate
68906+ on a group you specify. This option is recommended if you only want to
68907+ watch certain users instead of having a large volume of logs from the
68908+ entire system. If the sysctl option is enabled, a sysctl option with
68909+ name "audit_group" is created.
68910+
68911+config GRKERNSEC_AUDIT_GID
68912+ int "GID for auditing"
68913+ depends on GRKERNSEC_AUDIT_GROUP
68914+ default 1007
68915+
68916+config GRKERNSEC_EXECLOG
68917+ bool "Exec logging"
68918+ help
68919+ If you say Y here, all execve() calls will be logged (since the
68920+ other exec*() calls are frontends to execve(), all execution
68921+ will be logged). Useful for shell-servers that like to keep track
68922+ of their users. If the sysctl option is enabled, a sysctl option with
68923+ name "exec_logging" is created.
68924+ WARNING: When enabled, this option will produce a LOT of logs, especially
68925+ on an active system.
68926+
68927+config GRKERNSEC_RESLOG
68928+ bool "Resource logging"
68929+ default y if GRKERNSEC_CONFIG_AUTO
68930+ help
68931+ If you say Y here, all attempts to overstep resource limits will
68932+ be logged with the resource name, the requested size, and the current
68933+ limit. It is highly recommended that you say Y here. If the sysctl
68934+ option is enabled, a sysctl option with name "resource_logging" is
68935+ created. If the RBAC system is enabled, the sysctl value is ignored.
68936+
68937+config GRKERNSEC_CHROOT_EXECLOG
68938+ bool "Log execs within chroot"
68939+ help
68940+ If you say Y here, all executions inside a chroot jail will be logged
68941+ to syslog. This can cause a large amount of logs if certain
68942+ applications (eg. djb's daemontools) are installed on the system, and
68943+ is therefore left as an option. If the sysctl option is enabled, a
68944+ sysctl option with name "chroot_execlog" is created.
68945+
68946+config GRKERNSEC_AUDIT_PTRACE
68947+ bool "Ptrace logging"
68948+ help
68949+ If you say Y here, all attempts to attach to a process via ptrace
68950+ will be logged. If the sysctl option is enabled, a sysctl option
68951+ with name "audit_ptrace" is created.
68952+
68953+config GRKERNSEC_AUDIT_CHDIR
68954+ bool "Chdir logging"
68955+ help
68956+ If you say Y here, all chdir() calls will be logged. If the sysctl
68957+ option is enabled, a sysctl option with name "audit_chdir" is created.
68958+
68959+config GRKERNSEC_AUDIT_MOUNT
68960+ bool "(Un)Mount logging"
68961+ help
68962+ If you say Y here, all mounts and unmounts will be logged. If the
68963+ sysctl option is enabled, a sysctl option with name "audit_mount" is
68964+ created.
68965+
68966+config GRKERNSEC_SIGNAL
68967+ bool "Signal logging"
68968+ default y if GRKERNSEC_CONFIG_AUTO
68969+ help
68970+ If you say Y here, certain important signals will be logged, such as
68971+ SIGSEGV, which will as a result inform you when an error in a program
68972+ occurred, which in some cases could mean a possible exploit attempt.
68973+ If the sysctl option is enabled, a sysctl option with name
68974+ "signal_logging" is created.
68975+
68976+config GRKERNSEC_FORKFAIL
68977+ bool "Fork failure logging"
68978+ help
68979+ If you say Y here, all failed fork() attempts will be logged.
68980+ This could suggest a fork bomb, or someone attempting to overstep
68981+ their process limit. If the sysctl option is enabled, a sysctl option
68982+ with name "forkfail_logging" is created.
68983+
68984+config GRKERNSEC_TIME
68985+ bool "Time change logging"
68986+ default y if GRKERNSEC_CONFIG_AUTO
68987+ help
68988+ If you say Y here, any changes of the system clock will be logged.
68989+ If the sysctl option is enabled, a sysctl option with name
68990+ "timechange_logging" is created.
68991+
68992+config GRKERNSEC_PROC_IPADDR
68993+ bool "/proc/<pid>/ipaddr support"
68994+ default y if GRKERNSEC_CONFIG_AUTO
68995+ help
68996+ If you say Y here, a new entry will be added to each /proc/<pid>
68997+ directory that contains the IP address of the person using the task.
68998+ The IP is carried across local TCP and AF_UNIX stream sockets.
68999+ This information can be useful for IDS/IPSes to perform remote response
69000+ to a local attack. The entry is readable by only the owner of the
69001+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
69002+ the RBAC system), and thus does not create privacy concerns.
69003+
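 As an illustration, the entry can be read like any other /proc file.
 A sketch (the pid 1234 is a placeholder):

	#include <stdio.h>

	int main(void)
	{
		char ip[64];
		FILE *f = fopen("/proc/1234/ipaddr", "r");

		/* Readable only by the task's owner (and root, absent
		 * RBAC restrictions), per the help text above. */
		if (f && fgets(ip, sizeof(ip), f))
			printf("task owned by remote IP %s\n", ip);
		if (f)
			fclose(f);
		return 0;
	}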
69004+config GRKERNSEC_RWXMAP_LOG
69005+ bool "Denied RWX mmap/mprotect logging"
69006+ default y if GRKERNSEC_CONFIG_AUTO
69007+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
69008+ help
69009+ If you say Y here, calls to mmap() and mprotect() with explicit
69010+ usage of PROT_WRITE and PROT_EXEC together will be logged when
69011+ denied by the PAX_MPROTECT feature. This feature will also
69012+ log other problematic scenarios that can occur when PAX_MPROTECT
69013+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
69014+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
69015+ is created.
69016+
69017+endmenu
69018+
69019+menu "Executable Protections"
69020+depends on GRKERNSEC
69021+
69022+config GRKERNSEC_DMESG
69023+ bool "Dmesg(8) restriction"
69024+ default y if GRKERNSEC_CONFIG_AUTO
69025+ help
69026+ If you say Y here, non-root users will not be able to use dmesg(8)
69027+ to view the contents of the kernel's circular log buffer.
69028+ The kernel's log buffer often contains kernel addresses and other
69029+ identifying information useful to an attacker in fingerprinting a
69030+ system for a targeted exploit.
69031+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
69032+ created.
69033+
69034+config GRKERNSEC_HARDEN_PTRACE
69035+ bool "Deter ptrace-based process snooping"
69036+ default y if GRKERNSEC_CONFIG_AUTO
69037+ help
69038+ If you say Y here, TTY sniffers and other malicious monitoring
69039+ programs implemented through ptrace will be defeated. If you
69040+ have been using the RBAC system, this option has already been
69041+ enabled for several years for all users, with the ability to make
69042+ fine-grained exceptions.
69043+
69044+ This option only affects the ability of non-root users to ptrace
69045+ processes that are not a descendant of the ptracing process.
69046+ This means that strace ./binary and gdb ./binary will still work,
69047+ but attaching to arbitrary processes will not. If the sysctl
69048+ option is enabled, a sysctl option with name "harden_ptrace" is
69049+ created.
69050+
69051+config GRKERNSEC_PTRACE_READEXEC
69052+ bool "Require read access to ptrace sensitive binaries"
69053+ default y if GRKERNSEC_CONFIG_AUTO
69054+ help
69055+ If you say Y here, unprivileged users will not be able to ptrace unreadable
69056+ binaries. This option is useful in environments that
69057+ remove the read bits (e.g. file mode 4711) from suid binaries to
69058+ prevent infoleaking of their contents. This option adds
69059+ consistency to the use of that file mode, as without it the binary's
69060+ contents could still be read out through ptrace when run without privileges.
69061+
69062+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
69063+ is created.
69064+
69065+config GRKERNSEC_SETXID
69066+ bool "Enforce consistent multithreaded privileges"
69067+ default y if GRKERNSEC_CONFIG_AUTO
69068+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
69069+ help
69070+ If you say Y here, a change from a root uid to a non-root uid
69071+ in a multithreaded application will cause the resulting uids,
69072+ gids, supplementary groups, and capabilities in that thread
69073+ to be propagated to the other threads of the process. In most
69074+ cases this is unnecessary, as glibc will emulate this behavior
69075+ on behalf of the application. Other libcs do not act in the
69076+ same way, allowing the other threads of the process to continue
69077+ running with root privileges. If the sysctl option is enabled,
69078+ a sysctl option with name "consistent_setxid" is created.
69079+
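 The hazard this guards against can be demonstrated with a raw syscall
 that bypasses glibc's all-thread emulation. A sketch (run as root,
 link with -lpthread):

	#include <pthread.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static void *worker(void *arg)
	{
		sleep(1);
		/* Without this option (or glibc's emulation), the raw
		 * setuid below changed only the main thread, so this
		 * still prints 0. */
		printf("worker euid: %d\n", (int)geteuid());
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, worker, NULL);
		syscall(SYS_setuid, 65534);
		pthread_join(t, NULL);
		return 0;
	}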
69080+config GRKERNSEC_HARDEN_IPC
69081+ bool "Disallow access to overly-permissive IPC objects"
69082+ default y if GRKERNSEC_CONFIG_AUTO
69083+ depends on SYSVIPC
69084+ help
69085+ If you say Y here, access to overly-permissive IPC objects (shared
69086+ memory, message queues, and semaphores) will be denied for processes
69087+ meeting the following criteria beyond normal permission checks (see the sketch below):
69088+ 1) If the IPC object is world-accessible and the euid doesn't match
69089+ that of the creator or current uid for the IPC object
69090+ 2) If the IPC object is group-accessible and the egid doesn't
69091+ match that of the creator or current gid for the IPC object
69092+ It's a common error to grant too much permission to these objects,
69093+ with impact ranging from denial of service and information leaking to
69094+ privilege escalation. This feature was developed in response to
69095+ research by Tim Brown:
69096+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
69097+ who found hundreds of such insecure usages. Processes with
69098+ CAP_IPC_OWNER are still permitted to access these IPC objects.
69099+ If the sysctl option is enabled, a sysctl option with name
69100+ "harden_ipc" is created.
69101+
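 The two criteria restate compactly in C. This is a simplified sketch
 of the policy, not the kernel's actual hook code (which, as noted
 above, also honors CAP_IPC_OWNER):

	#include <stdbool.h>
	#include <sys/types.h>

	static bool harden_ipc_would_deny(mode_t mode,
					  uid_t cuid, uid_t uid, uid_t euid,
					  gid_t cgid, gid_t gid, gid_t egid)
	{
		/* 1) world-accessible and euid matches neither the
		 *    creator nor the current owner */
		if ((mode & 0006) && euid != cuid && euid != uid)
			return true;
		/* 2) group-accessible and egid matches neither the
		 *    creator nor the current group */
		if ((mode & 0060) && egid != cgid && egid != gid)
			return true;
		return false;
	}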
69102+config GRKERNSEC_TPE
69103+ bool "Trusted Path Execution (TPE)"
69104+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
69105+ help
69106+ If you say Y here, you will be able to choose a gid to add to the
69107+ supplementary groups of users you want to mark as "untrusted."
69108+ These users will not be able to execute any files that are not in
69109+ root-owned directories writable only by root. If the sysctl option
69110+ is enabled, a sysctl option with name "tpe" is created.
69111+
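 The base trust test is simple enough to restate in a few lines. A
 user-space approximation (a sketch; the in-kernel check also covers
 the GRKERNSEC_TPE_ALL and invert variants below):

	#include <stdbool.h>
	#include <sys/stat.h>

	/* A directory is TPE-trusted if it is root-owned and writable
	 * only by root. */
	static bool tpe_trusted_dir(const char *dir)
	{
		struct stat st;

		if (stat(dir, &st) != 0)
			return false;
		return st.st_uid == 0 && !(st.st_mode & (S_IWGRP | S_IWOTH));
	}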
69112+config GRKERNSEC_TPE_ALL
69113+ bool "Partially restrict all non-root users"
69114+ depends on GRKERNSEC_TPE
69115+ help
69116+ If you say Y here, all non-root users will be covered under
69117+ a weaker TPE restriction. This is separate from, and in addition to,
69118+ the main TPE options that you have selected elsewhere. Thus, if a
69119+ "trusted" GID is chosen, this restriction applies to even that GID.
69120+ Under this restriction, all non-root users will only be allowed to
69121+ execute files in directories they own that are not group or
69122+ world-writable, or in directories owned by root and writable only by
69123+ root. If the sysctl option is enabled, a sysctl option with name
69124+ "tpe_restrict_all" is created.
69125+
69126+config GRKERNSEC_TPE_INVERT
69127+ bool "Invert GID option"
69128+ depends on GRKERNSEC_TPE
69129+ help
69130+ If you say Y here, the group you specify in the TPE configuration will
69131+ decide what group TPE restrictions will be *disabled* for. This
69132+ option is useful if you want TPE restrictions to be applied to most
69133+ users on the system. If the sysctl option is enabled, a sysctl option
69134+ with name "tpe_invert" is created. Unlike other sysctl options, this
69135+ entry will default to on for backward-compatibility.
69136+
69137+config GRKERNSEC_TPE_GID
69138+ int
69139+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
69140+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
69141+
69142+config GRKERNSEC_TPE_UNTRUSTED_GID
69143+ int "GID for TPE-untrusted users"
69144+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
69145+ default 1005
69146+ help
69147+ Setting this GID determines what group TPE restrictions will be
69148+ *enabled* for. If the sysctl option is enabled, a sysctl option
69149+ with name "tpe_gid" is created.
69150+
69151+config GRKERNSEC_TPE_TRUSTED_GID
69152+ int "GID for TPE-trusted users"
69153+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
69154+ default 1005
69155+ help
69156+ Setting this GID determines what group TPE restrictions will be
69157+ *disabled* for. If the sysctl option is enabled, a sysctl option
69158+ with name "tpe_gid" is created.
69159+
69160+endmenu
69161+menu "Network Protections"
69162+depends on GRKERNSEC
69163+
69164+config GRKERNSEC_BLACKHOLE
69165+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
69166+ default y if GRKERNSEC_CONFIG_AUTO
69167+ depends on NET
69168+ help
69169+ If you say Y here, neither TCP resets nor ICMP
69170+ destination-unreachable packets will be sent in response to packets
69171+ sent to ports for which no associated listening process exists.
69172+ It will also prevent the sending of ICMP protocol unreachable packets
69173+ in response to packets with unknown protocols.
69174+ This feature supports both IPv4 and IPv6 and exempts the
69175+ loopback interface from blackholing. Enabling this feature
69176+ makes a host more resilient to DoS attacks and reduces network
69177+ visibility against scanners.
69178+
69179+ The blackhole feature as-implemented is equivalent to the FreeBSD
69180+ blackhole feature, as it prevents RST responses to all packets, not
69181+ just SYNs. Under most application behavior this causes no
69182+ problems, but applications (like haproxy) may not close certain
69183+ connections in a way that cleanly terminates them on the remote
69184+ end, leaving the remote host in LAST_ACK state. Because of this
69185+ side-effect and to prevent intentional LAST_ACK DoSes, this
69186+ feature also adds automatic mitigation against such attacks.
69187+ The mitigation drastically reduces the amount of time a socket
69188+ can spend in LAST_ACK state. If you're using haproxy and not
69189+ all servers it connects to have this option enabled, consider
69190+ disabling this feature on the haproxy host.
69191+
69192+ If the sysctl option is enabled, two sysctl options with names
69193+ "ip_blackhole" and "lastack_retries" will be created.
69194+ While "ip_blackhole" takes the standard zero/non-zero on/off
69195+ toggle, "lastack_retries" uses the same kinds of values as
69196+ "tcp_retries1" and "tcp_retries2". The default value of 4
69197+ prevents a socket from lasting more than 45 seconds in LAST_ACK
69198+ state.
69199+
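 The scanner-facing effect is easy to observe. A sketch of a probe,
 run from another machine against a host with this option enabled
 (192.0.2.1 is a documentation address standing in for that host, and
 port 1 is assumed closed; loopback is exempt, so test against a
 non-loopback address):

	#include <stdio.h>
	#include <string.h>
	#include <arpa/inet.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		struct sockaddr_in sa;
		int s = socket(AF_INET, SOCK_STREAM, 0);

		memset(&sa, 0, sizeof(sa));
		sa.sin_family = AF_INET;
		sa.sin_port = htons(1);
		inet_pton(AF_INET, "192.0.2.1", &sa.sin_addr);
		/* Without blackholing this fails fast with ECONNREFUSED
		 * (an RST comes back); with it, the connect times out. */
		if (connect(s, (struct sockaddr *)&sa, sizeof(sa)) < 0)
			perror("connect");
		close(s);
		return 0;
	}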
69200+config GRKERNSEC_NO_SIMULT_CONNECT
69201+ bool "Disable TCP Simultaneous Connect"
69202+ default y if GRKERNSEC_CONFIG_AUTO
69203+ depends on NET
69204+ help
69205+ If you say Y here, a feature by Willy Tarreau will be enabled that
69206+ removes a weakness in Linux's strict implementation of TCP that
69207+ allows two clients to connect to each other without either entering
69208+ a listening state. The weakness allows an attacker to easily prevent
69209+ a client from connecting to a known server provided the source port
69210+ for the connection is guessed correctly.
69211+
69212+ As the weakness could be used to prevent an antivirus or IPS from
69213+ fetching updates, or prevent an SSL gateway from fetching a CRL,
69214+ it should be eliminated by enabling this option. Though Linux is
69215+ one of few operating systems supporting simultaneous connect, it
69216+ has no legitimate use in practice and is rarely supported by firewalls.
69217+
69218+config GRKERNSEC_SOCKET
69219+ bool "Socket restrictions"
69220+ depends on NET
69221+ help
69222+ If you say Y here, you will be able to choose from several options.
69223+ If you assign a GID on your system and add it to the supplementary
69224+ groups of users you want to restrict socket access to, this patch
69225+ will enforce up to three restrictions, based on the option(s) you choose.
69226+
69227+config GRKERNSEC_SOCKET_ALL
69228+ bool "Deny any sockets to group"
69229+ depends on GRKERNSEC_SOCKET
69230+ help
69231+ If you say Y here, you will be able to choose a GID whose users will
69232+ be unable to connect to other hosts from your machine or run server
69233+ applications from your machine. If the sysctl option is enabled, a
69234+ sysctl option with name "socket_all" is created.
69235+
69236+config GRKERNSEC_SOCKET_ALL_GID
69237+ int "GID to deny all sockets for"
69238+ depends on GRKERNSEC_SOCKET_ALL
69239+ default 1004
69240+ help
69241+ Here you can choose the GID to disable socket access for. Remember to
69242+ add the users you want socket access disabled for to the GID
69243+ specified here. If the sysctl option is enabled, a sysctl option
69244+ with name "socket_all_gid" is created.
69245+
69246+config GRKERNSEC_SOCKET_CLIENT
69247+ bool "Deny client sockets to group"
69248+ depends on GRKERNSEC_SOCKET
69249+ help
69250+ If you say Y here, you will be able to choose a GID whose users will
69251+ be unable to connect to other hosts from your machine, but will be
69252+ able to run servers. If this option is enabled, all users in the group
69253+ you specify will have to use passive mode when initiating ftp transfers
69254+ from the shell on your machine. If the sysctl option is enabled, a
69255+ sysctl option with name "socket_client" is created.
69256+
69257+config GRKERNSEC_SOCKET_CLIENT_GID
69258+ int "GID to deny client sockets for"
69259+ depends on GRKERNSEC_SOCKET_CLIENT
69260+ default 1003
69261+ help
69262+ Here you can choose the GID to disable client socket access for.
69263+ Remember to add the users you want client socket access disabled for to
69264+ the GID specified here. If the sysctl option is enabled, a sysctl
69265+ option with name "socket_client_gid" is created.
69266+
69267+config GRKERNSEC_SOCKET_SERVER
69268+ bool "Deny server sockets to group"
69269+ depends on GRKERNSEC_SOCKET
69270+ help
69271+ If you say Y here, you will be able to choose a GID whose users will
69272+ be unable to run server applications from your machine. If the sysctl
69273+ option is enabled, a sysctl option with name "socket_server" is created.
69274+
69275+config GRKERNSEC_SOCKET_SERVER_GID
69276+ int "GID to deny server sockets for"
69277+ depends on GRKERNSEC_SOCKET_SERVER
69278+ default 1002
69279+ help
69280+ Here you can choose the GID to disable server socket access for.
69281+ Remember to add the users you want server socket access disabled for to
69282+ the GID specified here. If the sysctl option is enabled, a sysctl
69283+ option with name "socket_server_gid" is created.
69284+
69285+endmenu
69286+
69287+menu "Physical Protections"
69288+depends on GRKERNSEC
69289+
69290+config GRKERNSEC_DENYUSB
69291+ bool "Deny new USB connections after toggle"
69292+ default y if GRKERNSEC_CONFIG_AUTO
69293+ depends on SYSCTL && USB_SUPPORT
69294+ help
69295+ If you say Y here, a new sysctl option with name "deny_new_usb"
69296+ will be created. Setting its value to 1 will prevent any new
69297+ USB devices from being recognized by the OS. Any attempted USB
69298+ device insertion will be logged. This option is intended to be
69299+ used against custom USB devices designed to exploit vulnerabilities
69300+ in various USB device drivers.
69301+
69302+ For greatest effectiveness, this sysctl should be set after any
69303+ relevant init scripts have run. This option is safe to enable in distros
69304+ as each user can choose whether or not to toggle the sysctl.
69305+
69306+config GRKERNSEC_DENYUSB_FORCE
69307+ bool "Reject all USB devices not connected at boot"
69308+ select USB
69309+ depends on GRKERNSEC_DENYUSB
69310+ help
69311+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
69312+ that doesn't involve a sysctl entry. This option should only be
69313+ enabled if you're sure you want to deny all new USB connections
69314+ at runtime and don't want to modify init scripts. This should not
69315+ be enabled by distros. It forces the core USB code to be built
69316+ into the kernel image so that all devices connected at boot time
69317+ can be recognized and new USB device connections can be prevented
69318+ prior to init running.
69319+
69320+endmenu
69321+
69322+menu "Sysctl Support"
69323+depends on GRKERNSEC && SYSCTL
69324+
69325+config GRKERNSEC_SYSCTL
69326+ bool "Sysctl support"
69327+ default y if GRKERNSEC_CONFIG_AUTO
69328+ help
69329+ If you say Y here, you will be able to change the options that
69330+ grsecurity runs with at bootup, without having to recompile your
69331+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
69332+ to enable (1) or disable (0) various features. All the sysctl entries
69333+ are mutable until the "grsec_lock" entry is set to a non-zero value.
69334+ All features enabled in the kernel configuration are disabled at boot
69335+ if you do not say Y to the "Turn on features by default" option.
69336+ All options should be set at startup, and the grsec_lock entry should
69337+ be set to a non-zero value after all the options are set.
69338+ *THIS IS EXTREMELY IMPORTANT*
69339+
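 A minimal sketch of the recommended startup sequence, in C to match
 the rest of the patch (the two feature toggles are examples drawn
 from the options above; any entry under /proc/sys/kernel/grsecurity
 works the same way):

	#include <stdio.h>

	static void grsec_set(const char *name, const char *val)
	{
		char path[160];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/proc/sys/kernel/grsecurity/%s", name);
		if ((f = fopen(path, "w"))) {
			fputs(val, f);
			fclose(f);
		}
	}

	int main(void)
	{
		grsec_set("chroot_deny_mount", "1");	/* example toggles */
		grsec_set("resource_logging", "1");
		grsec_set("grsec_lock", "1");		/* must come last */
		return 0;
	}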
69340+config GRKERNSEC_SYSCTL_DISTRO
69341+ bool "Extra sysctl support for distro makers (READ HELP)"
69342+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
69343+ help
69344+ If you say Y here, additional sysctl options will be created
69345+ for features that affect processes running as root. Therefore,
69346+ it is critical when using this option that the grsec_lock entry be
69347+ enabled after boot. Only distros that ship prebuilt kernel packages
69348+ with this option enabled and that can ensure grsec_lock is enabled
69349+ after boot should use this option.
69350+ *Failure to set grsec_lock after boot makes all grsec features
69351+ this option covers useless*
69352+
69353+ Currently this option creates the following sysctl entries:
69354+ "Disable Privileged I/O": "disable_priv_io"
69355+
69356+config GRKERNSEC_SYSCTL_ON
69357+ bool "Turn on features by default"
69358+ default y if GRKERNSEC_CONFIG_AUTO
69359+ depends on GRKERNSEC_SYSCTL
69360+ help
69361+ If you say Y here, all features enabled in the kernel configuration
69362+ will also be enabled at boot time, rather than starting out disabled.
69363+ It is recommended you say Y here unless
69364+ there is some reason you would want all sysctl-tunable features to
69365+ be disabled by default. As mentioned elsewhere, it is important
69366+ to enable the grsec_lock entry once you have finished modifying
69367+ the sysctl entries.
69368+
69369+endmenu
69370+menu "Logging Options"
69371+depends on GRKERNSEC
69372+
69373+config GRKERNSEC_FLOODTIME
69374+ int "Seconds in between log messages (minimum)"
69375+ default 10
69376+ help
69377+ This option allows you to enforce the minimum number of seconds between
69378+ grsecurity log messages. The default should be suitable for most
69379+ people, however, if you choose to change it, choose a value small enough
69380+ to allow informative logs to be produced, but large enough to
69381+ prevent flooding.
69382+
69383+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
69384+ any rate limiting on grsecurity log messages.
69385+
69386+config GRKERNSEC_FLOODBURST
69387+ int "Number of messages in a burst (maximum)"
69388+ default 6
69389+ help
69390+ This option allows you to choose the maximum number of messages allowed
69391+ within the flood time interval you chose in a separate option. The
69392+ default should be suitable for most people, however if you find that
69393+ many of your logs are being interpreted as flooding, you may want to
69394+ raise this value.
69395+
69396+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
69397+ any rate limiting on grsecurity log messages.
69398+
69399+endmenu
69400diff --git a/grsecurity/Makefile b/grsecurity/Makefile
69401new file mode 100644
69402index 0000000..30ababb
69403--- /dev/null
69404+++ b/grsecurity/Makefile
69405@@ -0,0 +1,54 @@
69406+# grsecurity – access control and security hardening for Linux
69407+# All code in this directory and various hooks located throughout the Linux kernel are
69408+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
69409+# http://www.grsecurity.net spender@grsecurity.net
69410+#
69411+# This program is free software; you can redistribute it and/or
69412+# modify it under the terms of the GNU General Public License version 2
69413+# as published by the Free Software Foundation.
69414+#
69415+# This program is distributed in the hope that it will be useful,
69416+# but WITHOUT ANY WARRANTY; without even the implied warranty of
69417+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
69418+# GNU General Public License for more details.
69419+#
69420+# You should have received a copy of the GNU General Public License
69421+# along with this program; if not, write to the Free Software
69422+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
69423+
69424+KBUILD_CFLAGS += -Werror
69425+
69426+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
69427+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
69428+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
69429+ grsec_usb.o grsec_ipc.o grsec_proc.o
69430+
69431+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
69432+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
69433+ gracl_learn.o grsec_log.o gracl_policy.o
69434+ifdef CONFIG_COMPAT
69435+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
69436+endif
69437+
69438+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
69439+
69440+ifdef CONFIG_NET
69441+obj-y += grsec_sock.o
69442+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
69443+endif
69444+
69445+ifndef CONFIG_GRKERNSEC
69446+obj-y += grsec_disabled.o
69447+endif
69448+
69449+ifdef CONFIG_GRKERNSEC_HIDESYM
69450+extra-y := grsec_hidesym.o
69451+$(obj)/grsec_hidesym.o:
69452+ @-chmod -f 500 /boot
69453+ @-chmod -f 500 /lib/modules
69454+ @-chmod -f 500 /lib64/modules
69455+ @-chmod -f 500 /lib32/modules
69456+ @-chmod -f 700 .
69457+ @-chmod -f 700 $(objtree)
69458+ @echo ' grsec: protected kernel image paths'
69459+endif
69460diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
69461new file mode 100644
69462index 0000000..6c1e154
69463--- /dev/null
69464+++ b/grsecurity/gracl.c
69465@@ -0,0 +1,2749 @@
69466+#include <linux/kernel.h>
69467+#include <linux/module.h>
69468+#include <linux/sched.h>
69469+#include <linux/mm.h>
69470+#include <linux/file.h>
69471+#include <linux/fs.h>
69472+#include <linux/namei.h>
69473+#include <linux/mount.h>
69474+#include <linux/tty.h>
69475+#include <linux/proc_fs.h>
69476+#include <linux/lglock.h>
69477+#include <linux/slab.h>
69478+#include <linux/vmalloc.h>
69479+#include <linux/types.h>
69480+#include <linux/sysctl.h>
69481+#include <linux/netdevice.h>
69482+#include <linux/ptrace.h>
69483+#include <linux/gracl.h>
69484+#include <linux/gralloc.h>
69485+#include <linux/security.h>
69486+#include <linux/grinternal.h>
69487+#include <linux/pid_namespace.h>
69488+#include <linux/stop_machine.h>
69489+#include <linux/fdtable.h>
69490+#include <linux/percpu.h>
69491+#include <linux/lglock.h>
69492+#include <linux/hugetlb.h>
69493+#include <linux/posix-timers.h>
69494+#include <linux/prefetch.h>
69495+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69496+#include <linux/magic.h>
69497+#include <linux/pagemap.h>
69498+#include "../fs/btrfs/async-thread.h"
69499+#include "../fs/btrfs/ctree.h"
69500+#include "../fs/btrfs/btrfs_inode.h"
69501+#endif
69502+#include "../fs/mount.h"
69503+
69504+#include <asm/uaccess.h>
69505+#include <asm/errno.h>
69506+#include <asm/mman.h>
69507+
69508+#define FOR_EACH_ROLE_START(role) \
69509+ role = running_polstate.role_list; \
69510+ while (role) {
69511+
69512+#define FOR_EACH_ROLE_END(role) \
69513+ role = role->prev; \
69514+ }
69515+
69516+extern struct path gr_real_root;
69517+
69518+static struct gr_policy_state running_polstate;
69519+struct gr_policy_state *polstate = &running_polstate;
69520+extern struct gr_alloc_state *current_alloc_state;
69521+
69522+extern char *gr_shared_page[4];
69523+DEFINE_RWLOCK(gr_inode_lock);
69524+
69525+static unsigned int gr_status __read_only = GR_STATUS_INIT;
69526+
69527+#ifdef CONFIG_NET
69528+extern struct vfsmount *sock_mnt;
69529+#endif
69530+
69531+extern struct vfsmount *pipe_mnt;
69532+extern struct vfsmount *shm_mnt;
69533+
69534+#ifdef CONFIG_HUGETLBFS
69535+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
69536+#endif
69537+
69538+extern u16 acl_sp_role_value;
69539+extern struct acl_object_label *fakefs_obj_rw;
69540+extern struct acl_object_label *fakefs_obj_rwx;
69541+
69542+int gr_acl_is_enabled(void)
69543+{
69544+ return (gr_status & GR_READY);
69545+}
69546+
69547+void gr_enable_rbac_system(void)
69548+{
69549+ pax_open_kernel();
69550+ gr_status |= GR_READY;
69551+ pax_close_kernel();
69552+}
69553+
69554+int gr_rbac_disable(void *unused)
69555+{
69556+ pax_open_kernel();
69557+ gr_status &= ~GR_READY;
69558+ pax_close_kernel();
69559+
69560+ return 0;
69561+}
69562+
69563+static inline dev_t __get_dev(const struct dentry *dentry)
69564+{
69565+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69566+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69567+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
69568+ else
69569+#endif
69570+ return dentry->d_sb->s_dev;
69571+}
69572+
69573+static inline u64 __get_ino(const struct dentry *dentry)
69574+{
69575+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69576+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69577+ return btrfs_ino(dentry->d_inode);
69578+ else
69579+#endif
69580+ return dentry->d_inode->i_ino;
69581+}
69582+
69583+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
69584+{
69585+ return __get_dev(dentry);
69586+}
69587+
69588+u64 gr_get_ino_from_dentry(struct dentry *dentry)
69589+{
69590+ return __get_ino(dentry);
69591+}
69592+
69593+static char gr_task_roletype_to_char(struct task_struct *task)
69594+{
69595+ switch (task->role->roletype &
69596+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
69597+ GR_ROLE_SPECIAL)) {
69598+ case GR_ROLE_DEFAULT:
69599+ return 'D';
69600+ case GR_ROLE_USER:
69601+ return 'U';
69602+ case GR_ROLE_GROUP:
69603+ return 'G';
69604+ case GR_ROLE_SPECIAL:
69605+ return 'S';
69606+ }
69607+
69608+ return 'X';
69609+}
69610+
69611+char gr_roletype_to_char(void)
69612+{
69613+ return gr_task_roletype_to_char(current);
69614+}
69615+
69616+__inline__ int
69617+gr_acl_tpe_check(void)
69618+{
69619+ if (unlikely(!(gr_status & GR_READY)))
69620+ return 0;
69621+ if (current->role->roletype & GR_ROLE_TPE)
69622+ return 1;
69623+ else
69624+ return 0;
69625+}
69626+
69627+int
69628+gr_handle_rawio(const struct inode *inode)
69629+{
69630+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
69631+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
69632+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
69633+ !capable(CAP_SYS_RAWIO))
69634+ return 1;
69635+#endif
69636+ return 0;
69637+}
69638+
69639+int
69640+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
69641+{
69642+ if (likely(lena != lenb))
69643+ return 0;
69644+
69645+ return !memcmp(a, b, lena);
69646+}
69647+
69648+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
69649+{
69650+ *buflen -= namelen;
69651+ if (*buflen < 0)
69652+ return -ENAMETOOLONG;
69653+ *buffer -= namelen;
69654+ memcpy(*buffer, str, namelen);
69655+ return 0;
69656+}
69657+
69658+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
69659+{
69660+ return prepend(buffer, buflen, name->name, name->len);
69661+}
69662+
69663+static int prepend_path(const struct path *path, struct path *root,
69664+ char **buffer, int *buflen)
69665+{
69666+ struct dentry *dentry = path->dentry;
69667+ struct vfsmount *vfsmnt = path->mnt;
69668+ struct mount *mnt = real_mount(vfsmnt);
69669+ bool slash = false;
69670+ int error = 0;
69671+
69672+ while (dentry != root->dentry || vfsmnt != root->mnt) {
69673+ struct dentry * parent;
69674+
69675+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
69676+ /* Global root? */
69677+ if (!mnt_has_parent(mnt)) {
69678+ goto out;
69679+ }
69680+ dentry = mnt->mnt_mountpoint;
69681+ mnt = mnt->mnt_parent;
69682+ vfsmnt = &mnt->mnt;
69683+ continue;
69684+ }
69685+ parent = dentry->d_parent;
69686+ prefetch(parent);
69687+ spin_lock(&dentry->d_lock);
69688+ error = prepend_name(buffer, buflen, &dentry->d_name);
69689+ spin_unlock(&dentry->d_lock);
69690+ if (!error)
69691+ error = prepend(buffer, buflen, "/", 1);
69692+ if (error)
69693+ break;
69694+
69695+ slash = true;
69696+ dentry = parent;
69697+ }
69698+
69699+out:
69700+ if (!error && !slash)
69701+ error = prepend(buffer, buflen, "/", 1);
69702+
69703+ return error;
69704+}
69705+
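+/*
+ * Illustrative note (not in the original patch): prepend() writes
+ * backward from the end of the caller's buffer, so walking dentries
+ * rootward lays down "name" and "/" fragments right-to-left; e.g. for
+ * /usr/bin it emits "bin", then "/", then "usr", then the leading "/",
+ * returning with the buffer pointer at the completed string.
+ */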
69706+/* this must be called with mount_lock and rename_lock held */
69707+
69708+static char *__our_d_path(const struct path *path, struct path *root,
69709+ char *buf, int buflen)
69710+{
69711+ char *res = buf + buflen;
69712+ int error;
69713+
69714+ prepend(&res, &buflen, "\0", 1);
69715+ error = prepend_path(path, root, &res, &buflen);
69716+ if (error)
69717+ return ERR_PTR(error);
69718+
69719+ return res;
69720+}
69721+
69722+static char *
69723+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
69724+{
69725+ char *retval;
69726+
69727+ retval = __our_d_path(path, root, buf, buflen);
69728+ if (unlikely(IS_ERR(retval)))
69729+ retval = strcpy(buf, "<path too long>");
69730+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
69731+ retval[1] = '\0';
69732+
69733+ return retval;
69734+}
69735+
69736+static char *
69737+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
69738+ char *buf, int buflen)
69739+{
69740+ struct path path;
69741+ char *res;
69742+
69743+ path.dentry = (struct dentry *)dentry;
69744+ path.mnt = (struct vfsmount *)vfsmnt;
69745+
69746+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
69747+ by the RBAC system */
69748+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
69749+
69750+ return res;
69751+}
69752+
69753+static char *
69754+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
69755+ char *buf, int buflen)
69756+{
69757+ char *res;
69758+ struct path path;
69759+ struct path root;
69760+ struct task_struct *reaper = init_pid_ns.child_reaper;
69761+
69762+ path.dentry = (struct dentry *)dentry;
69763+ path.mnt = (struct vfsmount *)vfsmnt;
69764+
69765+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
69766+ get_fs_root(reaper->fs, &root);
69767+
69768+ read_seqlock_excl(&mount_lock);
69769+ write_seqlock(&rename_lock);
69770+ res = gen_full_path(&path, &root, buf, buflen);
69771+ write_sequnlock(&rename_lock);
69772+ read_sequnlock_excl(&mount_lock);
69773+
69774+ path_put(&root);
69775+ return res;
69776+}
69777+
69778+char *
69779+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
69780+{
69781+ char *ret;
69782+ read_seqlock_excl(&mount_lock);
69783+ write_seqlock(&rename_lock);
69784+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
69785+ PAGE_SIZE);
69786+ write_sequnlock(&rename_lock);
69787+ read_sequnlock_excl(&mount_lock);
69788+ return ret;
69789+}
69790+
69791+static char *
69792+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
69793+{
69794+ char *ret;
69795+ char *buf;
69796+ int buflen;
69797+
69798+ read_seqlock_excl(&mount_lock);
69799+ write_seqlock(&rename_lock);
69800+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
69801+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
69802+ buflen = (int)(ret - buf);
69803+ if (buflen >= 5)
69804+ prepend(&ret, &buflen, "/proc", 5);
69805+ else
69806+ ret = strcpy(buf, "<path too long>");
69807+ write_sequnlock(&rename_lock);
69808+ read_sequnlock_excl(&mount_lock);
69809+ return ret;
69810+}
69811+
69812+char *
69813+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
69814+{
69815+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
69816+ PAGE_SIZE);
69817+}
69818+
69819+char *
69820+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
69821+{
69822+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
69823+ PAGE_SIZE);
69824+}
69825+
69826+char *
69827+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
69828+{
69829+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
69830+ PAGE_SIZE);
69831+}
69832+
69833+char *
69834+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
69835+{
69836+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
69837+ PAGE_SIZE);
69838+}
69839+
69840+char *
69841+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
69842+{
69843+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
69844+ PAGE_SIZE);
69845+}
69846+
69847+__inline__ __u32
69848+to_gr_audit(const __u32 reqmode)
69849+{
69850+ /* masks off auditable permission flags, then shifts them to create
69851+ auditing flags, and adds the special case of append auditing if
69852+ we're requesting write */
69853+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
69854+}
69855+
69856+struct acl_role_label *
69857+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
69858+ const gid_t gid)
69859+{
69860+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
69861+ struct acl_role_label *match;
69862+ struct role_allowed_ip *ipp;
69863+ unsigned int x;
69864+ u32 curr_ip = task->signal->saved_ip;
69865+
69866+ match = state->acl_role_set.r_hash[index];
69867+
69868+ while (match) {
69869+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
69870+ for (x = 0; x < match->domain_child_num; x++) {
69871+ if (match->domain_children[x] == uid)
69872+ goto found;
69873+ }
69874+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
69875+ break;
69876+ match = match->next;
69877+ }
69878+found:
69879+ if (match == NULL) {
69880+ try_group:
69881+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
69882+ match = state->acl_role_set.r_hash[index];
69883+
69884+ while (match) {
69885+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
69886+ for (x = 0; x < match->domain_child_num; x++) {
69887+ if (match->domain_children[x] == gid)
69888+ goto found2;
69889+ }
69890+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
69891+ break;
69892+ match = match->next;
69893+ }
69894+found2:
69895+ if (match == NULL)
69896+ match = state->default_role;
69897+ if (match->allowed_ips == NULL)
69898+ return match;
69899+ else {
69900+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
69901+ if (likely
69902+ ((ntohl(curr_ip) & ipp->netmask) ==
69903+ (ntohl(ipp->addr) & ipp->netmask)))
69904+ return match;
69905+ }
69906+ match = state->default_role;
69907+ }
69908+ } else if (match->allowed_ips == NULL) {
69909+ return match;
69910+ } else {
69911+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
69912+ if (likely
69913+ ((ntohl(curr_ip) & ipp->netmask) ==
69914+ (ntohl(ipp->addr) & ipp->netmask)))
69915+ return match;
69916+ }
69917+ goto try_group;
69918+ }
69919+
69920+ return match;
69921+}
69922+
69923+static struct acl_role_label *
69924+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
69925+ const gid_t gid)
69926+{
69927+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
69928+}
69929+
69930+struct acl_subject_label *
69931+lookup_acl_subj_label(const u64 ino, const dev_t dev,
69932+ const struct acl_role_label *role)
69933+{
69934+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
69935+ struct acl_subject_label *match;
69936+
69937+ match = role->subj_hash[index];
69938+
69939+ while (match && (match->inode != ino || match->device != dev ||
69940+ (match->mode & GR_DELETED))) {
69941+ match = match->next;
69942+ }
69943+
69944+ if (match && !(match->mode & GR_DELETED))
69945+ return match;
69946+ else
69947+ return NULL;
69948+}
69949+
69950+struct acl_subject_label *
69951+lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev,
69952+ const struct acl_role_label *role)
69953+{
69954+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
69955+ struct acl_subject_label *match;
69956+
69957+ match = role->subj_hash[index];
69958+
69959+ while (match && (match->inode != ino || match->device != dev ||
69960+ !(match->mode & GR_DELETED))) {
69961+ match = match->next;
69962+ }
69963+
69964+ if (match && (match->mode & GR_DELETED))
69965+ return match;
69966+ else
69967+ return NULL;
69968+}
69969+
69970+static struct acl_object_label *
69971+lookup_acl_obj_label(const u64 ino, const dev_t dev,
69972+ const struct acl_subject_label *subj)
69973+{
69974+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
69975+ struct acl_object_label *match;
69976+
69977+ match = subj->obj_hash[index];
69978+
69979+ while (match && (match->inode != ino || match->device != dev ||
69980+ (match->mode & GR_DELETED))) {
69981+ match = match->next;
69982+ }
69983+
69984+ if (match && !(match->mode & GR_DELETED))
69985+ return match;
69986+ else
69987+ return NULL;
69988+}
69989+
69990+static struct acl_object_label *
69991+lookup_acl_obj_label_create(const u64 ino, const dev_t dev,
69992+ const struct acl_subject_label *subj)
69993+{
69994+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
69995+ struct acl_object_label *match;
69996+
69997+ match = subj->obj_hash[index];
69998+
69999+ while (match && (match->inode != ino || match->device != dev ||
70000+ !(match->mode & GR_DELETED))) {
70001+ match = match->next;
70002+ }
70003+
70004+ if (match && (match->mode & GR_DELETED))
70005+ return match;
70006+
70007+ match = subj->obj_hash[index];
70008+
70009+ while (match && (match->inode != ino || match->device != dev ||
70010+ (match->mode & GR_DELETED))) {
70011+ match = match->next;
70012+ }
70013+
70014+ if (match && !(match->mode & GR_DELETED))
70015+ return match;
70016+ else
70017+ return NULL;
70018+}
70019+
70020+struct name_entry *
70021+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
70022+{
70023+ unsigned int len = strlen(name);
70024+ unsigned int key = full_name_hash(name, len);
70025+ unsigned int index = key % state->name_set.n_size;
70026+ struct name_entry *match;
70027+
70028+ match = state->name_set.n_hash[index];
70029+
70030+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
70031+ match = match->next;
70032+
70033+ return match;
70034+}
70035+
70036+static struct name_entry *
70037+lookup_name_entry(const char *name)
70038+{
70039+ return __lookup_name_entry(&running_polstate, name);
70040+}
70041+
70042+static struct name_entry *
70043+lookup_name_entry_create(const char *name)
70044+{
70045+ unsigned int len = strlen(name);
70046+ unsigned int key = full_name_hash(name, len);
70047+ unsigned int index = key % running_polstate.name_set.n_size;
70048+ struct name_entry *match;
70049+
70050+ match = running_polstate.name_set.n_hash[index];
70051+
70052+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
70053+ !match->deleted))
70054+ match = match->next;
70055+
70056+ if (match && match->deleted)
70057+ return match;
70058+
70059+ match = running_polstate.name_set.n_hash[index];
70060+
70061+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
70062+ match->deleted))
70063+ match = match->next;
70064+
70065+ if (match && !match->deleted)
70066+ return match;
70067+ else
70068+ return NULL;
70069+}
70070+
70071+static struct inodev_entry *
70072+lookup_inodev_entry(const u64 ino, const dev_t dev)
70073+{
70074+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
70075+ struct inodev_entry *match;
70076+
70077+ match = running_polstate.inodev_set.i_hash[index];
70078+
70079+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
70080+ match = match->next;
70081+
70082+ return match;
70083+}
70084+
70085+void
70086+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
70087+{
70088+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
70089+ state->inodev_set.i_size);
70090+ struct inodev_entry **curr;
70091+
70092+ entry->prev = NULL;
70093+
70094+ curr = &state->inodev_set.i_hash[index];
70095+ if (*curr != NULL)
70096+ (*curr)->prev = entry;
70097+
70098+ entry->next = *curr;
70099+ *curr = entry;
70100+
70101+ return;
70102+}
70103+
70104+static void
70105+insert_inodev_entry(struct inodev_entry *entry)
70106+{
70107+ __insert_inodev_entry(&running_polstate, entry);
70108+}
70109+
70110+void
70111+insert_acl_obj_label(struct acl_object_label *obj,
70112+ struct acl_subject_label *subj)
70113+{
70114+ unsigned int index =
70115+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
70116+ struct acl_object_label **curr;
70117+
70118+ obj->prev = NULL;
70119+
70120+ curr = &subj->obj_hash[index];
70121+ if (*curr != NULL)
70122+ (*curr)->prev = obj;
70123+
70124+ obj->next = *curr;
70125+ *curr = obj;
70126+
70127+ return;
70128+}
70129+
70130+void
70131+insert_acl_subj_label(struct acl_subject_label *obj,
70132+ struct acl_role_label *role)
70133+{
70134+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
70135+ struct acl_subject_label **curr;
70136+
70137+ obj->prev = NULL;
70138+
70139+ curr = &role->subj_hash[index];
70140+ if (*curr != NULL)
70141+ (*curr)->prev = obj;
70142+
70143+ obj->next = *curr;
70144+ *curr = obj;
70145+
70146+ return;
70147+}
70148+
70149+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
70150+
70151+static int
70152+glob_match(const char *p, const char *n)
70153+{
70154+ char c;
70155+
70156+ while ((c = *p++) != '\0') {
70157+ switch (c) {
70158+ case '?':
70159+ if (*n == '\0')
70160+ return 1;
70161+ else if (*n == '/')
70162+ return 1;
70163+ break;
70164+ case '\\':
70165+ if (*n != c)
70166+ return 1;
70167+ break;
70168+ case '*':
70169+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
70170+ if (*n == '/')
70171+ return 1;
70172+ else if (c == '?') {
70173+ if (*n == '\0')
70174+ return 1;
70175+ else
70176+ ++n;
70177+ }
70178+ }
70179+ if (c == '\0') {
70180+ return 0;
70181+ } else {
70182+ const char *endp;
70183+
70184+ if ((endp = strchr(n, '/')) == NULL)
70185+ endp = n + strlen(n);
70186+
70187+ if (c == '[') {
70188+ for (--p; n < endp; ++n)
70189+ if (!glob_match(p, n))
70190+ return 0;
70191+ } else if (c == '/') {
70192+ while (*n != '\0' && *n != '/')
70193+ ++n;
70194+ if (*n == '/' && !glob_match(p, n + 1))
70195+ return 0;
70196+ } else {
70197+ for (--p; n < endp; ++n)
70198+ if (*n == c && !glob_match(p, n))
70199+ return 0;
70200+ }
70201+
70202+ return 1;
70203+ }
70204+ case '[':
70205+ {
70206+ int not;
70207+ char cold;
70208+
70209+ if (*n == '\0' || *n == '/')
70210+ return 1;
70211+
70212+ not = (*p == '!' || *p == '^');
70213+ if (not)
70214+ ++p;
70215+
70216+ c = *p++;
70217+ for (;;) {
70218+ unsigned char fn = (unsigned char)*n;
70219+
70220+ if (c == '\0')
70221+ return 1;
70222+ else {
70223+ if (c == fn)
70224+ goto matched;
70225+ cold = c;
70226+ c = *p++;
70227+
70228+ if (c == '-' && *p != ']') {
70229+ unsigned char cend = *p++;
70230+
70231+ if (cend == '\0')
70232+ return 1;
70233+
70234+ if (cold <= fn && fn <= cend)
70235+ goto matched;
70236+
70237+ c = *p++;
70238+ }
70239+ }
70240+
70241+ if (c == ']')
70242+ break;
70243+ }
70244+ if (!not)
70245+ return 1;
70246+ break;
70247+ matched:
70248+ while (c != ']') {
70249+ if (c == '\0')
70250+ return 1;
70251+
70252+ c = *p++;
70253+ }
70254+ if (not)
70255+ return 1;
70256+ }
70257+ break;
70258+ default:
70259+ if (c != *n)
70260+ return 1;
70261+ }
70262+
70263+ ++n;
70264+ }
70265+
70266+ if (*n == '\0')
70267+ return 0;
70268+
70269+ if (*n == '/')
70270+ return 0;
70271+
70272+ return 1;
70273+}
70274+
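+/*
+ * Usage illustration (a sketch, not part of the original patch):
+ * glob_match() returns 0 on match, and '*' and '?' never cross a '/',
+ * so object globs match within a single path component at a time.
+ */
+#if 0 /* illustration only, never compiled */
+static void glob_match_examples(void)
+{
+	BUG_ON(glob_match("/home/*/bin", "/home/alice/bin") != 0); /* match */
+	BUG_ON(glob_match("/home/*/bin", "/home/a/b/bin") != 1);   /* '*' stops at '/' */
+	BUG_ON(glob_match("/tmp/f?", "/tmp/f1") != 0);             /* '?': one non-'/' char */
+}
+#endif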
70275+static struct acl_object_label *
70276+chk_glob_label(struct acl_object_label *globbed,
70277+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
70278+{
70279+ struct acl_object_label *tmp;
70280+
70281+ if (*path == NULL)
70282+ *path = gr_to_filename_nolock(dentry, mnt);
70283+
70284+ tmp = globbed;
70285+
70286+ while (tmp) {
70287+ if (!glob_match(tmp->filename, *path))
70288+ return tmp;
70289+ tmp = tmp->next;
70290+ }
70291+
70292+ return NULL;
70293+}
70294+
70295+static struct acl_object_label *
70296+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
70297+ const u64 curr_ino, const dev_t curr_dev,
70298+ const struct acl_subject_label *subj, char **path, const int checkglob)
70299+{
70300+ struct acl_subject_label *tmpsubj;
70301+ struct acl_object_label *retval;
70302+ struct acl_object_label *retval2;
70303+
70304+ tmpsubj = (struct acl_subject_label *) subj;
70305+ read_lock(&gr_inode_lock);
70306+ do {
70307+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
70308+ if (retval) {
70309+ if (checkglob && retval->globbed) {
70310+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
70311+ if (retval2)
70312+ retval = retval2;
70313+ }
70314+ break;
70315+ }
70316+ } while ((tmpsubj = tmpsubj->parent_subject));
70317+ read_unlock(&gr_inode_lock);
70318+
70319+ return retval;
70320+}
70321+
70322+static __inline__ struct acl_object_label *
70323+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
70324+ struct dentry *curr_dentry,
70325+ const struct acl_subject_label *subj, char **path, const int checkglob)
70326+{
70327+ int newglob = checkglob;
70328+ u64 inode;
70329+ dev_t device;
70330+
70331+ /* If we aren't checking a subdirectory of the original path yet, don't do glob
70332+ checking, as we don't want a / * rule to match instead of the / object.
70333+ Don't do this for create lookups that call this function though, since they're
70334+ looking up on the parent and thus need globbing checks on all paths.
70335+ */
70336+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
70337+ newglob = GR_NO_GLOB;
70338+
70339+ spin_lock(&curr_dentry->d_lock);
70340+ inode = __get_ino(curr_dentry);
70341+ device = __get_dev(curr_dentry);
70342+ spin_unlock(&curr_dentry->d_lock);
70343+
70344+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
70345+}
70346+
70347+#ifdef CONFIG_HUGETLBFS
70348+static inline bool
70349+is_hugetlbfs_mnt(const struct vfsmount *mnt)
70350+{
70351+ int i;
70352+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
70353+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
70354+ return true;
70355+ }
70356+
70357+ return false;
70358+}
70359+#endif
70360+
70361+static struct acl_object_label *
70362+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70363+ const struct acl_subject_label *subj, char *path, const int checkglob)
70364+{
70365+ struct dentry *dentry = (struct dentry *) l_dentry;
70366+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70367+ struct mount *real_mnt = real_mount(mnt);
70368+ struct acl_object_label *retval;
70369+ struct dentry *parent;
70370+
70371+ read_seqlock_excl(&mount_lock);
70372+ write_seqlock(&rename_lock);
70373+
70374+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
70375+#ifdef CONFIG_NET
70376+ mnt == sock_mnt ||
70377+#endif
70378+#ifdef CONFIG_HUGETLBFS
70379+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
70380+#endif
70381+ /* ignore Eric Biederman */
70382+ IS_PRIVATE(l_dentry->d_inode))) {
70383+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
70384+ goto out;
70385+ }
70386+
70387+ for (;;) {
70388+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70389+ break;
70390+
70391+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70392+ if (!mnt_has_parent(real_mnt))
70393+ break;
70394+
70395+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70396+ if (retval != NULL)
70397+ goto out;
70398+
70399+ dentry = real_mnt->mnt_mountpoint;
70400+ real_mnt = real_mnt->mnt_parent;
70401+ mnt = &real_mnt->mnt;
70402+ continue;
70403+ }
70404+
70405+ parent = dentry->d_parent;
70406+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70407+ if (retval != NULL)
70408+ goto out;
70409+
70410+ dentry = parent;
70411+ }
70412+
70413+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70414+
70415+ /* gr_real_root is pinned so we don't have to hold a reference */
70416+ if (retval == NULL)
70417+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
70418+out:
70419+ write_sequnlock(&rename_lock);
70420+ read_sequnlock_excl(&mount_lock);
70421+
70422+ BUG_ON(retval == NULL);
70423+
70424+ return retval;
70425+}
70426+
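Stripped of the locking and the mountpoint hops, the walk in __chk_obj_label() probes the most specific object first and climbs one path component at a time until it reaches the root. A minimal, self-contained sketch of that probe order over a plain string path (no dentries, no mount crossing):

        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                char path[] = "/home/user/bin/tool";

                /* print each prefix from most to least specific, the order in
                   which the ACL walk above looks up objects */
                for (;;) {
                        puts(path);
                        if (!strcmp(path, "/"))
                                break;
                        char *slash = strrchr(path, '/');
                        if (slash == path)
                                path[1] = '\0'; /* parent of "/x" is "/" */
                        else
                                *slash = '\0';
                }
                return 0;
        }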
70427+static __inline__ struct acl_object_label *
70428+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70429+ const struct acl_subject_label *subj)
70430+{
70431+ char *path = NULL;
70432+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
70433+}
70434+
70435+static __inline__ struct acl_object_label *
70436+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70437+ const struct acl_subject_label *subj)
70438+{
70439+ char *path = NULL;
70440+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
70441+}
70442+
70443+static __inline__ struct acl_object_label *
70444+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70445+ const struct acl_subject_label *subj, char *path)
70446+{
70447+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
70448+}
70449+
70450+struct acl_subject_label *
70451+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70452+ const struct acl_role_label *role)
70453+{
70454+ struct dentry *dentry = (struct dentry *) l_dentry;
70455+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70456+ struct mount *real_mnt = real_mount(mnt);
70457+ struct acl_subject_label *retval;
70458+ struct dentry *parent;
70459+
70460+ read_seqlock_excl(&mount_lock);
70461+ write_seqlock(&rename_lock);
70462+
70463+ for (;;) {
70464+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70465+ break;
70466+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70467+ if (!mnt_has_parent(real_mnt))
70468+ break;
70469+
70470+ spin_lock(&dentry->d_lock);
70471+ read_lock(&gr_inode_lock);
70472+ retval =
70473+ lookup_acl_subj_label(__get_ino(dentry),
70474+ __get_dev(dentry), role);
70475+ read_unlock(&gr_inode_lock);
70476+ spin_unlock(&dentry->d_lock);
70477+ if (retval != NULL)
70478+ goto out;
70479+
70480+ dentry = real_mnt->mnt_mountpoint;
70481+ real_mnt = real_mnt->mnt_parent;
70482+ mnt = &real_mnt->mnt;
70483+ continue;
70484+ }
70485+
70486+ spin_lock(&dentry->d_lock);
70487+ read_lock(&gr_inode_lock);
70488+ retval = lookup_acl_subj_label(__get_ino(dentry),
70489+ __get_dev(dentry), role);
70490+ read_unlock(&gr_inode_lock);
70491+ parent = dentry->d_parent;
70492+ spin_unlock(&dentry->d_lock);
70493+
70494+ if (retval != NULL)
70495+ goto out;
70496+
70497+ dentry = parent;
70498+ }
70499+
70500+ spin_lock(&dentry->d_lock);
70501+ read_lock(&gr_inode_lock);
70502+ retval = lookup_acl_subj_label(__get_ino(dentry),
70503+ __get_dev(dentry), role);
70504+ read_unlock(&gr_inode_lock);
70505+ spin_unlock(&dentry->d_lock);
70506+
70507+ if (unlikely(retval == NULL)) {
70508+ /* gr_real_root is pinned, we don't need to hold a reference */
70509+ read_lock(&gr_inode_lock);
70510+ retval = lookup_acl_subj_label(__get_ino(gr_real_root.dentry),
70511+ __get_dev(gr_real_root.dentry), role);
70512+ read_unlock(&gr_inode_lock);
70513+ }
70514+out:
70515+ write_sequnlock(&rename_lock);
70516+ read_sequnlock_excl(&mount_lock);
70517+
70518+ BUG_ON(retval == NULL);
70519+
70520+ return retval;
70521+}
70522+
70523+void
70524+assign_special_role(const char *rolename)
70525+{
70526+ struct acl_object_label *obj;
70527+ struct acl_role_label *r;
70528+ struct acl_role_label *assigned = NULL;
70529+ struct task_struct *tsk;
70530+ struct file *filp;
70531+
70532+ FOR_EACH_ROLE_START(r)
70533+ if (!strcmp(rolename, r->rolename) &&
70534+ (r->roletype & GR_ROLE_SPECIAL)) {
70535+ assigned = r;
70536+ break;
70537+ }
70538+ FOR_EACH_ROLE_END(r)
70539+
70540+ if (!assigned)
70541+ return;
70542+
70543+ read_lock(&tasklist_lock);
70544+ read_lock(&grsec_exec_file_lock);
70545+
70546+ tsk = current->real_parent;
70547+ if (tsk == NULL)
70548+ goto out_unlock;
70549+
70550+ filp = tsk->exec_file;
70551+ if (filp == NULL)
70552+ goto out_unlock;
70553+
70554+ tsk->is_writable = 0;
70555+ tsk->inherited = 0;
70556+
70557+ tsk->acl_sp_role = 1;
70558+ tsk->acl_role_id = ++acl_sp_role_value;
70559+ tsk->role = assigned;
70560+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
70561+
70562+ /* ignore additional mmap checks for processes that are writable
70563+ by the default ACL */
70564+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
70565+ if (unlikely(obj->mode & GR_WRITE))
70566+ tsk->is_writable = 1;
70567+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
70568+ if (unlikely(obj->mode & GR_WRITE))
70569+ tsk->is_writable = 1;
70570+
70571+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70572+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
70573+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
70574+#endif
70575+
70576+out_unlock:
70577+ read_unlock(&grsec_exec_file_lock);
70578+ read_unlock(&tasklist_lock);
70579+ return;
70580+}
70581+
70582+
70583+static void
70584+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
70585+{
70586+ struct task_struct *task = current;
70587+ const struct cred *cred = current_cred();
70588+
70589+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
70590+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70591+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70592+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
70593+
70594+ return;
70595+}
70596+
70597+static void
70598+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
70599+{
70600+ struct task_struct *task = current;
70601+ const struct cred *cred = current_cred();
70602+
70603+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70604+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70605+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70606+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
70607+
70608+ return;
70609+}
70610+
70611+static void
70612+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
70613+{
70614+ struct task_struct *task = current;
70615+ const struct cred *cred = current_cred();
70616+
70617+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70618+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70619+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70620+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
70621+
70622+ return;
70623+}
70624+
70625+static void
70626+gr_set_proc_res(struct task_struct *task)
70627+{
70628+ struct acl_subject_label *proc;
70629+ unsigned short i;
70630+
70631+ proc = task->acl;
70632+
70633+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
70634+ return;
70635+
70636+ for (i = 0; i < RLIM_NLIMITS; i++) {
70637+ unsigned long rlim_cur, rlim_max;
70638+
70639+ if (!(proc->resmask & (1U << i)))
70640+ continue;
70641+
70642+ rlim_cur = proc->res[i].rlim_cur;
70643+ rlim_max = proc->res[i].rlim_max;
70644+
70645+ if (i == RLIMIT_NOFILE) {
70646+ unsigned long saved_sysctl_nr_open = sysctl_nr_open;
70647+ if (rlim_cur > saved_sysctl_nr_open)
70648+ rlim_cur = saved_sysctl_nr_open;
70649+ if (rlim_max > saved_sysctl_nr_open)
70650+ rlim_max = saved_sysctl_nr_open;
70651+ }
70652+
70653+ task->signal->rlim[i].rlim_cur = rlim_cur;
70654+ task->signal->rlim[i].rlim_max = rlim_max;
70655+
70656+ if (i == RLIMIT_CPU)
70657+ update_rlimit_cpu(task, rlim_cur);
70658+ }
70659+
70660+ return;
70661+}
70662+
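gr_set_proc_res() only applies limits whose bit is set in the subject's resmask, and clamps RLIMIT_NOFILE to the nr_open sysctl so a policy cannot hand out more file descriptors than the kernel will honor. The clamp itself, as a standalone sketch:

        /* cut a policy-supplied limit back to the system ceiling, as done
           for RLIMIT_NOFILE above */
        static unsigned long clamp_limit(unsigned long lim, unsigned long ceiling)
        {
                return lim > ceiling ? ceiling : lim;
        }

e.g. clamp_limit(2000000, 1048576) yields 1048576, while values at or below the ceiling pass through unchanged.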
70663+/* both of the below must be called with
70664+ rcu_read_lock();
70665+ read_lock(&tasklist_lock);
70666+ read_lock(&grsec_exec_file_lock);
70667+ except in the case of gr_set_role_label() (for __gr_get_subject_for_task)
70668+*/
70669+
70670+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback)
70671+{
70672+ char *tmpname;
70673+ struct acl_subject_label *tmpsubj;
70674+ struct file *filp;
70675+ struct name_entry *nmatch;
70676+
70677+ filp = task->exec_file;
70678+ if (filp == NULL)
70679+ return NULL;
70680+
70681+ /* the following applies the correct subject
70682+ to binaries that were already running when
70683+ the RBAC system was enabled, and that have been
70684+ replaced or deleted since their execution
70685+ -----
70686+ when the RBAC system starts, the inode/dev
70687+ from exec_file will be one that the RBAC system
70688+ is unaware of.  It only knows the inode/dev
70689+ of the present file on disk, or the absence
70690+ of it.
70691+ */
70692+
70693+ if (filename)
70694+ nmatch = __lookup_name_entry(state, filename);
70695+ else {
70696+ preempt_disable();
70697+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
70698+
70699+ nmatch = __lookup_name_entry(state, tmpname);
70700+ preempt_enable();
70701+ }
70702+ tmpsubj = NULL;
70703+ if (nmatch) {
70704+ if (nmatch->deleted)
70705+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
70706+ else
70707+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
70708+ }
70709+ /* this also works for the reload case -- if we don't match a potentially inherited subject
70710+ then we fall back to a normal lookup based on the binary's ino/dev
70711+ */
70712+ if (tmpsubj == NULL && fallback)
70713+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
70714+
70715+ return tmpsubj;
70716+}
70717+
70718+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename, int fallback)
70719+{
70720+ return __gr_get_subject_for_task(&running_polstate, task, filename, fallback);
70721+}
70722+
70723+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
70724+{
70725+ struct acl_object_label *obj;
70726+ struct file *filp;
70727+
70728+ filp = task->exec_file;
70729+
70730+ task->acl = subj;
70731+ task->is_writable = 0;
70732+ /* ignore additional mmap checks for processes that are writable
70733+ by the default ACL */
70734+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
70735+ if (unlikely(obj->mode & GR_WRITE))
70736+ task->is_writable = 1;
70737+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
70738+ if (unlikely(obj->mode & GR_WRITE))
70739+ task->is_writable = 1;
70740+
70741+ gr_set_proc_res(task);
70742+
70743+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70744+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
70745+#endif
70746+}
70747+
70748+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
70749+{
70750+ __gr_apply_subject_to_task(&running_polstate, task, subj);
70751+}
70752+
70753+__u32
70754+gr_search_file(const struct dentry * dentry, const __u32 mode,
70755+ const struct vfsmount * mnt)
70756+{
70757+ __u32 retval = mode;
70758+ struct acl_subject_label *curracl;
70759+ struct acl_object_label *currobj;
70760+
70761+ if (unlikely(!(gr_status & GR_READY)))
70762+ return (mode & ~GR_AUDITS);
70763+
70764+ curracl = current->acl;
70765+
70766+ currobj = chk_obj_label(dentry, mnt, curracl);
70767+ retval = currobj->mode & mode;
70768+
70769+ /* if we're opening a specified transfer file for writing
70770+ (e.g. /dev/initctl), then transfer our role to init
70771+ */
70772+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
70773+ current->role->roletype & GR_ROLE_PERSIST)) {
70774+ struct task_struct *task = init_pid_ns.child_reaper;
70775+
70776+ if (task->role != current->role) {
70777+ struct acl_subject_label *subj;
70778+
70779+ task->acl_sp_role = 0;
70780+ task->acl_role_id = current->acl_role_id;
70781+ task->role = current->role;
70782+ rcu_read_lock();
70783+ read_lock(&grsec_exec_file_lock);
70784+ subj = gr_get_subject_for_task(task, NULL, 1);
70785+ gr_apply_subject_to_task(task, subj);
70786+ read_unlock(&grsec_exec_file_lock);
70787+ rcu_read_unlock();
70788+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
70789+ }
70790+ }
70791+
70792+ if (unlikely
70793+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
70794+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
70795+ __u32 new_mode = mode;
70796+
70797+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
70798+
70799+ retval = new_mode;
70800+
70801+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
70802+ new_mode |= GR_INHERIT;
70803+
70804+ if (!(mode & GR_NOLEARN))
70805+ gr_log_learn(dentry, mnt, new_mode);
70806+ }
70807+
70808+ return retval;
70809+}
70810+
70811+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
70812+ const struct dentry *parent,
70813+ const struct vfsmount *mnt)
70814+{
70815+ struct name_entry *match;
70816+ struct acl_object_label *matchpo;
70817+ struct acl_subject_label *curracl;
70818+ char *path;
70819+
70820+ if (unlikely(!(gr_status & GR_READY)))
70821+ return NULL;
70822+
70823+ preempt_disable();
70824+ path = gr_to_filename_rbac(new_dentry, mnt);
70825+ match = lookup_name_entry_create(path);
70826+
70827+ curracl = current->acl;
70828+
70829+ if (match) {
70830+ read_lock(&gr_inode_lock);
70831+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
70832+ read_unlock(&gr_inode_lock);
70833+
70834+ if (matchpo) {
70835+ preempt_enable();
70836+ return matchpo;
70837+ }
70838+ }
70839+
70840+ // lookup parent
70841+
70842+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
70843+
70844+ preempt_enable();
70845+ return matchpo;
70846+}
70847+
70848+__u32
70849+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
70850+ const struct vfsmount * mnt, const __u32 mode)
70851+{
70852+ struct acl_object_label *matchpo;
70853+ __u32 retval;
70854+
70855+ if (unlikely(!(gr_status & GR_READY)))
70856+ return (mode & ~GR_AUDITS);
70857+
70858+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
70859+
70860+ retval = matchpo->mode & mode;
70861+
70862+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
70863+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
70864+ __u32 new_mode = mode;
70865+
70866+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
70867+
70868+ gr_log_learn(new_dentry, mnt, new_mode);
70869+ return new_mode;
70870+ }
70871+
70872+ return retval;
70873+}
70874+
70875+__u32
70876+gr_check_link(const struct dentry * new_dentry,
70877+ const struct dentry * parent_dentry,
70878+ const struct vfsmount * parent_mnt,
70879+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
70880+{
70881+ struct acl_object_label *obj;
70882+ __u32 oldmode, newmode;
70883+ __u32 needmode;
70884+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
70885+ GR_DELETE | GR_INHERIT;
70886+
70887+ if (unlikely(!(gr_status & GR_READY)))
70888+ return (GR_CREATE | GR_LINK);
70889+
70890+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
70891+ oldmode = obj->mode;
70892+
70893+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
70894+ newmode = obj->mode;
70895+
70896+ needmode = newmode & checkmodes;
70897+
70898+ // old name for hardlink must have at least the permissions of the new name
70899+ if ((oldmode & needmode) != needmode)
70900+ goto bad;
70901+
70902+ // if old name had restrictions/auditing, make sure the new name does as well
70903+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
70904+
70905+ // don't allow hardlinking of suid/sgid/fcapped files without permission
70906+ if (is_privileged_binary(old_dentry))
70907+ needmode |= GR_SETID;
70908+
70909+ if ((newmode & needmode) != needmode)
70910+ goto bad;
70911+
70912+ // enforce minimum permissions
70913+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
70914+ return newmode;
70915+bad:
70916+ needmode = oldmode;
70917+ if (is_privileged_binary(old_dentry))
70918+ needmode |= GR_SETID;
70919+
70920+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
70921+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
70922+ return (GR_CREATE | GR_LINK);
70923+ } else if (newmode & GR_SUPPRESS)
70924+ return GR_SUPPRESS;
70925+ else
70926+ return 0;
70927+}
70928+
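Worked example: if the existing name's object grants only GR_READ while the new name's object would grant GR_READ | GR_WRITE, then needmode contains GR_WRITE, (oldmode & needmode) != needmode, and the link is refused: a hardlink must not add access to the same inode under a new name.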
70929+int
70930+gr_check_hidden_task(const struct task_struct *task)
70931+{
70932+ if (unlikely(!(gr_status & GR_READY)))
70933+ return 0;
70934+
70935+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
70936+ return 1;
70937+
70938+ return 0;
70939+}
70940+
70941+int
70942+gr_check_protected_task(const struct task_struct *task)
70943+{
70944+ if (unlikely(!(gr_status & GR_READY) || !task))
70945+ return 0;
70946+
70947+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
70948+ task->acl != current->acl)
70949+ return 1;
70950+
70951+ return 0;
70952+}
70953+
70954+int
70955+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
70956+{
70957+ struct task_struct *p;
70958+ int ret = 0;
70959+
70960+ if (unlikely(!(gr_status & GR_READY) || !pid))
70961+ return ret;
70962+
70963+ read_lock(&tasklist_lock);
70964+ do_each_pid_task(pid, type, p) {
70965+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
70966+ p->acl != current->acl) {
70967+ ret = 1;
70968+ goto out;
70969+ }
70970+ } while_each_pid_task(pid, type, p);
70971+out:
70972+ read_unlock(&tasklist_lock);
70973+
70974+ return ret;
70975+}
70976+
70977+void
70978+gr_copy_label(struct task_struct *tsk)
70979+{
70980+ struct task_struct *p = current;
70981+
70982+ tsk->inherited = p->inherited;
70983+ tsk->acl_sp_role = 0;
70984+ tsk->acl_role_id = p->acl_role_id;
70985+ tsk->acl = p->acl;
70986+ tsk->role = p->role;
70987+ tsk->signal->used_accept = 0;
70988+ tsk->signal->curr_ip = p->signal->curr_ip;
70989+ tsk->signal->saved_ip = p->signal->saved_ip;
70990+ if (p->exec_file)
70991+ get_file(p->exec_file);
70992+ tsk->exec_file = p->exec_file;
70993+ tsk->is_writable = p->is_writable;
70994+ if (unlikely(p->signal->used_accept)) {
70995+ p->signal->curr_ip = 0;
70996+ p->signal->saved_ip = 0;
70997+ }
70998+
70999+ return;
71000+}
71001+
71002+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
71003+
71004+int
71005+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
71006+{
71007+ unsigned int i;
71008+ __u16 num;
71009+ uid_t *uidlist;
71010+ uid_t curuid;
71011+ int realok = 0;
71012+ int effectiveok = 0;
71013+ int fsok = 0;
71014+ uid_t globalreal, globaleffective, globalfs;
71015+
71016+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
71017+ struct user_struct *user;
71018+
71019+ if (!uid_valid(real))
71020+ goto skipit;
71021+
71022+ /* find user based on global namespace */
71023+
71024+ globalreal = GR_GLOBAL_UID(real);
71025+
71026+ user = find_user(make_kuid(&init_user_ns, globalreal));
71027+ if (user == NULL)
71028+ goto skipit;
71029+
71030+ if (gr_process_kernel_setuid_ban(user)) {
71031+ /* for find_user */
71032+ free_uid(user);
71033+ return 1;
71034+ }
71035+
71036+ /* for find_user */
71037+ free_uid(user);
71038+
71039+skipit:
71040+#endif
71041+
71042+ if (unlikely(!(gr_status & GR_READY)))
71043+ return 0;
71044+
71045+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71046+ gr_log_learn_uid_change(real, effective, fs);
71047+
71048+ num = current->acl->user_trans_num;
71049+ uidlist = current->acl->user_transitions;
71050+
71051+ if (uidlist == NULL)
71052+ return 0;
71053+
71054+ if (!uid_valid(real)) {
71055+ realok = 1;
71056+ globalreal = (uid_t)-1;
71057+ } else {
71058+ globalreal = GR_GLOBAL_UID(real);
71059+ }
71060+ if (!uid_valid(effective)) {
71061+ effectiveok = 1;
71062+ globaleffective = (uid_t)-1;
71063+ } else {
71064+ globaleffective = GR_GLOBAL_UID(effective);
71065+ }
71066+ if (!uid_valid(fs)) {
71067+ fsok = 1;
71068+ globalfs = (uid_t)-1;
71069+ } else {
71070+ globalfs = GR_GLOBAL_UID(fs);
71071+ }
71072+
71073+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
71074+ for (i = 0; i < num; i++) {
71075+ curuid = uidlist[i];
71076+ if (globalreal == curuid)
71077+ realok = 1;
71078+ if (globaleffective == curuid)
71079+ effectiveok = 1;
71080+ if (globalfs == curuid)
71081+ fsok = 1;
71082+ }
71083+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
71084+ for (i = 0; i < num; i++) {
71085+ curuid = uidlist[i];
71086+ if (globalreal == curuid)
71087+ break;
71088+ if (globaleffective == curuid)
71089+ break;
71090+ if (globalfs == curuid)
71091+ break;
71092+ }
71093+ /* not in deny list */
71094+ if (i == num) {
71095+ realok = 1;
71096+ effectiveok = 1;
71097+ fsok = 1;
71098+ }
71099+ }
71100+
71101+ if (realok && effectiveok && fsok)
71102+ return 0;
71103+ else {
71104+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
71105+ return 1;
71106+ }
71107+}
71108+
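The transition list is read one of two ways: under GR_ID_ALLOW an id must appear in the list to be reachable, while under GR_ID_DENY it must be absent. The same test for a single id, as a standalone sketch (the function above applies it to the real, effective, and fs ids together):

        /* 1 if uid may be transitioned to, mirroring the allow/deny loops
           in gr_check_user_change() above */
        static int uid_allowed(unsigned int uid, const unsigned int *list,
                               unsigned short num, int is_allow_list)
        {
                unsigned short i;

                for (i = 0; i < num; i++)
                        if (list[i] == uid)
                                break;
                return is_allow_list ? (i < num) : (i == num);
        }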
71109+int
71110+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
71111+{
71112+ unsigned int i;
71113+ __u16 num;
71114+ gid_t *gidlist;
71115+ gid_t curgid;
71116+ int realok = 0;
71117+ int effectiveok = 0;
71118+ int fsok = 0;
71119+ gid_t globalreal, globaleffective, globalfs;
71120+
71121+ if (unlikely(!(gr_status & GR_READY)))
71122+ return 0;
71123+
71124+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71125+ gr_log_learn_gid_change(real, effective, fs);
71126+
71127+ num = current->acl->group_trans_num;
71128+ gidlist = current->acl->group_transitions;
71129+
71130+ if (gidlist == NULL)
71131+ return 0;
71132+
71133+ if (!gid_valid(real)) {
71134+ realok = 1;
71135+ globalreal = (gid_t)-1;
71136+ } else {
71137+ globalreal = GR_GLOBAL_GID(real);
71138+ }
71139+ if (!gid_valid(effective)) {
71140+ effectiveok = 1;
71141+ globaleffective = (gid_t)-1;
71142+ } else {
71143+ globaleffective = GR_GLOBAL_GID(effective);
71144+ }
71145+ if (!gid_valid(fs)) {
71146+ fsok = 1;
71147+ globalfs = (gid_t)-1;
71148+ } else {
71149+ globalfs = GR_GLOBAL_GID(fs);
71150+ }
71151+
71152+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
71153+ for (i = 0; i < num; i++) {
71154+ curgid = gidlist[i];
71155+ if (globalreal == curgid)
71156+ realok = 1;
71157+ if (globaleffective == curgid)
71158+ effectiveok = 1;
71159+ if (globalfs == curgid)
71160+ fsok = 1;
71161+ }
71162+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
71163+ for (i = 0; i < num; i++) {
71164+ curgid = gidlist[i];
71165+ if (globalreal == curgid)
71166+ break;
71167+ if (globaleffective == curgid)
71168+ break;
71169+ if (globalfs == curgid)
71170+ break;
71171+ }
71172+ /* not in deny list */
71173+ if (i == num) {
71174+ realok = 1;
71175+ effectiveok = 1;
71176+ fsok = 1;
71177+ }
71178+ }
71179+
71180+ if (realok && effectiveok && fsok)
71181+ return 0;
71182+ else {
71183+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
71184+ return 1;
71185+ }
71186+}
71187+
71188+extern int gr_acl_is_capable(const int cap);
71189+
71190+void
71191+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
71192+{
71193+ struct acl_role_label *role = task->role;
71194+ struct acl_role_label *origrole = role;
71195+ struct acl_subject_label *subj = NULL;
71196+ struct acl_object_label *obj;
71197+ struct file *filp;
71198+ uid_t uid;
71199+ gid_t gid;
71200+
71201+ if (unlikely(!(gr_status & GR_READY)))
71202+ return;
71203+
71204+ uid = GR_GLOBAL_UID(kuid);
71205+ gid = GR_GLOBAL_GID(kgid);
71206+
71207+ filp = task->exec_file;
71208+
71209+ /* kernel process, we'll give them the kernel role */
71210+ if (unlikely(!filp)) {
71211+ task->role = running_polstate.kernel_role;
71212+ task->acl = running_polstate.kernel_role->root_label;
71213+ return;
71214+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
71215+ /* save the current ip at time of role lookup so that the proper
71216+ IP will be learned for role_allowed_ip */
71217+ task->signal->saved_ip = task->signal->curr_ip;
71218+ role = lookup_acl_role_label(task, uid, gid);
71219+ }
71220+
71221+ /* don't change the role if we're not a privileged process */
71222+ if (role && task->role != role &&
71223+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
71224+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
71225+ return;
71226+
71227+ task->role = role;
71228+
71229+ if (task->inherited) {
71230+ /* if we reached our subject through inheritance, then first see
71231+ if there's a subject of the same name in the new role that has
71232+ an object that would result in the same inherited subject
71233+ */
71234+ subj = gr_get_subject_for_task(task, task->acl->filename, 0);
71235+ if (subj) {
71236+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, subj);
71237+ if (!(obj->mode & GR_INHERIT))
71238+ subj = NULL;
71239+ }
71240+
71241+ }
71242+ if (subj == NULL) {
71243+ /* otherwise:
71244+ perform the subject lookup in the possibly new role;
71245+ we can use this result below in the case where role == task->role
71246+ */
71247+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
71248+ }
71249+
71250+ /* if we changed uid/gid but ended up with the same role
71251+ and are using inheritance, don't lose the inherited subject:
71252+ if the current subject is other than what a normal lookup
71253+ would produce, we arrived at it via inheritance, so don't
71254+ lose that subject
71255+ */
71256+ if (role != origrole || (!(task->acl->mode & GR_INHERITLEARN) &&
71257+ (subj == task->acl)))
71258+ task->acl = subj;
71259+
71260+ /* leave task->inherited unaffected */
71261+
71262+ task->is_writable = 0;
71263+
71264+ /* ignore additional mmap checks for processes that are writable
71265+ by the default ACL */
71266+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71267+ if (unlikely(obj->mode & GR_WRITE))
71268+ task->is_writable = 1;
71269+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
71270+ if (unlikely(obj->mode & GR_WRITE))
71271+ task->is_writable = 1;
71272+
71273+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71274+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71275+#endif
71276+
71277+ gr_set_proc_res(task);
71278+
71279+ return;
71280+}
71281+
71282+int
71283+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
71284+ const int unsafe_flags)
71285+{
71286+ struct task_struct *task = current;
71287+ struct acl_subject_label *newacl;
71288+ struct acl_object_label *obj;
71289+ __u32 retmode;
71290+
71291+ if (unlikely(!(gr_status & GR_READY)))
71292+ return 0;
71293+
71294+ newacl = chk_subj_label(dentry, mnt, task->role);
71295+
71296+ /* special handling for the case where we did an strace -f -p <pid> from an admin role, and <pid> then
71297+ did an exec
71298+ */
71299+ rcu_read_lock();
71300+ read_lock(&tasklist_lock);
71301+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
71302+ (task->parent->acl->mode & GR_POVERRIDE))) {
71303+ read_unlock(&tasklist_lock);
71304+ rcu_read_unlock();
71305+ goto skip_check;
71306+ }
71307+ read_unlock(&tasklist_lock);
71308+ rcu_read_unlock();
71309+
71310+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
71311+ !(task->role->roletype & GR_ROLE_GOD) &&
71312+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
71313+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
71314+ if (unsafe_flags & LSM_UNSAFE_SHARE)
71315+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
71316+ else
71317+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
71318+ return -EACCES;
71319+ }
71320+
71321+skip_check:
71322+
71323+ obj = chk_obj_label(dentry, mnt, task->acl);
71324+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
71325+
71326+ if (!(task->acl->mode & GR_INHERITLEARN) &&
71327+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
71328+ if (obj->nested)
71329+ task->acl = obj->nested;
71330+ else
71331+ task->acl = newacl;
71332+ task->inherited = 0;
71333+ } else {
71334+ task->inherited = 1;
71335+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
71336+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
71337+ }
71338+
71339+ task->is_writable = 0;
71340+
71341+ /* ignore additional mmap checks for processes that are writable
71342+ by the default ACL */
71343+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
71344+ if (unlikely(obj->mode & GR_WRITE))
71345+ task->is_writable = 1;
71346+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
71347+ if (unlikely(obj->mode & GR_WRITE))
71348+ task->is_writable = 1;
71349+
71350+ gr_set_proc_res(task);
71351+
71352+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71353+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71354+#endif
71355+ return 0;
71356+}
71357+
71358+/* always called with valid inodev ptr */
71359+static void
71360+do_handle_delete(struct inodev_entry *inodev, const u64 ino, const dev_t dev)
71361+{
71362+ struct acl_object_label *matchpo;
71363+ struct acl_subject_label *matchps;
71364+ struct acl_subject_label *subj;
71365+ struct acl_role_label *role;
71366+ unsigned int x;
71367+
71368+ FOR_EACH_ROLE_START(role)
71369+ FOR_EACH_SUBJECT_START(role, subj, x)
71370+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71371+ matchpo->mode |= GR_DELETED;
71372+ FOR_EACH_SUBJECT_END(subj,x)
71373+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71374+ /* nested subjects aren't in the role's subj_hash table */
71375+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71376+ matchpo->mode |= GR_DELETED;
71377+ FOR_EACH_NESTED_SUBJECT_END(subj)
71378+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
71379+ matchps->mode |= GR_DELETED;
71380+ FOR_EACH_ROLE_END(role)
71381+
71382+ inodev->nentry->deleted = 1;
71383+
71384+ return;
71385+}
71386+
71387+void
71388+gr_handle_delete(const u64 ino, const dev_t dev)
71389+{
71390+ struct inodev_entry *inodev;
71391+
71392+ if (unlikely(!(gr_status & GR_READY)))
71393+ return;
71394+
71395+ write_lock(&gr_inode_lock);
71396+ inodev = lookup_inodev_entry(ino, dev);
71397+ if (inodev != NULL)
71398+ do_handle_delete(inodev, ino, dev);
71399+ write_unlock(&gr_inode_lock);
71400+
71401+ return;
71402+}
71403+
71404+static void
71405+update_acl_obj_label(const u64 oldinode, const dev_t olddevice,
71406+ const u64 newinode, const dev_t newdevice,
71407+ struct acl_subject_label *subj)
71408+{
71409+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
71410+ struct acl_object_label *match;
71411+
71412+ match = subj->obj_hash[index];
71413+
71414+ while (match && (match->inode != oldinode ||
71415+ match->device != olddevice ||
71416+ !(match->mode & GR_DELETED)))
71417+ match = match->next;
71418+
71419+ if (match && (match->inode == oldinode)
71420+ && (match->device == olddevice)
71421+ && (match->mode & GR_DELETED)) {
71422+ if (match->prev == NULL) {
71423+ subj->obj_hash[index] = match->next;
71424+ if (match->next != NULL)
71425+ match->next->prev = NULL;
71426+ } else {
71427+ match->prev->next = match->next;
71428+ if (match->next != NULL)
71429+ match->next->prev = match->prev;
71430+ }
71431+ match->prev = NULL;
71432+ match->next = NULL;
71433+ match->inode = newinode;
71434+ match->device = newdevice;
71435+ match->mode &= ~GR_DELETED;
71436+
71437+ insert_acl_obj_label(match, subj);
71438+ }
71439+
71440+ return;
71441+}
71442+
71443+static void
71444+update_acl_subj_label(const u64 oldinode, const dev_t olddevice,
71445+ const u64 newinode, const dev_t newdevice,
71446+ struct acl_role_label *role)
71447+{
71448+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
71449+ struct acl_subject_label *match;
71450+
71451+ match = role->subj_hash[index];
71452+
71453+ while (match && (match->inode != oldinode ||
71454+ match->device != olddevice ||
71455+ !(match->mode & GR_DELETED)))
71456+ match = match->next;
71457+
71458+ if (match && (match->inode == oldinode)
71459+ && (match->device == olddevice)
71460+ && (match->mode & GR_DELETED)) {
71461+ if (match->prev == NULL) {
71462+ role->subj_hash[index] = match->next;
71463+ if (match->next != NULL)
71464+ match->next->prev = NULL;
71465+ } else {
71466+ match->prev->next = match->next;
71467+ if (match->next != NULL)
71468+ match->next->prev = match->prev;
71469+ }
71470+ match->prev = NULL;
71471+ match->next = NULL;
71472+ match->inode = newinode;
71473+ match->device = newdevice;
71474+ match->mode &= ~GR_DELETED;
71475+
71476+ insert_acl_subj_label(match, role);
71477+ }
71478+
71479+ return;
71480+}
71481+
71482+static void
71483+update_inodev_entry(const u64 oldinode, const dev_t olddevice,
71484+ const u64 newinode, const dev_t newdevice)
71485+{
71486+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
71487+ struct inodev_entry *match;
71488+
71489+ match = running_polstate.inodev_set.i_hash[index];
71490+
71491+ while (match && (match->nentry->inode != oldinode ||
71492+ match->nentry->device != olddevice || !match->nentry->deleted))
71493+ match = match->next;
71494+
71495+ if (match && (match->nentry->inode == oldinode)
71496+ && (match->nentry->device == olddevice) &&
71497+ match->nentry->deleted) {
71498+ if (match->prev == NULL) {
71499+ running_polstate.inodev_set.i_hash[index] = match->next;
71500+ if (match->next != NULL)
71501+ match->next->prev = NULL;
71502+ } else {
71503+ match->prev->next = match->next;
71504+ if (match->next != NULL)
71505+ match->next->prev = match->prev;
71506+ }
71507+ match->prev = NULL;
71508+ match->next = NULL;
71509+ match->nentry->inode = newinode;
71510+ match->nentry->device = newdevice;
71511+ match->nentry->deleted = 0;
71512+
71513+ insert_inodev_entry(match);
71514+ }
71515+
71516+ return;
71517+}
71518+
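update_acl_obj_label(), update_acl_subj_label(), and update_inodev_entry() all repeat the same doubly-linked splice: detach the matched entry from its hash chain, rewrite its inode/device, clear the deleted state, and re-insert it under the new hash. The shared unlink step, as a standalone sketch:

        struct node {
                struct node *prev, *next;
        };

        /* detach match from the chain headed at *head, fixing up neighbours,
           as each of the three update_* helpers does before rehashing */
        static void chain_unlink(struct node **head, struct node *match)
        {
                if (match->prev == NULL) {
                        *head = match->next;
                        if (match->next)
                                match->next->prev = NULL;
                } else {
                        match->prev->next = match->next;
                        if (match->next)
                                match->next->prev = match->prev;
                }
                match->prev = match->next = NULL;
        }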
71519+static void
71520+__do_handle_create(const struct name_entry *matchn, u64 ino, dev_t dev)
71521+{
71522+ struct acl_subject_label *subj;
71523+ struct acl_role_label *role;
71524+ unsigned int x;
71525+
71526+ FOR_EACH_ROLE_START(role)
71527+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
71528+
71529+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71530+ if ((subj->inode == ino) && (subj->device == dev)) {
71531+ subj->inode = ino;
71532+ subj->device = dev;
71533+ }
71534+ /* nested subjects aren't in the role's subj_hash table */
71535+ update_acl_obj_label(matchn->inode, matchn->device,
71536+ ino, dev, subj);
71537+ FOR_EACH_NESTED_SUBJECT_END(subj)
71538+ FOR_EACH_SUBJECT_START(role, subj, x)
71539+ update_acl_obj_label(matchn->inode, matchn->device,
71540+ ino, dev, subj);
71541+ FOR_EACH_SUBJECT_END(subj,x)
71542+ FOR_EACH_ROLE_END(role)
71543+
71544+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
71545+
71546+ return;
71547+}
71548+
71549+static void
71550+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
71551+ const struct vfsmount *mnt)
71552+{
71553+ u64 ino = __get_ino(dentry);
71554+ dev_t dev = __get_dev(dentry);
71555+
71556+ __do_handle_create(matchn, ino, dev);
71557+
71558+ return;
71559+}
71560+
71561+void
71562+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
71563+{
71564+ struct name_entry *matchn;
71565+
71566+ if (unlikely(!(gr_status & GR_READY)))
71567+ return;
71568+
71569+ preempt_disable();
71570+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
71571+
71572+ if (unlikely((unsigned long)matchn)) {
71573+ write_lock(&gr_inode_lock);
71574+ do_handle_create(matchn, dentry, mnt);
71575+ write_unlock(&gr_inode_lock);
71576+ }
71577+ preempt_enable();
71578+
71579+ return;
71580+}
71581+
71582+void
71583+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
71584+{
71585+ struct name_entry *matchn;
71586+
71587+ if (unlikely(!(gr_status & GR_READY)))
71588+ return;
71589+
71590+ preempt_disable();
71591+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
71592+
71593+ if (unlikely((unsigned long)matchn)) {
71594+ write_lock(&gr_inode_lock);
71595+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
71596+ write_unlock(&gr_inode_lock);
71597+ }
71598+ preempt_enable();
71599+
71600+ return;
71601+}
71602+
71603+void
71604+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
71605+ struct dentry *old_dentry,
71606+ struct dentry *new_dentry,
71607+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
71608+{
71609+ struct name_entry *matchn;
71610+ struct name_entry *matchn2 = NULL;
71611+ struct inodev_entry *inodev;
71612+ struct inode *inode = new_dentry->d_inode;
71613+ u64 old_ino = __get_ino(old_dentry);
71614+ dev_t old_dev = __get_dev(old_dentry);
71615+ unsigned int exchange = flags & RENAME_EXCHANGE;
71616+
71617+ /* vfs_rename swaps the name and parent link for old_dentry and
71618+ new_dentry;
71619+ at this point, old_dentry has the new name, parent link, and inode
71620+ for the renamed file;
71621+ if a file is being replaced by a rename, new_dentry has the inode
71622+ and name for the replaced file
71623+ */
71624+
71625+ if (unlikely(!(gr_status & GR_READY)))
71626+ return;
71627+
71628+ preempt_disable();
71629+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
71630+
71631+ /* exchange cases:
71632+ a filename exists for the source, but not dest
71633+ do a recreate on source
71634+ a filename exists for the dest, but not source
71635+ do a recreate on dest
71636+ a filename exists for both source and dest
71637+ delete source and dest, then create source and dest
71638+ a filename exists for neither source nor dest
71639+ no updates needed
71640+
71641+ the name entry lookups get us the old inode/dev associated with
71642+ each name, so do the deletes first (if possible) so that when
71643+ we do the create, we pick up on the right entries
71644+ */
71645+
71646+ if (exchange)
71647+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
71648+
71649+ /* we wouldn't have to check d_inode if it weren't for
71650+ NFS silly-renaming
71651+ */
71652+
71653+ write_lock(&gr_inode_lock);
71654+ if (unlikely((replace || exchange) && inode)) {
71655+ u64 new_ino = __get_ino(new_dentry);
71656+ dev_t new_dev = __get_dev(new_dentry);
71657+
71658+ inodev = lookup_inodev_entry(new_ino, new_dev);
71659+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
71660+ do_handle_delete(inodev, new_ino, new_dev);
71661+ }
71662+
71663+ inodev = lookup_inodev_entry(old_ino, old_dev);
71664+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
71665+ do_handle_delete(inodev, old_ino, old_dev);
71666+
71667+ if (unlikely(matchn != NULL))
71668+ do_handle_create(matchn, old_dentry, mnt);
71669+
71670+ if (unlikely(matchn2 != NULL))
71671+ do_handle_create(matchn2, new_dentry, mnt);
71672+
71673+ write_unlock(&gr_inode_lock);
71674+ preempt_enable();
71675+
71676+ return;
71677+}
71678+
71679+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
71680+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
71681+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
71682+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
71683+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
71684+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
71685+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
71686+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
71687+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
71688+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
71689+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
71690+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
71691+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
71692+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
71693+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
71694+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
71695+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
71696+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
71697+};
71698+
71699+void
71700+gr_learn_resource(const struct task_struct *task,
71701+ const int res, const unsigned long wanted, const int gt)
71702+{
71703+ struct acl_subject_label *acl;
71704+ const struct cred *cred;
71705+
71706+ if (unlikely((gr_status & GR_READY) &&
71707+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
71708+ goto skip_reslog;
71709+
71710+ gr_log_resource(task, res, wanted, gt);
71711+skip_reslog:
71712+
71713+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
71714+ return;
71715+
71716+ acl = task->acl;
71717+
71718+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
71719+ !(acl->resmask & (1U << (unsigned short) res))))
71720+ return;
71721+
71722+ if (wanted >= acl->res[res].rlim_cur) {
71723+ unsigned long res_add;
71724+
71725+ res_add = wanted + res_learn_bumps[res];
71726+
71727+ acl->res[res].rlim_cur = res_add;
71728+
71729+ if (wanted > acl->res[res].rlim_max)
71730+ acl->res[res].rlim_max = res_add;
71731+
71732+ /* only log the subject filename, since resource logging is supported for
71733+ single-subject learning only */
71734+ rcu_read_lock();
71735+ cred = __task_cred(task);
71736+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
71737+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
71738+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
71739+ "", (unsigned long) res, &task->signal->saved_ip);
71740+ rcu_read_unlock();
71741+ }
71742+
71743+ return;
71744+}
71745+EXPORT_SYMBOL_GPL(gr_learn_resource);
71746+#endif
71747+
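In learning mode the subject's limit is grown ahead of demand: a request at or above the current soft limit raises it to wanted plus the per-resource bump from res_learn_bumps, and the hard limit is raised to the same value whenever wanted exceeds it. For instance, if a learning subject's RLIMIT_NOFILE soft limit is 1024 and a task wants 1500, the new soft limit becomes 1500 plus GR_RLIM_NOFILE_BUMP (whatever that constant is defined to elsewhere in the patch).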
71748+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
71749+void
71750+pax_set_initial_flags(struct linux_binprm *bprm)
71751+{
71752+ struct task_struct *task = current;
71753+ struct acl_subject_label *proc;
71754+ unsigned long flags;
71755+
71756+ if (unlikely(!(gr_status & GR_READY)))
71757+ return;
71758+
71759+ flags = pax_get_flags(task);
71760+
71761+ proc = task->acl;
71762+
71763+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
71764+ flags &= ~MF_PAX_PAGEEXEC;
71765+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
71766+ flags &= ~MF_PAX_SEGMEXEC;
71767+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
71768+ flags &= ~MF_PAX_RANDMMAP;
71769+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
71770+ flags &= ~MF_PAX_EMUTRAMP;
71771+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
71772+ flags &= ~MF_PAX_MPROTECT;
71773+
71774+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
71775+ flags |= MF_PAX_PAGEEXEC;
71776+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
71777+ flags |= MF_PAX_SEGMEXEC;
71778+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
71779+ flags |= MF_PAX_RANDMMAP;
71780+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
71781+ flags |= MF_PAX_EMUTRAMP;
71782+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
71783+ flags |= MF_PAX_MPROTECT;
71784+
71785+ pax_set_flags(task, flags);
71786+
71787+ return;
71788+}
71789+#endif
71790+
71791+int
71792+gr_handle_proc_ptrace(struct task_struct *task)
71793+{
71794+ struct file *filp;
71795+ struct task_struct *tmp = task;
71796+ struct task_struct *curtemp = current;
71797+ __u32 retmode;
71798+
71799+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
71800+ if (unlikely(!(gr_status & GR_READY)))
71801+ return 0;
71802+#endif
71803+
71804+ read_lock(&tasklist_lock);
71805+ read_lock(&grsec_exec_file_lock);
71806+ filp = task->exec_file;
71807+
71808+ while (task_pid_nr(tmp) > 0) {
71809+ if (tmp == curtemp)
71810+ break;
71811+ tmp = tmp->real_parent;
71812+ }
71813+
71814+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
71815+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
71816+ read_unlock(&grsec_exec_file_lock);
71817+ read_unlock(&tasklist_lock);
71818+ return 1;
71819+ }
71820+
71821+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71822+ if (!(gr_status & GR_READY)) {
71823+ read_unlock(&grsec_exec_file_lock);
71824+ read_unlock(&tasklist_lock);
71825+ return 0;
71826+ }
71827+#endif
71828+
71829+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
71830+ read_unlock(&grsec_exec_file_lock);
71831+ read_unlock(&tasklist_lock);
71832+
71833+ if (retmode & GR_NOPTRACE)
71834+ return 1;
71835+
71836+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
71837+ && (current->acl != task->acl || (current->acl != current->role->root_label
71838+ && task_pid_nr(current) != task_pid_nr(task))))
71839+ return 1;
71840+
71841+ return 0;
71842+}
71843+
71844+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
71845+{
71846+ if (unlikely(!(gr_status & GR_READY)))
71847+ return;
71848+
71849+ if (!(current->role->roletype & GR_ROLE_GOD))
71850+ return;
71851+
71852+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
71853+ p->role->rolename, gr_task_roletype_to_char(p),
71854+ p->acl->filename);
71855+}
71856+
71857+int
71858+gr_handle_ptrace(struct task_struct *task, const long request)
71859+{
71860+ struct task_struct *tmp = task;
71861+ struct task_struct *curtemp = current;
71862+ __u32 retmode;
71863+
71864+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
71865+ if (unlikely(!(gr_status & GR_READY)))
71866+ return 0;
71867+#endif
71868+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
71869+ read_lock(&tasklist_lock);
71870+ while (task_pid_nr(tmp) > 0) {
71871+ if (tmp == curtemp)
71872+ break;
71873+ tmp = tmp->real_parent;
71874+ }
71875+
71876+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
71877+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
71878+ read_unlock(&tasklist_lock);
71879+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71880+ return 1;
71881+ }
71882+ read_unlock(&tasklist_lock);
71883+ }
71884+
71885+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71886+ if (!(gr_status & GR_READY))
71887+ return 0;
71888+#endif
71889+
71890+ read_lock(&grsec_exec_file_lock);
71891+ if (unlikely(!task->exec_file)) {
71892+ read_unlock(&grsec_exec_file_lock);
71893+ return 0;
71894+ }
71895+
71896+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
71897+ read_unlock(&grsec_exec_file_lock);
71898+
71899+ if (retmode & GR_NOPTRACE) {
71900+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71901+ return 1;
71902+ }
71903+
71904+ if (retmode & GR_PTRACERD) {
71905+ switch (request) {
71906+ case PTRACE_SEIZE:
71907+ case PTRACE_POKETEXT:
71908+ case PTRACE_POKEDATA:
71909+ case PTRACE_POKEUSR:
71910+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
71911+ case PTRACE_SETREGS:
71912+ case PTRACE_SETFPREGS:
71913+#endif
71914+#ifdef CONFIG_X86
71915+ case PTRACE_SETFPXREGS:
71916+#endif
71917+#ifdef CONFIG_ALTIVEC
71918+ case PTRACE_SETVRREGS:
71919+#endif
71920+ return 1;
71921+ default:
71922+ return 0;
71923+ }
71924+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
71925+ !(current->role->roletype & GR_ROLE_GOD) &&
71926+ (current->acl != task->acl)) {
71927+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71928+ return 1;
71929+ }
71930+
71931+ return 0;
71932+}
71933+
71934+static int is_writable_mmap(const struct file *filp)
71935+{
71936+ struct task_struct *task = current;
71937+ struct acl_object_label *obj, *obj2;
71938+
71939+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
71940+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
71941+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71942+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
71943+ task->role->root_label);
71944+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
71945+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
71946+ return 1;
71947+ }
71948+ }
71949+ return 0;
71950+}
71951+
71952+int
71953+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
71954+{
71955+ __u32 mode;
71956+
71957+ if (unlikely(!file || !(prot & PROT_EXEC)))
71958+ return 1;
71959+
71960+ if (is_writable_mmap(file))
71961+ return 0;
71962+
71963+ mode =
71964+ gr_search_file(file->f_path.dentry,
71965+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
71966+ file->f_path.mnt);
71967+
71968+ if (!gr_tpe_allow(file))
71969+ return 0;
71970+
71971+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
71972+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71973+ return 0;
71974+ } else if (unlikely(!(mode & GR_EXEC))) {
71975+ return 0;
71976+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
71977+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71978+ return 1;
71979+ }
71980+
71981+ return 1;
71982+}
71983+
71984+int
71985+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
71986+{
71987+ __u32 mode;
71988+
71989+ if (unlikely(!file || !(prot & PROT_EXEC)))
71990+ return 1;
71991+
71992+ if (is_writable_mmap(file))
71993+ return 0;
71994+
71995+ mode =
71996+ gr_search_file(file->f_path.dentry,
71997+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
71998+ file->f_path.mnt);
71999+
72000+ if (!gr_tpe_allow(file))
72001+ return 0;
72002+
72003+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
72004+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72005+ return 0;
72006+ } else if (unlikely(!(mode & GR_EXEC))) {
72007+ return 0;
72008+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
72009+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72010+ return 1;
72011+ }
72012+
72013+ return 1;
72014+}
72015+
72016+void
72017+gr_acl_handle_psacct(struct task_struct *task, const long code)
72018+{
72019+ unsigned long runtime, cputime;
72020+ cputime_t utime, stime;
72021+ unsigned int wday, cday;
72022+ __u8 whr, chr;
72023+ __u8 wmin, cmin;
72024+ __u8 wsec, csec;
72025+ struct timespec curtime, starttime;
72026+
72027+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
72028+ !(task->acl->mode & GR_PROCACCT)))
72029+ return;
72030+
72031+ curtime = ns_to_timespec(ktime_get_ns());
72032+ starttime = ns_to_timespec(task->start_time);
72033+ runtime = curtime.tv_sec - starttime.tv_sec;
72034+ wday = runtime / (60 * 60 * 24);
72035+ runtime -= wday * (60 * 60 * 24);
72036+ whr = runtime / (60 * 60);
72037+ runtime -= whr * (60 * 60);
72038+ wmin = runtime / 60;
72039+ runtime -= wmin * 60;
72040+ wsec = runtime;
72041+
72042+ task_cputime(task, &utime, &stime);
72043+ cputime = cputime_to_secs(utime + stime);
72044+ cday = cputime / (60 * 60 * 24);
72045+ cputime -= cday * (60 * 60 * 24);
72046+ chr = cputime / (60 * 60);
72047+ cputime -= chr * (60 * 60);
72048+ cmin = cputime / 60;
72049+ cputime -= cmin * 60;
72050+ csec = cputime;
72051+
72052+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
72053+
72054+ return;
72055+}
72056+
72057+#ifdef CONFIG_TASKSTATS
72058+int gr_is_taskstats_denied(int pid)
72059+{
72060+ struct task_struct *task;
72061+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72062+ const struct cred *cred;
72063+#endif
72064+ int ret = 0;
72065+
72066+ /* restrict taskstats viewing to un-chrooted root users
72067+ who have the 'view' subject flag if the RBAC system is enabled
72068+ */
72069+
72070+ rcu_read_lock();
72071+ read_lock(&tasklist_lock);
72072+ task = find_task_by_vpid(pid);
72073+ if (task) {
72074+#ifdef CONFIG_GRKERNSEC_CHROOT
72075+ if (proc_is_chrooted(task))
72076+ ret = -EACCES;
72077+#endif
72078+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72079+ cred = __task_cred(task);
72080+#ifdef CONFIG_GRKERNSEC_PROC_USER
72081+ if (gr_is_global_nonroot(cred->uid))
72082+ ret = -EACCES;
72083+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72084+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
72085+ ret = -EACCES;
72086+#endif
72087+#endif
72088+ if (gr_status & GR_READY) {
72089+ if (!(task->acl->mode & GR_VIEW))
72090+ ret = -EACCES;
72091+ }
72092+ } else
72093+ ret = -ENOENT;
72094+
72095+ read_unlock(&tasklist_lock);
72096+ rcu_read_unlock();
72097+
72098+ return ret;
72099+}
72100+#endif
72101+
72102+/* AUXV entries are filled via a descendant of search_binary_handler
72103+ after we've already applied the subject for the target
72104+*/
72105+int gr_acl_enable_at_secure(void)
72106+{
72107+ if (unlikely(!(gr_status & GR_READY)))
72108+ return 0;
72109+
72110+ if (current->acl->mode & GR_ATSECURE)
72111+ return 1;
72112+
72113+ return 0;
72114+}
72115+
72116+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const u64 ino)
72117+{
72118+ struct task_struct *task = current;
72119+ struct dentry *dentry = file->f_path.dentry;
72120+ struct vfsmount *mnt = file->f_path.mnt;
72121+ struct acl_object_label *obj, *tmp;
72122+ struct acl_subject_label *subj;
72123+ unsigned int bufsize;
72124+ int is_not_root;
72125+ char *path;
72126+ dev_t dev = __get_dev(dentry);
72127+
72128+ if (unlikely(!(gr_status & GR_READY)))
72129+ return 1;
72130+
72131+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
72132+ return 1;
72133+
72134+ /* ignore Eric Biederman */
72135+ if (IS_PRIVATE(dentry->d_inode))
72136+ return 1;
72137+
72138+ subj = task->acl;
72139+ read_lock(&gr_inode_lock);
72140+ do {
72141+ obj = lookup_acl_obj_label(ino, dev, subj);
72142+ if (obj != NULL) {
72143+ read_unlock(&gr_inode_lock);
72144+ return (obj->mode & GR_FIND) ? 1 : 0;
72145+ }
72146+ } while ((subj = subj->parent_subject));
72147+ read_unlock(&gr_inode_lock);
72148+
72149+ /* this is purely an optimization since we're looking for an object
72150+ for the directory we're doing a readdir on:
72151+ if it's possible for any globbed object to match the entry we're
72152+ filling into the directory, then the object we find here will be
72153+ an anchor point with attached globbed objects
72154+ */
72155+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
72156+ if (obj->globbed == NULL)
72157+ return (obj->mode & GR_FIND) ? 1 : 0;
72158+
72159+ is_not_root = ((obj->filename[0] == '/') &&
72160+ (obj->filename[1] == '\0')) ? 0 : 1;
72161+ bufsize = PAGE_SIZE - namelen - is_not_root;
72162+
72163+	/* one unsigned comparison rejects both bufsize > PAGE_SIZE and bufsize == 0 */
72164+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
72165+ return 1;
72166+
72167+ preempt_disable();
72168+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
72169+ bufsize);
72170+
72171+ bufsize = strlen(path);
72172+
72173+ /* if base is "/", don't append an additional slash */
72174+ if (is_not_root)
72175+ *(path + bufsize) = '/';
72176+ memcpy(path + bufsize + is_not_root, name, namelen);
72177+ *(path + bufsize + namelen + is_not_root) = '\0';
72178+
72179+ tmp = obj->globbed;
72180+ while (tmp) {
72181+ if (!glob_match(tmp->filename, path)) {
72182+ preempt_enable();
72183+ return (tmp->mode & GR_FIND) ? 1 : 0;
72184+ }
72185+ tmp = tmp->next;
72186+ }
72187+ preempt_enable();
72188+ return (obj->mode & GR_FIND) ? 1 : 0;
72189+}
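
As a standalone illustration of the path-assembly rule used above (append a separator only when the anchor object is not "/"), a small userspace sketch; the helper name is made up:

/* Demonstrates the is_not_root slash logic:
   "/" + "etc" -> "/etc", "/tmp" + "file" -> "/tmp/file". */
#include <stdio.h>
#include <string.h>

static void join(char *out, const char *base, const char *name)
{
	size_t len = strlen(base);
	int is_not_root = !(base[0] == '/' && base[1] == '\0');

	memcpy(out, base, len);
	if (is_not_root)
		out[len] = '/';
	memcpy(out + len + is_not_root, name, strlen(name) + 1);
}

int main(void)
{
	char buf[64];
	join(buf, "/", "etc");     printf("%s\n", buf);  /* /etc */
	join(buf, "/tmp", "file"); printf("%s\n", buf);  /* /tmp/file */
	return 0;
}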
72190+
72191+void gr_put_exec_file(struct task_struct *task)
72192+{
72193+ struct file *filp;
72194+
72195+ write_lock(&grsec_exec_file_lock);
72196+ filp = task->exec_file;
72197+ task->exec_file = NULL;
72198+ write_unlock(&grsec_exec_file_lock);
72199+
72200+ if (filp)
72201+ fput(filp);
72202+
72203+ return;
72204+}
72205+
72206+
72207+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
72208+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
72209+#endif
72210+#ifdef CONFIG_SECURITY
72211+EXPORT_SYMBOL_GPL(gr_check_user_change);
72212+EXPORT_SYMBOL_GPL(gr_check_group_change);
72213+#endif
72214+
72215diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
72216new file mode 100644
72217index 0000000..18ffbbd
72218--- /dev/null
72219+++ b/grsecurity/gracl_alloc.c
72220@@ -0,0 +1,105 @@
72221+#include <linux/kernel.h>
72222+#include <linux/mm.h>
72223+#include <linux/slab.h>
72224+#include <linux/vmalloc.h>
72225+#include <linux/gracl.h>
72226+#include <linux/grsecurity.h>
72227+
72228+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
72229+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
72230+
72231+static __inline__ int
72232+alloc_pop(void)
72233+{
72234+ if (current_alloc_state->alloc_stack_next == 1)
72235+ return 0;
72236+
72237+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
72238+
72239+ current_alloc_state->alloc_stack_next--;
72240+
72241+ return 1;
72242+}
72243+
72244+static __inline__ int
72245+alloc_push(void *buf)
72246+{
72247+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
72248+ return 1;
72249+
72250+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
72251+
72252+ current_alloc_state->alloc_stack_next++;
72253+
72254+ return 0;
72255+}
72256+
72257+void *
72258+acl_alloc(unsigned long len)
72259+{
72260+ void *ret = NULL;
72261+
72262+ if (!len || len > PAGE_SIZE)
72263+ goto out;
72264+
72265+ ret = kmalloc(len, GFP_KERNEL);
72266+
72267+ if (ret) {
72268+ if (alloc_push(ret)) {
72269+ kfree(ret);
72270+ ret = NULL;
72271+ }
72272+ }
72273+
72274+out:
72275+ return ret;
72276+}
72277+
72278+void *
72279+acl_alloc_num(unsigned long num, unsigned long len)
72280+{
72281+ if (!len || (num > (PAGE_SIZE / len)))
72282+ return NULL;
72283+
72284+ return acl_alloc(num * len);
72285+}
72286+
72287+void
72288+acl_free_all(void)
72289+{
72290+ if (!current_alloc_state->alloc_stack)
72291+ return;
72292+
72293+ while (alloc_pop()) ;
72294+
72295+ if (current_alloc_state->alloc_stack) {
72296+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
72297+ kfree(current_alloc_state->alloc_stack);
72298+ else
72299+ vfree(current_alloc_state->alloc_stack);
72300+ }
72301+
72302+ current_alloc_state->alloc_stack = NULL;
72303+ current_alloc_state->alloc_stack_size = 1;
72304+ current_alloc_state->alloc_stack_next = 1;
72305+
72306+ return;
72307+}
72308+
72309+int
72310+acl_alloc_stack_init(unsigned long size)
72311+{
72312+ if ((size * sizeof (void *)) <= PAGE_SIZE)
72313+ current_alloc_state->alloc_stack =
72314+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
72315+ else
72316+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
72317+
72318+ current_alloc_state->alloc_stack_size = size;
72319+ current_alloc_state->alloc_stack_next = 1;
72320+
72321+ if (!current_alloc_state->alloc_stack)
72322+ return 0;
72323+ else
72324+ return 1;
72325+}
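
To show the intended call pattern for this allocator, a hedged sketch of a loader-side caller; load_policy_example and its error handling are illustrative, only the acl_* names come from this file:

/* Reserve a stack sized for the expected number of allocations,
 * allocate policy objects through it, and free everything in one
 * sweep on error or teardown. Note acl_alloc_stack_init() returns
 * 1 on success and 0 on failure. */
static int load_policy_example(unsigned long num_objects)
{
	struct acl_object_label *obj;

	if (!acl_alloc_stack_init(num_objects))
		return -ENOMEM;

	obj = acl_alloc(sizeof(*obj));
	if (obj == NULL) {
		acl_free_all();	/* pops and frees every allocation */
		return -ENOMEM;
	}

	/* ... copy policy data into obj ... */
	return 0;
}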
72326diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
72327new file mode 100644
72328index 0000000..1a94c11
72329--- /dev/null
72330+++ b/grsecurity/gracl_cap.c
72331@@ -0,0 +1,127 @@
72332+#include <linux/kernel.h>
72333+#include <linux/module.h>
72334+#include <linux/sched.h>
72335+#include <linux/gracl.h>
72336+#include <linux/grsecurity.h>
72337+#include <linux/grinternal.h>
72338+
72339+extern const char *captab_log[];
72340+extern int captab_log_entries;
72341+
72342+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
72343+{
72344+ struct acl_subject_label *curracl;
72345+
72346+ if (!gr_acl_is_enabled())
72347+ return 1;
72348+
72349+ curracl = task->acl;
72350+
72351+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
72352+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
72353+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
72354+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
72355+ gr_to_filename(task->exec_file->f_path.dentry,
72356+ task->exec_file->f_path.mnt) : curracl->filename,
72357+ curracl->filename, 0UL,
72358+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
72359+ return 1;
72360+ }
72361+
72362+ return 0;
72363+}
72364+
72365+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
72366+{
72367+ struct acl_subject_label *curracl;
72368+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72369+ kernel_cap_t cap_audit = __cap_empty_set;
72370+
72371+ if (!gr_acl_is_enabled())
72372+ return 1;
72373+
72374+ curracl = task->acl;
72375+
72376+ cap_drop = curracl->cap_lower;
72377+ cap_mask = curracl->cap_mask;
72378+ cap_audit = curracl->cap_invert_audit;
72379+
72380+ while ((curracl = curracl->parent_subject)) {
72381+		/* a capability is governed by the nearest subject in the parent
72382+		   chain that specifies it: once a level raises the cap in its
72383+		   cap_mask, fold it into the computed mask, record whether that
72384+		   level lowers it (cap_drop) or inverts auditing for it
72385+		   (cap_audit), and ignore all further ancestors for that cap */
72386+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72387+ cap_raise(cap_mask, cap);
72388+ if (cap_raised(curracl->cap_lower, cap))
72389+ cap_raise(cap_drop, cap);
72390+ if (cap_raised(curracl->cap_invert_audit, cap))
72391+ cap_raise(cap_audit, cap);
72392+ }
72393+ }
72394+
72395+ if (!cap_raised(cap_drop, cap)) {
72396+ if (cap_raised(cap_audit, cap))
72397+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
72398+ return 1;
72399+ }
72400+
72401+	/* in the general case, only learn the capability use if the process
72402+	   actually has the capability; the two uses of gr_learn_cap in sys.c
72403+	   are an exception to this rule, ensuring any role transition involves
72404+	   what the full-learned policy believes is a privileged process
72405+	*/
72406+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
72407+ return 1;
72408+
72409+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
72410+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
72411+
72412+ return 0;
72413+}
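
A standalone userspace model of the mask-folding loop above; the struct and helper are illustrative, but the bit logic mirrors the cap_mask/cap_lower walk:

/* Walk from child to parent, letting the nearest subject that
 * specifies a capability decide whether it is dropped. */
#include <stdio.h>

struct subj { unsigned mask, lower; struct subj *parent; };

static int cap_allowed(struct subj *s, unsigned bit)
{
	unsigned mask = s->mask, drop = s->lower;

	for (s = s->parent; s; s = s->parent) {
		if (!(mask & bit) && (s->mask & bit)) {
			mask |= bit;
			if (s->lower & bit)
				drop |= bit;
		}
	}
	return !(drop & bit);
}

int main(void)
{
	struct subj parent = { .mask = 1, .lower = 1, .parent = NULL };
	struct subj child  = { .mask = 0, .lower = 0, .parent = &parent };

	/* parent specifies and lowers bit 0; child inherits the drop */
	printf("%d\n", cap_allowed(&child, 1));	/* prints 0 */
	return 0;
}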
72414+
72415+int
72416+gr_acl_is_capable(const int cap)
72417+{
72418+ return gr_task_acl_is_capable(current, current_cred(), cap);
72419+}
72420+
72421+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
72422+{
72423+ struct acl_subject_label *curracl;
72424+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72425+
72426+ if (!gr_acl_is_enabled())
72427+ return 1;
72428+
72429+ curracl = task->acl;
72430+
72431+ cap_drop = curracl->cap_lower;
72432+ cap_mask = curracl->cap_mask;
72433+
72434+ while ((curracl = curracl->parent_subject)) {
72435+ /* if the cap isn't specified in the current computed mask but is specified in the
72436+ current level subject, and is lowered in the current level subject, then add
72437+ it to the set of dropped capabilities
72438+ otherwise, add the current level subject's mask to the current computed mask
72439+ */
72440+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72441+ cap_raise(cap_mask, cap);
72442+ if (cap_raised(curracl->cap_lower, cap))
72443+ cap_raise(cap_drop, cap);
72444+ }
72445+ }
72446+
72447+ if (!cap_raised(cap_drop, cap))
72448+ return 1;
72449+
72450+ return 0;
72451+}
72452+
72453+int
72454+gr_acl_is_capable_nolog(const int cap)
72455+{
72456+ return gr_task_acl_is_capable_nolog(current, cap);
72457+}
72458+
72459diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
72460new file mode 100644
72461index 0000000..a43dd06
72462--- /dev/null
72463+++ b/grsecurity/gracl_compat.c
72464@@ -0,0 +1,269 @@
72465+#include <linux/kernel.h>
72466+#include <linux/gracl.h>
72467+#include <linux/compat.h>
72468+#include <linux/gracl_compat.h>
72469+
72470+#include <asm/uaccess.h>
72471+
72472+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
72473+{
72474+ struct gr_arg_wrapper_compat uwrapcompat;
72475+
72476+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
72477+ return -EFAULT;
72478+
72479+ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
72480+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
72481+ return -EINVAL;
72482+
72483+ uwrap->arg = compat_ptr(uwrapcompat.arg);
72484+ uwrap->version = uwrapcompat.version;
72485+ uwrap->size = sizeof(struct gr_arg);
72486+
72487+ return 0;
72488+}
72489+
72490+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
72491+{
72492+ struct gr_arg_compat argcompat;
72493+
72494+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
72495+ return -EFAULT;
72496+
72497+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
72498+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
72499+ arg->role_db.num_roles = argcompat.role_db.num_roles;
72500+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
72501+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
72502+ arg->role_db.num_objects = argcompat.role_db.num_objects;
72503+
72504+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
72505+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
72506+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
72507+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
72508+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
72509+ arg->segv_device = argcompat.segv_device;
72510+ arg->segv_inode = argcompat.segv_inode;
72511+ arg->segv_uid = argcompat.segv_uid;
72512+ arg->num_sprole_pws = argcompat.num_sprole_pws;
72513+ arg->mode = argcompat.mode;
72514+
72515+ return 0;
72516+}
72517+
72518+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
72519+{
72520+ struct acl_object_label_compat objcompat;
72521+
72522+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
72523+ return -EFAULT;
72524+
72525+ obj->filename = compat_ptr(objcompat.filename);
72526+ obj->inode = objcompat.inode;
72527+ obj->device = objcompat.device;
72528+ obj->mode = objcompat.mode;
72529+
72530+ obj->nested = compat_ptr(objcompat.nested);
72531+ obj->globbed = compat_ptr(objcompat.globbed);
72532+
72533+ obj->prev = compat_ptr(objcompat.prev);
72534+ obj->next = compat_ptr(objcompat.next);
72535+
72536+ return 0;
72537+}
72538+
72539+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
72540+{
72541+ unsigned int i;
72542+ struct acl_subject_label_compat subjcompat;
72543+
72544+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
72545+ return -EFAULT;
72546+
72547+ subj->filename = compat_ptr(subjcompat.filename);
72548+ subj->inode = subjcompat.inode;
72549+ subj->device = subjcompat.device;
72550+ subj->mode = subjcompat.mode;
72551+ subj->cap_mask = subjcompat.cap_mask;
72552+ subj->cap_lower = subjcompat.cap_lower;
72553+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
72554+
72555+ for (i = 0; i < GR_NLIMITS; i++) {
72556+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
72557+ subj->res[i].rlim_cur = RLIM_INFINITY;
72558+ else
72559+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
72560+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
72561+ subj->res[i].rlim_max = RLIM_INFINITY;
72562+ else
72563+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
72564+ }
72565+ subj->resmask = subjcompat.resmask;
72566+
72567+ subj->user_trans_type = subjcompat.user_trans_type;
72568+ subj->group_trans_type = subjcompat.group_trans_type;
72569+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
72570+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
72571+ subj->user_trans_num = subjcompat.user_trans_num;
72572+ subj->group_trans_num = subjcompat.group_trans_num;
72573+
72574+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
72575+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
72576+ subj->ip_type = subjcompat.ip_type;
72577+ subj->ips = compat_ptr(subjcompat.ips);
72578+ subj->ip_num = subjcompat.ip_num;
72579+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
72580+
72581+ subj->crashes = subjcompat.crashes;
72582+ subj->expires = subjcompat.expires;
72583+
72584+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
72585+ subj->hash = compat_ptr(subjcompat.hash);
72586+ subj->prev = compat_ptr(subjcompat.prev);
72587+ subj->next = compat_ptr(subjcompat.next);
72588+
72589+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
72590+ subj->obj_hash_size = subjcompat.obj_hash_size;
72591+ subj->pax_flags = subjcompat.pax_flags;
72592+
72593+ return 0;
72594+}
72595+
72596+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
72597+{
72598+ struct acl_role_label_compat rolecompat;
72599+
72600+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
72601+ return -EFAULT;
72602+
72603+ role->rolename = compat_ptr(rolecompat.rolename);
72604+ role->uidgid = rolecompat.uidgid;
72605+ role->roletype = rolecompat.roletype;
72606+
72607+ role->auth_attempts = rolecompat.auth_attempts;
72608+ role->expires = rolecompat.expires;
72609+
72610+ role->root_label = compat_ptr(rolecompat.root_label);
72611+ role->hash = compat_ptr(rolecompat.hash);
72612+
72613+ role->prev = compat_ptr(rolecompat.prev);
72614+ role->next = compat_ptr(rolecompat.next);
72615+
72616+ role->transitions = compat_ptr(rolecompat.transitions);
72617+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
72618+ role->domain_children = compat_ptr(rolecompat.domain_children);
72619+ role->domain_child_num = rolecompat.domain_child_num;
72620+
72621+ role->umask = rolecompat.umask;
72622+
72623+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
72624+ role->subj_hash_size = rolecompat.subj_hash_size;
72625+
72626+ return 0;
72627+}
72628+
72629+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
72630+{
72631+ struct role_allowed_ip_compat roleip_compat;
72632+
72633+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
72634+ return -EFAULT;
72635+
72636+ roleip->addr = roleip_compat.addr;
72637+ roleip->netmask = roleip_compat.netmask;
72638+
72639+ roleip->prev = compat_ptr(roleip_compat.prev);
72640+ roleip->next = compat_ptr(roleip_compat.next);
72641+
72642+ return 0;
72643+}
72644+
72645+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
72646+{
72647+ struct role_transition_compat trans_compat;
72648+
72649+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
72650+ return -EFAULT;
72651+
72652+ trans->rolename = compat_ptr(trans_compat.rolename);
72653+
72654+ trans->prev = compat_ptr(trans_compat.prev);
72655+ trans->next = compat_ptr(trans_compat.next);
72656+
72657+ return 0;
72658+
72659+}
72660+
72661+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
72662+{
72663+ struct gr_hash_struct_compat hash_compat;
72664+
72665+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
72666+ return -EFAULT;
72667+
72668+ hash->table = compat_ptr(hash_compat.table);
72669+ hash->nametable = compat_ptr(hash_compat.nametable);
72670+ hash->first = compat_ptr(hash_compat.first);
72671+
72672+ hash->table_size = hash_compat.table_size;
72673+ hash->used_size = hash_compat.used_size;
72674+
72675+ hash->type = hash_compat.type;
72676+
72677+ return 0;
72678+}
72679+
72680+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
72681+{
72682+ compat_uptr_t ptrcompat;
72683+
72684+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
72685+ return -EFAULT;
72686+
72687+ *(void **)ptr = compat_ptr(ptrcompat);
72688+
72689+ return 0;
72690+}
72691+
72692+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
72693+{
72694+ struct acl_ip_label_compat ip_compat;
72695+
72696+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
72697+ return -EFAULT;
72698+
72699+ ip->iface = compat_ptr(ip_compat.iface);
72700+ ip->addr = ip_compat.addr;
72701+ ip->netmask = ip_compat.netmask;
72702+ ip->low = ip_compat.low;
72703+ ip->high = ip_compat.high;
72704+ ip->mode = ip_compat.mode;
72705+ ip->type = ip_compat.type;
72706+
72707+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
72708+
72709+ ip->prev = compat_ptr(ip_compat.prev);
72710+ ip->next = compat_ptr(ip_compat.next);
72711+
72712+ return 0;
72713+}
72714+
72715+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
72716+{
72717+ struct sprole_pw_compat pw_compat;
72718+
72719+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
72720+ return -EFAULT;
72721+
72722+ pw->rolename = compat_ptr(pw_compat.rolename);
72723+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
72724+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
72725+
72726+ return 0;
72727+}
72728+
72729+size_t get_gr_arg_wrapper_size_compat(void)
72730+{
72731+ return sizeof(struct gr_arg_wrapper_compat);
72732+}
72733+
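Every helper in this file follows one shape; a hedged sketch with a hypothetical struct foo (compat_uptr_t, compat_ulong_t, and compat_ptr() are the real kernel compat types and helper):

#include <linux/compat.h>
#include <asm/uaccess.h>

struct foo        { void *p;         unsigned long n;  };
struct foo_compat { compat_uptr_t p; compat_ulong_t n; };

/* Copy the 32-bit layout from userspace, then widen pointers with
 * compat_ptr() and assign scalars field by field. */
static int copy_foo_compat(struct foo *f, const void __user *userp)
{
	struct foo_compat fc;

	if (copy_from_user(&fc, userp, sizeof(fc)))
		return -EFAULT;

	f->p = compat_ptr(fc.p);	/* 32-bit uptr -> kernel pointer */
	f->n = fc.n;

	return 0;
}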
72734diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
72735new file mode 100644
72736index 0000000..8ee8e4f
72737--- /dev/null
72738+++ b/grsecurity/gracl_fs.c
72739@@ -0,0 +1,447 @@
72740+#include <linux/kernel.h>
72741+#include <linux/sched.h>
72742+#include <linux/types.h>
72743+#include <linux/fs.h>
72744+#include <linux/file.h>
72745+#include <linux/stat.h>
72746+#include <linux/grsecurity.h>
72747+#include <linux/grinternal.h>
72748+#include <linux/gracl.h>
72749+
72750+umode_t
72751+gr_acl_umask(void)
72752+{
72753+ if (unlikely(!gr_acl_is_enabled()))
72754+ return 0;
72755+
72756+ return current->role->umask;
72757+}
72758+
72759+__u32
72760+gr_acl_handle_hidden_file(const struct dentry * dentry,
72761+ const struct vfsmount * mnt)
72762+{
72763+ __u32 mode;
72764+
72765+ if (unlikely(d_is_negative(dentry)))
72766+ return GR_FIND;
72767+
72768+ mode =
72769+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
72770+
72771+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
72772+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
72773+ return mode;
72774+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
72775+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
72776+ return 0;
72777+ } else if (unlikely(!(mode & GR_FIND)))
72778+ return 0;
72779+
72780+ return GR_FIND;
72781+}
72782+
72783+__u32
72784+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
72785+ int acc_mode)
72786+{
72787+ __u32 reqmode = GR_FIND;
72788+ __u32 mode;
72789+
72790+ if (unlikely(d_is_negative(dentry)))
72791+ return reqmode;
72792+
72793+ if (acc_mode & MAY_APPEND)
72794+ reqmode |= GR_APPEND;
72795+ else if (acc_mode & MAY_WRITE)
72796+ reqmode |= GR_WRITE;
72797+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
72798+ reqmode |= GR_READ;
72799+
72800+ mode =
72801+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
72802+ mnt);
72803+
72804+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72805+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
72806+ reqmode & GR_READ ? " reading" : "",
72807+ reqmode & GR_WRITE ? " writing" : reqmode &
72808+ GR_APPEND ? " appending" : "");
72809+ return reqmode;
72810+ } else
72811+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72812+ {
72813+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
72814+ reqmode & GR_READ ? " reading" : "",
72815+ reqmode & GR_WRITE ? " writing" : reqmode &
72816+ GR_APPEND ? " appending" : "");
72817+ return 0;
72818+ } else if (unlikely((mode & reqmode) != reqmode))
72819+ return 0;
72820+
72821+ return reqmode;
72822+}
72823+
72824+__u32
72825+gr_acl_handle_creat(const struct dentry * dentry,
72826+ const struct dentry * p_dentry,
72827+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
72828+ const int imode)
72829+{
72830+ __u32 reqmode = GR_WRITE | GR_CREATE;
72831+ __u32 mode;
72832+
72833+ if (acc_mode & MAY_APPEND)
72834+ reqmode |= GR_APPEND;
72835+ // if a directory was required or the directory already exists, then
72836+ // don't count this open as a read
72837+ if ((acc_mode & MAY_READ) &&
72838+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
72839+ reqmode |= GR_READ;
72840+ if ((open_flags & O_CREAT) &&
72841+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
72842+ reqmode |= GR_SETID;
72843+
72844+ mode =
72845+ gr_check_create(dentry, p_dentry, p_mnt,
72846+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
72847+
72848+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72849+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
72850+ reqmode & GR_READ ? " reading" : "",
72851+ reqmode & GR_WRITE ? " writing" : reqmode &
72852+ GR_APPEND ? " appending" : "");
72853+ return reqmode;
72854+ } else
72855+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72856+ {
72857+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
72858+ reqmode & GR_READ ? " reading" : "",
72859+ reqmode & GR_WRITE ? " writing" : reqmode &
72860+ GR_APPEND ? " appending" : "");
72861+ return 0;
72862+ } else if (unlikely((mode & reqmode) != reqmode))
72863+ return 0;
72864+
72865+ return reqmode;
72866+}
72867+
72868+__u32
72869+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
72870+ const int fmode)
72871+{
72872+ __u32 mode, reqmode = GR_FIND;
72873+
72874+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
72875+ reqmode |= GR_EXEC;
72876+ if (fmode & S_IWOTH)
72877+ reqmode |= GR_WRITE;
72878+ if (fmode & S_IROTH)
72879+ reqmode |= GR_READ;
72880+
72881+ mode =
72882+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
72883+ mnt);
72884+
72885+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72886+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
72887+ reqmode & GR_READ ? " reading" : "",
72888+ reqmode & GR_WRITE ? " writing" : "",
72889+ reqmode & GR_EXEC ? " executing" : "");
72890+ return reqmode;
72891+ } else
72892+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72893+ {
72894+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
72895+ reqmode & GR_READ ? " reading" : "",
72896+ reqmode & GR_WRITE ? " writing" : "",
72897+ reqmode & GR_EXEC ? " executing" : "");
72898+ return 0;
72899+ } else if (unlikely((mode & reqmode) != reqmode))
72900+ return 0;
72901+
72902+ return reqmode;
72903+}
72904+
72905+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
72906+{
72907+ __u32 mode;
72908+
72909+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
72910+
72911+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
72912+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
72913+ return mode;
72914+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
72915+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
72916+ return 0;
72917+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
72918+ return 0;
72919+
72920+ return (reqmode);
72921+}
72922+
72923+__u32
72924+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
72925+{
72926+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
72927+}
72928+
72929+__u32
72930+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
72931+{
72932+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
72933+}
72934+
72935+__u32
72936+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
72937+{
72938+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
72939+}
72940+
72941+__u32
72942+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
72943+{
72944+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
72945+}
72946+
72947+__u32
72948+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
72949+ umode_t *modeptr)
72950+{
72951+ umode_t mode;
72952+
72953+ *modeptr &= ~gr_acl_umask();
72954+ mode = *modeptr;
72955+
72956+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
72957+ return 1;
72958+
72959+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
72960+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
72961+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
72962+ GR_CHMOD_ACL_MSG);
72963+ } else {
72964+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
72965+ }
72966+}
72967+
72968+__u32
72969+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
72970+{
72971+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
72972+}
72973+
72974+__u32
72975+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
72976+{
72977+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
72978+}
72979+
72980+__u32
72981+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
72982+{
72983+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
72984+}
72985+
72986+__u32
72987+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
72988+{
72989+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
72990+}
72991+
72992+__u32
72993+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
72994+{
72995+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
72996+ GR_UNIXCONNECT_ACL_MSG);
72997+}
72998+
72999+/* hardlinks require at minimum create and link permission;
73000+   any additional privilege required depends on the
73001+   privilege of the file being linked to
73002+*/
73003+__u32
73004+gr_acl_handle_link(const struct dentry * new_dentry,
73005+ const struct dentry * parent_dentry,
73006+ const struct vfsmount * parent_mnt,
73007+ const struct dentry * old_dentry,
73008+ const struct vfsmount * old_mnt, const struct filename *to)
73009+{
73010+ __u32 mode;
73011+ __u32 needmode = GR_CREATE | GR_LINK;
73012+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
73013+
73014+ mode =
73015+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
73016+ old_mnt);
73017+
73018+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
73019+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
73020+ return mode;
73021+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
73022+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
73023+ return 0;
73024+ } else if (unlikely((mode & needmode) != needmode))
73025+ return 0;
73026+
73027+ return 1;
73028+}
73029+
73030+__u32
73031+gr_acl_handle_symlink(const struct dentry * new_dentry,
73032+ const struct dentry * parent_dentry,
73033+ const struct vfsmount * parent_mnt, const struct filename *from)
73034+{
73035+ __u32 needmode = GR_WRITE | GR_CREATE;
73036+ __u32 mode;
73037+
73038+ mode =
73039+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
73040+ GR_CREATE | GR_AUDIT_CREATE |
73041+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
73042+
73043+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
73044+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
73045+ return mode;
73046+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
73047+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
73048+ return 0;
73049+ } else if (unlikely((mode & needmode) != needmode))
73050+ return 0;
73051+
73052+ return (GR_WRITE | GR_CREATE);
73053+}
73054+
73055+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
73056+{
73057+ __u32 mode;
73058+
73059+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
73060+
73061+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
73062+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
73063+ return mode;
73064+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
73065+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
73066+ return 0;
73067+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
73068+ return 0;
73069+
73070+ return (reqmode);
73071+}
73072+
73073+__u32
73074+gr_acl_handle_mknod(const struct dentry * new_dentry,
73075+ const struct dentry * parent_dentry,
73076+ const struct vfsmount * parent_mnt,
73077+ const int mode)
73078+{
73079+ __u32 reqmode = GR_WRITE | GR_CREATE;
73080+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
73081+ reqmode |= GR_SETID;
73082+
73083+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
73084+ reqmode, GR_MKNOD_ACL_MSG);
73085+}
73086+
73087+__u32
73088+gr_acl_handle_mkdir(const struct dentry *new_dentry,
73089+ const struct dentry *parent_dentry,
73090+ const struct vfsmount *parent_mnt)
73091+{
73092+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
73093+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
73094+}
73095+
73096+#define RENAME_CHECK_SUCCESS(old, new) \
73097+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
73098+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
73099+
73100+int
73101+gr_acl_handle_rename(struct dentry *new_dentry,
73102+ struct dentry *parent_dentry,
73103+ const struct vfsmount *parent_mnt,
73104+ struct dentry *old_dentry,
73105+ struct inode *old_parent_inode,
73106+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
73107+{
73108+ __u32 comp1, comp2;
73109+ int error = 0;
73110+
73111+ if (unlikely(!gr_acl_is_enabled()))
73112+ return 0;
73113+
73114+ if (flags & RENAME_EXCHANGE) {
73115+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
73116+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73117+ GR_SUPPRESS, parent_mnt);
73118+ comp2 =
73119+ gr_search_file(old_dentry,
73120+ GR_READ | GR_WRITE | GR_AUDIT_READ |
73121+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
73122+ } else if (d_is_negative(new_dentry)) {
73123+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
73124+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
73125+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
73126+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
73127+ GR_DELETE | GR_AUDIT_DELETE |
73128+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73129+ GR_SUPPRESS, old_mnt);
73130+ } else {
73131+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
73132+ GR_CREATE | GR_DELETE |
73133+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
73134+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73135+ GR_SUPPRESS, parent_mnt);
73136+ comp2 =
73137+ gr_search_file(old_dentry,
73138+ GR_READ | GR_WRITE | GR_AUDIT_READ |
73139+ GR_DELETE | GR_AUDIT_DELETE |
73140+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
73141+ }
73142+
73143+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
73144+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
73145+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
73146+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
73147+ && !(comp2 & GR_SUPPRESS)) {
73148+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
73149+ error = -EACCES;
73150+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
73151+ error = -EACCES;
73152+
73153+ return error;
73154+}
73155+
73156+void
73157+gr_acl_handle_exit(void)
73158+{
73159+ u16 id;
73160+ char *rolename;
73161+
73162+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
73163+ !(current->role->roletype & GR_ROLE_PERSIST))) {
73164+ id = current->acl_role_id;
73165+ rolename = current->role->rolename;
73166+ gr_set_acls(1);
73167+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
73168+ }
73169+
73170+ gr_put_exec_file(current);
73171+ return;
73172+}
73173+
73174+int
73175+gr_acl_handle_procpidmem(const struct task_struct *task)
73176+{
73177+ if (unlikely(!gr_acl_is_enabled()))
73178+ return 0;
73179+
73180+ if (task != current && (task->acl->mode & GR_PROTPROCFD) &&
73181+ !(current->acl->mode & GR_POVERRIDE) &&
73182+ !(current->role->roletype & GR_ROLE_GOD))
73183+ return -EACCES;
73184+
73185+ return 0;
73186+}
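
The handlers above all funnel through the same four-way audit/suppress decision in generic_fs_handler(); a compact model of that decision (the enum and function are illustrative restatements, not new mechanism):

enum verdict { ALLOW, ALLOW_LOGGED, DENY, DENY_LOGGED };

/* granted + audit bit   -> log success, allow
 * denied  + !GR_SUPPRESS -> log denial, refuse
 * denied  + GR_SUPPRESS  -> refuse silently
 * granted otherwise      -> allow silently */
static enum verdict decide(unsigned mode, unsigned reqmode,
			   unsigned audits, unsigned suppress)
{
	if ((mode & reqmode) == reqmode)
		return (mode & audits) ? ALLOW_LOGGED : ALLOW;
	return (mode & suppress) ? DENY : DENY_LOGGED;
}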
73187diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
73188new file mode 100644
73189index 0000000..f056b81
73190--- /dev/null
73191+++ b/grsecurity/gracl_ip.c
73192@@ -0,0 +1,386 @@
73193+#include <linux/kernel.h>
73194+#include <asm/uaccess.h>
73195+#include <asm/errno.h>
73196+#include <net/sock.h>
73197+#include <linux/file.h>
73198+#include <linux/fs.h>
73199+#include <linux/net.h>
73200+#include <linux/in.h>
73201+#include <linux/skbuff.h>
73202+#include <linux/ip.h>
73203+#include <linux/udp.h>
73204+#include <linux/types.h>
73205+#include <linux/sched.h>
73206+#include <linux/netdevice.h>
73207+#include <linux/inetdevice.h>
73208+#include <linux/gracl.h>
73209+#include <linux/grsecurity.h>
73210+#include <linux/grinternal.h>
73211+
73212+#define GR_BIND 0x01
73213+#define GR_CONNECT 0x02
73214+#define GR_INVERT 0x04
73215+#define GR_BINDOVERRIDE 0x08
73216+#define GR_CONNECTOVERRIDE 0x10
73217+#define GR_SOCK_FAMILY 0x20
73218+
73219+static const char * gr_protocols[IPPROTO_MAX] = {
73220+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
73221+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
73222+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
73223+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
73224+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
73225+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
73226+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
73227+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
73228+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
73229+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
73230+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
73231+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
73232+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
73233+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
73234+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
73235+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
73236+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
73237+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
73238+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
73239+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
73240+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
73241+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
73242+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
73243+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
73244+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
73245+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
73246+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
73247+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
73248+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
73249+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
73250+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
73251+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
73252+ };
73253+
73254+static const char * gr_socktypes[SOCK_MAX] = {
73255+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
73256+ "unknown:7", "unknown:8", "unknown:9", "packet"
73257+ };
73258+
73259+static const char * gr_sockfamilies[AF_MAX+1] = {
73260+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
73261+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
73262+	"econet", "atmsvc", "rds", "sna", "irda", "pppox", "wanpipe", "llc", "fam_27", "fam_28",
73263+	"tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "caif"
73264+ };
73265+
73266+const char *
73267+gr_proto_to_name(unsigned char proto)
73268+{
73269+ return gr_protocols[proto];
73270+}
73271+
73272+const char *
73273+gr_socktype_to_name(unsigned char type)
73274+{
73275+ return gr_socktypes[type];
73276+}
73277+
73278+const char *
73279+gr_sockfamily_to_name(unsigned char family)
73280+{
73281+ return gr_sockfamilies[family];
73282+}
73283+
73284+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
73285+
73286+int
73287+gr_search_socket(const int domain, const int type, const int protocol)
73288+{
73289+ struct acl_subject_label *curr;
73290+ const struct cred *cred = current_cred();
73291+
73292+ if (unlikely(!gr_acl_is_enabled()))
73293+ goto exit;
73294+
73295+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
73296+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
73297+ goto exit; // let the kernel handle it
73298+
73299+ curr = current->acl;
73300+
73301+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
73302+ /* the family is allowed, if this is PF_INET allow it only if
73303+ the extra sock type/protocol checks pass */
73304+ if (domain == PF_INET)
73305+ goto inet_check;
73306+ goto exit;
73307+ } else {
73308+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73309+ __u32 fakeip = 0;
73310+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73311+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73312+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73313+ gr_to_filename(current->exec_file->f_path.dentry,
73314+ current->exec_file->f_path.mnt) :
73315+ curr->filename, curr->filename,
73316+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
73317+ &current->signal->saved_ip);
73318+ goto exit;
73319+ }
73320+ goto exit_fail;
73321+ }
73322+
73323+inet_check:
73324+ /* the rest of this checking is for IPv4 only */
73325+ if (!curr->ips)
73326+ goto exit;
73327+
73328+ if ((curr->ip_type & (1U << type)) &&
73329+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
73330+ goto exit;
73331+
73332+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73333+		/* we don't place acls on raw sockets, and sometimes
73334+ dgram/ip sockets are opened for ioctl and not
73335+ bind/connect, so we'll fake a bind learn log */
73336+ if (type == SOCK_RAW || type == SOCK_PACKET) {
73337+ __u32 fakeip = 0;
73338+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73339+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73340+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73341+ gr_to_filename(current->exec_file->f_path.dentry,
73342+ current->exec_file->f_path.mnt) :
73343+ curr->filename, curr->filename,
73344+ &fakeip, 0, type,
73345+ protocol, GR_CONNECT, &current->signal->saved_ip);
73346+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
73347+ __u32 fakeip = 0;
73348+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73349+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73350+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73351+ gr_to_filename(current->exec_file->f_path.dentry,
73352+ current->exec_file->f_path.mnt) :
73353+ curr->filename, curr->filename,
73354+ &fakeip, 0, type,
73355+ protocol, GR_BIND, &current->signal->saved_ip);
73356+ }
73357+ /* we'll log when they use connect or bind */
73358+ goto exit;
73359+ }
73360+
73361+exit_fail:
73362+ if (domain == PF_INET)
73363+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
73364+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
73365+ else if (rcu_access_pointer(net_families[domain]) != NULL)
73366+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
73367+ gr_socktype_to_name(type), protocol);
73368+
73369+ return 0;
73370+exit:
73371+ return 1;
73372+}
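
The family/type/protocol checks above are two-level bitmap membership tests; a standalone userspace model (bit_set is a made-up helper, PF_INET = 2 and PF_INET6 = 10 are the real values):

#include <stdio.h>

/* One bit per identifier, packed into 32-bit words. */
static int bit_set(const unsigned *words, int id)
{
	return (words[id / 32] >> (id % 32)) & 1;
}

int main(void)
{
	unsigned families[8] = { 0 };

	families[2 / 32] |= 1U << (2 % 32);	/* allow PF_INET (2) */
	printf("inet allowed:  %d\n", bit_set(families, 2));	/* 1 */
	printf("inet6 allowed: %d\n", bit_set(families, 10));	/* 0 */
	return 0;
}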
73373+
73374+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
73375+{
73376+ if ((ip->mode & mode) &&
73377+ (ip_port >= ip->low) &&
73378+ (ip_port <= ip->high) &&
73379+ ((ntohl(ip_addr) & our_netmask) ==
73380+ (ntohl(our_addr) & our_netmask))
73381+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
73382+ && (ip->type & (1U << type))) {
73383+ if (ip->mode & GR_INVERT)
73384+ return 2; // specifically denied
73385+ else
73386+ return 1; // allowed
73387+ }
73388+
73389+ return 0; // not specifically allowed, may continue parsing
73390+}
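
A worked example of the masked-address comparison in check_ip_policy(), in host byte order as after ntohl(); the addresses are arbitrary:

#include <stdio.h>

int main(void)
{
	/* a rule for 192.168.1.0/24 matches 192.168.1.77 because both
	   addresses agree on the bits selected by the netmask */
	unsigned rule_addr = (192u << 24) | (168u << 16) | (1u << 8) | 0;
	unsigned netmask   = 0xffffff00;	/* /24 */
	unsigned peer      = (192u << 24) | (168u << 16) | (1u << 8) | 77;

	printf("match: %d\n", (peer & netmask) == (rule_addr & netmask)); /* 1 */
	return 0;
}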
73391+
73392+static int
73393+gr_search_connectbind(const int full_mode, struct sock *sk,
73394+ struct sockaddr_in *addr, const int type)
73395+{
73396+ char iface[IFNAMSIZ] = {0};
73397+ struct acl_subject_label *curr;
73398+ struct acl_ip_label *ip;
73399+ struct inet_sock *isk;
73400+ struct net_device *dev;
73401+ struct in_device *idev;
73402+ unsigned long i;
73403+ int ret;
73404+ int mode = full_mode & (GR_BIND | GR_CONNECT);
73405+ __u32 ip_addr = 0;
73406+ __u32 our_addr;
73407+ __u32 our_netmask;
73408+ char *p;
73409+ __u16 ip_port = 0;
73410+ const struct cred *cred = current_cred();
73411+
73412+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
73413+ return 0;
73414+
73415+ curr = current->acl;
73416+ isk = inet_sk(sk);
73417+
73418+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
73419+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
73420+ addr->sin_addr.s_addr = curr->inaddr_any_override;
73421+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
73422+ struct sockaddr_in saddr;
73423+ int err;
73424+
73425+ saddr.sin_family = AF_INET;
73426+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
73427+ saddr.sin_port = isk->inet_sport;
73428+
73429+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73430+ if (err)
73431+ return err;
73432+
73433+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73434+ if (err)
73435+		/* a capability is governed by the nearest subject in the parent
73436+		   chain that specifies it: once a level raises the cap in its
73437+		   cap_mask, fold it into the computed mask, record whether that
73438+		   level lowers it (cap_drop), and ignore all further ancestors
73439+		   for that cap */
73440+
73441+ ip_addr = addr->sin_addr.s_addr;
73442+ ip_port = ntohs(addr->sin_port);
73443+
73444+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73445+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73446+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73447+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73448+ gr_to_filename(current->exec_file->f_path.dentry,
73449+ current->exec_file->f_path.mnt) :
73450+ curr->filename, curr->filename,
73451+ &ip_addr, ip_port, type,
73452+ sk->sk_protocol, mode, &current->signal->saved_ip);
73453+ return 0;
73454+ }
73455+
73456+ for (i = 0; i < curr->ip_num; i++) {
73457+ ip = *(curr->ips + i);
73458+ if (ip->iface != NULL) {
73459+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
73460+ p = strchr(iface, ':');
73461+ if (p != NULL)
73462+ *p = '\0';
73463+ dev = dev_get_by_name(sock_net(sk), iface);
73464+ if (dev == NULL)
73465+ continue;
73466+ idev = in_dev_get(dev);
73467+ if (idev == NULL) {
73468+ dev_put(dev);
73469+ continue;
73470+ }
73471+ rcu_read_lock();
73472+ for_ifa(idev) {
73473+ if (!strcmp(ip->iface, ifa->ifa_label)) {
73474+ our_addr = ifa->ifa_address;
73475+ our_netmask = 0xffffffff;
73476+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73477+ if (ret == 1) {
73478+ rcu_read_unlock();
73479+ in_dev_put(idev);
73480+ dev_put(dev);
73481+ return 0;
73482+ } else if (ret == 2) {
73483+ rcu_read_unlock();
73484+ in_dev_put(idev);
73485+ dev_put(dev);
73486+ goto denied;
73487+ }
73488+ }
73489+ } endfor_ifa(idev);
73490+ rcu_read_unlock();
73491+ in_dev_put(idev);
73492+ dev_put(dev);
73493+ } else {
73494+ our_addr = ip->addr;
73495+ our_netmask = ip->netmask;
73496+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73497+ if (ret == 1)
73498+ return 0;
73499+ else if (ret == 2)
73500+ goto denied;
73501+ }
73502+ }
73503+
73504+denied:
73505+ if (mode == GR_BIND)
73506+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73507+ else if (mode == GR_CONNECT)
73508+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73509+
73510+ return -EACCES;
73511+}
73512+
73513+int
73514+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
73515+{
73516+ /* always allow disconnection of dgram sockets with connect */
73517+ if (addr->sin_family == AF_UNSPEC)
73518+ return 0;
73519+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
73520+}
73521+
73522+int
73523+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
73524+{
73525+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
73526+}
73527+
73528+int gr_search_listen(struct socket *sock)
73529+{
73530+ struct sock *sk = sock->sk;
73531+ struct sockaddr_in addr;
73532+
73533+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73534+ addr.sin_port = inet_sk(sk)->inet_sport;
73535+
73536+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73537+}
73538+
73539+int gr_search_accept(struct socket *sock)
73540+{
73541+ struct sock *sk = sock->sk;
73542+ struct sockaddr_in addr;
73543+
73544+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73545+ addr.sin_port = inet_sk(sk)->inet_sport;
73546+
73547+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73548+}
73549+
73550+int
73551+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
73552+{
73553+ if (addr)
73554+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
73555+ else {
73556+ struct sockaddr_in sin;
73557+ const struct inet_sock *inet = inet_sk(sk);
73558+
73559+ sin.sin_addr.s_addr = inet->inet_daddr;
73560+ sin.sin_port = inet->inet_dport;
73561+
73562+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73563+ }
73564+}
73565+
73566+int
73567+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
73568+{
73569+ struct sockaddr_in sin;
73570+
73571+ if (unlikely(skb->len < sizeof (struct udphdr)))
73572+ return 0; // skip this packet
73573+
73574+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
73575+ sin.sin_port = udp_hdr(skb)->source;
73576+
73577+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73578+}
73579diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
73580new file mode 100644
73581index 0000000..25f54ef
73582--- /dev/null
73583+++ b/grsecurity/gracl_learn.c
73584@@ -0,0 +1,207 @@
73585+#include <linux/kernel.h>
73586+#include <linux/mm.h>
73587+#include <linux/sched.h>
73588+#include <linux/poll.h>
73589+#include <linux/string.h>
73590+#include <linux/file.h>
73591+#include <linux/types.h>
73592+#include <linux/vmalloc.h>
73593+#include <linux/grinternal.h>
73594+
73595+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
73596+ size_t count, loff_t *ppos);
73597+extern int gr_acl_is_enabled(void);
73598+
73599+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
73600+static int gr_learn_attached;
73601+
73602+/* use a 512k buffer */
73603+#define LEARN_BUFFER_SIZE (512 * 1024)
73604+
73605+static DEFINE_SPINLOCK(gr_learn_lock);
73606+static DEFINE_MUTEX(gr_learn_user_mutex);
73607+
73608+/* we need to maintain two buffers: the grlearn reader context takes a
73609+   mutex around the copy to userspace, while the other kernel contexts
73610+   take a spinlock when copying into the buffer, since they cannot sleep
73611+*/
73612+static char *learn_buffer;
73613+static char *learn_buffer_user;
73614+static int learn_buffer_len;
73615+static int learn_buffer_user_len;
73616+
73617+static ssize_t
73618+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
73619+{
73620+ DECLARE_WAITQUEUE(wait, current);
73621+ ssize_t retval = 0;
73622+
73623+ add_wait_queue(&learn_wait, &wait);
73624+ set_current_state(TASK_INTERRUPTIBLE);
73625+ do {
73626+ mutex_lock(&gr_learn_user_mutex);
73627+ spin_lock(&gr_learn_lock);
73628+ if (learn_buffer_len)
73629+ break;
73630+ spin_unlock(&gr_learn_lock);
73631+ mutex_unlock(&gr_learn_user_mutex);
73632+ if (file->f_flags & O_NONBLOCK) {
73633+ retval = -EAGAIN;
73634+ goto out;
73635+ }
73636+ if (signal_pending(current)) {
73637+ retval = -ERESTARTSYS;
73638+ goto out;
73639+ }
73640+
73641+ schedule();
73642+ } while (1);
73643+
73644+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
73645+ learn_buffer_user_len = learn_buffer_len;
73646+ retval = learn_buffer_len;
73647+ learn_buffer_len = 0;
73648+
73649+ spin_unlock(&gr_learn_lock);
73650+
73651+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
73652+ retval = -EFAULT;
73653+
73654+ mutex_unlock(&gr_learn_user_mutex);
73655+out:
73656+ set_current_state(TASK_RUNNING);
73657+ remove_wait_queue(&learn_wait, &wait);
73658+ return retval;
73659+}
73660+
73661+static unsigned int
73662+poll_learn(struct file * file, poll_table * wait)
73663+{
73664+ poll_wait(file, &learn_wait, wait);
73665+
73666+ if (learn_buffer_len)
73667+ return (POLLIN | POLLRDNORM);
73668+
73669+ return 0;
73670+}
73671+
73672+void
73673+gr_clear_learn_entries(void)
73674+{
73675+ char *tmp;
73676+
73677+ mutex_lock(&gr_learn_user_mutex);
73678+ spin_lock(&gr_learn_lock);
73679+ tmp = learn_buffer;
73680+ learn_buffer = NULL;
73681+ spin_unlock(&gr_learn_lock);
73682+ if (tmp)
73683+ vfree(tmp);
73684+ if (learn_buffer_user != NULL) {
73685+ vfree(learn_buffer_user);
73686+ learn_buffer_user = NULL;
73687+ }
73688+ learn_buffer_len = 0;
73689+ mutex_unlock(&gr_learn_user_mutex);
73690+
73691+ return;
73692+}
73693+
73694+void
73695+gr_add_learn_entry(const char *fmt, ...)
73696+{
73697+ va_list args;
73698+ unsigned int len;
73699+
73700+ if (!gr_learn_attached)
73701+ return;
73702+
73703+ spin_lock(&gr_learn_lock);
73704+
73705+ /* leave a gap at the end so we know when it's "full" but don't have to
73706+ compute the exact length of the string we're trying to append
73707+ */
73708+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
73709+ spin_unlock(&gr_learn_lock);
73710+ wake_up_interruptible(&learn_wait);
73711+ return;
73712+ }
73713+ if (learn_buffer == NULL) {
73714+ spin_unlock(&gr_learn_lock);
73715+ return;
73716+ }
73717+
73718+ va_start(args, fmt);
73719+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
73720+ va_end(args);
73721+
73722+ learn_buffer_len += len + 1;
73723+
73724+ spin_unlock(&gr_learn_lock);
73725+ wake_up_interruptible(&learn_wait);
73726+
73727+ return;
73728+}
73729+
73730+static int
73731+open_learn(struct inode *inode, struct file *file)
73732+{
73733+ if (file->f_mode & FMODE_READ && gr_learn_attached)
73734+ return -EBUSY;
73735+ if (file->f_mode & FMODE_READ) {
73736+ int retval = 0;
73737+ mutex_lock(&gr_learn_user_mutex);
73738+ if (learn_buffer == NULL)
73739+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
73740+ if (learn_buffer_user == NULL)
73741+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
73742+ if (learn_buffer == NULL) {
73743+ retval = -ENOMEM;
73744+ goto out_error;
73745+ }
73746+ if (learn_buffer_user == NULL) {
73747+ retval = -ENOMEM;
73748+ goto out_error;
73749+ }
73750+ learn_buffer_len = 0;
73751+ learn_buffer_user_len = 0;
73752+ gr_learn_attached = 1;
73753+out_error:
73754+ mutex_unlock(&gr_learn_user_mutex);
73755+ return retval;
73756+ }
73757+ return 0;
73758+}
73759+
73760+static int
73761+close_learn(struct inode *inode, struct file *file)
73762+{
73763+ if (file->f_mode & FMODE_READ) {
73764+ char *tmp = NULL;
73765+ mutex_lock(&gr_learn_user_mutex);
73766+ spin_lock(&gr_learn_lock);
73767+ tmp = learn_buffer;
73768+ learn_buffer = NULL;
73769+ spin_unlock(&gr_learn_lock);
73770+ if (tmp)
73771+ vfree(tmp);
73772+ if (learn_buffer_user != NULL) {
73773+ vfree(learn_buffer_user);
73774+ learn_buffer_user = NULL;
73775+ }
73776+ learn_buffer_len = 0;
73777+ learn_buffer_user_len = 0;
73778+ gr_learn_attached = 0;
73779+ mutex_unlock(&gr_learn_user_mutex);
73780+ }
73781+
73782+ return 0;
73783+}
73784+
73785+const struct file_operations grsec_fops = {
73786+ .read = read_learn,
73787+ .write = write_grsec_handler,
73788+ .open = open_learn,
73789+ .release = close_learn,
73790+ .poll = poll_learn,
73791+};
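
A hedged userspace sketch of a reader driven by these file operations, in the spirit of the grlearn tool; the device path is an assumption that depends on how the policy tooling exposes this interface:

/* Block in poll() until learn data is ready, then drain it with read(). */
#include <poll.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	static char buf[512 * 1024];	/* matches LEARN_BUFFER_SIZE */
	struct pollfd pfd;
	ssize_t n;

	pfd.fd = open("/dev/grsec", O_RDONLY);	/* path is an assumption */
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLIN;

	while (poll(&pfd, 1, -1) > 0) {
		n = read(pfd.fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		fwrite(buf, 1, n, stdout);	/* learn entries are text */
	}
	close(pfd.fd);
	return 0;
}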
73792diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
73793new file mode 100644
73794index 0000000..fd26052
73795--- /dev/null
73796+++ b/grsecurity/gracl_policy.c
73797@@ -0,0 +1,1781 @@
73798+#include <linux/kernel.h>
73799+#include <linux/module.h>
73800+#include <linux/sched.h>
73801+#include <linux/mm.h>
73802+#include <linux/file.h>
73803+#include <linux/fs.h>
73804+#include <linux/namei.h>
73805+#include <linux/mount.h>
73806+#include <linux/tty.h>
73807+#include <linux/proc_fs.h>
73808+#include <linux/lglock.h>
73809+#include <linux/slab.h>
73810+#include <linux/vmalloc.h>
73811+#include <linux/types.h>
73812+#include <linux/sysctl.h>
73813+#include <linux/netdevice.h>
73814+#include <linux/ptrace.h>
73815+#include <linux/gracl.h>
73816+#include <linux/gralloc.h>
73817+#include <linux/security.h>
73818+#include <linux/grinternal.h>
73819+#include <linux/pid_namespace.h>
73820+#include <linux/stop_machine.h>
73821+#include <linux/fdtable.h>
73822+#include <linux/percpu.h>
73824+#include <linux/hugetlb.h>
73825+#include <linux/posix-timers.h>
73826+#include "../fs/mount.h"
73827+
73828+#include <asm/uaccess.h>
73829+#include <asm/errno.h>
73830+#include <asm/mman.h>
73831+
73832+extern struct gr_policy_state *polstate;
73833+
73834+#define FOR_EACH_ROLE_START(role) \
73835+ role = polstate->role_list; \
73836+ while (role) {
73837+
73838+#define FOR_EACH_ROLE_END(role) \
73839+ role = role->prev; \
73840+ }
73841+
73842+struct path gr_real_root;
73843+
73844+extern struct gr_alloc_state *current_alloc_state;
73845+
73846+u16 acl_sp_role_value;
73847+
73848+static DEFINE_MUTEX(gr_dev_mutex);
73849+
73850+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
73851+extern void gr_clear_learn_entries(void);
73852+
73853+struct gr_arg *gr_usermode __read_only;
73854+unsigned char *gr_system_salt __read_only;
73855+unsigned char *gr_system_sum __read_only;
73856+
73857+static unsigned int gr_auth_attempts = 0;
73858+static unsigned long gr_auth_expires = 0UL;
73859+
73860+struct acl_object_label *fakefs_obj_rw;
73861+struct acl_object_label *fakefs_obj_rwx;
73862+
73863+extern int gr_init_uidset(void);
73864+extern void gr_free_uidset(void);
73865+extern void gr_remove_uid(uid_t uid);
73866+extern int gr_find_uid(uid_t uid);
73867+
73868+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback);
73869+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
73870+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
73871+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
73872+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
73873+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
73874+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
73875+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
73876+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
73877+extern struct acl_subject_label *lookup_acl_subj_label(const u64 ino, const dev_t dev, const struct acl_role_label *role);
73878+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev, const struct acl_role_label *role);
73879+extern void assign_special_role(const char *rolename);
73880+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
73881+extern int gr_rbac_disable(void *unused);
73882+extern void gr_enable_rbac_system(void);
73883+
73884+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
73885+{
73886+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
73887+ return -EFAULT;
73888+
73889+ return 0;
73890+}
73891+
73892+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
73893+{
73894+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
73895+ return -EFAULT;
73896+
73897+ return 0;
73898+}
73899+
73900+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
73901+{
73902+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
73903+ return -EFAULT;
73904+
73905+ return 0;
73906+}
73907+
73908+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
73909+{
73910+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
73911+ return -EFAULT;
73912+
73913+ return 0;
73914+}
73915+
73916+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
73917+{
73918+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
73919+ return -EFAULT;
73920+
73921+ return 0;
73922+}
73923+
73924+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
73925+{
73926+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
73927+ return -EFAULT;
73928+
73929+ return 0;
73930+}
73931+
73932+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
73933+{
73934+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
73935+ return -EFAULT;
73936+
73937+ return 0;
73938+}
73939+
73940+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
73941+{
73942+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
73943+ return -EFAULT;
73944+
73945+ return 0;
73946+}
73947+
73948+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
73949+{
73950+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
73951+ return -EFAULT;
73952+
73953+ return 0;
73954+}
73955+
73956+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
73957+{
73958+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
73959+ return -EFAULT;
73960+
73961+ if ((uwrap->version != GRSECURITY_VERSION) ||
73962+ (uwrap->size != sizeof(struct gr_arg)))
73963+ return -EINVAL;
73964+
73965+ return 0;
73966+}
73967+
73968+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
73969+{
73970+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
73971+ return -EFAULT;
73972+
73973+ return 0;
73974+}
73975+
73976+static size_t get_gr_arg_wrapper_size_normal(void)
73977+{
73978+ return sizeof(struct gr_arg_wrapper);
73979+}
73980+
73981+#ifdef CONFIG_COMPAT
73982+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
73983+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
73984+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
73985+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
73986+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
73987+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
73988+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
73989+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
73990+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
73991+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
73992+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
73993+extern size_t get_gr_arg_wrapper_size_compat(void);
73994+
73995+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
73996+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
73997+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
73998+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
73999+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
74000+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
74001+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
74002+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
74003+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
74004+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
74005+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
74006+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
74007+
74008+#else
74009+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
74010+#define copy_gr_arg copy_gr_arg_normal
74011+#define copy_gr_hash_struct copy_gr_hash_struct_normal
74012+#define copy_acl_object_label copy_acl_object_label_normal
74013+#define copy_acl_subject_label copy_acl_subject_label_normal
74014+#define copy_acl_role_label copy_acl_role_label_normal
74015+#define copy_acl_ip_label copy_acl_ip_label_normal
74016+#define copy_pointer_from_array copy_pointer_from_array_normal
74017+#define copy_sprole_pw copy_sprole_pw_normal
74018+#define copy_role_transition copy_role_transition_normal
74019+#define copy_role_allowed_ip copy_role_allowed_ip_normal
74020+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
74021+#endif
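/* The block above is the whole compat strategy in miniature: with
 * CONFIG_COMPAT, each copy helper becomes a function pointer selected per
 * request (see write_grsec_handler below); without it, the same names
 * collapse to the native helpers at compile time and the indirection
 * vanishes. A hypothetical, self-contained model of the idiom:
 */
#include <stdbool.h>
#include <stdio.h>

struct widget { int abi; };

static int copy_widget_normal(struct widget *w) { w->abi = 64; return 0; }
static int copy_widget_compat(struct widget *w) { w->abi = 32; return 0; }

/* one pointer per operation, chosen by the caller's ABI */
static int (*copy_widget)(struct widget *w);

static void select_helpers(bool compat_task)
{
	copy_widget = compat_task ? copy_widget_compat : copy_widget_normal;
}

int main(void)
{
	struct widget w;

	select_helpers(true);
	copy_widget(&w);
	printf("request parsed with %d-bit layout\n", w.abi);
	return 0;
}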
74022+
74023+static struct acl_subject_label *
74024+lookup_subject_map(const struct acl_subject_label *userp)
74025+{
74026+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
74027+ struct subject_map *match;
74028+
74029+ match = polstate->subj_map_set.s_hash[index];
74030+
74031+ while (match && match->user != userp)
74032+ match = match->next;
74033+
74034+ if (match != NULL)
74035+ return match->kernel;
74036+ else
74037+ return NULL;
74038+}
74039+
74040+static void
74041+insert_subj_map_entry(struct subject_map *subjmap)
74042+{
74043+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
74044+ struct subject_map **curr;
74045+
74046+ subjmap->prev = NULL;
74047+
74048+ curr = &polstate->subj_map_set.s_hash[index];
74049+ if (*curr != NULL)
74050+ (*curr)->prev = subjmap;
74051+
74052+ subjmap->next = *curr;
74053+ *curr = subjmap;
74054+
74055+ return;
74056+}
74057+
74058+static void
74059+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
74060+{
74061+ unsigned int index =
74062+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
74063+ struct acl_role_label **curr;
74064+ struct acl_role_label *tmp, *tmp2;
74065+
74066+ curr = &polstate->acl_role_set.r_hash[index];
74067+
74068+ /* simple case, slot is empty, just set it to our role */
74069+ if (*curr == NULL) {
74070+ *curr = role;
74071+ } else {
74072+ /* example:
74073+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
74074+ 2 -> 3
74075+ */
74076+ /* first check to see if we can already be reached via this slot */
74077+ tmp = *curr;
74078+ while (tmp && tmp != role)
74079+ tmp = tmp->next;
74080+ if (tmp == role) {
74081+ /* we don't need to add ourselves to this slot's chain */
74082+ return;
74083+ }
74084+ /* we need to add ourselves to this chain, two cases */
74085+ if (role->next == NULL) {
74086+ /* simple case, append the current chain to our role */
74087+ role->next = *curr;
74088+ *curr = role;
74089+ } else {
74090+ /* 1 -> 2 -> 3 -> 4
74091+ 2 -> 3 -> 4
74092+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
74093+ */
74094+ /* trickier case: walk our role's chain until we find
74095+ the role for the start of the current slot's chain */
74096+ tmp = role;
74097+ tmp2 = *curr;
74098+ while (tmp->next && tmp->next != tmp2)
74099+ tmp = tmp->next;
74100+ if (tmp->next == tmp2) {
74101+ /* from example above, we found 3, so just
74102+ replace this slot's chain with ours */
74103+ *curr = role;
74104+ } else {
74105+ /* we didn't find a subset of our role's chain
74106+ in the current slot's chain, so append their
74107+ chain to ours, and set us as the first role in
74108+ the slot's chain
74109+
74110+ we could fold this case with the case above,
74111+ but making it explicit for clarity
74112+ */
74113+ tmp->next = tmp2;
74114+ *curr = role;
74115+ }
74116+ }
74117+ }
74118+
74119+ return;
74120+}
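/* The bucket chains maintained above may legitimately share tails: a domain
 * role is inserted once per member uid/gid, and rather than duplicating
 * nodes, each extra bucket is spliced onto the role's single ->next chain.
 * A hypothetical stand-alone model of that behaviour, not part of the patch:
 */
#include <stdio.h>

struct role { const char *name; struct role *next; };

static struct role *bucket[4];

static void insert_role(struct role *r, unsigned int idx)
{
	struct role *p = bucket[idx];

	while (p && p != r)		/* already reachable from this slot? */
		p = p->next;
	if (p == r)
		return;

	if (r->next == NULL) {
		r->next = bucket[idx];	/* adopt the existing chain */
	} else {
		for (p = r; p->next && p->next != bucket[idx]; p = p->next)
			;
		if (p->next != bucket[idx])
			p->next = bucket[idx];	/* append their chain to ours */
	}
	bucket[idx] = r;		/* we now head this slot's chain */
}

int main(void)
{
	static struct role user = { "user", NULL };
	static struct role domain = { "domain", NULL };
	struct role *p;
	int i;

	insert_role(&user, 1);
	insert_role(&domain, 0);
	insert_role(&domain, 1);	/* buckets 0 and 1 now share a tail */

	for (i = 0; i < 2; i++) {
		printf("bucket %d:", i);
		for (p = bucket[i]; p; p = p->next)
			printf(" %s", p->name);
		printf("\n");
	}
	return 0;
}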
74121+
74122+static void
74123+insert_acl_role_label(struct acl_role_label *role)
74124+{
74125+ int i;
74126+
74127+ if (polstate->role_list == NULL) {
74128+ polstate->role_list = role;
74129+ role->prev = NULL;
74130+ } else {
74131+ role->prev = polstate->role_list;
74132+ polstate->role_list = role;
74133+ }
74134+
74135+ /* used for hash chains */
74136+ role->next = NULL;
74137+
74138+ if (role->roletype & GR_ROLE_DOMAIN) {
74139+ for (i = 0; i < role->domain_child_num; i++)
74140+ __insert_acl_role_label(role, role->domain_children[i]);
74141+ } else
74142+ __insert_acl_role_label(role, role->uidgid);
74143+}
74144+
74145+static int
74146+insert_name_entry(char *name, const u64 inode, const dev_t device, __u8 deleted)
74147+{
74148+ struct name_entry **curr, *nentry;
74149+ struct inodev_entry *ientry;
74150+ unsigned int len = strlen(name);
74151+ unsigned int key = full_name_hash(name, len);
74152+ unsigned int index = key % polstate->name_set.n_size;
74153+
74154+ curr = &polstate->name_set.n_hash[index];
74155+
74156+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
74157+ curr = &((*curr)->next);
74158+
74159+ if (*curr != NULL)
74160+ return 1;
74161+
74162+ nentry = acl_alloc(sizeof (struct name_entry));
74163+ if (nentry == NULL)
74164+ return 0;
74165+ ientry = acl_alloc(sizeof (struct inodev_entry));
74166+ if (ientry == NULL)
74167+ return 0;
74168+ ientry->nentry = nentry;
74169+
74170+ nentry->key = key;
74171+ nentry->name = name;
74172+ nentry->inode = inode;
74173+ nentry->device = device;
74174+ nentry->len = len;
74175+ nentry->deleted = deleted;
74176+
74177+ nentry->prev = NULL;
74178+ curr = &polstate->name_set.n_hash[index];
74179+ if (*curr != NULL)
74180+ (*curr)->prev = nentry;
74181+ nentry->next = *curr;
74182+ *curr = nentry;
74183+
74184+ /* insert us into the table searchable by inode/dev */
74185+ __insert_inodev_entry(polstate, ientry);
74186+
74187+ return 1;
74188+}
74189+
74190+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
74191+
74192+static void *
74193+create_table(__u32 * len, int elementsize)
74194+{
74195+ unsigned int table_sizes[] = {
74196+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
74197+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
74198+ 4194301, 8388593, 16777213, 33554393, 67108859
74199+ };
74200+ void *newtable = NULL;
74201+ unsigned int pwr = 0;
74202+
74203+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
74204+ table_sizes[pwr] <= *len)
74205+ pwr++;
74206+
74207+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
74208+ return newtable;
74209+
74210+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
74211+ newtable =
74212+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
74213+ else
74214+ newtable = vmalloc(table_sizes[pwr] * elementsize);
74215+
74216+ *len = table_sizes[pwr];
74217+
74218+ return newtable;
74219+}
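/* A quick stand-alone check of the sizing rule above: the table gets the
 * smallest listed prime strictly greater than the requested element count,
 * so with chaining the expected load factor lambda stays at or below about
 * 1. Hypothetical outside-the-kernel code:
 */
#include <stdio.h>

static const unsigned int table_sizes[] = {
	7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
	32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
	4194301, 8388593, 16777213, 33554393, 67108859
};

static unsigned int pick_size(unsigned int len)
{
	unsigned int pwr = 0;

	while (pwr < sizeof(table_sizes) / sizeof(table_sizes[0]) - 1 &&
	       table_sizes[pwr] <= len)
		pwr++;
	return table_sizes[pwr] > len ? table_sizes[pwr] : 0; /* 0 = too big */
}

int main(void)
{
	printf("1000 objects -> table of %u\n", pick_size(1000));  /* 1021 */
	printf("1021 objects -> table of %u\n", pick_size(1021));  /* 2039 */
	return 0;
}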
74220+
74221+static int
74222+init_variables(const struct gr_arg *arg, bool reload)
74223+{
74224+ struct task_struct *reaper = init_pid_ns.child_reaper;
74225+ unsigned int stacksize;
74226+
74227+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
74228+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
74229+ polstate->name_set.n_size = arg->role_db.num_objects;
74230+ polstate->inodev_set.i_size = arg->role_db.num_objects;
74231+
74232+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
74233+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
74234+ return 1;
74235+
74236+ if (!reload) {
74237+ if (!gr_init_uidset())
74238+ return 1;
74239+ }
74240+
74241+ /* set up the stack that holds allocation info */
74242+
74243+ stacksize = arg->role_db.num_pointers + 5;
74244+
74245+ if (!acl_alloc_stack_init(stacksize))
74246+ return 1;
74247+
74248+ if (!reload) {
74249+ /* grab reference for the real root dentry and vfsmount */
74250+ get_fs_root(reaper->fs, &gr_real_root);
74251+
74252+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
74253+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
74254+#endif
74255+
74256+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
74257+ if (fakefs_obj_rw == NULL)
74258+ return 1;
74259+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
74260+
74261+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
74262+ if (fakefs_obj_rwx == NULL)
74263+ return 1;
74264+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
74265+ }
74266+
74267+ polstate->subj_map_set.s_hash =
74268+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
74269+ polstate->acl_role_set.r_hash =
74270+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
74271+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
74272+ polstate->inodev_set.i_hash =
74273+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
74274+
74275+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
74276+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
74277+ return 1;
74278+
74279+ memset(polstate->subj_map_set.s_hash, 0,
74280+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
74281+ memset(polstate->acl_role_set.r_hash, 0,
74282+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
74283+ memset(polstate->name_set.n_hash, 0,
74284+ sizeof (struct name_entry *) * polstate->name_set.n_size);
74285+ memset(polstate->inodev_set.i_hash, 0,
74286+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
74287+
74288+ return 0;
74289+}
74290+
74291+/* free information not needed after startup;
74292+   currently this holds the user->kernel pointer mappings for subjects
74293+*/
74294+
74295+static void
74296+free_init_variables(void)
74297+{
74298+ __u32 i;
74299+
74300+ if (polstate->subj_map_set.s_hash) {
74301+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
74302+ if (polstate->subj_map_set.s_hash[i]) {
74303+ kfree(polstate->subj_map_set.s_hash[i]);
74304+ polstate->subj_map_set.s_hash[i] = NULL;
74305+ }
74306+ }
74307+
74308+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
74309+ PAGE_SIZE)
74310+ kfree(polstate->subj_map_set.s_hash);
74311+ else
74312+ vfree(polstate->subj_map_set.s_hash);
74313+ }
74314+
74315+ return;
74316+}
74317+
74318+static void
74319+free_variables(bool reload)
74320+{
74321+ struct acl_subject_label *s;
74322+ struct acl_role_label *r;
74323+ struct task_struct *task, *task2;
74324+ unsigned int x;
74325+
74326+ if (!reload) {
74327+ gr_clear_learn_entries();
74328+
74329+ read_lock(&tasklist_lock);
74330+ do_each_thread(task2, task) {
74331+ task->acl_sp_role = 0;
74332+ task->acl_role_id = 0;
74333+ task->inherited = 0;
74334+ task->acl = NULL;
74335+ task->role = NULL;
74336+ } while_each_thread(task2, task);
74337+ read_unlock(&tasklist_lock);
74338+
74339+ kfree(fakefs_obj_rw);
74340+ fakefs_obj_rw = NULL;
74341+ kfree(fakefs_obj_rwx);
74342+ fakefs_obj_rwx = NULL;
74343+
74344+ /* release the reference to the real root dentry and vfsmount */
74345+ path_put(&gr_real_root);
74346+ memset(&gr_real_root, 0, sizeof(gr_real_root));
74347+ }
74348+
74349+ /* free all object hash tables */
74350+
74351+ FOR_EACH_ROLE_START(r)
74352+ if (r->subj_hash == NULL)
74353+ goto next_role;
74354+ FOR_EACH_SUBJECT_START(r, s, x)
74355+ if (s->obj_hash == NULL)
74356+ break;
74357+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74358+ kfree(s->obj_hash);
74359+ else
74360+ vfree(s->obj_hash);
74361+ FOR_EACH_SUBJECT_END(s, x)
74362+ FOR_EACH_NESTED_SUBJECT_START(r, s)
74363+ if (s->obj_hash == NULL)
74364+ break;
74365+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74366+ kfree(s->obj_hash);
74367+ else
74368+ vfree(s->obj_hash);
74369+ FOR_EACH_NESTED_SUBJECT_END(s)
74370+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
74371+ kfree(r->subj_hash);
74372+ else
74373+ vfree(r->subj_hash);
74374+ r->subj_hash = NULL;
74375+next_role:
74376+ FOR_EACH_ROLE_END(r)
74377+
74378+ acl_free_all();
74379+
74380+ if (polstate->acl_role_set.r_hash) {
74381+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
74382+ PAGE_SIZE)
74383+ kfree(polstate->acl_role_set.r_hash);
74384+ else
74385+ vfree(polstate->acl_role_set.r_hash);
74386+ }
74387+ if (polstate->name_set.n_hash) {
74388+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
74389+ PAGE_SIZE)
74390+ kfree(polstate->name_set.n_hash);
74391+ else
74392+ vfree(polstate->name_set.n_hash);
74393+ }
74394+
74395+ if (polstate->inodev_set.i_hash) {
74396+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
74397+ PAGE_SIZE)
74398+ kfree(polstate->inodev_set.i_hash);
74399+ else
74400+ vfree(polstate->inodev_set.i_hash);
74401+ }
74402+
74403+ if (!reload)
74404+ gr_free_uidset();
74405+
74406+ memset(&polstate->name_set, 0, sizeof (struct name_db));
74407+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
74408+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
74409+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
74410+
74411+ polstate->default_role = NULL;
74412+ polstate->kernel_role = NULL;
74413+ polstate->role_list = NULL;
74414+
74415+ return;
74416+}
74417+
74418+static struct acl_subject_label *
74419+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
74420+
74421+static int alloc_and_copy_string(char **name, unsigned int maxlen)
74422+{
74423+ unsigned int len = strnlen_user(*name, maxlen);
74424+ char *tmp;
74425+
74426+ if (!len || len >= maxlen)
74427+ return -EINVAL;
74428+
74429+ if ((tmp = (char *) acl_alloc(len)) == NULL)
74430+ return -ENOMEM;
74431+
74432+ if (copy_from_user(tmp, *name, len))
74433+ return -EFAULT;
74434+
74435+ tmp[len-1] = '\0';
74436+ *name = tmp;
74437+
74438+ return 0;
74439+}
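/* A user-space analogue of the helper above, with the same shape: measure
 * first (strnlen_user's result includes the trailing NUL), reject anything
 * that does not terminate comfortably inside maxlen, copy, and then
 * force-terminate regardless. Hypothetical sketch, not part of the patch:
 */
#include <stdlib.h>
#include <string.h>

static char *dup_bounded(const char *src, size_t maxlen)
{
	size_t len = strnlen(src, maxlen) + 1;	/* count the NUL, like strnlen_user */
	char *tmp;

	if (len >= maxlen)
		return NULL;	/* too long, or no NUL within maxlen */

	tmp = malloc(len);
	if (tmp == NULL)
		return NULL;

	memcpy(tmp, src, len);
	tmp[len - 1] = '\0';	/* belt and braces, as in the original */
	return tmp;
}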
74440+
74441+static int
74442+copy_user_glob(struct acl_object_label *obj)
74443+{
74444+ struct acl_object_label *g_tmp, **guser;
74445+ int error;
74446+
74447+ if (obj->globbed == NULL)
74448+ return 0;
74449+
74450+ guser = &obj->globbed;
74451+ while (*guser) {
74452+ g_tmp = (struct acl_object_label *)
74453+ acl_alloc(sizeof (struct acl_object_label));
74454+ if (g_tmp == NULL)
74455+ return -ENOMEM;
74456+
74457+ if (copy_acl_object_label(g_tmp, *guser))
74458+ return -EFAULT;
74459+
74460+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
74461+ if (error)
74462+ return error;
74463+
74464+ *guser = g_tmp;
74465+ guser = &(g_tmp->next);
74466+ }
74467+
74468+ return 0;
74469+}
74470+
74471+static int
74472+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
74473+ struct acl_role_label *role)
74474+{
74475+ struct acl_object_label *o_tmp;
74476+ int ret;
74477+
74478+ while (userp) {
74479+ if ((o_tmp = (struct acl_object_label *)
74480+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
74481+ return -ENOMEM;
74482+
74483+ if (copy_acl_object_label(o_tmp, userp))
74484+ return -EFAULT;
74485+
74486+ userp = o_tmp->prev;
74487+
74488+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
74489+ if (ret)
74490+ return ret;
74491+
74492+ insert_acl_obj_label(o_tmp, subj);
74493+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
74494+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
74495+ return -ENOMEM;
74496+
74497+ ret = copy_user_glob(o_tmp);
74498+ if (ret)
74499+ return ret;
74500+
74501+ if (o_tmp->nested) {
74502+ int already_copied;
74503+
74504+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
74505+ if (IS_ERR(o_tmp->nested))
74506+ return PTR_ERR(o_tmp->nested);
74507+
74508+ /* insert into nested subject list if we haven't copied this one yet
74509+ to prevent duplicate entries */
74510+ if (!already_copied) {
74511+ o_tmp->nested->next = role->hash->first;
74512+ role->hash->first = o_tmp->nested;
74513+ }
74514+ }
74515+ }
74516+
74517+ return 0;
74518+}
74519+
74520+static __u32
74521+count_user_subjs(struct acl_subject_label *userp)
74522+{
74523+ struct acl_subject_label s_tmp;
74524+ __u32 num = 0;
74525+
74526+ while (userp) {
74527+ if (copy_acl_subject_label(&s_tmp, userp))
74528+ break;
74529+
74530+ userp = s_tmp.prev;
74530+		num++;
74531+	}
74532+
74533+ return num;
74534+}
74535+
74536+static int
74537+copy_user_allowedips(struct acl_role_label *rolep)
74538+{
74539+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
74540+
74541+ ruserip = rolep->allowed_ips;
74542+
74543+ while (ruserip) {
74544+ rlast = rtmp;
74545+
74546+ if ((rtmp = (struct role_allowed_ip *)
74547+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
74548+ return -ENOMEM;
74549+
74550+ if (copy_role_allowed_ip(rtmp, ruserip))
74551+ return -EFAULT;
74552+
74553+ ruserip = rtmp->prev;
74554+
74555+ if (!rlast) {
74556+ rtmp->prev = NULL;
74557+ rolep->allowed_ips = rtmp;
74558+ } else {
74559+ rlast->next = rtmp;
74560+ rtmp->prev = rlast;
74561+ }
74562+
74563+ if (!ruserip)
74564+ rtmp->next = NULL;
74565+ }
74566+
74567+ return 0;
74568+}
74569+
74570+static int
74571+copy_user_transitions(struct acl_role_label *rolep)
74572+{
74573+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
74574+ int error;
74575+
74576+ rusertp = rolep->transitions;
74577+
74578+ while (rusertp) {
74579+ rlast = rtmp;
74580+
74581+ if ((rtmp = (struct role_transition *)
74582+ acl_alloc(sizeof (struct role_transition))) == NULL)
74583+ return -ENOMEM;
74584+
74585+ if (copy_role_transition(rtmp, rusertp))
74586+ return -EFAULT;
74587+
74588+ rusertp = rtmp->prev;
74589+
74590+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
74591+ if (error)
74592+ return error;
74593+
74594+ if (!rlast) {
74595+ rtmp->prev = NULL;
74596+ rolep->transitions = rtmp;
74597+ } else {
74598+ rlast->next = rtmp;
74599+ rtmp->prev = rlast;
74600+ }
74601+
74602+ if (!rusertp)
74603+ rtmp->next = NULL;
74604+ }
74605+
74606+ return 0;
74607+}
74608+
74609+static __u32 count_user_objs(const struct acl_object_label __user *userp)
74610+{
74611+ struct acl_object_label o_tmp;
74612+ __u32 num = 0;
74613+
74614+ while (userp) {
74615+ if (copy_acl_object_label(&o_tmp, userp))
74616+ break;
74617+
74618+ userp = o_tmp.prev;
74619+ num++;
74620+ }
74621+
74622+ return num;
74623+}
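/* Both count helpers walk an untrusted, user-space singly linked list
 * through its ->prev pointers, copying every node into a kernel-side
 * temporary just to read the link, so user memory is never dereferenced
 * directly and a faulting copy simply ends the walk. A hypothetical
 * stand-alone model:
 */
#include <stdio.h>
#include <string.h>

struct node { struct node *prev; };

/* stand-in for copy_from_user(): nonzero means the copy faulted */
static int copy_node(struct node *dst, const struct node *src)
{
	if (src == NULL)
		return 1;
	memcpy(dst, src, sizeof(*dst));
	return 0;
}

static unsigned int count_nodes(const struct node *userp)
{
	struct node tmp;
	unsigned int num = 0;

	while (userp) {
		if (copy_node(&tmp, userp))
			break;
		userp = tmp.prev;
		num++;
	}
	return num;
}

int main(void)
{
	struct node a = { NULL }, b = { &a }, c = { &b };

	printf("%u nodes\n", count_nodes(&c));	/* 3 */
	return 0;
}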
74624+
74625+static struct acl_subject_label *
74626+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
74627+{
74628+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
74629+ __u32 num_objs;
74630+ struct acl_ip_label **i_tmp, *i_utmp2;
74631+ struct gr_hash_struct ghash;
74632+ struct subject_map *subjmap;
74633+ unsigned int i_num;
74634+ int err;
74635+
74636+ if (already_copied != NULL)
74637+ *already_copied = 0;
74638+
74639+ s_tmp = lookup_subject_map(userp);
74640+
74641+ /* we've already copied this subject into the kernel, just return
74642+ the reference to it, and don't copy it over again
74643+ */
74644+ if (s_tmp) {
74645+ if (already_copied != NULL)
74646+ *already_copied = 1;
74647+ return(s_tmp);
74648+ }
74649+
74650+ if ((s_tmp = (struct acl_subject_label *)
74651+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
74652+ return ERR_PTR(-ENOMEM);
74653+
74654+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
74655+ if (subjmap == NULL)
74656+ return ERR_PTR(-ENOMEM);
74657+
74658+ subjmap->user = userp;
74659+ subjmap->kernel = s_tmp;
74660+ insert_subj_map_entry(subjmap);
74661+
74662+ if (copy_acl_subject_label(s_tmp, userp))
74663+ return ERR_PTR(-EFAULT);
74664+
74665+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
74666+ if (err)
74667+ return ERR_PTR(err);
74668+
74669+ if (!strcmp(s_tmp->filename, "/"))
74670+ role->root_label = s_tmp;
74671+
74672+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
74673+ return ERR_PTR(-EFAULT);
74674+
74675+ /* copy user and group transition tables */
74676+
74677+ if (s_tmp->user_trans_num) {
74678+ uid_t *uidlist;
74679+
74680+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
74681+ if (uidlist == NULL)
74682+ return ERR_PTR(-ENOMEM);
74683+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
74684+ return ERR_PTR(-EFAULT);
74685+
74686+ s_tmp->user_transitions = uidlist;
74687+ }
74688+
74689+ if (s_tmp->group_trans_num) {
74690+ gid_t *gidlist;
74691+
74692+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
74693+ if (gidlist == NULL)
74694+ return ERR_PTR(-ENOMEM);
74695+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
74696+ return ERR_PTR(-EFAULT);
74697+
74698+ s_tmp->group_transitions = gidlist;
74699+ }
74700+
74701+ /* set up object hash table */
74702+ num_objs = count_user_objs(ghash.first);
74703+
74704+ s_tmp->obj_hash_size = num_objs;
74705+ s_tmp->obj_hash =
74706+ (struct acl_object_label **)
74707+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
74708+
74709+ if (!s_tmp->obj_hash)
74710+ return ERR_PTR(-ENOMEM);
74711+
74712+ memset(s_tmp->obj_hash, 0,
74713+ s_tmp->obj_hash_size *
74714+ sizeof (struct acl_object_label *));
74715+
74716+ /* add in objects */
74717+ err = copy_user_objs(ghash.first, s_tmp, role);
74718+
74719+ if (err)
74720+ return ERR_PTR(err);
74721+
74722+ /* set pointer for parent subject */
74723+ if (s_tmp->parent_subject) {
74724+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
74725+
74726+ if (IS_ERR(s_tmp2))
74727+ return s_tmp2;
74728+
74729+ s_tmp->parent_subject = s_tmp2;
74730+ }
74731+
74732+ /* add in ip acls */
74733+
74734+ if (!s_tmp->ip_num) {
74735+ s_tmp->ips = NULL;
74736+ goto insert;
74737+ }
74738+
74739+ i_tmp =
74740+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
74741+ sizeof (struct acl_ip_label *));
74742+
74743+ if (!i_tmp)
74744+ return ERR_PTR(-ENOMEM);
74745+
74746+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
74747+ *(i_tmp + i_num) =
74748+ (struct acl_ip_label *)
74749+ acl_alloc(sizeof (struct acl_ip_label));
74750+ if (!*(i_tmp + i_num))
74751+ return ERR_PTR(-ENOMEM);
74752+
74753+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
74754+ return ERR_PTR(-EFAULT);
74755+
74756+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
74757+ return ERR_PTR(-EFAULT);
74758+
74759+ if ((*(i_tmp + i_num))->iface == NULL)
74760+ continue;
74761+
74762+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
74763+ if (err)
74764+ return ERR_PTR(err);
74765+ }
74766+
74767+ s_tmp->ips = i_tmp;
74768+
74769+insert:
74770+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
74771+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
74772+ return ERR_PTR(-ENOMEM);
74773+
74774+ return s_tmp;
74775+}
74776+
74777+static int
74778+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
74779+{
74780+ struct acl_subject_label s_pre;
74781+ struct acl_subject_label * ret;
74782+ int err;
74783+
74784+ while (userp) {
74785+ if (copy_acl_subject_label(&s_pre, userp))
74786+ return -EFAULT;
74787+
74788+ ret = do_copy_user_subj(userp, role, NULL);
74789+
74790+ err = PTR_ERR(ret);
74791+ if (IS_ERR(ret))
74792+ return err;
74793+
74794+ insert_acl_subj_label(ret, role);
74795+
74796+ userp = s_pre.prev;
74797+ }
74798+
74799+ return 0;
74800+}
74801+
74802+static int
74803+copy_user_acl(struct gr_arg *arg)
74804+{
74805+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
74806+ struct acl_subject_label *subj_list;
74807+ struct sprole_pw *sptmp;
74808+ struct gr_hash_struct *ghash;
74809+ uid_t *domainlist;
74810+ unsigned int r_num;
74811+ int err = 0;
74812+ __u16 i;
74813+ __u32 num_subjs;
74814+
74815+ /* we need a default and kernel role */
74816+ if (arg->role_db.num_roles < 2)
74817+ return -EINVAL;
74818+
74819+ /* copy special role authentication info from userspace */
74820+
74821+ polstate->num_sprole_pws = arg->num_sprole_pws;
74822+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
74823+
74824+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
74825+ return -ENOMEM;
74826+
74827+ for (i = 0; i < polstate->num_sprole_pws; i++) {
74828+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
74829+ if (!sptmp)
74830+ return -ENOMEM;
74831+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
74832+ return -EFAULT;
74833+
74834+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
74835+ if (err)
74836+ return err;
74837+
74838+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
74839+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
74840+#endif
74841+
74842+ polstate->acl_special_roles[i] = sptmp;
74843+ }
74844+
74845+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
74846+
74847+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
74848+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
74849+
74850+ if (!r_tmp)
74851+ return -ENOMEM;
74852+
74853+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
74854+ return -EFAULT;
74855+
74856+ if (copy_acl_role_label(r_tmp, r_utmp2))
74857+ return -EFAULT;
74858+
74859+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
74860+ if (err)
74861+ return err;
74862+
74863+ if (!strcmp(r_tmp->rolename, "default")
74864+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
74865+ polstate->default_role = r_tmp;
74866+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
74867+ polstate->kernel_role = r_tmp;
74868+ }
74869+
74870+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
74871+ return -ENOMEM;
74872+
74873+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
74874+ return -EFAULT;
74875+
74876+ r_tmp->hash = ghash;
74877+
74878+ num_subjs = count_user_subjs(r_tmp->hash->first);
74879+
74880+ r_tmp->subj_hash_size = num_subjs;
74881+ r_tmp->subj_hash =
74882+ (struct acl_subject_label **)
74883+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
74884+
74885+ if (!r_tmp->subj_hash)
74886+ return -ENOMEM;
74887+
74888+ err = copy_user_allowedips(r_tmp);
74889+ if (err)
74890+ return err;
74891+
74892+ /* copy domain info */
74893+ if (r_tmp->domain_children != NULL) {
74894+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
74895+ if (domainlist == NULL)
74896+ return -ENOMEM;
74897+
74898+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
74899+ return -EFAULT;
74900+
74901+ r_tmp->domain_children = domainlist;
74902+ }
74903+
74904+ err = copy_user_transitions(r_tmp);
74905+ if (err)
74906+ return err;
74907+
74908+ memset(r_tmp->subj_hash, 0,
74909+ r_tmp->subj_hash_size *
74910+ sizeof (struct acl_subject_label *));
74911+
74912+ /* acquire the list of subjects, then NULL out
74913+ the list prior to parsing the subjects for this role,
74914+ as during this parsing the list is replaced with a list
74915+ of *nested* subjects for the role
74916+ */
74917+ subj_list = r_tmp->hash->first;
74918+
74919+ /* set nested subject list to null */
74920+ r_tmp->hash->first = NULL;
74921+
74922+ err = copy_user_subjs(subj_list, r_tmp);
74923+
74924+ if (err)
74925+ return err;
74926+
74927+ insert_acl_role_label(r_tmp);
74928+ }
74929+
74930+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
74931+ return -EINVAL;
74932+
74933+ return err;
74934+}
74935+
74936+static int gracl_reload_apply_policies(void *reload)
74937+{
74938+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
74939+ struct task_struct *task, *task2;
74940+ struct acl_role_label *role, *rtmp;
74941+ struct acl_subject_label *subj;
74942+ const struct cred *cred;
74943+ int role_applied;
74944+ int ret = 0;
74945+
74946+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
74947+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
74948+
74949+ /* first make sure we'll be able to apply the new policy cleanly */
74950+ do_each_thread(task2, task) {
74951+ if (task->exec_file == NULL)
74952+ continue;
74953+ role_applied = 0;
74954+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
74955+ /* preserve special roles */
74956+ FOR_EACH_ROLE_START(role)
74957+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
74958+ rtmp = task->role;
74959+ task->role = role;
74960+ role_applied = 1;
74961+ break;
74962+ }
74963+ FOR_EACH_ROLE_END(role)
74964+ }
74965+ if (!role_applied) {
74966+ cred = __task_cred(task);
74967+ rtmp = task->role;
74968+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
74969+ }
74970+		/* this handles non-nested inherited subjects; nested subjects
74971+		   will still be dropped for now */
74972+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
74973+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL, 1);
74974+ /* change the role back so that we've made no modifications to the policy */
74975+ task->role = rtmp;
74976+
74977+ if (subj == NULL || task->tmpacl == NULL) {
74978+ ret = -EINVAL;
74979+ goto out;
74980+ }
74981+ } while_each_thread(task2, task);
74982+
74983+ /* now actually apply the policy */
74984+
74985+ do_each_thread(task2, task) {
74986+ if (task->exec_file) {
74987+ role_applied = 0;
74988+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
74989+ /* preserve special roles */
74990+ FOR_EACH_ROLE_START(role)
74991+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
74992+ task->role = role;
74993+ role_applied = 1;
74994+ break;
74995+ }
74996+ FOR_EACH_ROLE_END(role)
74997+ }
74998+ if (!role_applied) {
74999+ cred = __task_cred(task);
75000+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75001+ }
75002+			/* this handles non-nested inherited subjects; nested subjects
75003+			   will still be dropped for now */
75004+ if (!reload_state->oldmode && task->inherited)
75005+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
75006+ else {
75007+ /* looked up and tagged to the task previously */
75008+ subj = task->tmpacl;
75009+ }
75010+ /* subj will be non-null */
75011+ __gr_apply_subject_to_task(polstate, task, subj);
75012+ if (reload_state->oldmode) {
75013+ task->acl_role_id = 0;
75014+ task->acl_sp_role = 0;
75015+ task->inherited = 0;
75016+ }
75017+ } else {
75018+ // it's a kernel process
75019+ task->role = polstate->kernel_role;
75020+ task->acl = polstate->kernel_role->root_label;
75021+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
75022+ task->acl->mode &= ~GR_PROCFIND;
75023+#endif
75024+ }
75025+ } while_each_thread(task2, task);
75026+
75027+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
75028+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
75029+
75030+out:
75031+
75032+ return ret;
75033+}
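/* The reload applier above is a two-phase commit: pass one maps every task
 * onto the new policy without mutating anything (a failure leaves the old
 * policy fully live), and only then does pass two apply the staged results,
 * by which point nothing can fail. Because it runs under stop_machine(),
 * no task can change state between the passes. A hypothetical, generic
 * sketch of that shape:
 */
#include <stdio.h>

struct task_slot { int cur; int staged; };

static int remap(int cur)	/* stand-in for the role/subject lookup */
{
	return cur >= 0 ? cur + 100 : -1;
}

static int apply_all(struct task_slot *t, int n)
{
	int i, v;

	/* phase 1: validate and stage; an abort leaves every ->cur intact */
	for (i = 0; i < n; i++) {
		v = remap(t[i].cur);
		if (v < 0)
			return -1;
		t[i].staged = v;
	}
	/* phase 2: commit; no failure paths remain */
	for (i = 0; i < n; i++)
		t[i].cur = t[i].staged;
	return 0;
}

int main(void)
{
	struct task_slot tasks[] = { { 1, 0 }, { 2, 0 }, { 3, 0 } };

	printf("reload %s\n", apply_all(tasks, 3) ? "failed" : "applied");
	return 0;
}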
75034+
75035+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
75036+{
75037+ struct gr_reload_state new_reload_state = { };
75038+ int err;
75039+
75040+ new_reload_state.oldpolicy_ptr = polstate;
75041+ new_reload_state.oldalloc_ptr = current_alloc_state;
75042+ new_reload_state.oldmode = oldmode;
75043+
75044+ current_alloc_state = &new_reload_state.newalloc;
75045+ polstate = &new_reload_state.newpolicy;
75046+
75047+ /* everything relevant is now saved off, copy in the new policy */
75048+ if (init_variables(args, true)) {
75049+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
75050+ err = -ENOMEM;
75051+ goto error;
75052+ }
75053+
75054+ err = copy_user_acl(args);
75055+ free_init_variables();
75056+ if (err)
75057+ goto error;
75058+	/* the new policy is copied in, with the old policy available via saved_state.
75059+	   First go through applying roles, making sure to preserve special roles;
75060+	   then apply new subjects, making sure to preserve inherited and nested subjects,
75061+	   though currently only inherited subjects will be preserved
75062+	 */
75063+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
75064+ if (err)
75065+ goto error;
75066+
75067+ /* we've now applied the new policy, so restore the old policy state to free it */
75068+ polstate = &new_reload_state.oldpolicy;
75069+ current_alloc_state = &new_reload_state.oldalloc;
75070+ free_variables(true);
75071+
75072+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
75073+ to running_polstate/current_alloc_state inside stop_machine
75074+ */
75075+ err = 0;
75076+ goto out;
75077+error:
75078+	/* if loading the new policy fails, we just keep the previous
75079+	   policy set around
75080+	 */
75081+ free_variables(true);
75082+
75083+ /* doesn't affect runtime, but maintains consistent state */
75084+out:
75085+ polstate = new_reload_state.oldpolicy_ptr;
75086+ current_alloc_state = new_reload_state.oldalloc_ptr;
75087+
75088+ return err;
75089+}
75090+
75091+static int
75092+gracl_init(struct gr_arg *args)
75093+{
75094+ int error = 0;
75095+
75096+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
75097+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
75098+
75099+ if (init_variables(args, false)) {
75100+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
75101+ error = -ENOMEM;
75102+ goto out;
75103+ }
75104+
75105+ error = copy_user_acl(args);
75106+ free_init_variables();
75107+ if (error)
75108+ goto out;
75109+
75110+ error = gr_set_acls(0);
75111+ if (error)
75112+ goto out;
75113+
75114+ gr_enable_rbac_system();
75115+
75116+ return 0;
75117+
75118+out:
75119+ free_variables(false);
75120+ return error;
75121+}
75122+
75123+static int
75124+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
75125+ unsigned char **sum)
75126+{
75127+ struct acl_role_label *r;
75128+ struct role_allowed_ip *ipp;
75129+ struct role_transition *trans;
75130+ unsigned int i;
75131+ int found = 0;
75132+ u32 curr_ip = current->signal->curr_ip;
75133+
75134+ current->signal->saved_ip = curr_ip;
75135+
75136+ /* check transition table */
75137+
75138+ for (trans = current->role->transitions; trans; trans = trans->next) {
75139+ if (!strcmp(rolename, trans->rolename)) {
75140+ found = 1;
75141+ break;
75142+ }
75143+ }
75144+
75145+ if (!found)
75146+ return 0;
75147+
75148+ /* handle special roles that do not require authentication
75149+ and check ip */
75150+
75151+ FOR_EACH_ROLE_START(r)
75152+ if (!strcmp(rolename, r->rolename) &&
75153+ (r->roletype & GR_ROLE_SPECIAL)) {
75154+ found = 0;
75155+ if (r->allowed_ips != NULL) {
75156+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
75157+ if ((ntohl(curr_ip) & ipp->netmask) ==
75158+ (ntohl(ipp->addr) & ipp->netmask))
75159+ found = 1;
75160+ }
75161+ } else
75162+ found = 2;
75163+ if (!found)
75164+ return 0;
75165+
75166+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
75167+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
75168+ *salt = NULL;
75169+ *sum = NULL;
75170+ return 1;
75171+ }
75172+ }
75173+ FOR_EACH_ROLE_END(r)
75174+
75175+ for (i = 0; i < polstate->num_sprole_pws; i++) {
75176+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
75177+ *salt = polstate->acl_special_roles[i]->salt;
75178+ *sum = polstate->acl_special_roles[i]->sum;
75179+ return 1;
75180+ }
75181+ }
75182+
75183+ return 0;
75184+}
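/* The allowed-IP test above is a plain masked comparison: a client matches
 * an entry when the two addresses agree on every bit covered by the entry's
 * netmask (both sides compared in host byte order). A hypothetical
 * stand-alone version:
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static int ip_allowed(uint32_t addr_be, uint32_t net_be, uint32_t mask_host)
{
	return (ntohl(addr_be) & mask_host) == (ntohl(net_be) & mask_host);
}

int main(void)
{
	uint32_t client, net;

	inet_pton(AF_INET, "192.168.1.77", &client);
	inet_pton(AF_INET, "192.168.1.0", &net);

	printf("%d\n", ip_allowed(client, net, 0xffffff00u)); /* 1: inside the /24 */
	printf("%d\n", ip_allowed(client, net, 0xffffffffu)); /* 0: exact match only */
	return 0;
}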
75185+
75186+int gr_check_secure_terminal(struct task_struct *task)
75187+{
75188+ struct task_struct *p, *p2, *p3;
75189+ struct files_struct *files;
75190+ struct fdtable *fdt;
75191+ struct file *our_file = NULL, *file;
75192+ int i;
75193+
75194+ if (task->signal->tty == NULL)
75195+ return 1;
75196+
75197+ files = get_files_struct(task);
75198+ if (files != NULL) {
75199+ rcu_read_lock();
75200+ fdt = files_fdtable(files);
75201+ for (i=0; i < fdt->max_fds; i++) {
75202+ file = fcheck_files(files, i);
75203+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
75204+ get_file(file);
75205+ our_file = file;
75206+ }
75207+ }
75208+ rcu_read_unlock();
75209+ put_files_struct(files);
75210+ }
75211+
75212+ if (our_file == NULL)
75213+ return 1;
75214+
75215+ read_lock(&tasklist_lock);
75216+ do_each_thread(p2, p) {
75217+ files = get_files_struct(p);
75218+ if (files == NULL ||
75219+ (p->signal && p->signal->tty == task->signal->tty)) {
75220+ if (files != NULL)
75221+ put_files_struct(files);
75222+ continue;
75223+ }
75224+ rcu_read_lock();
75225+ fdt = files_fdtable(files);
75226+ for (i=0; i < fdt->max_fds; i++) {
75227+ file = fcheck_files(files, i);
75228+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
75229+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
75230+ p3 = task;
75231+ while (task_pid_nr(p3) > 0) {
75232+ if (p3 == p)
75233+ break;
75234+ p3 = p3->real_parent;
75235+ }
75236+ if (p3 == p)
75237+ break;
75238+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
75239+ gr_handle_alertkill(p);
75240+ rcu_read_unlock();
75241+ put_files_struct(files);
75242+ read_unlock(&tasklist_lock);
75243+ fput(our_file);
75244+ return 0;
75245+ }
75246+ }
75247+ rcu_read_unlock();
75248+ put_files_struct(files);
75249+ } while_each_thread(p2, p);
75250+ read_unlock(&tasklist_lock);
75251+
75252+ fput(our_file);
75253+ return 1;
75254+}
75255+
75256+ssize_t
75257+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
75258+{
75259+ struct gr_arg_wrapper uwrap;
75260+ unsigned char *sprole_salt = NULL;
75261+ unsigned char *sprole_sum = NULL;
75262+ int error = 0;
75263+ int error2 = 0;
75264+ size_t req_count = 0;
75265+ unsigned char oldmode = 0;
75266+
75267+ mutex_lock(&gr_dev_mutex);
75268+
75269+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
75270+ error = -EPERM;
75271+ goto out;
75272+ }
75273+
75274+#ifdef CONFIG_COMPAT
75275+ pax_open_kernel();
75276+ if (is_compat_task()) {
75277+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
75278+ copy_gr_arg = &copy_gr_arg_compat;
75279+ copy_acl_object_label = &copy_acl_object_label_compat;
75280+ copy_acl_subject_label = &copy_acl_subject_label_compat;
75281+ copy_acl_role_label = &copy_acl_role_label_compat;
75282+ copy_acl_ip_label = &copy_acl_ip_label_compat;
75283+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
75284+ copy_role_transition = &copy_role_transition_compat;
75285+ copy_sprole_pw = &copy_sprole_pw_compat;
75286+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
75287+ copy_pointer_from_array = &copy_pointer_from_array_compat;
75288+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
75289+ } else {
75290+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
75291+ copy_gr_arg = &copy_gr_arg_normal;
75292+ copy_acl_object_label = &copy_acl_object_label_normal;
75293+ copy_acl_subject_label = &copy_acl_subject_label_normal;
75294+ copy_acl_role_label = &copy_acl_role_label_normal;
75295+ copy_acl_ip_label = &copy_acl_ip_label_normal;
75296+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
75297+ copy_role_transition = &copy_role_transition_normal;
75298+ copy_sprole_pw = &copy_sprole_pw_normal;
75299+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
75300+ copy_pointer_from_array = &copy_pointer_from_array_normal;
75301+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
75302+ }
75303+ pax_close_kernel();
75304+#endif
75305+
75306+ req_count = get_gr_arg_wrapper_size();
75307+
75308+ if (count != req_count) {
75309+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
75310+ error = -EINVAL;
75311+ goto out;
75312+ }
75313+
75314+
75315+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
75316+ gr_auth_expires = 0;
75317+ gr_auth_attempts = 0;
75318+ }
75319+
75320+ error = copy_gr_arg_wrapper(buf, &uwrap);
75321+ if (error)
75322+ goto out;
75323+
75324+ error = copy_gr_arg(uwrap.arg, gr_usermode);
75325+ if (error)
75326+ goto out;
75327+
75328+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75329+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75330+ time_after(gr_auth_expires, get_seconds())) {
75331+ error = -EBUSY;
75332+ goto out;
75333+ }
75334+
75335+	/* if a non-root user is trying to do anything other than use a
75336+	   special role, do not attempt authentication and do not count it
75337+	   towards authentication locking
75338+	 */
75339+
75340+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
75341+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75342+ gr_is_global_nonroot(current_uid())) {
75343+ error = -EPERM;
75344+ goto out;
75345+ }
75346+
75347+ /* ensure pw and special role name are null terminated */
75348+
75349+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
75350+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
75351+
75352+	/* Okay.
75353+	 * We now have enough of the argument structure (we have yet
75354+	 * to copy_from_user the tables themselves). Copy the tables
75355+	 * only if we need them, i.e. for loading operations. */
75356+
75357+ switch (gr_usermode->mode) {
75358+ case GR_STATUS:
75359+ if (gr_acl_is_enabled()) {
75360+ error = 1;
75361+ if (!gr_check_secure_terminal(current))
75362+ error = 3;
75363+ } else
75364+ error = 2;
75365+ goto out;
75366+ case GR_SHUTDOWN:
75367+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75368+ stop_machine(gr_rbac_disable, NULL, NULL);
75369+ free_variables(false);
75370+ memset(gr_usermode, 0, sizeof(struct gr_arg));
75371+ memset(gr_system_salt, 0, GR_SALT_LEN);
75372+ memset(gr_system_sum, 0, GR_SHA_LEN);
75373+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
75374+ } else if (gr_acl_is_enabled()) {
75375+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
75376+ error = -EPERM;
75377+ } else {
75378+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
75379+ error = -EAGAIN;
75380+ }
75381+ break;
75382+ case GR_ENABLE:
75383+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
75384+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
75385+ else {
75386+ if (gr_acl_is_enabled())
75387+ error = -EAGAIN;
75388+ else
75389+ error = error2;
75390+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
75391+ }
75392+ break;
75393+ case GR_OLDRELOAD:
75394+		oldmode = 1;	/* fall through to GR_RELOAD */
75395+ case GR_RELOAD:
75396+ if (!gr_acl_is_enabled()) {
75397+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
75398+ error = -EAGAIN;
75399+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75400+ error2 = gracl_reload(gr_usermode, oldmode);
75401+ if (!error2)
75402+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
75403+ else {
75404+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75405+ error = error2;
75406+ }
75407+ } else {
75408+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75409+ error = -EPERM;
75410+ }
75411+ break;
75412+ case GR_SEGVMOD:
75413+ if (unlikely(!gr_acl_is_enabled())) {
75414+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
75415+ error = -EAGAIN;
75416+ break;
75417+ }
75418+
75419+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75420+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
75421+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
75422+ struct acl_subject_label *segvacl;
75423+ segvacl =
75424+ lookup_acl_subj_label(gr_usermode->segv_inode,
75425+ gr_usermode->segv_device,
75426+ current->role);
75427+ if (segvacl) {
75428+ segvacl->crashes = 0;
75429+ segvacl->expires = 0;
75430+ }
75431+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
75432+ gr_remove_uid(gr_usermode->segv_uid);
75433+ }
75434+ } else {
75435+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
75436+ error = -EPERM;
75437+ }
75438+ break;
75439+ case GR_SPROLE:
75440+ case GR_SPROLEPAM:
75441+ if (unlikely(!gr_acl_is_enabled())) {
75442+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
75443+ error = -EAGAIN;
75444+ break;
75445+ }
75446+
75447+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
75448+ current->role->expires = 0;
75449+ current->role->auth_attempts = 0;
75450+ }
75451+
75452+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75453+ time_after(current->role->expires, get_seconds())) {
75454+ error = -EBUSY;
75455+ goto out;
75456+ }
75457+
75458+ if (lookup_special_role_auth
75459+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
75460+ && ((!sprole_salt && !sprole_sum)
75461+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
75462+ char *p = "";
75463+ assign_special_role(gr_usermode->sp_role);
75464+ read_lock(&tasklist_lock);
75465+ if (current->real_parent)
75466+ p = current->real_parent->role->rolename;
75467+ read_unlock(&tasklist_lock);
75468+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
75469+ p, acl_sp_role_value);
75470+ } else {
75471+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
75472+ error = -EPERM;
75473+			if (!(current->role->auth_attempts++))
75474+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75475+
75476+ goto out;
75477+ }
75478+ break;
75479+ case GR_UNSPROLE:
75480+ if (unlikely(!gr_acl_is_enabled())) {
75481+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
75482+ error = -EAGAIN;
75483+ break;
75484+ }
75485+
75486+ if (current->role->roletype & GR_ROLE_SPECIAL) {
75487+ char *p = "";
75488+ int i = 0;
75489+
75490+ read_lock(&tasklist_lock);
75491+ if (current->real_parent) {
75492+ p = current->real_parent->role->rolename;
75493+ i = current->real_parent->acl_role_id;
75494+ }
75495+ read_unlock(&tasklist_lock);
75496+
75497+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
75498+ gr_set_acls(1);
75499+ } else {
75500+ error = -EPERM;
75501+ goto out;
75502+ }
75503+ break;
75504+ default:
75505+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
75506+ error = -EINVAL;
75507+ break;
75508+ }
75509+
75510+ if (error != -EPERM)
75511+ goto out;
75512+
75513+	if (!(gr_auth_attempts++))
75514+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75515+
75516+ out:
75517+ mutex_unlock(&gr_dev_mutex);
75518+
75519+ if (!error)
75520+ error = req_count;
75521+
75522+ return error;
75523+}
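/* The authentication lockout in the handler above is worth spelling out:
 * the expiry timer is armed on the first failed attempt only, a lapsed
 * timer resets both counters, and once the attempt count reaches the limit
 * further requests are refused until expiry. A hypothetical model, with
 * MAXTRIES/TIMEOUT standing in for the kernel config values:
 */
#include <stdio.h>
#include <time.h>

#define MAXTRIES 3
#define TIMEOUT	 30	/* seconds */

static unsigned int attempts;
static time_t expires;

static int auth_allowed(time_t now)
{
	if (expires && now >= expires) {	/* lockout lapsed: reset */
		expires = 0;
		attempts = 0;
	}
	return !(attempts >= MAXTRIES && now < expires);
}

static void auth_failed(time_t now)
{
	if (!attempts++)		/* arm the timer on the first failure only */
		expires = now + TIMEOUT;
}

int main(void)
{
	time_t now = time(NULL);
	int i;

	for (i = 0; i < 4; i++) {
		printf("attempt %d allowed: %d\n", i, auth_allowed(now));
		auth_failed(now);
	}
	return 0;	/* prints 1, 1, 1, then 0 once locked out */
}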
75524+
75525+int
75526+gr_set_acls(const int type)
75527+{
75528+ struct task_struct *task, *task2;
75529+ struct acl_role_label *role = current->role;
75530+ struct acl_subject_label *subj;
75531+ __u16 acl_role_id = current->acl_role_id;
75532+ const struct cred *cred;
75533+ int ret;
75534+
75535+ rcu_read_lock();
75536+ read_lock(&tasklist_lock);
75537+ read_lock(&grsec_exec_file_lock);
75538+ do_each_thread(task2, task) {
75539+		/* check to see if we're called from the exit handler;
75540+		   if so, only replace ACLs that have inherited the admin
75541+		   ACL */
75542+
75543+ if (type && (task->role != role ||
75544+ task->acl_role_id != acl_role_id))
75545+ continue;
75546+
75547+ task->acl_role_id = 0;
75548+ task->acl_sp_role = 0;
75549+ task->inherited = 0;
75550+
75551+ if (task->exec_file) {
75552+ cred = __task_cred(task);
75553+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75554+ subj = __gr_get_subject_for_task(polstate, task, NULL, 1);
75555+ if (subj == NULL) {
75556+ ret = -EINVAL;
75557+ read_unlock(&grsec_exec_file_lock);
75558+ read_unlock(&tasklist_lock);
75559+ rcu_read_unlock();
75560+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
75561+ return ret;
75562+ }
75563+ __gr_apply_subject_to_task(polstate, task, subj);
75564+ } else {
75565+ // it's a kernel process
75566+ task->role = polstate->kernel_role;
75567+ task->acl = polstate->kernel_role->root_label;
75568+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
75569+ task->acl->mode &= ~GR_PROCFIND;
75570+#endif
75571+ }
75572+ } while_each_thread(task2, task);
75573+ read_unlock(&grsec_exec_file_lock);
75574+ read_unlock(&tasklist_lock);
75575+ rcu_read_unlock();
75576+
75577+ return 0;
75578+}
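
The GR_SPROLE path above implements a per-role lockout: the first failed authentication arms an expiry window, and further attempts are refused with -EBUSY once CONFIG_GRKERNSEC_ACL_MAXTRIES is reached while the window is still live. A minimal userspace sketch of the same pattern (illustrative constants and types, not kernel code):

#include <stdio.h>
#include <time.h>

#define MAXTRIES 3   /* stands in for CONFIG_GRKERNSEC_ACL_MAXTRIES */
#define TIMEOUT  30  /* stands in for CONFIG_GRKERNSEC_ACL_TIMEOUT (seconds) */

struct role_state {
	unsigned int auth_attempts;
	time_t expires;
};

static int auth_allowed(struct role_state *r)
{
	/* mirrors: attempts >= MAXTRIES && time_after(expires, now) -> -EBUSY */
	return !(r->auth_attempts >= MAXTRIES && r->expires > time(NULL));
}

static void auth_failed(struct role_state *r)
{
	/* mirrors: if (!(auth_attempts++)) expires = now + TIMEOUT */
	if (!(r->auth_attempts++))
		r->expires = time(NULL) + TIMEOUT;
}

int main(void)
{
	struct role_state r = { 0, 0 };
	for (int i = 0; i < 5; i++) {
		if (!auth_allowed(&r)) {
			puts("locked out (-EBUSY)");
			continue;
		}
		auth_failed(&r); /* pretend every attempt fails */
		puts("attempt failed (-EPERM)");
	}
	return 0;
}
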
75579diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
75580new file mode 100644
75581index 0000000..39645c9
75582--- /dev/null
75583+++ b/grsecurity/gracl_res.c
75584@@ -0,0 +1,68 @@
75585+#include <linux/kernel.h>
75586+#include <linux/sched.h>
75587+#include <linux/gracl.h>
75588+#include <linux/grinternal.h>
75589+
75590+static const char *restab_log[] = {
75591+ [RLIMIT_CPU] = "RLIMIT_CPU",
75592+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
75593+ [RLIMIT_DATA] = "RLIMIT_DATA",
75594+ [RLIMIT_STACK] = "RLIMIT_STACK",
75595+ [RLIMIT_CORE] = "RLIMIT_CORE",
75596+ [RLIMIT_RSS] = "RLIMIT_RSS",
75597+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
75598+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
75599+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
75600+ [RLIMIT_AS] = "RLIMIT_AS",
75601+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
75602+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
75603+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
75604+ [RLIMIT_NICE] = "RLIMIT_NICE",
75605+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
75606+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
75607+ [GR_CRASH_RES] = "RLIMIT_CRASH"
75608+};
75609+
75610+void
75611+gr_log_resource(const struct task_struct *task,
75612+ const int res, const unsigned long wanted, const int gt)
75613+{
75614+ const struct cred *cred;
75615+ unsigned long rlim;
75616+
75617+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
75618+ return;
75619+
75620+	// resource not yet supported by the log table
75621+ if (unlikely(!restab_log[res]))
75622+ return;
75623+
75624+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
75625+ rlim = task_rlimit_max(task, res);
75626+ else
75627+ rlim = task_rlimit(task, res);
75628+
75629+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
75630+ return;
75631+
75632+ rcu_read_lock();
75633+ cred = __task_cred(task);
75634+
75635+ if (res == RLIMIT_NPROC &&
75636+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
75637+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
75638+ goto out_rcu_unlock;
75639+ else if (res == RLIMIT_MEMLOCK &&
75640+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
75641+ goto out_rcu_unlock;
75642+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
75643+ goto out_rcu_unlock;
75644+ rcu_read_unlock();
75645+
75646+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
75647+
75648+ return;
75649+out_rcu_unlock:
75650+ rcu_read_unlock();
75651+ return;
75652+}
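
gr_log_resource() parameterizes the limit comparison on gt: with a strict check (gt set) a request equal to the limit is still fine, while otherwise hitting the limit exactly already counts as a violation. A userspace sketch of just that predicate (names are illustrative):

#include <stdio.h>

static int should_log(unsigned long rlim, unsigned long wanted, int gt)
{
	const unsigned long RLIM_INF = ~0UL; /* stands in for RLIM_INFINITY */

	if (rlim == RLIM_INF)
		return 0;
	if (gt)
		return wanted > rlim;   /* !(wanted <= rlim) */
	return wanted >= rlim;          /* !(wanted < rlim) */
}

int main(void)
{
	printf("%d %d %d\n",
	       should_log(100, 100, 1),  /* 0: equal is fine when gt */
	       should_log(100, 100, 0),  /* 1: equal already violates */
	       should_log(100, 101, 1)); /* 1: strictly over the limit */
	return 0;
}
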
75653diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
75654new file mode 100644
75655index 0000000..218b66b
75656--- /dev/null
75657+++ b/grsecurity/gracl_segv.c
75658@@ -0,0 +1,324 @@
75659+#include <linux/kernel.h>
75660+#include <linux/mm.h>
75661+#include <asm/uaccess.h>
75662+#include <asm/errno.h>
75663+#include <asm/mman.h>
75664+#include <net/sock.h>
75665+#include <linux/file.h>
75666+#include <linux/fs.h>
75667+#include <linux/net.h>
75668+#include <linux/in.h>
75669+#include <linux/slab.h>
75670+#include <linux/types.h>
75671+#include <linux/sched.h>
75672+#include <linux/timer.h>
75673+#include <linux/gracl.h>
75674+#include <linux/grsecurity.h>
75675+#include <linux/grinternal.h>
75676+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75677+#include <linux/magic.h>
75678+#include <linux/pagemap.h>
75679+#include "../fs/btrfs/async-thread.h"
75680+#include "../fs/btrfs/ctree.h"
75681+#include "../fs/btrfs/btrfs_inode.h"
75682+#endif
75683+
75684+static struct crash_uid *uid_set;
75685+static unsigned short uid_used;
75686+static DEFINE_SPINLOCK(gr_uid_lock);
75687+extern rwlock_t gr_inode_lock;
75688+extern struct acl_subject_label *
75689+ lookup_acl_subj_label(const u64 inode, const dev_t dev,
75690+ struct acl_role_label *role);
75691+
75692+static inline dev_t __get_dev(const struct dentry *dentry)
75693+{
75694+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75695+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
75696+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
75697+ else
75698+#endif
75699+ return dentry->d_sb->s_dev;
75700+}
75701+
75702+static inline u64 __get_ino(const struct dentry *dentry)
75703+{
75704+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75705+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
75706+ return btrfs_ino(dentry->d_inode);
75707+ else
75708+#endif
75709+ return dentry->d_inode->i_ino;
75710+}
75711+
75712+int
75713+gr_init_uidset(void)
75714+{
75715+ uid_set =
75716+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
75717+ uid_used = 0;
75718+
75719+ return uid_set ? 1 : 0;
75720+}
75721+
75722+void
75723+gr_free_uidset(void)
75724+{
75725+ if (uid_set) {
75726+ struct crash_uid *tmpset;
75727+ spin_lock(&gr_uid_lock);
75728+ tmpset = uid_set;
75729+ uid_set = NULL;
75730+ uid_used = 0;
75731+ spin_unlock(&gr_uid_lock);
75732+		kfree(tmpset);
75734+ }
75735+
75736+ return;
75737+}
75738+
75739+int
75740+gr_find_uid(const uid_t uid)
75741+{
75742+ struct crash_uid *tmp = uid_set;
75743+ uid_t buid;
75744+ int low = 0, high = uid_used - 1, mid;
75745+
75746+ while (high >= low) {
75747+ mid = (low + high) >> 1;
75748+ buid = tmp[mid].uid;
75749+ if (buid == uid)
75750+ return mid;
75751+ if (buid > uid)
75752+ high = mid - 1;
75753+ if (buid < uid)
75754+ low = mid + 1;
75755+ }
75756+
75757+ return -1;
75758+}
75759+
75760+static __inline__ void
75761+gr_insertsort(void)
75762+{
75763+ unsigned short i, j;
75764+ struct crash_uid index;
75765+
75766+ for (i = 1; i < uid_used; i++) {
75767+ index = uid_set[i];
75768+ j = i;
75769+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
75770+ uid_set[j] = uid_set[j - 1];
75771+ j--;
75772+ }
75773+ uid_set[j] = index;
75774+ }
75775+
75776+ return;
75777+}
75778+
75779+static __inline__ void
75780+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
75781+{
75782+ int loc;
75783+ uid_t uid = GR_GLOBAL_UID(kuid);
75784+
75785+ if (uid_used == GR_UIDTABLE_MAX)
75786+ return;
75787+
75788+ loc = gr_find_uid(uid);
75789+
75790+ if (loc >= 0) {
75791+ uid_set[loc].expires = expires;
75792+ return;
75793+ }
75794+
75795+ uid_set[uid_used].uid = uid;
75796+ uid_set[uid_used].expires = expires;
75797+ uid_used++;
75798+
75799+ gr_insertsort();
75800+
75801+ return;
75802+}
75803+
75804+void
75805+gr_remove_uid(const unsigned short loc)
75806+{
75807+ unsigned short i;
75808+
75809+ for (i = loc + 1; i < uid_used; i++)
75810+ uid_set[i - 1] = uid_set[i];
75811+
75812+ uid_used--;
75813+
75814+ return;
75815+}
75816+
75817+int
75818+gr_check_crash_uid(const kuid_t kuid)
75819+{
75820+ int loc;
75821+ int ret = 0;
75822+ uid_t uid;
75823+
75824+ if (unlikely(!gr_acl_is_enabled()))
75825+ return 0;
75826+
75827+ uid = GR_GLOBAL_UID(kuid);
75828+
75829+ spin_lock(&gr_uid_lock);
75830+ loc = gr_find_uid(uid);
75831+
75832+ if (loc < 0)
75833+ goto out_unlock;
75834+
75835+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
75836+ gr_remove_uid(loc);
75837+ else
75838+ ret = 1;
75839+
75840+out_unlock:
75841+ spin_unlock(&gr_uid_lock);
75842+ return ret;
75843+}
75844+
75845+static __inline__ int
75846+proc_is_setxid(const struct cred *cred)
75847+{
75848+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
75849+ !uid_eq(cred->uid, cred->fsuid))
75850+ return 1;
75851+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
75852+ !gid_eq(cred->gid, cred->fsgid))
75853+ return 1;
75854+
75855+ return 0;
75856+}
75857+
75858+extern int gr_fake_force_sig(int sig, struct task_struct *t);
75859+
75860+void
75861+gr_handle_crash(struct task_struct *task, const int sig)
75862+{
75863+ struct acl_subject_label *curr;
75864+ struct task_struct *tsk, *tsk2;
75865+ const struct cred *cred;
75866+ const struct cred *cred2;
75867+
75868+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
75869+ return;
75870+
75871+ if (unlikely(!gr_acl_is_enabled()))
75872+ return;
75873+
75874+ curr = task->acl;
75875+
75876+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
75877+ return;
75878+
75879+ if (time_before_eq(curr->expires, get_seconds())) {
75880+ curr->expires = 0;
75881+ curr->crashes = 0;
75882+ }
75883+
75884+ curr->crashes++;
75885+
75886+ if (!curr->expires)
75887+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
75888+
75889+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
75890+ time_after(curr->expires, get_seconds())) {
75891+ rcu_read_lock();
75892+ cred = __task_cred(task);
75893+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
75894+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
75895+ spin_lock(&gr_uid_lock);
75896+ gr_insert_uid(cred->uid, curr->expires);
75897+ spin_unlock(&gr_uid_lock);
75898+ curr->expires = 0;
75899+ curr->crashes = 0;
75900+ read_lock(&tasklist_lock);
75901+ do_each_thread(tsk2, tsk) {
75902+ cred2 = __task_cred(tsk);
75903+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
75904+ gr_fake_force_sig(SIGKILL, tsk);
75905+ } while_each_thread(tsk2, tsk);
75906+ read_unlock(&tasklist_lock);
75907+ } else {
75908+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
75909+ read_lock(&tasklist_lock);
75910+ read_lock(&grsec_exec_file_lock);
75911+ do_each_thread(tsk2, tsk) {
75912+ if (likely(tsk != task)) {
75913+ // if this thread has the same subject as the one that triggered
75914+ // RES_CRASH and it's the same binary, kill it
75915+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
75916+ gr_fake_force_sig(SIGKILL, tsk);
75917+ }
75918+ } while_each_thread(tsk2, tsk);
75919+ read_unlock(&grsec_exec_file_lock);
75920+ read_unlock(&tasklist_lock);
75921+ }
75922+ rcu_read_unlock();
75923+ }
75924+
75925+ return;
75926+}
75927+
75928+int
75929+gr_check_crash_exec(const struct file *filp)
75930+{
75931+ struct acl_subject_label *curr;
75932+ struct dentry *dentry;
75933+
75934+ if (unlikely(!gr_acl_is_enabled()))
75935+ return 0;
75936+
75937+ read_lock(&gr_inode_lock);
75938+ dentry = filp->f_path.dentry;
75939+ curr = lookup_acl_subj_label(__get_ino(dentry), __get_dev(dentry),
75940+ current->role);
75941+ read_unlock(&gr_inode_lock);
75942+
75943+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
75944+ (!curr->crashes && !curr->expires))
75945+ return 0;
75946+
75947+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
75948+ time_after(curr->expires, get_seconds()))
75949+ return 1;
75950+ else if (time_before_eq(curr->expires, get_seconds())) {
75951+ curr->crashes = 0;
75952+ curr->expires = 0;
75953+ }
75954+
75955+ return 0;
75956+}
75957+
75958+void
75959+gr_handle_alertkill(struct task_struct *task)
75960+{
75961+ struct acl_subject_label *curracl;
75962+ __u32 curr_ip;
75963+ struct task_struct *p, *p2;
75964+
75965+ if (unlikely(!gr_acl_is_enabled()))
75966+ return;
75967+
75968+ curracl = task->acl;
75969+ curr_ip = task->signal->curr_ip;
75970+
75971+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
75972+ read_lock(&tasklist_lock);
75973+ do_each_thread(p2, p) {
75974+ if (p->signal->curr_ip == curr_ip)
75975+ gr_fake_force_sig(SIGKILL, p);
75976+ } while_each_thread(p2, p);
75977+ read_unlock(&tasklist_lock);
75978+ } else if (curracl->mode & GR_KILLPROC)
75979+ gr_fake_force_sig(SIGKILL, task);
75980+
75981+ return;
75982+}
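
gracl_segv.c keeps its table of recently crashed setxid uids sorted by uid so gr_check_crash_uid() can binary-search it; gr_insert_uid() appends and restores order with an insertion sort under gr_uid_lock. A self-contained userspace sketch of the same table discipline (sizes and names are illustrative, not kernel API):

#include <stdio.h>

#define UIDTABLE_MAX 16

struct crash_uid { unsigned int uid; unsigned long expires; };

static struct crash_uid set[UIDTABLE_MAX];
static unsigned short used;

static int find_uid(unsigned int uid)
{
	int low = 0, high = (int)used - 1;

	while (high >= low) {
		int mid = (low + high) / 2;
		if (set[mid].uid == uid)
			return mid;
		if (set[mid].uid > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

static void insert_uid(unsigned int uid, unsigned long expires)
{
	int loc = find_uid(uid);

	if (loc >= 0) {                 /* refresh an existing entry */
		set[loc].expires = expires;
		return;
	}
	if (used == UIDTABLE_MAX)
		return;
	set[used].uid = uid;
	set[used].expires = expires;
	used++;
	for (unsigned short i = 1; i < used; i++) { /* insertion sort */
		struct crash_uid key = set[i];
		unsigned short j = i;
		while (j > 0 && set[j - 1].uid > key.uid) {
			set[j] = set[j - 1];
			j--;
		}
		set[j] = key;
	}
}

int main(void)
{
	insert_uid(1000, 60);
	insert_uid(10, 30);
	insert_uid(500, 45);
	for (unsigned short i = 0; i < used; i++)
		printf("uid %u expires %lu\n", set[i].uid, set[i].expires);
	printf("find 500 -> %d\n", find_uid(500));
	return 0;
}
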
75983diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
75984new file mode 100644
75985index 0000000..6b0c9cc
75986--- /dev/null
75987+++ b/grsecurity/gracl_shm.c
75988@@ -0,0 +1,40 @@
75989+#include <linux/kernel.h>
75990+#include <linux/mm.h>
75991+#include <linux/sched.h>
75992+#include <linux/file.h>
75993+#include <linux/ipc.h>
75994+#include <linux/gracl.h>
75995+#include <linux/grsecurity.h>
75996+#include <linux/grinternal.h>
75997+
75998+int
75999+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76000+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
76001+{
76002+ struct task_struct *task;
76003+
76004+ if (!gr_acl_is_enabled())
76005+ return 1;
76006+
76007+ rcu_read_lock();
76008+ read_lock(&tasklist_lock);
76009+
76010+ task = find_task_by_vpid(shm_cprid);
76011+
76012+ if (unlikely(!task))
76013+ task = find_task_by_vpid(shm_lapid);
76014+
76015+ if (unlikely(task && (time_before_eq64(task->start_time, shm_createtime) ||
76016+ (task_pid_nr(task) == shm_lapid)) &&
76017+ (task->acl->mode & GR_PROTSHM) &&
76018+ (task->acl != current->acl))) {
76019+ read_unlock(&tasklist_lock);
76020+ rcu_read_unlock();
76021+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
76022+ return 0;
76023+ }
76024+ read_unlock(&tasklist_lock);
76025+ rcu_read_unlock();
76026+
76027+ return 1;
76028+}
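
The shmat check above guards against pid reuse: a task found via the stored creator pid is only treated as the creator if it started no later than the segment's creation time; a younger task means the pid was recycled. A userspace stand-in for the time_before_eq64() comparison:

#include <stdio.h>

static int could_be_creator(unsigned long long task_start_ns,
			    unsigned long long shm_create_ns)
{
	return task_start_ns <= shm_create_ns; /* time_before_eq64() analogue */
}

int main(void)
{
	printf("%d\n", could_be_creator(100, 200)); /* 1: plausible creator */
	printf("%d\n", could_be_creator(300, 200)); /* 0: pid was reused */
	return 0;
}
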
76029diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
76030new file mode 100644
76031index 0000000..bc0be01
76032--- /dev/null
76033+++ b/grsecurity/grsec_chdir.c
76034@@ -0,0 +1,19 @@
76035+#include <linux/kernel.h>
76036+#include <linux/sched.h>
76037+#include <linux/fs.h>
76038+#include <linux/file.h>
76039+#include <linux/grsecurity.h>
76040+#include <linux/grinternal.h>
76041+
76042+void
76043+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
76044+{
76045+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
76046+ if ((grsec_enable_chdir && grsec_enable_group &&
76047+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
76048+ !grsec_enable_group)) {
76049+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
76050+ }
76051+#endif
76052+ return;
76053+}
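
The audit gate above, enabled with group filtering plus membership or enabled without any filtering, reduces to a single predicate. A userspace sketch (illustrative; the kernel code keeps the expanded form):

#include <stdio.h>

static int should_audit(int enabled, int group_filter, int in_group)
{
	return enabled && (!group_filter || in_group);
}

int main(void)
{
	printf("%d %d %d\n",
	       should_audit(1, 0, 0),  /* 1: no group filter */
	       should_audit(1, 1, 0),  /* 0: filtered out */
	       should_audit(1, 1, 1)); /* 1: filtered in */
	return 0;
}
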
76054diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
76055new file mode 100644
76056index 0000000..114ea4f
76057--- /dev/null
76058+++ b/grsecurity/grsec_chroot.c
76059@@ -0,0 +1,467 @@
76060+#include <linux/kernel.h>
76061+#include <linux/module.h>
76062+#include <linux/sched.h>
76063+#include <linux/file.h>
76064+#include <linux/fs.h>
76065+#include <linux/mount.h>
76066+#include <linux/types.h>
76067+#include "../fs/mount.h"
76068+#include <linux/grsecurity.h>
76069+#include <linux/grinternal.h>
76070+
76071+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76072+int gr_init_ran;
76073+#endif
76074+
76075+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
76076+{
76077+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
76078+ struct dentry *tmpd = dentry;
76079+
76080+ read_seqlock_excl(&mount_lock);
76081+ write_seqlock(&rename_lock);
76082+
76083+ while (tmpd != mnt->mnt_root) {
76084+ atomic_inc(&tmpd->chroot_refcnt);
76085+ tmpd = tmpd->d_parent;
76086+ }
76087+ atomic_inc(&tmpd->chroot_refcnt);
76088+
76089+ write_sequnlock(&rename_lock);
76090+ read_sequnlock_excl(&mount_lock);
76091+#endif
76092+}
76093+
76094+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
76095+{
76096+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
76097+ struct dentry *tmpd = dentry;
76098+
76099+ read_seqlock_excl(&mount_lock);
76100+ write_seqlock(&rename_lock);
76101+
76102+ while (tmpd != mnt->mnt_root) {
76103+ atomic_dec(&tmpd->chroot_refcnt);
76104+ tmpd = tmpd->d_parent;
76105+ }
76106+ atomic_dec(&tmpd->chroot_refcnt);
76107+
76108+ write_sequnlock(&rename_lock);
76109+ read_sequnlock_excl(&mount_lock);
76110+#endif
76111+}
76112+
76113+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
76114+static struct dentry *get_closest_chroot(struct dentry *dentry)
76115+{
76116+ write_seqlock(&rename_lock);
76117+ do {
76118+ if (atomic_read(&dentry->chroot_refcnt)) {
76119+ write_sequnlock(&rename_lock);
76120+ return dentry;
76121+ }
76122+ dentry = dentry->d_parent;
76123+ } while (!IS_ROOT(dentry));
76124+ write_sequnlock(&rename_lock);
76125+ return NULL;
76126+}
76127+#endif
76128+
76129+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
76130+ struct dentry *newdentry, struct vfsmount *newmnt)
76131+{
76132+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
76133+ struct dentry *chroot;
76134+
76135+ if (unlikely(!grsec_enable_chroot_rename))
76136+ return 0;
76137+
76138+ if (likely(!proc_is_chrooted(current) && gr_is_global_root(current_uid())))
76139+ return 0;
76140+
76141+ chroot = get_closest_chroot(olddentry);
76142+
76143+ if (chroot == NULL)
76144+ return 0;
76145+
76146+ if (is_subdir(newdentry, chroot))
76147+ return 0;
76148+
76149+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_RENAME_MSG, olddentry, oldmnt);
76150+
76151+ return 1;
76152+#else
76153+ return 0;
76154+#endif
76155+}
76156+
76157+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
76158+{
76159+#ifdef CONFIG_GRKERNSEC
76160+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
76161+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
76162+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76163+ && gr_init_ran
76164+#endif
76165+ )
76166+ task->gr_is_chrooted = 1;
76167+ else {
76168+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76169+ if (task_pid_nr(task) == 1 && !gr_init_ran)
76170+ gr_init_ran = 1;
76171+#endif
76172+ task->gr_is_chrooted = 0;
76173+ }
76174+
76175+ task->gr_chroot_dentry = path->dentry;
76176+#endif
76177+ return;
76178+}
76179+
76180+void gr_clear_chroot_entries(struct task_struct *task)
76181+{
76182+#ifdef CONFIG_GRKERNSEC
76183+ task->gr_is_chrooted = 0;
76184+ task->gr_chroot_dentry = NULL;
76185+#endif
76186+ return;
76187+}
76188+
76189+int
76190+gr_handle_chroot_unix(const pid_t pid)
76191+{
76192+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
76193+ struct task_struct *p;
76194+
76195+ if (unlikely(!grsec_enable_chroot_unix))
76196+ return 1;
76197+
76198+ if (likely(!proc_is_chrooted(current)))
76199+ return 1;
76200+
76201+ rcu_read_lock();
76202+ read_lock(&tasklist_lock);
76203+ p = find_task_by_vpid_unrestricted(pid);
76204+ if (unlikely(p && !have_same_root(current, p))) {
76205+ read_unlock(&tasklist_lock);
76206+ rcu_read_unlock();
76207+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
76208+ return 0;
76209+ }
76210+ read_unlock(&tasklist_lock);
76211+ rcu_read_unlock();
76212+#endif
76213+ return 1;
76214+}
76215+
76216+int
76217+gr_handle_chroot_nice(void)
76218+{
76219+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76220+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
76221+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
76222+ return -EPERM;
76223+ }
76224+#endif
76225+ return 0;
76226+}
76227+
76228+int
76229+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
76230+{
76231+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76232+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
76233+ && proc_is_chrooted(current)) {
76234+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
76235+ return -EACCES;
76236+ }
76237+#endif
76238+ return 0;
76239+}
76240+
76241+int
76242+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
76243+{
76244+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76245+ struct task_struct *p;
76246+ int ret = 0;
76247+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
76248+ return ret;
76249+
76250+ read_lock(&tasklist_lock);
76251+ do_each_pid_task(pid, type, p) {
76252+ if (!have_same_root(current, p)) {
76253+ ret = 1;
76254+ goto out;
76255+ }
76256+ } while_each_pid_task(pid, type, p);
76257+out:
76258+ read_unlock(&tasklist_lock);
76259+ return ret;
76260+#endif
76261+ return 0;
76262+}
76263+
76264+int
76265+gr_pid_is_chrooted(struct task_struct *p)
76266+{
76267+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76268+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
76269+ return 0;
76270+
76271+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
76272+ !have_same_root(current, p)) {
76273+ return 1;
76274+ }
76275+#endif
76276+ return 0;
76277+}
76278+
76279+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
76280+
76281+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
76282+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
76283+{
76284+ struct path path, currentroot;
76285+ int ret = 0;
76286+
76287+ path.dentry = (struct dentry *)u_dentry;
76288+ path.mnt = (struct vfsmount *)u_mnt;
76289+ get_fs_root(current->fs, &currentroot);
76290+ if (path_is_under(&path, &currentroot))
76291+ ret = 1;
76292+ path_put(&currentroot);
76293+
76294+ return ret;
76295+}
76296+#endif
76297+
76298+int
76299+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
76300+{
76301+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76302+ if (!grsec_enable_chroot_fchdir)
76303+ return 1;
76304+
76305+ if (!proc_is_chrooted(current))
76306+ return 1;
76307+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
76308+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
76309+ return 0;
76310+ }
76311+#endif
76312+ return 1;
76313+}
76314+
76315+int
76316+gr_chroot_fhandle(void)
76317+{
76318+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76319+ if (!grsec_enable_chroot_fchdir)
76320+ return 1;
76321+
76322+ if (!proc_is_chrooted(current))
76323+ return 1;
76324+ else {
76325+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
76326+ return 0;
76327+ }
76328+#endif
76329+ return 1;
76330+}
76331+
76332+int
76333+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76334+ const u64 shm_createtime)
76335+{
76336+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
76337+ struct task_struct *p;
76338+
76339+ if (unlikely(!grsec_enable_chroot_shmat))
76340+ return 1;
76341+
76342+ if (likely(!proc_is_chrooted(current)))
76343+ return 1;
76344+
76345+ rcu_read_lock();
76346+ read_lock(&tasklist_lock);
76347+
76348+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
76349+ if (time_before_eq64(p->start_time, shm_createtime)) {
76350+ if (have_same_root(current, p)) {
76351+ goto allow;
76352+ } else {
76353+ read_unlock(&tasklist_lock);
76354+ rcu_read_unlock();
76355+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76356+ return 0;
76357+ }
76358+ }
76359+ /* creator exited, pid reuse, fall through to next check */
76360+ }
76361+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
76362+ if (unlikely(!have_same_root(current, p))) {
76363+ read_unlock(&tasklist_lock);
76364+ rcu_read_unlock();
76365+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76366+ return 0;
76367+ }
76368+ }
76369+
76370+allow:
76371+ read_unlock(&tasklist_lock);
76372+ rcu_read_unlock();
76373+#endif
76374+ return 1;
76375+}
76376+
76377+void
76378+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
76379+{
76380+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
76381+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
76382+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
76383+#endif
76384+ return;
76385+}
76386+
76387+int
76388+gr_handle_chroot_mknod(const struct dentry *dentry,
76389+ const struct vfsmount *mnt, const int mode)
76390+{
76391+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
76392+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
76393+ proc_is_chrooted(current)) {
76394+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
76395+ return -EPERM;
76396+ }
76397+#endif
76398+ return 0;
76399+}
76400+
76401+int
76402+gr_handle_chroot_mount(const struct dentry *dentry,
76403+ const struct vfsmount *mnt, const char *dev_name)
76404+{
76405+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
76406+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
76407+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
76408+ return -EPERM;
76409+ }
76410+#endif
76411+ return 0;
76412+}
76413+
76414+int
76415+gr_handle_chroot_pivot(void)
76416+{
76417+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
76418+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
76419+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
76420+ return -EPERM;
76421+ }
76422+#endif
76423+ return 0;
76424+}
76425+
76426+int
76427+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
76428+{
76429+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
76430+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
76431+ !gr_is_outside_chroot(dentry, mnt)) {
76432+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
76433+ return -EPERM;
76434+ }
76435+#endif
76436+ return 0;
76437+}
76438+
76439+extern const char *captab_log[];
76440+extern int captab_log_entries;
76441+
76442+int
76443+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
76444+{
76445+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76446+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76447+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76448+ if (cap_raised(chroot_caps, cap)) {
76449+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
76450+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
76451+ }
76452+ return 0;
76453+ }
76454+ }
76455+#endif
76456+ return 1;
76457+}
76458+
76459+int
76460+gr_chroot_is_capable(const int cap)
76461+{
76462+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76463+ return gr_task_chroot_is_capable(current, current_cred(), cap);
76464+#endif
76465+ return 1;
76466+}
76467+
76468+int
76469+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
76470+{
76471+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76472+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76473+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76474+ if (cap_raised(chroot_caps, cap)) {
76475+ return 0;
76476+ }
76477+ }
76478+#endif
76479+ return 1;
76480+}
76481+
76482+int
76483+gr_chroot_is_capable_nolog(const int cap)
76484+{
76485+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76486+ return gr_task_chroot_is_capable_nolog(current, cap);
76487+#endif
76488+ return 1;
76489+}
76490+
76491+int
76492+gr_handle_chroot_sysctl(const int op)
76493+{
76494+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
76495+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
76496+ proc_is_chrooted(current))
76497+ return -EACCES;
76498+#endif
76499+ return 0;
76500+}
76501+
76502+void
76503+gr_handle_chroot_chdir(const struct path *path)
76504+{
76505+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
76506+ if (grsec_enable_chroot_chdir)
76507+ set_fs_pwd(current->fs, path);
76508+#endif
76509+ return;
76510+}
76511+
76512+int
76513+gr_handle_chroot_chmod(const struct dentry *dentry,
76514+ const struct vfsmount *mnt, const int mode)
76515+{
76516+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
76517+ /* allow chmod +s on directories, but not files */
76518+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
76519+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
76520+ proc_is_chrooted(current)) {
76521+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
76522+ return -EPERM;
76523+ }
76524+#endif
76525+ return 0;
76526+}
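
gr_handle_chroot_chmod() above refuses mode changes on non-directories inside a chroot when the new mode would set setuid, or setgid together with group execute (setgid without g+x denotes mandatory locking and is allowed through). A userspace sketch of that predicate, with hypothetical parameters standing in for the dentry and task state:

#include <stdio.h>
#include <sys/stat.h>

static int deny_chroot_chmod(mode_t cur, mode_t new, int chrooted)
{
	if (!chrooted || S_ISDIR(cur))
		return 0;
	return (new & S_ISUID) ||
	       ((new & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
}

int main(void)
{
	printf("%d\n", deny_chroot_chmod(S_IFREG, S_ISUID, 1));           /* 1 */
	printf("%d\n", deny_chroot_chmod(S_IFREG, S_ISGID, 1));           /* 0 */
	printf("%d\n", deny_chroot_chmod(S_IFREG, S_ISGID | S_IXGRP, 1)); /* 1 */
	printf("%d\n", deny_chroot_chmod(S_IFDIR, S_ISUID, 1));           /* 0 */
	return 0;
}
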
76527diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
76528new file mode 100644
76529index 0000000..946f750
76530--- /dev/null
76531+++ b/grsecurity/grsec_disabled.c
76532@@ -0,0 +1,445 @@
76533+#include <linux/kernel.h>
76534+#include <linux/module.h>
76535+#include <linux/sched.h>
76536+#include <linux/file.h>
76537+#include <linux/fs.h>
76538+#include <linux/kdev_t.h>
76539+#include <linux/net.h>
76540+#include <linux/in.h>
76541+#include <linux/ip.h>
76542+#include <linux/skbuff.h>
76543+#include <linux/sysctl.h>
76544+
76545+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
76546+void
76547+pax_set_initial_flags(struct linux_binprm *bprm)
76548+{
76549+ return;
76550+}
76551+#endif
76552+
76553+#ifdef CONFIG_SYSCTL
76554+__u32
76555+gr_handle_sysctl(const struct ctl_table * table, const int op)
76556+{
76557+ return 0;
76558+}
76559+#endif
76560+
76561+#ifdef CONFIG_TASKSTATS
76562+int gr_is_taskstats_denied(int pid)
76563+{
76564+ return 0;
76565+}
76566+#endif
76567+
76568+int
76569+gr_acl_is_enabled(void)
76570+{
76571+ return 0;
76572+}
76573+
76574+int
76575+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
76576+{
76577+ return 0;
76578+}
76579+
76580+void
76581+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
76582+{
76583+ return;
76584+}
76585+
76586+int
76587+gr_handle_rawio(const struct inode *inode)
76588+{
76589+ return 0;
76590+}
76591+
76592+void
76593+gr_acl_handle_psacct(struct task_struct *task, const long code)
76594+{
76595+ return;
76596+}
76597+
76598+int
76599+gr_handle_ptrace(struct task_struct *task, const long request)
76600+{
76601+ return 0;
76602+}
76603+
76604+int
76605+gr_handle_proc_ptrace(struct task_struct *task)
76606+{
76607+ return 0;
76608+}
76609+
76610+int
76611+gr_set_acls(const int type)
76612+{
76613+ return 0;
76614+}
76615+
76616+int
76617+gr_check_hidden_task(const struct task_struct *tsk)
76618+{
76619+ return 0;
76620+}
76621+
76622+int
76623+gr_check_protected_task(const struct task_struct *task)
76624+{
76625+ return 0;
76626+}
76627+
76628+int
76629+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
76630+{
76631+ return 0;
76632+}
76633+
76634+void
76635+gr_copy_label(struct task_struct *tsk)
76636+{
76637+ return;
76638+}
76639+
76640+void
76641+gr_set_pax_flags(struct task_struct *task)
76642+{
76643+ return;
76644+}
76645+
76646+int
76647+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
76648+ const int unsafe_share)
76649+{
76650+ return 0;
76651+}
76652+
76653+void
76654+gr_handle_delete(const u64 ino, const dev_t dev)
76655+{
76656+ return;
76657+}
76658+
76659+void
76660+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
76661+{
76662+ return;
76663+}
76664+
76665+void
76666+gr_handle_crash(struct task_struct *task, const int sig)
76667+{
76668+ return;
76669+}
76670+
76671+int
76672+gr_check_crash_exec(const struct file *filp)
76673+{
76674+ return 0;
76675+}
76676+
76677+int
76678+gr_check_crash_uid(const kuid_t uid)
76679+{
76680+ return 0;
76681+}
76682+
76683+void
76684+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
76685+ struct dentry *old_dentry,
76686+ struct dentry *new_dentry,
76687+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
76688+{
76689+ return;
76690+}
76691+
76692+int
76693+gr_search_socket(const int family, const int type, const int protocol)
76694+{
76695+ return 1;
76696+}
76697+
76698+int
76699+gr_search_connectbind(const int mode, const struct socket *sock,
76700+ const struct sockaddr_in *addr)
76701+{
76702+ return 0;
76703+}
76704+
76705+void
76706+gr_handle_alertkill(struct task_struct *task)
76707+{
76708+ return;
76709+}
76710+
76711+__u32
76712+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
76713+{
76714+ return 1;
76715+}
76716+
76717+__u32
76718+gr_acl_handle_hidden_file(const struct dentry * dentry,
76719+ const struct vfsmount * mnt)
76720+{
76721+ return 1;
76722+}
76723+
76724+__u32
76725+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
76726+ int acc_mode)
76727+{
76728+ return 1;
76729+}
76730+
76731+__u32
76732+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
76733+{
76734+ return 1;
76735+}
76736+
76737+__u32
76738+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
76739+{
76740+ return 1;
76741+}
76742+
76743+int
76744+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
76745+ unsigned int *vm_flags)
76746+{
76747+ return 1;
76748+}
76749+
76750+__u32
76751+gr_acl_handle_truncate(const struct dentry * dentry,
76752+ const struct vfsmount * mnt)
76753+{
76754+ return 1;
76755+}
76756+
76757+__u32
76758+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
76759+{
76760+ return 1;
76761+}
76762+
76763+__u32
76764+gr_acl_handle_access(const struct dentry * dentry,
76765+ const struct vfsmount * mnt, const int fmode)
76766+{
76767+ return 1;
76768+}
76769+
76770+__u32
76771+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
76772+ umode_t *mode)
76773+{
76774+ return 1;
76775+}
76776+
76777+__u32
76778+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
76779+{
76780+ return 1;
76781+}
76782+
76783+__u32
76784+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
76785+{
76786+ return 1;
76787+}
76788+
76789+__u32
76790+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
76791+{
76792+ return 1;
76793+}
76794+
76795+void
76796+grsecurity_init(void)
76797+{
76798+ return;
76799+}
76800+
76801+umode_t gr_acl_umask(void)
76802+{
76803+ return 0;
76804+}
76805+
76806+__u32
76807+gr_acl_handle_mknod(const struct dentry * new_dentry,
76808+ const struct dentry * parent_dentry,
76809+ const struct vfsmount * parent_mnt,
76810+ const int mode)
76811+{
76812+ return 1;
76813+}
76814+
76815+__u32
76816+gr_acl_handle_mkdir(const struct dentry * new_dentry,
76817+ const struct dentry * parent_dentry,
76818+ const struct vfsmount * parent_mnt)
76819+{
76820+ return 1;
76821+}
76822+
76823+__u32
76824+gr_acl_handle_symlink(const struct dentry * new_dentry,
76825+ const struct dentry * parent_dentry,
76826+ const struct vfsmount * parent_mnt, const struct filename *from)
76827+{
76828+ return 1;
76829+}
76830+
76831+__u32
76832+gr_acl_handle_link(const struct dentry * new_dentry,
76833+ const struct dentry * parent_dentry,
76834+ const struct vfsmount * parent_mnt,
76835+ const struct dentry * old_dentry,
76836+ const struct vfsmount * old_mnt, const struct filename *to)
76837+{
76838+ return 1;
76839+}
76840+
76841+int
76842+gr_acl_handle_rename(const struct dentry *new_dentry,
76843+ const struct dentry *parent_dentry,
76844+ const struct vfsmount *parent_mnt,
76845+ const struct dentry *old_dentry,
76846+ const struct inode *old_parent_inode,
76847+ const struct vfsmount *old_mnt, const struct filename *newname,
76848+ unsigned int flags)
76849+{
76850+ return 0;
76851+}
76852+
76853+int
76854+gr_acl_handle_filldir(const struct file *file, const char *name,
76855+ const int namelen, const u64 ino)
76856+{
76857+ return 1;
76858+}
76859+
76860+int
76861+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76862+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
76863+{
76864+ return 1;
76865+}
76866+
76867+int
76868+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
76869+{
76870+ return 0;
76871+}
76872+
76873+int
76874+gr_search_accept(const struct socket *sock)
76875+{
76876+ return 0;
76877+}
76878+
76879+int
76880+gr_search_listen(const struct socket *sock)
76881+{
76882+ return 0;
76883+}
76884+
76885+int
76886+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
76887+{
76888+ return 0;
76889+}
76890+
76891+__u32
76892+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
76893+{
76894+ return 1;
76895+}
76896+
76897+__u32
76898+gr_acl_handle_creat(const struct dentry * dentry,
76899+ const struct dentry * p_dentry,
76900+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
76901+ const int imode)
76902+{
76903+ return 1;
76904+}
76905+
76906+void
76907+gr_acl_handle_exit(void)
76908+{
76909+ return;
76910+}
76911+
76912+int
76913+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
76914+{
76915+ return 1;
76916+}
76917+
76918+void
76919+gr_set_role_label(const kuid_t uid, const kgid_t gid)
76920+{
76921+ return;
76922+}
76923+
76924+int
76925+gr_acl_handle_procpidmem(const struct task_struct *task)
76926+{
76927+ return 0;
76928+}
76929+
76930+int
76931+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
76932+{
76933+ return 0;
76934+}
76935+
76936+int
76937+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
76938+{
76939+ return 0;
76940+}
76941+
76942+int
76943+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
76944+{
76945+ return 0;
76946+}
76947+
76948+int
76949+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
76950+{
76951+ return 0;
76952+}
76953+
76954+int gr_acl_enable_at_secure(void)
76955+{
76956+ return 0;
76957+}
76958+
76959+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
76960+{
76961+ return dentry->d_sb->s_dev;
76962+}
76963+
76964+u64 gr_get_ino_from_dentry(struct dentry *dentry)
76965+{
76966+ return dentry->d_inode->i_ino;
76967+}
76968+
76969+void gr_put_exec_file(struct task_struct *task)
76970+{
76971+ return;
76972+}
76973+
76974+#ifdef CONFIG_SECURITY
76975+EXPORT_SYMBOL_GPL(gr_check_user_change);
76976+EXPORT_SYMBOL_GPL(gr_check_group_change);
76977+#endif
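
grsec_disabled.c is the compiled-out build of the grsecurity hooks: every entry point collapses to a permissive constant so call sites never need #ifdef guards. A minimal sketch of the pattern (CONFIG_FEATURE is a hypothetical stand-in symbol):

#include <stdio.h>

#ifdef CONFIG_FEATURE
static int feature_check(int x) { return x > 0; } /* real logic */
#else
static int feature_check(int x) { (void)x; return 1; } /* always allow */
#endif

int main(void)
{
	printf("allowed=%d\n", feature_check(-1));
	return 0;
}
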
76978diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
76979new file mode 100644
76980index 0000000..fb7531e
76981--- /dev/null
76982+++ b/grsecurity/grsec_exec.c
76983@@ -0,0 +1,189 @@
76984+#include <linux/kernel.h>
76985+#include <linux/sched.h>
76986+#include <linux/file.h>
76987+#include <linux/binfmts.h>
76988+#include <linux/fs.h>
76989+#include <linux/types.h>
76990+#include <linux/grdefs.h>
76991+#include <linux/grsecurity.h>
76992+#include <linux/grinternal.h>
76993+#include <linux/capability.h>
76994+#include <linux/module.h>
76995+#include <linux/compat.h>
76996+
76997+#include <asm/uaccess.h>
76998+
76999+#ifdef CONFIG_GRKERNSEC_EXECLOG
77000+static char gr_exec_arg_buf[132];
77001+static DEFINE_MUTEX(gr_exec_arg_mutex);
77002+#endif
77003+
77004+struct user_arg_ptr {
77005+#ifdef CONFIG_COMPAT
77006+ bool is_compat;
77007+#endif
77008+ union {
77009+ const char __user *const __user *native;
77010+#ifdef CONFIG_COMPAT
77011+ const compat_uptr_t __user *compat;
77012+#endif
77013+ } ptr;
77014+};
77015+
77016+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
77017+
77018+void
77019+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
77020+{
77021+#ifdef CONFIG_GRKERNSEC_EXECLOG
77022+ char *grarg = gr_exec_arg_buf;
77023+ unsigned int i, x, execlen = 0;
77024+ char c;
77025+
77026+ if (!((grsec_enable_execlog && grsec_enable_group &&
77027+ in_group_p(grsec_audit_gid))
77028+ || (grsec_enable_execlog && !grsec_enable_group)))
77029+ return;
77030+
77031+ mutex_lock(&gr_exec_arg_mutex);
77032+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
77033+
77034+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
77035+ const char __user *p;
77036+ unsigned int len;
77037+
77038+ p = get_user_arg_ptr(argv, i);
77039+ if (IS_ERR(p))
77040+ goto log;
77041+
77042+ len = strnlen_user(p, 128 - execlen);
77043+ if (len > 128 - execlen)
77044+ len = 128 - execlen;
77045+ else if (len > 0)
77046+ len--;
77047+ if (copy_from_user(grarg + execlen, p, len))
77048+ goto log;
77049+
77050+ /* rewrite unprintable characters */
77051+ for (x = 0; x < len; x++) {
77052+ c = *(grarg + execlen + x);
77053+ if (c < 32 || c > 126)
77054+ *(grarg + execlen + x) = ' ';
77055+ }
77056+
77057+ execlen += len;
77058+ *(grarg + execlen) = ' ';
77059+ *(grarg + execlen + 1) = '\0';
77060+ execlen++;
77061+ }
77062+
77063+ log:
77064+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
77065+ bprm->file->f_path.mnt, grarg);
77066+ mutex_unlock(&gr_exec_arg_mutex);
77067+#endif
77068+ return;
77069+}
77070+
77071+#ifdef CONFIG_GRKERNSEC
77072+extern int gr_acl_is_capable(const int cap);
77073+extern int gr_acl_is_capable_nolog(const int cap);
77074+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
77075+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
77076+extern int gr_chroot_is_capable(const int cap);
77077+extern int gr_chroot_is_capable_nolog(const int cap);
77078+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
77079+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
77080+#endif
77081+
77082+const char *captab_log[] = {
77083+ "CAP_CHOWN",
77084+ "CAP_DAC_OVERRIDE",
77085+ "CAP_DAC_READ_SEARCH",
77086+ "CAP_FOWNER",
77087+ "CAP_FSETID",
77088+ "CAP_KILL",
77089+ "CAP_SETGID",
77090+ "CAP_SETUID",
77091+ "CAP_SETPCAP",
77092+ "CAP_LINUX_IMMUTABLE",
77093+ "CAP_NET_BIND_SERVICE",
77094+ "CAP_NET_BROADCAST",
77095+ "CAP_NET_ADMIN",
77096+ "CAP_NET_RAW",
77097+ "CAP_IPC_LOCK",
77098+ "CAP_IPC_OWNER",
77099+ "CAP_SYS_MODULE",
77100+ "CAP_SYS_RAWIO",
77101+ "CAP_SYS_CHROOT",
77102+ "CAP_SYS_PTRACE",
77103+ "CAP_SYS_PACCT",
77104+ "CAP_SYS_ADMIN",
77105+ "CAP_SYS_BOOT",
77106+ "CAP_SYS_NICE",
77107+ "CAP_SYS_RESOURCE",
77108+ "CAP_SYS_TIME",
77109+ "CAP_SYS_TTY_CONFIG",
77110+ "CAP_MKNOD",
77111+ "CAP_LEASE",
77112+ "CAP_AUDIT_WRITE",
77113+ "CAP_AUDIT_CONTROL",
77114+ "CAP_SETFCAP",
77115+ "CAP_MAC_OVERRIDE",
77116+ "CAP_MAC_ADMIN",
77117+ "CAP_SYSLOG",
77118+ "CAP_WAKE_ALARM",
77119+ "CAP_BLOCK_SUSPEND",
77120+ "CAP_AUDIT_READ"
77121+};
77122+
77123+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
77124+
77125+int gr_is_capable(const int cap)
77126+{
77127+#ifdef CONFIG_GRKERNSEC
77128+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
77129+ return 1;
77130+ return 0;
77131+#else
77132+ return 1;
77133+#endif
77134+}
77135+
77136+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
77137+{
77138+#ifdef CONFIG_GRKERNSEC
77139+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
77140+ return 1;
77141+ return 0;
77142+#else
77143+ return 1;
77144+#endif
77145+}
77146+
77147+int gr_is_capable_nolog(const int cap)
77148+{
77149+#ifdef CONFIG_GRKERNSEC
77150+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
77151+ return 1;
77152+ return 0;
77153+#else
77154+ return 1;
77155+#endif
77156+}
77157+
77158+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
77159+{
77160+#ifdef CONFIG_GRKERNSEC
77161+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
77162+ return 1;
77163+ return 0;
77164+#else
77165+ return 1;
77166+#endif
77167+}
77168+
77169+EXPORT_SYMBOL_GPL(gr_is_capable);
77170+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
77171+EXPORT_SYMBOL_GPL(gr_task_is_capable);
77172+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
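
gr_handle_exec_args() above serializes up to 128 bytes of argv into a fixed 132-byte buffer, scrubbing unprintable bytes to spaces and separating arguments with a single space. A runnable userspace approximation (copy_from_user() and get_user_arg_ptr() are replaced by direct access; names are illustrative):

#include <stdio.h>
#include <string.h>

static char buf[132];

static void capture_args(int argc, char **argv)
{
	unsigned int len = 0;

	memset(buf, 0, sizeof(buf));
	for (int i = 0; i < argc && len < 128; i++) {
		unsigned int n = strnlen(argv[i], 128 - len);

		memcpy(buf + len, argv[i], n);
		for (unsigned int x = 0; x < n; x++)  /* scrub unprintables */
			if (buf[len + x] < 32 || buf[len + x] > 126)
				buf[len + x] = ' ';
		len += n;
		buf[len++] = ' ';
		buf[len] = '\0';
	}
}

int main(int argc, char **argv)
{
	capture_args(argc, argv);
	printf("captured: %s\n", buf);
	return 0;
}
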
77173diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
77174new file mode 100644
77175index 0000000..06cc6ea
77176--- /dev/null
77177+++ b/grsecurity/grsec_fifo.c
77178@@ -0,0 +1,24 @@
77179+#include <linux/kernel.h>
77180+#include <linux/sched.h>
77181+#include <linux/fs.h>
77182+#include <linux/file.h>
77183+#include <linux/grinternal.h>
77184+
77185+int
77186+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
77187+ const struct dentry *dir, const int flag, const int acc_mode)
77188+{
77189+#ifdef CONFIG_GRKERNSEC_FIFO
77190+ const struct cred *cred = current_cred();
77191+
77192+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
77193+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
77194+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
77195+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
77196+ if (!inode_permission(dentry->d_inode, acc_mode))
77197+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
77198+ return -EACCES;
77199+ }
77200+#endif
77201+ return 0;
77202+}
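
The FIFO rule above refuses opening a FIFO in a sticky directory unless O_EXCL was requested, the FIFO belongs to the directory owner, or it belongs to the opener; note that the denial is unconditional and only the logging depends on inode_permission(). A userspace sketch of the deny predicate (illustrative parameters):

#include <stdio.h>
#include <sys/stat.h>
#include <fcntl.h>

static int deny_fifo_open(mode_t dir_mode, unsigned int dir_uid,
			  unsigned int fifo_uid, unsigned int fsuid,
			  int flags)
{
	return !(flags & O_EXCL) && (dir_mode & S_ISVTX) &&
	       fifo_uid != dir_uid && fsuid != fifo_uid;
}

int main(void)
{
	/* /tmp-like sticky dir owned by root, FIFO owned by 1000 */
	printf("%d\n", deny_fifo_open(S_ISVTX | 0777, 0, 1000, 1001, O_RDONLY)); /* 1 */
	printf("%d\n", deny_fifo_open(S_ISVTX | 0777, 0, 1000, 1000, O_RDONLY)); /* 0 */
	return 0;
}
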
77203diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
77204new file mode 100644
77205index 0000000..8ca18bf
77206--- /dev/null
77207+++ b/grsecurity/grsec_fork.c
77208@@ -0,0 +1,23 @@
77209+#include <linux/kernel.h>
77210+#include <linux/sched.h>
77211+#include <linux/grsecurity.h>
77212+#include <linux/grinternal.h>
77213+#include <linux/errno.h>
77214+
77215+void
77216+gr_log_forkfail(const int retval)
77217+{
77218+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77219+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
77220+ switch (retval) {
77221+ case -EAGAIN:
77222+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
77223+ break;
77224+ case -ENOMEM:
77225+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
77226+ break;
77227+ }
77228+ }
77229+#endif
77230+ return;
77231+}
77232diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
77233new file mode 100644
77234index 0000000..4ed9e7d
77235--- /dev/null
77236+++ b/grsecurity/grsec_init.c
77237@@ -0,0 +1,290 @@
77238+#include <linux/kernel.h>
77239+#include <linux/sched.h>
77240+#include <linux/mm.h>
77241+#include <linux/gracl.h>
77242+#include <linux/slab.h>
77243+#include <linux/vmalloc.h>
77244+#include <linux/percpu.h>
77245+#include <linux/module.h>
77246+
77247+int grsec_enable_ptrace_readexec;
77248+int grsec_enable_setxid;
77249+int grsec_enable_symlinkown;
77250+kgid_t grsec_symlinkown_gid;
77251+int grsec_enable_brute;
77252+int grsec_enable_link;
77253+int grsec_enable_dmesg;
77254+int grsec_enable_harden_ptrace;
77255+int grsec_enable_harden_ipc;
77256+int grsec_enable_fifo;
77257+int grsec_enable_execlog;
77258+int grsec_enable_signal;
77259+int grsec_enable_forkfail;
77260+int grsec_enable_audit_ptrace;
77261+int grsec_enable_time;
77262+int grsec_enable_group;
77263+kgid_t grsec_audit_gid;
77264+int grsec_enable_chdir;
77265+int grsec_enable_mount;
77266+int grsec_enable_rofs;
77267+int grsec_deny_new_usb;
77268+int grsec_enable_chroot_findtask;
77269+int grsec_enable_chroot_mount;
77270+int grsec_enable_chroot_shmat;
77271+int grsec_enable_chroot_fchdir;
77272+int grsec_enable_chroot_double;
77273+int grsec_enable_chroot_pivot;
77274+int grsec_enable_chroot_chdir;
77275+int grsec_enable_chroot_chmod;
77276+int grsec_enable_chroot_mknod;
77277+int grsec_enable_chroot_nice;
77278+int grsec_enable_chroot_execlog;
77279+int grsec_enable_chroot_caps;
77280+int grsec_enable_chroot_rename;
77281+int grsec_enable_chroot_sysctl;
77282+int grsec_enable_chroot_unix;
77283+int grsec_enable_tpe;
77284+kgid_t grsec_tpe_gid;
77285+int grsec_enable_blackhole;
77286+#ifdef CONFIG_IPV6_MODULE
77287+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
77288+#endif
77289+int grsec_lastack_retries;
77290+int grsec_enable_tpe_all;
77291+int grsec_enable_tpe_invert;
77292+int grsec_enable_socket_all;
77293+kgid_t grsec_socket_all_gid;
77294+int grsec_enable_socket_client;
77295+kgid_t grsec_socket_client_gid;
77296+int grsec_enable_socket_server;
77297+kgid_t grsec_socket_server_gid;
77298+int grsec_resource_logging;
77299+int grsec_disable_privio;
77300+int grsec_enable_log_rwxmaps;
77301+int grsec_lock;
77302+
77303+DEFINE_SPINLOCK(grsec_alert_lock);
77304+unsigned long grsec_alert_wtime = 0;
77305+unsigned long grsec_alert_fyet = 0;
77306+
77307+DEFINE_SPINLOCK(grsec_audit_lock);
77308+
77309+DEFINE_RWLOCK(grsec_exec_file_lock);
77310+
77311+char *gr_shared_page[4];
77312+
77313+char *gr_alert_log_fmt;
77314+char *gr_audit_log_fmt;
77315+char *gr_alert_log_buf;
77316+char *gr_audit_log_buf;
77317+
77318+extern struct gr_arg *gr_usermode;
77319+extern unsigned char *gr_system_salt;
77320+extern unsigned char *gr_system_sum;
77321+
77322+void __init
77323+grsecurity_init(void)
77324+{
77325+ int j;
77326+ /* create the per-cpu shared pages */
77327+
77328+#ifdef CONFIG_X86
77329+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
77330+#endif
77331+
77332+ for (j = 0; j < 4; j++) {
77333+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
77334+ if (gr_shared_page[j] == NULL) {
77335+ panic("Unable to allocate grsecurity shared page");
77336+ return;
77337+ }
77338+ }
77339+
77340+ /* allocate log buffers */
77341+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
77342+ if (!gr_alert_log_fmt) {
77343+ panic("Unable to allocate grsecurity alert log format buffer");
77344+ return;
77345+ }
77346+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
77347+ if (!gr_audit_log_fmt) {
77348+ panic("Unable to allocate grsecurity audit log format buffer");
77349+ return;
77350+ }
77351+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77352+ if (!gr_alert_log_buf) {
77353+ panic("Unable to allocate grsecurity alert log buffer");
77354+ return;
77355+ }
77356+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77357+ if (!gr_audit_log_buf) {
77358+ panic("Unable to allocate grsecurity audit log buffer");
77359+ return;
77360+ }
77361+
77362+ /* allocate memory for authentication structure */
77363+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
77364+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
77365+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
77366+
77367+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
77368+ panic("Unable to allocate grsecurity authentication structure");
77369+ return;
77370+ }
77371+
77372+#ifdef CONFIG_GRKERNSEC_IO
77373+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
77374+ grsec_disable_privio = 1;
77375+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77376+ grsec_disable_privio = 1;
77377+#else
77378+ grsec_disable_privio = 0;
77379+#endif
77380+#endif
77381+
77382+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
77383+ /* for backward compatibility, tpe_invert always defaults to on if
77384+ enabled in the kernel
77385+ */
77386+ grsec_enable_tpe_invert = 1;
77387+#endif
77388+
77389+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77390+#ifndef CONFIG_GRKERNSEC_SYSCTL
77391+ grsec_lock = 1;
77392+#endif
77393+
77394+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77395+ grsec_enable_log_rwxmaps = 1;
77396+#endif
77397+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
77398+ grsec_enable_group = 1;
77399+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
77400+#endif
77401+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
77402+ grsec_enable_ptrace_readexec = 1;
77403+#endif
77404+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
77405+ grsec_enable_chdir = 1;
77406+#endif
77407+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
77408+ grsec_enable_harden_ptrace = 1;
77409+#endif
77410+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77411+ grsec_enable_harden_ipc = 1;
77412+#endif
77413+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77414+ grsec_enable_mount = 1;
77415+#endif
77416+#ifdef CONFIG_GRKERNSEC_LINK
77417+ grsec_enable_link = 1;
77418+#endif
77419+#ifdef CONFIG_GRKERNSEC_BRUTE
77420+ grsec_enable_brute = 1;
77421+#endif
77422+#ifdef CONFIG_GRKERNSEC_DMESG
77423+ grsec_enable_dmesg = 1;
77424+#endif
77425+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77426+ grsec_enable_blackhole = 1;
77427+ grsec_lastack_retries = 4;
77428+#endif
77429+#ifdef CONFIG_GRKERNSEC_FIFO
77430+ grsec_enable_fifo = 1;
77431+#endif
77432+#ifdef CONFIG_GRKERNSEC_EXECLOG
77433+ grsec_enable_execlog = 1;
77434+#endif
77435+#ifdef CONFIG_GRKERNSEC_SETXID
77436+ grsec_enable_setxid = 1;
77437+#endif
77438+#ifdef CONFIG_GRKERNSEC_SIGNAL
77439+ grsec_enable_signal = 1;
77440+#endif
77441+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77442+ grsec_enable_forkfail = 1;
77443+#endif
77444+#ifdef CONFIG_GRKERNSEC_TIME
77445+ grsec_enable_time = 1;
77446+#endif
77447+#ifdef CONFIG_GRKERNSEC_RESLOG
77448+ grsec_resource_logging = 1;
77449+#endif
77450+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77451+ grsec_enable_chroot_findtask = 1;
77452+#endif
77453+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
77454+ grsec_enable_chroot_unix = 1;
77455+#endif
77456+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
77457+ grsec_enable_chroot_mount = 1;
77458+#endif
77459+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77460+ grsec_enable_chroot_fchdir = 1;
77461+#endif
77462+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
77463+ grsec_enable_chroot_shmat = 1;
77464+#endif
77465+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
77466+ grsec_enable_audit_ptrace = 1;
77467+#endif
77468+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
77469+ grsec_enable_chroot_double = 1;
77470+#endif
77471+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
77472+ grsec_enable_chroot_pivot = 1;
77473+#endif
77474+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
77475+ grsec_enable_chroot_chdir = 1;
77476+#endif
77477+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
77478+ grsec_enable_chroot_chmod = 1;
77479+#endif
77480+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
77481+ grsec_enable_chroot_mknod = 1;
77482+#endif
77483+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
77484+ grsec_enable_chroot_nice = 1;
77485+#endif
77486+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
77487+ grsec_enable_chroot_execlog = 1;
77488+#endif
77489+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77490+ grsec_enable_chroot_caps = 1;
77491+#endif
77492+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
77493+ grsec_enable_chroot_rename = 1;
77494+#endif
77495+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
77496+ grsec_enable_chroot_sysctl = 1;
77497+#endif
77498+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77499+ grsec_enable_symlinkown = 1;
77500+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
77501+#endif
77502+#ifdef CONFIG_GRKERNSEC_TPE
77503+ grsec_enable_tpe = 1;
77504+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
77505+#ifdef CONFIG_GRKERNSEC_TPE_ALL
77506+ grsec_enable_tpe_all = 1;
77507+#endif
77508+#endif
77509+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
77510+ grsec_enable_socket_all = 1;
77511+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
77512+#endif
77513+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
77514+ grsec_enable_socket_client = 1;
77515+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
77516+#endif
77517+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
77518+ grsec_enable_socket_server = 1;
77519+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
77520+#endif
77521+#endif
77522+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
77523+ grsec_deny_new_usb = 1;
77524+#endif
77525+
77526+ return;
77527+}
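
grsecurity_init() above turns each compiled-in feature on at boot; without CONFIG_GRKERNSEC_SYSCTL the grsec_lock flag additionally pins that configuration, since there is no runtime knob to change it. A toy sketch of the default-on pattern (FEATURE_X and HAVE_SYSCTL are hypothetical symbols):

#include <stdio.h>

static int feature_x;
static int cfg_lock;

static void init_defaults(void)
{
#ifdef FEATURE_X
	feature_x = 1;          /* compiled in: default on */
#endif
#ifndef HAVE_SYSCTL
	cfg_lock = 1;           /* no runtime knob: pin the configuration */
#endif
}

int main(void)
{
	init_defaults();        /* build with -DFEATURE_X to flip the toggle */
	printf("feature_x=%d lock=%d\n", feature_x, cfg_lock);
	return 0;
}
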
77528diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
77529new file mode 100644
77530index 0000000..1773300
77531--- /dev/null
77532+++ b/grsecurity/grsec_ipc.c
77533@@ -0,0 +1,48 @@
77534+#include <linux/kernel.h>
77535+#include <linux/mm.h>
77536+#include <linux/sched.h>
77537+#include <linux/file.h>
77538+#include <linux/ipc.h>
77539+#include <linux/ipc_namespace.h>
77540+#include <linux/grsecurity.h>
77541+#include <linux/grinternal.h>
77542+
77543+int
77544+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
77545+{
77546+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77547+ int write;
77548+ int orig_granted_mode;
77549+ kuid_t euid;
77550+ kgid_t egid;
77551+
77552+ if (!grsec_enable_harden_ipc)
77553+ return 1;
77554+
77555+ euid = current_euid();
77556+ egid = current_egid();
77557+
77558+ write = requested_mode & 00002;
77559+ orig_granted_mode = ipcp->mode;
77560+
77561+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
77562+ orig_granted_mode >>= 6;
77563+ else {
77564+ /* if likely wrong permissions, lock to user */
77565+ if (orig_granted_mode & 0007)
77566+ orig_granted_mode = 0;
77567+		/* otherwise do an egid-only check */
77568+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
77569+ orig_granted_mode >>= 3;
77570+ /* otherwise, no access */
77571+ else
77572+ orig_granted_mode = 0;
77573+ }
77574+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
77575+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
77576+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
77577+ return 0;
77578+ }
77579+#endif
77580+ return 1;
77581+}
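
gr_ipc_permitted() above recomputes SysV IPC access from the object's original mode: the owner triplet is selected by shifting right 6 bits, the group triplet by 3, and a world-accessible object owned by someone else is locked down to owner-only. Access is denied when the stock check granted the request but the recomputed mode would not. A userspace sketch of that recomputation (ownership flags are passed in rather than derived from credentials):

#include <stdio.h>

static int ipc_denied(unsigned int mode, int is_owner, int is_group,
		      unsigned int requested, unsigned int granted)
{
	unsigned int orig = mode;

	if (is_owner)
		orig >>= 6;             /* owner triplet */
	else if (orig & 0007)
		orig = 0;               /* world-accessible: lock to owner */
	else if (is_group)
		orig >>= 3;             /* group triplet */
	else
		orig = 0;

	/* denied if the stock check granted the request but the
	   recomputed mode would not */
	return !(requested & ~granted & 0007) && (requested & ~orig & 0007);
}

int main(void)
{
	/* 0666 segment read by a stranger: stock check grants, hardened denies */
	printf("%d\n", ipc_denied(0666, 0, 0, 0004, 0006));
	/* owner of a 0600 segment keeps read access */
	printf("%d\n", ipc_denied(0600, 1, 0, 0004, 0006));
	return 0;
}
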
77582diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
77583new file mode 100644
77584index 0000000..5e05e20
77585--- /dev/null
77586+++ b/grsecurity/grsec_link.c
77587@@ -0,0 +1,58 @@
77588+#include <linux/kernel.h>
77589+#include <linux/sched.h>
77590+#include <linux/fs.h>
77591+#include <linux/file.h>
77592+#include <linux/grinternal.h>
77593+
77594+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
77595+{
77596+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77597+ const struct inode *link_inode = link->dentry->d_inode;
77598+
77599+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
77600+ /* ignore root-owned links, e.g. /proc/self */
77601+ gr_is_global_nonroot(link_inode->i_uid) && target &&
77602+ !uid_eq(link_inode->i_uid, target->i_uid)) {
77603+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
77604+ return 1;
77605+ }
77606+#endif
77607+ return 0;
77608+}
77609+
77610+int
77611+gr_handle_follow_link(const struct inode *parent,
77612+ const struct inode *inode,
77613+ const struct dentry *dentry, const struct vfsmount *mnt)
77614+{
77615+#ifdef CONFIG_GRKERNSEC_LINK
77616+ const struct cred *cred = current_cred();
77617+
77618+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
77619+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
77620+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
77621+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
77622+ return -EACCES;
77623+ }
77624+#endif
77625+ return 0;
77626+}
77627+
77628+int
77629+gr_handle_hardlink(const struct dentry *dentry,
77630+ const struct vfsmount *mnt,
77631+ struct inode *inode, const int mode, const struct filename *to)
77632+{
77633+#ifdef CONFIG_GRKERNSEC_LINK
77634+ const struct cred *cred = current_cred();
77635+
77636+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
77637+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
77638+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
77639+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
77640+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
77641+ return -EPERM;
77642+ }
77643+#endif
77644+ return 0;
77645+}
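
gr_handle_follow_link() above is the classic restriction on following symlinks out of sticky, world-writable directories (/tmp-style): the follow is refused unless the link is owned by the follower or by the directory owner. An illustrative standalone predicate over raw mode bits, assuming nothing beyond the POSIX stat macros:

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>

static int deny_follow(mode_t dir_mode, uid_t dir_uid,
		       mode_t link_mode, uid_t link_uid, uid_t fsuid)
{
	return S_ISLNK(link_mode) &&
	       (dir_mode & S_ISVTX) && dir_uid != link_uid &&
	       (dir_mode & S_IWOTH) && fsuid != link_uid;
}

int main(void)
{
	/* /tmp-style directory (mode 1777, root-owned), link owned by uid 1000,
	 * followed by uid 1001: denied */
	printf("%d\n", deny_follow(S_IFDIR | 01777, 0, S_IFLNK | 0777, 1000, 1001)); /* 1 */
	/* the same link followed by its owner: allowed */
	printf("%d\n", deny_follow(S_IFDIR | 01777, 0, S_IFLNK | 0777, 1000, 1000)); /* 0 */
	return 0;
}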
77646diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
77647new file mode 100644
77648index 0000000..dbe0a6b
77649--- /dev/null
77650+++ b/grsecurity/grsec_log.c
77651@@ -0,0 +1,341 @@
77652+#include <linux/kernel.h>
77653+#include <linux/sched.h>
77654+#include <linux/file.h>
77655+#include <linux/tty.h>
77656+#include <linux/fs.h>
77657+#include <linux/mm.h>
77658+#include <linux/grinternal.h>
77659+
77660+#ifdef CONFIG_TREE_PREEMPT_RCU
77661+#define DISABLE_PREEMPT() preempt_disable()
77662+#define ENABLE_PREEMPT() preempt_enable()
77663+#else
77664+#define DISABLE_PREEMPT()
77665+#define ENABLE_PREEMPT()
77666+#endif
77667+
77668+#define BEGIN_LOCKS(x) \
77669+ DISABLE_PREEMPT(); \
77670+ rcu_read_lock(); \
77671+ read_lock(&tasklist_lock); \
77672+ read_lock(&grsec_exec_file_lock); \
77673+ if (x != GR_DO_AUDIT) \
77674+ spin_lock(&grsec_alert_lock); \
77675+ else \
77676+ spin_lock(&grsec_audit_lock)
77677+
77678+#define END_LOCKS(x) \
77679+ if (x != GR_DO_AUDIT) \
77680+ spin_unlock(&grsec_alert_lock); \
77681+ else \
77682+ spin_unlock(&grsec_audit_lock); \
77683+ read_unlock(&grsec_exec_file_lock); \
77684+ read_unlock(&tasklist_lock); \
77685+ rcu_read_unlock(); \
77686+ ENABLE_PREEMPT(); \
77687+ if (x == GR_DONT_AUDIT) \
77688+ gr_handle_alertkill(current)
77689+
77690+enum {
77691+ FLOODING,
77692+ NO_FLOODING
77693+};
77694+
77695+extern char *gr_alert_log_fmt;
77696+extern char *gr_audit_log_fmt;
77697+extern char *gr_alert_log_buf;
77698+extern char *gr_audit_log_buf;
77699+
77700+static int gr_log_start(int audit)
77701+{
77702+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
77703+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
77704+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77705+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
77706+ unsigned long curr_secs = get_seconds();
77707+
77708+ if (audit == GR_DO_AUDIT)
77709+ goto set_fmt;
77710+
77711+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
77712+ grsec_alert_wtime = curr_secs;
77713+ grsec_alert_fyet = 0;
77714+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
77715+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
77716+ grsec_alert_fyet++;
77717+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
77718+ grsec_alert_wtime = curr_secs;
77719+ grsec_alert_fyet++;
77720+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
77721+ return FLOODING;
77722+ }
77723+ else return FLOODING;
77724+
77725+set_fmt:
77726+#endif
77727+ memset(buf, 0, PAGE_SIZE);
77728+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
77729+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
77730+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
77731+ } else if (current->signal->curr_ip) {
77732+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
77733+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
77734+ } else if (gr_acl_is_enabled()) {
77735+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
77736+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
77737+ } else {
77738+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
77739+ strcpy(buf, fmt);
77740+ }
77741+
77742+ return NO_FLOODING;
77743+}
77744+
77745+static void gr_log_middle(int audit, const char *msg, va_list ap)
77746+ __attribute__ ((format (printf, 2, 0)));
77747+
77748+static void gr_log_middle(int audit, const char *msg, va_list ap)
77749+{
77750+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77751+ unsigned int len = strlen(buf);
77752+
77753+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
77754+
77755+ return;
77756+}
77757+
77758+static void gr_log_middle_varargs(int audit, const char *msg, ...)
77759+ __attribute__ ((format (printf, 2, 3)));
77760+
77761+static void gr_log_middle_varargs(int audit, const char *msg, ...)
77762+{
77763+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77764+ unsigned int len = strlen(buf);
77765+ va_list ap;
77766+
77767+ va_start(ap, msg);
77768+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
77769+ va_end(ap);
77770+
77771+ return;
77772+}
77773+
77774+static void gr_log_end(int audit, int append_default)
77775+{
77776+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77777+ if (append_default) {
77778+ struct task_struct *task = current;
77779+ struct task_struct *parent = task->real_parent;
77780+ const struct cred *cred = __task_cred(task);
77781+ const struct cred *pcred = __task_cred(parent);
77782+ unsigned int len = strlen(buf);
77783+
77784+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77785+ }
77786+
77787+ printk("%s\n", buf);
77788+
77789+ return;
77790+}
77791+
77792+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
77793+{
77794+ int logtype;
77795+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
77796+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
77797+ void *voidptr = NULL;
77798+ int num1 = 0, num2 = 0;
77799+ unsigned long ulong1 = 0, ulong2 = 0;
77800+ struct dentry *dentry = NULL;
77801+ struct vfsmount *mnt = NULL;
77802+ struct file *file = NULL;
77803+ struct task_struct *task = NULL;
77804+ struct vm_area_struct *vma = NULL;
77805+ const struct cred *cred, *pcred;
77806+ va_list ap;
77807+
77808+ BEGIN_LOCKS(audit);
77809+ logtype = gr_log_start(audit);
77810+ if (logtype == FLOODING) {
77811+ END_LOCKS(audit);
77812+ return;
77813+ }
77814+ va_start(ap, argtypes);
77815+ switch (argtypes) {
77816+ case GR_TTYSNIFF:
77817+ task = va_arg(ap, struct task_struct *);
77818+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
77819+ break;
77820+ case GR_SYSCTL_HIDDEN:
77821+ str1 = va_arg(ap, char *);
77822+ gr_log_middle_varargs(audit, msg, result, str1);
77823+ break;
77824+ case GR_RBAC:
77825+ dentry = va_arg(ap, struct dentry *);
77826+ mnt = va_arg(ap, struct vfsmount *);
77827+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
77828+ break;
77829+ case GR_RBAC_STR:
77830+ dentry = va_arg(ap, struct dentry *);
77831+ mnt = va_arg(ap, struct vfsmount *);
77832+ str1 = va_arg(ap, char *);
77833+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
77834+ break;
77835+ case GR_STR_RBAC:
77836+ str1 = va_arg(ap, char *);
77837+ dentry = va_arg(ap, struct dentry *);
77838+ mnt = va_arg(ap, struct vfsmount *);
77839+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
77840+ break;
77841+ case GR_RBAC_MODE2:
77842+ dentry = va_arg(ap, struct dentry *);
77843+ mnt = va_arg(ap, struct vfsmount *);
77844+ str1 = va_arg(ap, char *);
77845+ str2 = va_arg(ap, char *);
77846+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
77847+ break;
77848+ case GR_RBAC_MODE3:
77849+ dentry = va_arg(ap, struct dentry *);
77850+ mnt = va_arg(ap, struct vfsmount *);
77851+ str1 = va_arg(ap, char *);
77852+ str2 = va_arg(ap, char *);
77853+ str3 = va_arg(ap, char *);
77854+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
77855+ break;
77856+ case GR_FILENAME:
77857+ dentry = va_arg(ap, struct dentry *);
77858+ mnt = va_arg(ap, struct vfsmount *);
77859+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
77860+ break;
77861+ case GR_STR_FILENAME:
77862+ str1 = va_arg(ap, char *);
77863+ dentry = va_arg(ap, struct dentry *);
77864+ mnt = va_arg(ap, struct vfsmount *);
77865+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
77866+ break;
77867+ case GR_FILENAME_STR:
77868+ dentry = va_arg(ap, struct dentry *);
77869+ mnt = va_arg(ap, struct vfsmount *);
77870+ str1 = va_arg(ap, char *);
77871+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
77872+ break;
77873+ case GR_FILENAME_TWO_INT:
77874+ dentry = va_arg(ap, struct dentry *);
77875+ mnt = va_arg(ap, struct vfsmount *);
77876+ num1 = va_arg(ap, int);
77877+ num2 = va_arg(ap, int);
77878+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
77879+ break;
77880+ case GR_FILENAME_TWO_INT_STR:
77881+ dentry = va_arg(ap, struct dentry *);
77882+ mnt = va_arg(ap, struct vfsmount *);
77883+ num1 = va_arg(ap, int);
77884+ num2 = va_arg(ap, int);
77885+ str1 = va_arg(ap, char *);
77886+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
77887+ break;
77888+ case GR_TEXTREL:
77889+ file = va_arg(ap, struct file *);
77890+ ulong1 = va_arg(ap, unsigned long);
77891+ ulong2 = va_arg(ap, unsigned long);
77892+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
77893+ break;
77894+ case GR_PTRACE:
77895+ task = va_arg(ap, struct task_struct *);
77896+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
77897+ break;
77898+ case GR_RESOURCE:
77899+ task = va_arg(ap, struct task_struct *);
77900+ cred = __task_cred(task);
77901+ pcred = __task_cred(task->real_parent);
77902+ ulong1 = va_arg(ap, unsigned long);
77903+ str1 = va_arg(ap, char *);
77904+ ulong2 = va_arg(ap, unsigned long);
77905+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77906+ break;
77907+ case GR_CAP:
77908+ task = va_arg(ap, struct task_struct *);
77909+ cred = __task_cred(task);
77910+ pcred = __task_cred(task->real_parent);
77911+ str1 = va_arg(ap, char *);
77912+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77913+ break;
77914+ case GR_SIG:
77915+ str1 = va_arg(ap, char *);
77916+ voidptr = va_arg(ap, void *);
77917+ gr_log_middle_varargs(audit, msg, str1, voidptr);
77918+ break;
77919+ case GR_SIG2:
77920+ task = va_arg(ap, struct task_struct *);
77921+ cred = __task_cred(task);
77922+ pcred = __task_cred(task->real_parent);
77923+ num1 = va_arg(ap, int);
77924+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77925+ break;
77926+ case GR_CRASH1:
77927+ task = va_arg(ap, struct task_struct *);
77928+ cred = __task_cred(task);
77929+ pcred = __task_cred(task->real_parent);
77930+ ulong1 = va_arg(ap, unsigned long);
77931+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
77932+ break;
77933+ case GR_CRASH2:
77934+ task = va_arg(ap, struct task_struct *);
77935+ cred = __task_cred(task);
77936+ pcred = __task_cred(task->real_parent);
77937+ ulong1 = va_arg(ap, unsigned long);
77938+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
77939+ break;
77940+ case GR_RWXMAP:
77941+ file = va_arg(ap, struct file *);
77942+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
77943+ break;
77944+ case GR_RWXMAPVMA:
77945+ vma = va_arg(ap, struct vm_area_struct *);
77946+ if (vma->vm_file)
77947+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
77948+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
77949+ str1 = "<stack>";
77950+ else if (vma->vm_start <= current->mm->brk &&
77951+ vma->vm_end >= current->mm->start_brk)
77952+ str1 = "<heap>";
77953+ else
77954+ str1 = "<anonymous mapping>";
77955+ gr_log_middle_varargs(audit, msg, str1);
77956+ break;
77957+ case GR_PSACCT:
77958+ {
77959+ unsigned int wday, cday;
77960+ __u8 whr, chr;
77961+ __u8 wmin, cmin;
77962+ __u8 wsec, csec;
77963+ char cur_tty[64] = { 0 };
77964+ char parent_tty[64] = { 0 };
77965+
77966+ task = va_arg(ap, struct task_struct *);
77967+ wday = va_arg(ap, unsigned int);
77968+ cday = va_arg(ap, unsigned int);
77969+ whr = va_arg(ap, int);
77970+ chr = va_arg(ap, int);
77971+ wmin = va_arg(ap, int);
77972+ cmin = va_arg(ap, int);
77973+ wsec = va_arg(ap, int);
77974+ csec = va_arg(ap, int);
77975+ ulong1 = va_arg(ap, unsigned long);
77976+ cred = __task_cred(task);
77977+ pcred = __task_cred(task->real_parent);
77978+
77979+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77980+ }
77981+ break;
77982+ default:
77983+ gr_log_middle(audit, msg, ap);
77984+ }
77985+ va_end(ap);
77986+ // these don't need DEFAULTSECARGS printed on the end
77987+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
77988+ gr_log_end(audit, 0);
77989+ else
77990+ gr_log_end(audit, 1);
77991+ END_LOCKS(audit);
77992+}
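
gr_log_start() above rate-limits alerts with a windowed burst counter: roughly CONFIG_GRKERNSEC_FLOODBURST messages pass per CONFIG_GRKERNSEC_FLOODTIME-second window, the first suppressed message is announced once, and everything else is dropped until a full window has elapsed. A standalone sketch of that logic, with hypothetical constants and plain time_t comparisons in place of time_after():

#include <stdio.h>
#include <time.h>

#define FLOODTIME  10	/* seconds per window */
#define FLOODBURST 6	/* messages allowed per window */

static time_t wtime;		/* start of the current window */
static unsigned int fyet;	/* messages seen in the window */

static int log_allowed(time_t now)
{
	if (!wtime || now > wtime + FLOODTIME) {
		wtime = now;		/* new window: reset the counter */
		fyet = 0;
		return 1;
	}
	if (fyet < FLOODBURST) {
		fyet++;
		return 1;
	}
	if (fyet == FLOODBURST) {	/* first suppressed message: note it once */
		wtime = now;
		fyet++;
		printf("muting for %d seconds\n", FLOODTIME);
	}
	return 0;
}

int main(void)
{
	time_t t = time(NULL);
	for (int i = 0; i < 10; i++)
		printf("msg %d: %s\n", i, log_allowed(t) ? "logged" : "dropped");
	return 0;
}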
77993diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
77994new file mode 100644
77995index 0000000..0e39d8c
77996--- /dev/null
77997+++ b/grsecurity/grsec_mem.c
77998@@ -0,0 +1,48 @@
77999+#include <linux/kernel.h>
78000+#include <linux/sched.h>
78001+#include <linux/mm.h>
78002+#include <linux/mman.h>
78003+#include <linux/module.h>
78004+#include <linux/grinternal.h>
78005+
78006+void gr_handle_msr_write(void)
78007+{
78008+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
78009+ return;
78010+}
78011+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
78012+
78013+void
78014+gr_handle_ioperm(void)
78015+{
78016+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
78017+ return;
78018+}
78019+
78020+void
78021+gr_handle_iopl(void)
78022+{
78023+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
78024+ return;
78025+}
78026+
78027+void
78028+gr_handle_mem_readwrite(u64 from, u64 to)
78029+{
78030+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
78031+ return;
78032+}
78033+
78034+void
78035+gr_handle_vm86(void)
78036+{
78037+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
78038+ return;
78039+}
78040+
78041+void
78042+gr_log_badprocpid(const char *entry)
78043+{
78044+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
78045+ return;
78046+}
78047diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
78048new file mode 100644
78049index 0000000..6f9eb73
78050--- /dev/null
78051+++ b/grsecurity/grsec_mount.c
78052@@ -0,0 +1,65 @@
78053+#include <linux/kernel.h>
78054+#include <linux/sched.h>
78055+#include <linux/mount.h>
78056+#include <linux/major.h>
78057+#include <linux/grsecurity.h>
78058+#include <linux/grinternal.h>
78059+
78060+void
78061+gr_log_remount(const char *devname, const int retval)
78062+{
78063+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78064+ if (grsec_enable_mount && (retval >= 0))
78065+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
78066+#endif
78067+ return;
78068+}
78069+
78070+void
78071+gr_log_unmount(const char *devname, const int retval)
78072+{
78073+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78074+ if (grsec_enable_mount && (retval >= 0))
78075+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
78076+#endif
78077+ return;
78078+}
78079+
78080+void
78081+gr_log_mount(const char *from, struct path *to, const int retval)
78082+{
78083+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78084+ if (grsec_enable_mount && (retval >= 0))
78085+ gr_log_str_fs(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to->dentry, to->mnt);
78086+#endif
78087+ return;
78088+}
78089+
78090+int
78091+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
78092+{
78093+#ifdef CONFIG_GRKERNSEC_ROFS
78094+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
78095+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
78096+ return -EPERM;
78097+ } else
78098+ return 0;
78099+#endif
78100+ return 0;
78101+}
78102+
78103+int
78104+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
78105+{
78106+#ifdef CONFIG_GRKERNSEC_ROFS
78107+ struct inode *inode = dentry->d_inode;
78108+
78109+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
78110+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
78111+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
78112+ return -EPERM;
78113+ } else
78114+ return 0;
78115+#endif
78116+ return 0;
78117+}
78118diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
78119new file mode 100644
78120index 0000000..6ee9d50
78121--- /dev/null
78122+++ b/grsecurity/grsec_pax.c
78123@@ -0,0 +1,45 @@
78124+#include <linux/kernel.h>
78125+#include <linux/sched.h>
78126+#include <linux/mm.h>
78127+#include <linux/file.h>
78128+#include <linux/grinternal.h>
78129+#include <linux/grsecurity.h>
78130+
78131+void
78132+gr_log_textrel(struct vm_area_struct * vma)
78133+{
78134+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78135+ if (grsec_enable_log_rwxmaps)
78136+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
78137+#endif
78138+ return;
78139+}
78140+
78141+void gr_log_ptgnustack(struct file *file)
78142+{
78143+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78144+ if (grsec_enable_log_rwxmaps)
78145+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
78146+#endif
78147+ return;
78148+}
78149+
78150+void
78151+gr_log_rwxmmap(struct file *file)
78152+{
78153+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78154+ if (grsec_enable_log_rwxmaps)
78155+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
78156+#endif
78157+ return;
78158+}
78159+
78160+void
78161+gr_log_rwxmprotect(struct vm_area_struct *vma)
78162+{
78163+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78164+ if (grsec_enable_log_rwxmaps)
78165+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
78166+#endif
78167+ return;
78168+}
78169diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
78170new file mode 100644
78171index 0000000..2005a3a
78172--- /dev/null
78173+++ b/grsecurity/grsec_proc.c
78174@@ -0,0 +1,20 @@
78175+#include <linux/kernel.h>
78176+#include <linux/sched.h>
78177+#include <linux/grsecurity.h>
78178+#include <linux/grinternal.h>
78179+
78180+int gr_proc_is_restricted(void)
78181+{
78182+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78183+ const struct cred *cred = current_cred();
78184+#endif
78185+
78186+#ifdef CONFIG_GRKERNSEC_PROC_USER
78187+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
78188+ return -EACCES;
78189+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78190+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
78191+ return -EACCES;
78192+#endif
78193+ return 0;
78194+}
78195diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
78196new file mode 100644
78197index 0000000..f7f29aa
78198--- /dev/null
78199+++ b/grsecurity/grsec_ptrace.c
78200@@ -0,0 +1,30 @@
78201+#include <linux/kernel.h>
78202+#include <linux/sched.h>
78203+#include <linux/grinternal.h>
78204+#include <linux/security.h>
78205+
78206+void
78207+gr_audit_ptrace(struct task_struct *task)
78208+{
78209+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78210+ if (grsec_enable_audit_ptrace)
78211+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
78212+#endif
78213+ return;
78214+}
78215+
78216+int
78217+gr_ptrace_readexec(struct file *file, int unsafe_flags)
78218+{
78219+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78220+ const struct dentry *dentry = file->f_path.dentry;
78221+ const struct vfsmount *mnt = file->f_path.mnt;
78222+
78223+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
78224+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
78225+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
78226+ return -EACCES;
78227+ }
78228+#endif
78229+ return 0;
78230+}
78231diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
78232new file mode 100644
78233index 0000000..3860c7e
78234--- /dev/null
78235+++ b/grsecurity/grsec_sig.c
78236@@ -0,0 +1,236 @@
78237+#include <linux/kernel.h>
78238+#include <linux/sched.h>
78239+#include <linux/fs.h>
78240+#include <linux/delay.h>
78241+#include <linux/grsecurity.h>
78242+#include <linux/grinternal.h>
78243+#include <linux/hardirq.h>
78244+
78245+char *signames[] = {
78246+ [SIGSEGV] = "Segmentation fault",
78247+ [SIGILL] = "Illegal instruction",
78248+ [SIGABRT] = "Abort",
78249+ [SIGBUS] = "Invalid alignment/Bus error"
78250+};
78251+
78252+void
78253+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
78254+{
78255+#ifdef CONFIG_GRKERNSEC_SIGNAL
78256+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
78257+ (sig == SIGABRT) || (sig == SIGBUS))) {
78258+ if (task_pid_nr(t) == task_pid_nr(current)) {
78259+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
78260+ } else {
78261+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
78262+ }
78263+ }
78264+#endif
78265+ return;
78266+}
78267+
78268+int
78269+gr_handle_signal(const struct task_struct *p, const int sig)
78270+{
78271+#ifdef CONFIG_GRKERNSEC
78272+ /* ignore the 0 signal for protected task checks */
78273+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
78274+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
78275+ return -EPERM;
78276+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
78277+ return -EPERM;
78278+ }
78279+#endif
78280+ return 0;
78281+}
78282+
78283+#ifdef CONFIG_GRKERNSEC
78284+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
78285+
78286+int gr_fake_force_sig(int sig, struct task_struct *t)
78287+{
78288+ unsigned long int flags;
78289+ int ret, blocked, ignored;
78290+ struct k_sigaction *action;
78291+
78292+ spin_lock_irqsave(&t->sighand->siglock, flags);
78293+ action = &t->sighand->action[sig-1];
78294+ ignored = action->sa.sa_handler == SIG_IGN;
78295+ blocked = sigismember(&t->blocked, sig);
78296+ if (blocked || ignored) {
78297+ action->sa.sa_handler = SIG_DFL;
78298+ if (blocked) {
78299+ sigdelset(&t->blocked, sig);
78300+ recalc_sigpending_and_wake(t);
78301+ }
78302+ }
78303+ if (action->sa.sa_handler == SIG_DFL)
78304+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
78305+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
78306+
78307+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
78308+
78309+ return ret;
78310+}
78311+#endif
78312+
78313+#define GR_USER_BAN_TIME (15 * 60)
78314+#define GR_DAEMON_BRUTE_TIME (30 * 60)
78315+
78316+void gr_handle_brute_attach(int dumpable)
78317+{
78318+#ifdef CONFIG_GRKERNSEC_BRUTE
78319+ struct task_struct *p = current;
78320+ kuid_t uid = GLOBAL_ROOT_UID;
78321+ int daemon = 0;
78322+
78323+ if (!grsec_enable_brute)
78324+ return;
78325+
78326+ rcu_read_lock();
78327+ read_lock(&tasklist_lock);
78328+ read_lock(&grsec_exec_file_lock);
78329+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
78330+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
78331+ p->real_parent->brute = 1;
78332+ daemon = 1;
78333+ } else {
78334+ const struct cred *cred = __task_cred(p), *cred2;
78335+ struct task_struct *tsk, *tsk2;
78336+
78337+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
78338+ struct user_struct *user;
78339+
78340+ uid = cred->uid;
78341+
78342+ /* this reference is dropped on a later exec once the ban expires (see suid_ban_expired) */
78343+ user = find_user(uid);
78344+ if (user == NULL)
78345+ goto unlock;
78346+ user->suid_banned = 1;
78347+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
78348+ if (user->suid_ban_expires == ~0UL)
78349+ user->suid_ban_expires--;
78350+
78351+ /* only kill other threads of the same binary, from the same user */
78352+ do_each_thread(tsk2, tsk) {
78353+ cred2 = __task_cred(tsk);
78354+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
78355+ gr_fake_force_sig(SIGKILL, tsk);
78356+ } while_each_thread(tsk2, tsk);
78357+ }
78358+ }
78359+unlock:
78360+ read_unlock(&grsec_exec_file_lock);
78361+ read_unlock(&tasklist_lock);
78362+ rcu_read_unlock();
78363+
78364+ if (gr_is_global_nonroot(uid))
78365+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
78366+ else if (daemon)
78367+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
78368+
78369+#endif
78370+ return;
78371+}
78372+
78373+void gr_handle_brute_check(void)
78374+{
78375+#ifdef CONFIG_GRKERNSEC_BRUTE
78376+ struct task_struct *p = current;
78377+
78378+ if (unlikely(p->brute)) {
78379+ if (!grsec_enable_brute)
78380+ p->brute = 0;
78381+ else if (time_before(get_seconds(), p->brute_expires))
78382+ msleep(30 * 1000);
78383+ }
78384+#endif
78385+ return;
78386+}
78387+
78388+void gr_handle_kernel_exploit(void)
78389+{
78390+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78391+ const struct cred *cred;
78392+ struct task_struct *tsk, *tsk2;
78393+ struct user_struct *user;
78394+ kuid_t uid;
78395+
78396+ if (in_irq() || in_serving_softirq() || in_nmi())
78397+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
78398+
78399+ uid = current_uid();
78400+
78401+ if (gr_is_global_root(uid))
78402+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
78403+ else {
78404+ /* kill all the processes of this user, hold a reference
78405+ to their creds struct, and prevent them from creating
78406+ another process until system reset
78407+ */
78408+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
78409+ GR_GLOBAL_UID(uid));
78410+ /* we intentionally leak this ref */
78411+ user = get_uid(current->cred->user);
78412+ if (user)
78413+ user->kernel_banned = 1;
78414+
78415+ /* kill all processes of this user */
78416+ read_lock(&tasklist_lock);
78417+ do_each_thread(tsk2, tsk) {
78418+ cred = __task_cred(tsk);
78419+ if (uid_eq(cred->uid, uid))
78420+ gr_fake_force_sig(SIGKILL, tsk);
78421+ } while_each_thread(tsk2, tsk);
78422+ read_unlock(&tasklist_lock);
78423+ }
78424+#endif
78425+}
78426+
78427+#ifdef CONFIG_GRKERNSEC_BRUTE
78428+static bool suid_ban_expired(struct user_struct *user)
78429+{
78430+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
78431+ user->suid_banned = 0;
78432+ user->suid_ban_expires = 0;
78433+ free_uid(user);
78434+ return true;
78435+ }
78436+
78437+ return false;
78438+}
78439+#endif
78440+
78441+int gr_process_kernel_exec_ban(void)
78442+{
78443+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78444+ if (unlikely(current->cred->user->kernel_banned))
78445+ return -EPERM;
78446+#endif
78447+ return 0;
78448+}
78449+
78450+int gr_process_kernel_setuid_ban(struct user_struct *user)
78451+{
78452+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78453+ if (unlikely(user->kernel_banned))
78454+ gr_fake_force_sig(SIGKILL, current);
78455+#endif
78456+ return 0;
78457+}
78458+
78459+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
78460+{
78461+#ifdef CONFIG_GRKERNSEC_BRUTE
78462+ struct user_struct *user = current->cred->user;
78463+ if (unlikely(user->suid_banned)) {
78464+ if (suid_ban_expired(user))
78465+ return 0;
78466+ /* disallow execution of suid binaries only */
78467+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
78468+ return -EPERM;
78469+ }
78470+#endif
78471+ return 0;
78472+}
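
The brute-force deterrence above is simple time-based bookkeeping: a suspicious crash marks the user banned for GR_USER_BAN_TIME seconds, and a later exec of a suid binary is refused until the ban lapses, at which point it is cleared. A userspace sketch of that lifecycle, with hypothetical struct and field names mirroring user_struct:

#include <stdio.h>
#include <time.h>

#define USER_BAN_TIME (15 * 60)

struct user {
	int suid_banned;
	time_t suid_ban_expires;
};

static void ban_user(struct user *u, time_t now)
{
	u->suid_banned = 1;
	u->suid_ban_expires = now + USER_BAN_TIME;
}

static int suid_exec_allowed(struct user *u, time_t now)
{
	if (!u->suid_banned)
		return 1;
	if (now >= u->suid_ban_expires) {	/* ban lapsed: clear it */
		u->suid_banned = 0;
		u->suid_ban_expires = 0;
		return 1;
	}
	return 0;				/* still banned: -EPERM in the kernel */
}

int main(void)
{
	struct user u = { 0, 0 };
	time_t now = time(NULL);

	ban_user(&u, now);
	printf("%d\n", suid_exec_allowed(&u, now + 60));            /* 0: banned */
	printf("%d\n", suid_exec_allowed(&u, now + USER_BAN_TIME)); /* 1: expired */
	return 0;
}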
78473diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
78474new file mode 100644
78475index 0000000..e3650b6
78476--- /dev/null
78477+++ b/grsecurity/grsec_sock.c
78478@@ -0,0 +1,244 @@
78479+#include <linux/kernel.h>
78480+#include <linux/module.h>
78481+#include <linux/sched.h>
78482+#include <linux/file.h>
78483+#include <linux/net.h>
78484+#include <linux/in.h>
78485+#include <linux/ip.h>
78486+#include <net/sock.h>
78487+#include <net/inet_sock.h>
78488+#include <linux/grsecurity.h>
78489+#include <linux/grinternal.h>
78490+#include <linux/gracl.h>
78491+
78492+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
78493+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
78494+
78495+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
78496+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
78497+
78498+#ifdef CONFIG_UNIX_MODULE
78499+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
78500+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
78501+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
78502+EXPORT_SYMBOL_GPL(gr_handle_create);
78503+#endif
78504+
78505+#ifdef CONFIG_GRKERNSEC
78506+#define gr_conn_table_size 32749
78507+struct conn_table_entry {
78508+ struct conn_table_entry *next;
78509+ struct signal_struct *sig;
78510+};
78511+
78512+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
78513+DEFINE_SPINLOCK(gr_conn_table_lock);
78514+
78515+extern const char * gr_socktype_to_name(unsigned char type);
78516+extern const char * gr_proto_to_name(unsigned char proto);
78517+extern const char * gr_sockfamily_to_name(unsigned char family);
78518+
78519+static __inline__ int
78520+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
78521+{
78522+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
78523+}
78524+
78525+static __inline__ int
78526+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
78527+ __u16 sport, __u16 dport)
78528+{
78529+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
78530+ sig->gr_sport == sport && sig->gr_dport == dport))
78531+ return 1;
78532+ else
78533+ return 0;
78534+}
78535+
78536+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
78537+{
78538+ struct conn_table_entry **match;
78539+ unsigned int index;
78540+
78541+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78542+ sig->gr_sport, sig->gr_dport,
78543+ gr_conn_table_size);
78544+
78545+ newent->sig = sig;
78546+
78547+ match = &gr_conn_table[index];
78548+ newent->next = *match;
78549+ *match = newent;
78550+
78551+ return;
78552+}
78553+
78554+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
78555+{
78556+ struct conn_table_entry *match, *last = NULL;
78557+ unsigned int index;
78558+
78559+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78560+ sig->gr_sport, sig->gr_dport,
78561+ gr_conn_table_size);
78562+
78563+ match = gr_conn_table[index];
78564+ while (match && !conn_match(match->sig,
78565+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
78566+ sig->gr_dport)) {
78567+ last = match;
78568+ match = match->next;
78569+ }
78570+
78571+ if (match) {
78572+ if (last)
78573+ last->next = match->next;
78574+ else
78575+ gr_conn_table[index] = NULL;
78576+ kfree(match);
78577+ }
78578+
78579+ return;
78580+}
78581+
78582+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
78583+ __u16 sport, __u16 dport)
78584+{
78585+ struct conn_table_entry *match;
78586+ unsigned int index;
78587+
78588+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
78589+
78590+ match = gr_conn_table[index];
78591+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
78592+ match = match->next;
78593+
78594+ if (match)
78595+ return match->sig;
78596+ else
78597+ return NULL;
78598+}
78599+
78600+#endif
78601+
78602+void gr_update_task_in_ip_table(const struct inet_sock *inet)
78603+{
78604+#ifdef CONFIG_GRKERNSEC
78605+ struct signal_struct *sig = current->signal;
78606+ struct conn_table_entry *newent;
78607+
78608+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
78609+ if (newent == NULL)
78610+ return;
78611+ /* no bh lock needed since we are called with bh disabled */
78612+ spin_lock(&gr_conn_table_lock);
78613+ gr_del_task_from_ip_table_nolock(sig);
78614+ sig->gr_saddr = inet->inet_rcv_saddr;
78615+ sig->gr_daddr = inet->inet_daddr;
78616+ sig->gr_sport = inet->inet_sport;
78617+ sig->gr_dport = inet->inet_dport;
78618+ gr_add_to_task_ip_table_nolock(sig, newent);
78619+ spin_unlock(&gr_conn_table_lock);
78620+#endif
78621+ return;
78622+}
78623+
78624+void gr_del_task_from_ip_table(struct task_struct *task)
78625+{
78626+#ifdef CONFIG_GRKERNSEC
78627+ spin_lock_bh(&gr_conn_table_lock);
78628+ gr_del_task_from_ip_table_nolock(task->signal);
78629+ spin_unlock_bh(&gr_conn_table_lock);
78630+#endif
78631+ return;
78632+}
78633+
78634+void
78635+gr_attach_curr_ip(const struct sock *sk)
78636+{
78637+#ifdef CONFIG_GRKERNSEC
78638+ struct signal_struct *p, *set;
78639+ const struct inet_sock *inet = inet_sk(sk);
78640+
78641+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
78642+ return;
78643+
78644+ set = current->signal;
78645+
78646+ spin_lock_bh(&gr_conn_table_lock);
78647+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
78648+ inet->inet_dport, inet->inet_sport);
78649+ if (unlikely(p != NULL)) {
78650+ set->curr_ip = p->curr_ip;
78651+ set->used_accept = 1;
78652+ gr_del_task_from_ip_table_nolock(p);
78653+ spin_unlock_bh(&gr_conn_table_lock);
78654+ return;
78655+ }
78656+ spin_unlock_bh(&gr_conn_table_lock);
78657+
78658+ set->curr_ip = inet->inet_daddr;
78659+ set->used_accept = 1;
78660+#endif
78661+ return;
78662+}
78663+
78664+int
78665+gr_handle_sock_all(const int family, const int type, const int protocol)
78666+{
78667+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78668+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
78669+ (family != AF_UNIX)) {
78670+ if (family == AF_INET)
78671+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
78672+ else
78673+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
78674+ return -EACCES;
78675+ }
78676+#endif
78677+ return 0;
78678+}
78679+
78680+int
78681+gr_handle_sock_server(const struct sockaddr *sck)
78682+{
78683+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78684+ if (grsec_enable_socket_server &&
78685+ in_group_p(grsec_socket_server_gid) &&
78686+ sck && (sck->sa_family != AF_UNIX) &&
78687+ (sck->sa_family != AF_LOCAL)) {
78688+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
78689+ return -EACCES;
78690+ }
78691+#endif
78692+ return 0;
78693+}
78694+
78695+int
78696+gr_handle_sock_server_other(const struct sock *sck)
78697+{
78698+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78699+ if (grsec_enable_socket_server &&
78700+ in_group_p(grsec_socket_server_gid) &&
78701+ sck && (sck->sk_family != AF_UNIX) &&
78702+ (sck->sk_family != AF_LOCAL)) {
78703+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
78704+ return -EACCES;
78705+ }
78706+#endif
78707+ return 0;
78708+}
78709+
78710+int
78711+gr_handle_sock_client(const struct sockaddr *sck)
78712+{
78713+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78714+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
78715+ sck && (sck->sa_family != AF_UNIX) &&
78716+ (sck->sa_family != AF_LOCAL)) {
78717+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
78718+ return -EACCES;
78719+ }
78720+#endif
78721+ return 0;
78722+}
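
The connection table above is a fixed array of singly linked buckets keyed by the (saddr, daddr, sport, dport) tuple, with the same hash for insert, lookup, and delete. A userspace sketch of that structure using the patch's conn_hash() formula and a hypothetical simplified entry type:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define TABLE_SIZE 32749	/* prime, as in the patch */

struct entry {
	struct entry *next;
	uint32_t saddr, daddr;
	uint16_t sport, dport;
};

static struct entry *table[TABLE_SIZE];

static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
			      uint16_t sport, uint16_t dport)
{
	return (daddr + saddr + ((uint32_t)sport << 8) + ((uint32_t)dport << 16)) % TABLE_SIZE;
}

static void insert(uint32_t saddr, uint32_t daddr, uint16_t sport, uint16_t dport)
{
	struct entry *e = malloc(sizeof(*e));
	unsigned int i = conn_hash(saddr, daddr, sport, dport);

	if (!e)
		return;
	e->saddr = saddr; e->daddr = daddr;
	e->sport = sport; e->dport = dport;
	e->next = table[i];	/* push onto the head of the bucket's chain */
	table[i] = e;
}

static struct entry *lookup(uint32_t saddr, uint32_t daddr, uint16_t sport, uint16_t dport)
{
	struct entry *e = table[conn_hash(saddr, daddr, sport, dport)];

	while (e && !(e->saddr == saddr && e->daddr == daddr &&
		      e->sport == sport && e->dport == dport))
		e = e->next;
	return e;
}

int main(void)
{
	insert(0x0a000001, 0x0a000002, 1234, 80);
	printf("%s\n", lookup(0x0a000001, 0x0a000002, 1234, 80) ? "found" : "missing");
	printf("%s\n", lookup(0x0a000001, 0x0a000002, 1234, 81) ? "found" : "missing");
	return 0;
}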
78723diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
78724new file mode 100644
78725index 0000000..cce889e
78726--- /dev/null
78727+++ b/grsecurity/grsec_sysctl.c
78728@@ -0,0 +1,488 @@
78729+#include <linux/kernel.h>
78730+#include <linux/sched.h>
78731+#include <linux/sysctl.h>
78732+#include <linux/grsecurity.h>
78733+#include <linux/grinternal.h>
78734+
78735+int
78736+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
78737+{
78738+#ifdef CONFIG_GRKERNSEC_SYSCTL
78739+ if (dirname == NULL || name == NULL)
78740+ return 0;
78741+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
78742+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
78743+ return -EACCES;
78744+ }
78745+#endif
78746+ return 0;
78747+}
78748+
78749+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
78750+static int __maybe_unused __read_only one = 1;
78751+#endif
78752+
78753+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
78754+ defined(CONFIG_GRKERNSEC_DENYUSB)
78755+struct ctl_table grsecurity_table[] = {
78756+#ifdef CONFIG_GRKERNSEC_SYSCTL
78757+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
78758+#ifdef CONFIG_GRKERNSEC_IO
78759+ {
78760+ .procname = "disable_priv_io",
78761+ .data = &grsec_disable_privio,
78762+ .maxlen = sizeof(int),
78763+ .mode = 0600,
78764+ .proc_handler = &proc_dointvec,
78765+ },
78766+#endif
78767+#endif
78768+#ifdef CONFIG_GRKERNSEC_LINK
78769+ {
78770+ .procname = "linking_restrictions",
78771+ .data = &grsec_enable_link,
78772+ .maxlen = sizeof(int),
78773+ .mode = 0600,
78774+ .proc_handler = &proc_dointvec,
78775+ },
78776+#endif
78777+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78778+ {
78779+ .procname = "enforce_symlinksifowner",
78780+ .data = &grsec_enable_symlinkown,
78781+ .maxlen = sizeof(int),
78782+ .mode = 0600,
78783+ .proc_handler = &proc_dointvec,
78784+ },
78785+ {
78786+ .procname = "symlinkown_gid",
78787+ .data = &grsec_symlinkown_gid,
78788+ .maxlen = sizeof(int),
78789+ .mode = 0600,
78790+ .proc_handler = &proc_dointvec,
78791+ },
78792+#endif
78793+#ifdef CONFIG_GRKERNSEC_BRUTE
78794+ {
78795+ .procname = "deter_bruteforce",
78796+ .data = &grsec_enable_brute,
78797+ .maxlen = sizeof(int),
78798+ .mode = 0600,
78799+ .proc_handler = &proc_dointvec,
78800+ },
78801+#endif
78802+#ifdef CONFIG_GRKERNSEC_FIFO
78803+ {
78804+ .procname = "fifo_restrictions",
78805+ .data = &grsec_enable_fifo,
78806+ .maxlen = sizeof(int),
78807+ .mode = 0600,
78808+ .proc_handler = &proc_dointvec,
78809+ },
78810+#endif
78811+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78812+ {
78813+ .procname = "ptrace_readexec",
78814+ .data = &grsec_enable_ptrace_readexec,
78815+ .maxlen = sizeof(int),
78816+ .mode = 0600,
78817+ .proc_handler = &proc_dointvec,
78818+ },
78819+#endif
78820+#ifdef CONFIG_GRKERNSEC_SETXID
78821+ {
78822+ .procname = "consistent_setxid",
78823+ .data = &grsec_enable_setxid,
78824+ .maxlen = sizeof(int),
78825+ .mode = 0600,
78826+ .proc_handler = &proc_dointvec,
78827+ },
78828+#endif
78829+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
78830+ {
78831+ .procname = "ip_blackhole",
78832+ .data = &grsec_enable_blackhole,
78833+ .maxlen = sizeof(int),
78834+ .mode = 0600,
78835+ .proc_handler = &proc_dointvec,
78836+ },
78837+ {
78838+ .procname = "lastack_retries",
78839+ .data = &grsec_lastack_retries,
78840+ .maxlen = sizeof(int),
78841+ .mode = 0600,
78842+ .proc_handler = &proc_dointvec,
78843+ },
78844+#endif
78845+#ifdef CONFIG_GRKERNSEC_EXECLOG
78846+ {
78847+ .procname = "exec_logging",
78848+ .data = &grsec_enable_execlog,
78849+ .maxlen = sizeof(int),
78850+ .mode = 0600,
78851+ .proc_handler = &proc_dointvec,
78852+ },
78853+#endif
78854+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78855+ {
78856+ .procname = "rwxmap_logging",
78857+ .data = &grsec_enable_log_rwxmaps,
78858+ .maxlen = sizeof(int),
78859+ .mode = 0600,
78860+ .proc_handler = &proc_dointvec,
78861+ },
78862+#endif
78863+#ifdef CONFIG_GRKERNSEC_SIGNAL
78864+ {
78865+ .procname = "signal_logging",
78866+ .data = &grsec_enable_signal,
78867+ .maxlen = sizeof(int),
78868+ .mode = 0600,
78869+ .proc_handler = &proc_dointvec,
78870+ },
78871+#endif
78872+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78873+ {
78874+ .procname = "forkfail_logging",
78875+ .data = &grsec_enable_forkfail,
78876+ .maxlen = sizeof(int),
78877+ .mode = 0600,
78878+ .proc_handler = &proc_dointvec,
78879+ },
78880+#endif
78881+#ifdef CONFIG_GRKERNSEC_TIME
78882+ {
78883+ .procname = "timechange_logging",
78884+ .data = &grsec_enable_time,
78885+ .maxlen = sizeof(int),
78886+ .mode = 0600,
78887+ .proc_handler = &proc_dointvec,
78888+ },
78889+#endif
78890+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
78891+ {
78892+ .procname = "chroot_deny_shmat",
78893+ .data = &grsec_enable_chroot_shmat,
78894+ .maxlen = sizeof(int),
78895+ .mode = 0600,
78896+ .proc_handler = &proc_dointvec,
78897+ },
78898+#endif
78899+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
78900+ {
78901+ .procname = "chroot_deny_unix",
78902+ .data = &grsec_enable_chroot_unix,
78903+ .maxlen = sizeof(int),
78904+ .mode = 0600,
78905+ .proc_handler = &proc_dointvec,
78906+ },
78907+#endif
78908+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
78909+ {
78910+ .procname = "chroot_deny_mount",
78911+ .data = &grsec_enable_chroot_mount,
78912+ .maxlen = sizeof(int),
78913+ .mode = 0600,
78914+ .proc_handler = &proc_dointvec,
78915+ },
78916+#endif
78917+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
78918+ {
78919+ .procname = "chroot_deny_fchdir",
78920+ .data = &grsec_enable_chroot_fchdir,
78921+ .maxlen = sizeof(int),
78922+ .mode = 0600,
78923+ .proc_handler = &proc_dointvec,
78924+ },
78925+#endif
78926+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
78927+ {
78928+ .procname = "chroot_deny_chroot",
78929+ .data = &grsec_enable_chroot_double,
78930+ .maxlen = sizeof(int),
78931+ .mode = 0600,
78932+ .proc_handler = &proc_dointvec,
78933+ },
78934+#endif
78935+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
78936+ {
78937+ .procname = "chroot_deny_pivot",
78938+ .data = &grsec_enable_chroot_pivot,
78939+ .maxlen = sizeof(int),
78940+ .mode = 0600,
78941+ .proc_handler = &proc_dointvec,
78942+ },
78943+#endif
78944+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
78945+ {
78946+ .procname = "chroot_enforce_chdir",
78947+ .data = &grsec_enable_chroot_chdir,
78948+ .maxlen = sizeof(int),
78949+ .mode = 0600,
78950+ .proc_handler = &proc_dointvec,
78951+ },
78952+#endif
78953+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
78954+ {
78955+ .procname = "chroot_deny_chmod",
78956+ .data = &grsec_enable_chroot_chmod,
78957+ .maxlen = sizeof(int),
78958+ .mode = 0600,
78959+ .proc_handler = &proc_dointvec,
78960+ },
78961+#endif
78962+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
78963+ {
78964+ .procname = "chroot_deny_mknod",
78965+ .data = &grsec_enable_chroot_mknod,
78966+ .maxlen = sizeof(int),
78967+ .mode = 0600,
78968+ .proc_handler = &proc_dointvec,
78969+ },
78970+#endif
78971+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
78972+ {
78973+ .procname = "chroot_restrict_nice",
78974+ .data = &grsec_enable_chroot_nice,
78975+ .maxlen = sizeof(int),
78976+ .mode = 0600,
78977+ .proc_handler = &proc_dointvec,
78978+ },
78979+#endif
78980+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
78981+ {
78982+ .procname = "chroot_execlog",
78983+ .data = &grsec_enable_chroot_execlog,
78984+ .maxlen = sizeof(int),
78985+ .mode = 0600,
78986+ .proc_handler = &proc_dointvec,
78987+ },
78988+#endif
78989+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
78990+ {
78991+ .procname = "chroot_caps",
78992+ .data = &grsec_enable_chroot_caps,
78993+ .maxlen = sizeof(int),
78994+ .mode = 0600,
78995+ .proc_handler = &proc_dointvec,
78996+ },
78997+#endif
78998+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
78999+ {
79000+ .procname = "chroot_deny_bad_rename",
79001+ .data = &grsec_enable_chroot_rename,
79002+ .maxlen = sizeof(int),
79003+ .mode = 0600,
79004+ .proc_handler = &proc_dointvec,
79005+ },
79006+#endif
79007+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
79008+ {
79009+ .procname = "chroot_deny_sysctl",
79010+ .data = &grsec_enable_chroot_sysctl,
79011+ .maxlen = sizeof(int),
79012+ .mode = 0600,
79013+ .proc_handler = &proc_dointvec,
79014+ },
79015+#endif
79016+#ifdef CONFIG_GRKERNSEC_TPE
79017+ {
79018+ .procname = "tpe",
79019+ .data = &grsec_enable_tpe,
79020+ .maxlen = sizeof(int),
79021+ .mode = 0600,
79022+ .proc_handler = &proc_dointvec,
79023+ },
79024+ {
79025+ .procname = "tpe_gid",
79026+ .data = &grsec_tpe_gid,
79027+ .maxlen = sizeof(int),
79028+ .mode = 0600,
79029+ .proc_handler = &proc_dointvec,
79030+ },
79031+#endif
79032+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
79033+ {
79034+ .procname = "tpe_invert",
79035+ .data = &grsec_enable_tpe_invert,
79036+ .maxlen = sizeof(int),
79037+ .mode = 0600,
79038+ .proc_handler = &proc_dointvec,
79039+ },
79040+#endif
79041+#ifdef CONFIG_GRKERNSEC_TPE_ALL
79042+ {
79043+ .procname = "tpe_restrict_all",
79044+ .data = &grsec_enable_tpe_all,
79045+ .maxlen = sizeof(int),
79046+ .mode = 0600,
79047+ .proc_handler = &proc_dointvec,
79048+ },
79049+#endif
79050+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
79051+ {
79052+ .procname = "socket_all",
79053+ .data = &grsec_enable_socket_all,
79054+ .maxlen = sizeof(int),
79055+ .mode = 0600,
79056+ .proc_handler = &proc_dointvec,
79057+ },
79058+ {
79059+ .procname = "socket_all_gid",
79060+ .data = &grsec_socket_all_gid,
79061+ .maxlen = sizeof(int),
79062+ .mode = 0600,
79063+ .proc_handler = &proc_dointvec,
79064+ },
79065+#endif
79066+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
79067+ {
79068+ .procname = "socket_client",
79069+ .data = &grsec_enable_socket_client,
79070+ .maxlen = sizeof(int),
79071+ .mode = 0600,
79072+ .proc_handler = &proc_dointvec,
79073+ },
79074+ {
79075+ .procname = "socket_client_gid",
79076+ .data = &grsec_socket_client_gid,
79077+ .maxlen = sizeof(int),
79078+ .mode = 0600,
79079+ .proc_handler = &proc_dointvec,
79080+ },
79081+#endif
79082+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
79083+ {
79084+ .procname = "socket_server",
79085+ .data = &grsec_enable_socket_server,
79086+ .maxlen = sizeof(int),
79087+ .mode = 0600,
79088+ .proc_handler = &proc_dointvec,
79089+ },
79090+ {
79091+ .procname = "socket_server_gid",
79092+ .data = &grsec_socket_server_gid,
79093+ .maxlen = sizeof(int),
79094+ .mode = 0600,
79095+ .proc_handler = &proc_dointvec,
79096+ },
79097+#endif
79098+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
79099+ {
79100+ .procname = "audit_group",
79101+ .data = &grsec_enable_group,
79102+ .maxlen = sizeof(int),
79103+ .mode = 0600,
79104+ .proc_handler = &proc_dointvec,
79105+ },
79106+ {
79107+ .procname = "audit_gid",
79108+ .data = &grsec_audit_gid,
79109+ .maxlen = sizeof(int),
79110+ .mode = 0600,
79111+ .proc_handler = &proc_dointvec,
79112+ },
79113+#endif
79114+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
79115+ {
79116+ .procname = "audit_chdir",
79117+ .data = &grsec_enable_chdir,
79118+ .maxlen = sizeof(int),
79119+ .mode = 0600,
79120+ .proc_handler = &proc_dointvec,
79121+ },
79122+#endif
79123+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79124+ {
79125+ .procname = "audit_mount",
79126+ .data = &grsec_enable_mount,
79127+ .maxlen = sizeof(int),
79128+ .mode = 0600,
79129+ .proc_handler = &proc_dointvec,
79130+ },
79131+#endif
79132+#ifdef CONFIG_GRKERNSEC_DMESG
79133+ {
79134+ .procname = "dmesg",
79135+ .data = &grsec_enable_dmesg,
79136+ .maxlen = sizeof(int),
79137+ .mode = 0600,
79138+ .proc_handler = &proc_dointvec,
79139+ },
79140+#endif
79141+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
79142+ {
79143+ .procname = "chroot_findtask",
79144+ .data = &grsec_enable_chroot_findtask,
79145+ .maxlen = sizeof(int),
79146+ .mode = 0600,
79147+ .proc_handler = &proc_dointvec,
79148+ },
79149+#endif
79150+#ifdef CONFIG_GRKERNSEC_RESLOG
79151+ {
79152+ .procname = "resource_logging",
79153+ .data = &grsec_resource_logging,
79154+ .maxlen = sizeof(int),
79155+ .mode = 0600,
79156+ .proc_handler = &proc_dointvec,
79157+ },
79158+#endif
79159+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
79160+ {
79161+ .procname = "audit_ptrace",
79162+ .data = &grsec_enable_audit_ptrace,
79163+ .maxlen = sizeof(int),
79164+ .mode = 0600,
79165+ .proc_handler = &proc_dointvec,
79166+ },
79167+#endif
79168+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
79169+ {
79170+ .procname = "harden_ptrace",
79171+ .data = &grsec_enable_harden_ptrace,
79172+ .maxlen = sizeof(int),
79173+ .mode = 0600,
79174+ .proc_handler = &proc_dointvec,
79175+ },
79176+#endif
79177+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
79178+ {
79179+ .procname = "harden_ipc",
79180+ .data = &grsec_enable_harden_ipc,
79181+ .maxlen = sizeof(int),
79182+ .mode = 0600,
79183+ .proc_handler = &proc_dointvec,
79184+ },
79185+#endif
79186+ {
79187+ .procname = "grsec_lock",
79188+ .data = &grsec_lock,
79189+ .maxlen = sizeof(int),
79190+ .mode = 0600,
79191+ .proc_handler = &proc_dointvec,
79192+ },
79193+#endif
79194+#ifdef CONFIG_GRKERNSEC_ROFS
79195+ {
79196+ .procname = "romount_protect",
79197+ .data = &grsec_enable_rofs,
79198+ .maxlen = sizeof(int),
79199+ .mode = 0600,
79200+ .proc_handler = &proc_dointvec_minmax,
79201+ .extra1 = &one,
79202+ .extra2 = &one,
79203+ },
79204+#endif
79205+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
79206+ {
79207+ .procname = "deny_new_usb",
79208+ .data = &grsec_deny_new_usb,
79209+ .maxlen = sizeof(int),
79210+ .mode = 0600,
79211+ .proc_handler = &proc_dointvec,
79212+ },
79213+#endif
79214+ { }
79215+};
79216+#endif
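
Each procname in the table above surfaces to userspace as a mode-0600 integer file handled by proc_dointvec, and once grsec_lock is set to 1, gr_handle_sysctl_mod() at the top of this file refuses further writes to the whole directory. A sketch of toggling one of these knobs from a program, assuming the usual kernel.grsecurity.* sysctl path the patch registers elsewhere:

#include <stdio.h>

int main(void)
{
	/* equivalent to: sysctl -w kernel.grsecurity.deter_bruteforce=1 */
	FILE *f = fopen("/proc/sys/kernel/grsecurity/deter_bruteforce", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "1\n");
	fclose(f);
	return 0;
}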
79217diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
79218new file mode 100644
79219index 0000000..61b514e
79220--- /dev/null
79221+++ b/grsecurity/grsec_time.c
79222@@ -0,0 +1,16 @@
79223+#include <linux/kernel.h>
79224+#include <linux/sched.h>
79225+#include <linux/grinternal.h>
79226+#include <linux/module.h>
79227+
79228+void
79229+gr_log_timechange(void)
79230+{
79231+#ifdef CONFIG_GRKERNSEC_TIME
79232+ if (grsec_enable_time)
79233+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
79234+#endif
79235+ return;
79236+}
79237+
79238+EXPORT_SYMBOL_GPL(gr_log_timechange);
79239diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
79240new file mode 100644
79241index 0000000..d1953de
79242--- /dev/null
79243+++ b/grsecurity/grsec_tpe.c
79244@@ -0,0 +1,78 @@
79245+#include <linux/kernel.h>
79246+#include <linux/sched.h>
79247+#include <linux/file.h>
79248+#include <linux/fs.h>
79249+#include <linux/grinternal.h>
79250+
79251+extern int gr_acl_tpe_check(void);
79252+
79253+int
79254+gr_tpe_allow(const struct file *file)
79255+{
79256+#ifdef CONFIG_GRKERNSEC
79257+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
79258+ struct inode *file_inode = file->f_path.dentry->d_inode;
79259+ const struct cred *cred = current_cred();
79260+ char *msg = NULL;
79261+ char *msg2 = NULL;
79262+
79263+ // never restrict root
79264+ if (gr_is_global_root(cred->uid))
79265+ return 1;
79266+
79267+ if (grsec_enable_tpe) {
79268+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
79269+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
79270+ msg = "not being in trusted group";
79271+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
79272+ msg = "being in untrusted group";
79273+#else
79274+ if (in_group_p(grsec_tpe_gid))
79275+ msg = "being in untrusted group";
79276+#endif
79277+ }
79278+ if (!msg && gr_acl_tpe_check())
79279+ msg = "being in untrusted role";
79280+
79281+ // not in any affected group/role
79282+ if (!msg)
79283+ goto next_check;
79284+
79285+ if (gr_is_global_nonroot(inode->i_uid))
79286+ msg2 = "file in non-root-owned directory";
79287+ else if (inode->i_mode & S_IWOTH)
79288+ msg2 = "file in world-writable directory";
79289+ else if (inode->i_mode & S_IWGRP)
79290+ msg2 = "file in group-writable directory";
79291+ else if (file_inode->i_mode & S_IWOTH)
79292+ msg2 = "file is world-writable";
79293+
79294+ if (msg && msg2) {
79295+ char fullmsg[70] = {0};
79296+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
79297+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
79298+ return 0;
79299+ }
79300+ msg = NULL;
79301+next_check:
79302+#ifdef CONFIG_GRKERNSEC_TPE_ALL
79303+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
79304+ return 1;
79305+
79306+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
79307+ msg = "directory not owned by user";
79308+ else if (inode->i_mode & S_IWOTH)
79309+ msg = "file in world-writable directory";
79310+ else if (inode->i_mode & S_IWGRP)
79311+ msg = "file in group-writable directory";
79312+ else if (file_inode->i_mode & S_IWOTH)
79313+ msg = "file is world-writable";
79314+
79315+ if (msg) {
79316+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
79317+ return 0;
79318+ }
79319+#endif
79320+#endif
79321+ return 1;
79322+}
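
A rough userspace approximation of the directory-trust checks gr_tpe_allow() applies above (the CONFIG_GRKERNSEC_TPE_ALL branch), using stat(2) on a path instead of inode pointers. Illustrative only; the kernel version also consults group membership and RBAC roles:

#include <stdio.h>
#include <string.h>
#include <libgen.h>
#include <unistd.h>
#include <sys/stat.h>

static const char *tpe_reason(const char *path, uid_t uid)
{
	char dirbuf[4096];
	struct stat dir, file;

	if (stat(path, &file))
		return "stat failed";
	strncpy(dirbuf, path, sizeof(dirbuf) - 1);
	dirbuf[sizeof(dirbuf) - 1] = '\0';
	if (stat(dirname(dirbuf), &dir))
		return "stat failed";

	if (dir.st_uid != 0 && dir.st_uid != uid)
		return "directory not owned by user";
	if (dir.st_mode & S_IWOTH)
		return "file in world-writable directory";
	if (dir.st_mode & S_IWGRP)
		return "file in group-writable directory";
	if (file.st_mode & S_IWOTH)
		return "file is world-writable";
	return NULL;	/* trusted */
}

int main(int argc, char **argv)
{
	const char *why = tpe_reason(argc > 1 ? argv[1] : "/bin/ls", getuid());
	printf("%s\n", why ? why : "would be allowed");
	return 0;
}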
79323diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
79324new file mode 100644
79325index 0000000..ae02d8e
79326--- /dev/null
79327+++ b/grsecurity/grsec_usb.c
79328@@ -0,0 +1,15 @@
79329+#include <linux/kernel.h>
79330+#include <linux/grinternal.h>
79331+#include <linux/module.h>
79332+
79333+int gr_handle_new_usb(void)
79334+{
79335+#ifdef CONFIG_GRKERNSEC_DENYUSB
79336+ if (grsec_deny_new_usb) {
79337+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
79338+ return 1;
79339+ }
79340+#endif
79341+ return 0;
79342+}
79343+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
79344diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
79345new file mode 100644
79346index 0000000..158b330
79347--- /dev/null
79348+++ b/grsecurity/grsum.c
79349@@ -0,0 +1,64 @@
79350+#include <linux/err.h>
79351+#include <linux/kernel.h>
79352+#include <linux/sched.h>
79353+#include <linux/mm.h>
79354+#include <linux/scatterlist.h>
79355+#include <linux/crypto.h>
79356+#include <linux/gracl.h>
79357+
79358+
79359+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
79360+#error "crypto and sha256 must be built into the kernel"
79361+#endif
79362+
79363+int
79364+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
79365+{
79366+ struct crypto_hash *tfm;
79367+ struct hash_desc desc;
79368+ struct scatterlist sg[2];
79369+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
79370+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
79371+ unsigned long *sumptr = (unsigned long *)sum;
79372+ int cryptres;
79373+ int retval = 1;
79374+ volatile int mismatched = 0;
79375+ volatile int dummy = 0;
79376+ unsigned int i;
79377+
79378+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
79379+ if (IS_ERR(tfm)) {
79380+ /* should never happen, since sha256 should be built in */
79381+ memset(entry->pw, 0, GR_PW_LEN);
79382+ return 1;
79383+ }
79384+
79385+ sg_init_table(sg, 2);
79386+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
79387+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
79388+
79389+ desc.tfm = tfm;
79390+ desc.flags = 0;
79391+
79392+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
79393+ temp_sum);
79394+
79395+ memset(entry->pw, 0, GR_PW_LEN);
79396+
79397+ if (cryptres)
79398+ goto out;
79399+
79400+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
79401+ if (sumptr[i] != tmpsumptr[i])
79402+ mismatched = 1;
79403+ else
79404+ dummy = 1; // waste a cycle
79405+
79406+ if (!mismatched)
79407+ retval = dummy - 1;
79408+
79409+out:
79410+ crypto_free_hash(tfm);
79411+
79412+ return retval;
79413+}
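
chkpw() verifies the RBAC admin password: it digests the salt followed by the supplied cleartext with in-kernel SHA-256, zeroes the cleartext immediately, and then compares all digest words without an early exit; the volatile mismatched/dummy pair keeps the compiler from turning the loop into a short-circuiting memcmp, narrowing the timing side channel. The construction is plain SHA256(salt || password), so it can be reproduced in userland; the sketch below uses OpenSSL, and the 16/32-byte GR_SALT_LEN/GR_SHA_LEN values are assumed from gracl.h rather than shown in this hunk:

/* Userland sketch of the gradm password digest: SHA256(salt || password).
 * Build with: cc demo.c -lcrypto */
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>

#define GR_SALT_LEN 16	/* assumed from gracl.h */
#define GR_SHA_LEN  32	/* SHA-256 digest size */

static void gr_sum(const unsigned char *salt, const char *pw,
		   unsigned char out[GR_SHA_LEN])
{
	SHA256_CTX ctx;

	SHA256_Init(&ctx);
	SHA256_Update(&ctx, salt, GR_SALT_LEN);	/* salt first, as in chkpw() */
	SHA256_Update(&ctx, pw, strlen(pw));	/* then the cleartext password */
	SHA256_Final(out, &ctx);
}

int main(void)
{
	unsigned char salt[GR_SALT_LEN] = { 0 };	/* gradm stores a random salt */
	unsigned char sum[GR_SHA_LEN];
	unsigned int i;

	gr_sum(salt, "example-password", sum);
	for (i = 0; i < GR_SHA_LEN; i++)
		printf("%02x", sum[i]);
	printf("\n");
	return 0;
}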
79414diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
79415index 77ff547..181834f 100644
79416--- a/include/asm-generic/4level-fixup.h
79417+++ b/include/asm-generic/4level-fixup.h
79418@@ -13,8 +13,10 @@
79419 #define pmd_alloc(mm, pud, address) \
79420 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
79421 NULL: pmd_offset(pud, address))
79422+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
79423
79424 #define pud_alloc(mm, pgd, address) (pgd)
79425+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
79426 #define pud_offset(pgd, start) (pgd)
79427 #define pud_none(pud) 0
79428 #define pud_bad(pud) 0
79429diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
79430index b7babf0..1e4b4f1 100644
79431--- a/include/asm-generic/atomic-long.h
79432+++ b/include/asm-generic/atomic-long.h
79433@@ -22,6 +22,12 @@
79434
79435 typedef atomic64_t atomic_long_t;
79436
79437+#ifdef CONFIG_PAX_REFCOUNT
79438+typedef atomic64_unchecked_t atomic_long_unchecked_t;
79439+#else
79440+typedef atomic64_t atomic_long_unchecked_t;
79441+#endif
79442+
79443 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
79444
79445 static inline long atomic_long_read(atomic_long_t *l)
79446@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79447 return (long)atomic64_read(v);
79448 }
79449
79450+#ifdef CONFIG_PAX_REFCOUNT
79451+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79452+{
79453+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79454+
79455+ return (long)atomic64_read_unchecked(v);
79456+}
79457+#endif
79458+
79459 static inline void atomic_long_set(atomic_long_t *l, long i)
79460 {
79461 atomic64_t *v = (atomic64_t *)l;
79462@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79463 atomic64_set(v, i);
79464 }
79465
79466+#ifdef CONFIG_PAX_REFCOUNT
79467+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79468+{
79469+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79470+
79471+ atomic64_set_unchecked(v, i);
79472+}
79473+#endif
79474+
79475 static inline void atomic_long_inc(atomic_long_t *l)
79476 {
79477 atomic64_t *v = (atomic64_t *)l;
79478@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79479 atomic64_inc(v);
79480 }
79481
79482+#ifdef CONFIG_PAX_REFCOUNT
79483+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79484+{
79485+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79486+
79487+ atomic64_inc_unchecked(v);
79488+}
79489+#endif
79490+
79491 static inline void atomic_long_dec(atomic_long_t *l)
79492 {
79493 atomic64_t *v = (atomic64_t *)l;
79494@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79495 atomic64_dec(v);
79496 }
79497
79498+#ifdef CONFIG_PAX_REFCOUNT
79499+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79500+{
79501+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79502+
79503+ atomic64_dec_unchecked(v);
79504+}
79505+#endif
79506+
79507 static inline void atomic_long_add(long i, atomic_long_t *l)
79508 {
79509 atomic64_t *v = (atomic64_t *)l;
79510@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79511 atomic64_add(i, v);
79512 }
79513
79514+#ifdef CONFIG_PAX_REFCOUNT
79515+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79516+{
79517+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79518+
79519+ atomic64_add_unchecked(i, v);
79520+}
79521+#endif
79522+
79523 static inline void atomic_long_sub(long i, atomic_long_t *l)
79524 {
79525 atomic64_t *v = (atomic64_t *)l;
79526@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79527 atomic64_sub(i, v);
79528 }
79529
79530+#ifdef CONFIG_PAX_REFCOUNT
79531+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
79532+{
79533+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79534+
79535+ atomic64_sub_unchecked(i, v);
79536+}
79537+#endif
79538+
79539 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
79540 {
79541 atomic64_t *v = (atomic64_t *)l;
79542@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
79543 return atomic64_add_negative(i, v);
79544 }
79545
79546-static inline long atomic_long_add_return(long i, atomic_long_t *l)
79547+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
79548 {
79549 atomic64_t *v = (atomic64_t *)l;
79550
79551 return (long)atomic64_add_return(i, v);
79552 }
79553
79554+#ifdef CONFIG_PAX_REFCOUNT
79555+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
79556+{
79557+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79558+
79559+ return (long)atomic64_add_return_unchecked(i, v);
79560+}
79561+#endif
79562+
79563 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
79564 {
79565 atomic64_t *v = (atomic64_t *)l;
79566@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
79567 return (long)atomic64_inc_return(v);
79568 }
79569
79570+#ifdef CONFIG_PAX_REFCOUNT
79571+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
79572+{
79573+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79574+
79575+ return (long)atomic64_inc_return_unchecked(v);
79576+}
79577+#endif
79578+
79579 static inline long atomic_long_dec_return(atomic_long_t *l)
79580 {
79581 atomic64_t *v = (atomic64_t *)l;
79582@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
79583
79584 typedef atomic_t atomic_long_t;
79585
79586+#ifdef CONFIG_PAX_REFCOUNT
79587+typedef atomic_unchecked_t atomic_long_unchecked_t;
79588+#else
79589+typedef atomic_t atomic_long_unchecked_t;
79590+#endif
79591+
79592 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
79593 static inline long atomic_long_read(atomic_long_t *l)
79594 {
79595@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79596 return (long)atomic_read(v);
79597 }
79598
79599+#ifdef CONFIG_PAX_REFCOUNT
79600+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79601+{
79602+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79603+
79604+ return (long)atomic_read_unchecked(v);
79605+}
79606+#endif
79607+
79608 static inline void atomic_long_set(atomic_long_t *l, long i)
79609 {
79610 atomic_t *v = (atomic_t *)l;
79611@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79612 atomic_set(v, i);
79613 }
79614
79615+#ifdef CONFIG_PAX_REFCOUNT
79616+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79617+{
79618+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79619+
79620+ atomic_set_unchecked(v, i);
79621+}
79622+#endif
79623+
79624 static inline void atomic_long_inc(atomic_long_t *l)
79625 {
79626 atomic_t *v = (atomic_t *)l;
79627@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79628 atomic_inc(v);
79629 }
79630
79631+#ifdef CONFIG_PAX_REFCOUNT
79632+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79633+{
79634+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79635+
79636+ atomic_inc_unchecked(v);
79637+}
79638+#endif
79639+
79640 static inline void atomic_long_dec(atomic_long_t *l)
79641 {
79642 atomic_t *v = (atomic_t *)l;
79643@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79644 atomic_dec(v);
79645 }
79646
79647+#ifdef CONFIG_PAX_REFCOUNT
79648+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79649+{
79650+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79651+
79652+ atomic_dec_unchecked(v);
79653+}
79654+#endif
79655+
79656 static inline void atomic_long_add(long i, atomic_long_t *l)
79657 {
79658 atomic_t *v = (atomic_t *)l;
79659@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79660 atomic_add(i, v);
79661 }
79662
79663+#ifdef CONFIG_PAX_REFCOUNT
79664+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79665+{
79666+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79667+
79668+ atomic_add_unchecked(i, v);
79669+}
79670+#endif
79671+
79672 static inline void atomic_long_sub(long i, atomic_long_t *l)
79673 {
79674 atomic_t *v = (atomic_t *)l;
79675@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79676 atomic_sub(i, v);
79677 }
79678
79679+#ifdef CONFIG_PAX_REFCOUNT
79680+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
79681+{
79682+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79683+
79684+ atomic_sub_unchecked(i, v);
79685+}
79686+#endif
79687+
79688 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
79689 {
79690 atomic_t *v = (atomic_t *)l;
79691@@ -211,13 +349,23 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
79692 return atomic_add_negative(i, v);
79693 }
79694
79695-static inline long atomic_long_add_return(long i, atomic_long_t *l)
79696+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
79697 {
79698 atomic_t *v = (atomic_t *)l;
79699
79700 return (long)atomic_add_return(i, v);
79701 }
79702
79703+#ifdef CONFIG_PAX_REFCOUNT
79704+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
79705+{
79706+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79707+
79708+ return (long)atomic_add_return_unchecked(i, v);
79709+}
79710+
79711+#endif
79712+
79713 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
79714 {
79715 atomic_t *v = (atomic_t *)l;
79716@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
79717 return (long)atomic_inc_return(v);
79718 }
79719
79720+#ifdef CONFIG_PAX_REFCOUNT
79721+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
79722+{
79723+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79724+
79725+ return (long)atomic_inc_return_unchecked(v);
79726+}
79727+#endif
79728+
79729 static inline long atomic_long_dec_return(atomic_long_t *l)
79730 {
79731 atomic_t *v = (atomic_t *)l;
79732@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
79733
79734 #endif /* BITS_PER_LONG == 64 */
79735
79736+#ifdef CONFIG_PAX_REFCOUNT
79737+static inline void pax_refcount_needs_these_functions(void)
79738+{
79739+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
79740+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
79741+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
79742+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
79743+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
79744+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
79745+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
79746+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
79747+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
79748+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
79749+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
79750+#ifdef CONFIG_X86
79751+ atomic_clear_mask_unchecked(0, NULL);
79752+ atomic_set_mask_unchecked(0, NULL);
79753+#endif
79754+
79755+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
79756+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
79757+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
79758+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
79759+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
79760+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
79761+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
79762+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
79763+}
79764+#else
79765+#define atomic_read_unchecked(v) atomic_read(v)
79766+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
79767+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
79768+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
79769+#define atomic_inc_unchecked(v) atomic_inc(v)
79770+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
79771+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
79772+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
79773+#define atomic_dec_unchecked(v) atomic_dec(v)
79774+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
79775+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
79776+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
79777+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
79778+
79779+#define atomic_long_read_unchecked(v) atomic_long_read(v)
79780+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
79781+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
79782+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
79783+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
79784+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
79785+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
79786+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
79787+#endif
79788+
79789 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
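
The pattern in this header repeats across every architecture the patch touches: under CONFIG_PAX_REFCOUNT, ordinary atomic_t/atomic_long_t arithmetic is instrumented to detect signed overflow (so a leaked reference cannot wrap a refcount past zero into a use-after-free), while the new *_unchecked types keep plain wrapping semantics for counters where overflow is harmless, such as statistics. pax_refcount_needs_these_functions() is never called; it exists only so the build breaks if an architecture forgets to supply one of the required variants, and without the feature everything falls through to the plain operations via the #define block. A toy model of the semantic split, using GCC's __builtin_add_overflow in place of the architecture-specific trap:

/* Toy model of PAX_REFCOUNT semantics. Illustrative only: the real feature
 * traps inside each arch's atomic implementation, not via this builtin. */
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

static int checked_add(int v, int i)		/* ~ atomic_add under PAX_REFCOUNT */
{
	int r;

	if (__builtin_add_overflow(v, i, &r)) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();			/* kernel: log, saturate, kill task */
	}
	return r;
}

static int unchecked_add(int v, int i)		/* ~ atomic_add_unchecked */
{
	return (int)((unsigned int)v + (unsigned int)i);	/* wraps silently */
}

int main(void)
{
	printf("unchecked: %d\n", unchecked_add(INT_MAX, 1));	/* wraps negative */
	printf("checked: %d\n", checked_add(INT_MAX, 1));	/* aborts here */
	return 0;
}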
79790diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
79791index 30ad9c8..c70c170 100644
79792--- a/include/asm-generic/atomic64.h
79793+++ b/include/asm-generic/atomic64.h
79794@@ -16,6 +16,8 @@ typedef struct {
79795 long long counter;
79796 } atomic64_t;
79797
79798+typedef atomic64_t atomic64_unchecked_t;
79799+
79800 #define ATOMIC64_INIT(i) { (i) }
79801
79802 extern long long atomic64_read(const atomic64_t *v);
79803@@ -51,4 +53,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
79804 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
79805 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
79806
79807+#define atomic64_read_unchecked(v) atomic64_read(v)
79808+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
79809+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
79810+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
79811+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
79812+#define atomic64_inc_unchecked(v) atomic64_inc(v)
79813+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
79814+#define atomic64_dec_unchecked(v) atomic64_dec(v)
79815+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
79816+
79817 #endif /* _ASM_GENERIC_ATOMIC64_H */
79818diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
79819index f5c40b0..e902f9d 100644
79820--- a/include/asm-generic/barrier.h
79821+++ b/include/asm-generic/barrier.h
79822@@ -82,7 +82,7 @@
79823 do { \
79824 compiletime_assert_atomic_type(*p); \
79825 smp_mb(); \
79826- ACCESS_ONCE(*p) = (v); \
79827+ ACCESS_ONCE_RW(*p) = (v); \
79828 } while (0)
79829
79830 #define smp_load_acquire(p) \
79831diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
79832index a60a7cc..0fe12f2 100644
79833--- a/include/asm-generic/bitops/__fls.h
79834+++ b/include/asm-generic/bitops/__fls.h
79835@@ -9,7 +9,7 @@
79836 *
79837 * Undefined if no set bit exists, so code should check against 0 first.
79838 */
79839-static __always_inline unsigned long __fls(unsigned long word)
79840+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
79841 {
79842 int num = BITS_PER_LONG - 1;
79843
79844diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
79845index 0576d1f..dad6c71 100644
79846--- a/include/asm-generic/bitops/fls.h
79847+++ b/include/asm-generic/bitops/fls.h
79848@@ -9,7 +9,7 @@
79849 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
79850 */
79851
79852-static __always_inline int fls(int x)
79853+static __always_inline int __intentional_overflow(-1) fls(int x)
79854 {
79855 int r = 32;
79856
79857diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
79858index b097cf8..3d40e14 100644
79859--- a/include/asm-generic/bitops/fls64.h
79860+++ b/include/asm-generic/bitops/fls64.h
79861@@ -15,7 +15,7 @@
79862 * at position 64.
79863 */
79864 #if BITS_PER_LONG == 32
79865-static __always_inline int fls64(__u64 x)
79866+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
79867 {
79868 __u32 h = x >> 32;
79869 if (h)
79870@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
79871 return fls(x);
79872 }
79873 #elif BITS_PER_LONG == 64
79874-static __always_inline int fls64(__u64 x)
79875+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
79876 {
79877 if (x == 0)
79878 return 0;
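
The __intentional_overflow(-1) annotations scattered through these bitops headers are markers for the size_overflow GCC plugin: they whitelist functions whose arithmetic may legitimately wrap, or whose return value the plugin should stop tracking, so the instrumentation it inserts elsewhere does not flag them. When the plugin is absent the macro expands to nothing (see the compiler.h hunk later in this patch), so the annotated declarations compile unchanged with any compiler. The attribute attaches like any other, as this self-contained sketch shows:

/* How the annotation attaches to a declaration; the stub below mirrors the
 * no-plugin fallback, so this compiles anywhere. */
#include <stdio.h>

#ifndef __intentional_overflow
#define __intentional_overflow(...)	/* SIZE_OVERFLOW_PLUGIN absent: no-op */
#endif

static inline int __intentional_overflow(-1) fls_demo(int x)
{
	int r = 0;

	while (x) {			/* highest set bit, 1-based; fls(0) == 0 */
		r++;
		x = (int)((unsigned int)x >> 1);	/* unsigned shift: defined on negatives */
	}
	return r;
}

int main(void)
{
	printf("fls(0x80000000) = %d\n", fls_demo((int)0x80000000));	/* 32 */
	return 0;
}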
79879diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
79880index 1bfcfe5..e04c5c9 100644
79881--- a/include/asm-generic/cache.h
79882+++ b/include/asm-generic/cache.h
79883@@ -6,7 +6,7 @@
79884 * cache lines need to provide their own cache.h.
79885 */
79886
79887-#define L1_CACHE_SHIFT 5
79888-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
79889+#define L1_CACHE_SHIFT 5UL
79890+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
79891
79892 #endif /* __ASM_GENERIC_CACHE_H */
79893diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
79894index 0d68a1e..b74a761 100644
79895--- a/include/asm-generic/emergency-restart.h
79896+++ b/include/asm-generic/emergency-restart.h
79897@@ -1,7 +1,7 @@
79898 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
79899 #define _ASM_GENERIC_EMERGENCY_RESTART_H
79900
79901-static inline void machine_emergency_restart(void)
79902+static inline __noreturn void machine_emergency_restart(void)
79903 {
79904 machine_restart(NULL);
79905 }
79906diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
79907index 90f99c7..00ce236 100644
79908--- a/include/asm-generic/kmap_types.h
79909+++ b/include/asm-generic/kmap_types.h
79910@@ -2,9 +2,9 @@
79911 #define _ASM_GENERIC_KMAP_TYPES_H
79912
79913 #ifdef __WITH_KM_FENCE
79914-# define KM_TYPE_NR 41
79915+# define KM_TYPE_NR 42
79916 #else
79917-# define KM_TYPE_NR 20
79918+# define KM_TYPE_NR 21
79919 #endif
79920
79921 #endif
79922diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
79923index 9ceb03b..62b0b8f 100644
79924--- a/include/asm-generic/local.h
79925+++ b/include/asm-generic/local.h
79926@@ -23,24 +23,37 @@ typedef struct
79927 atomic_long_t a;
79928 } local_t;
79929
79930+typedef struct {
79931+ atomic_long_unchecked_t a;
79932+} local_unchecked_t;
79933+
79934 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
79935
79936 #define local_read(l) atomic_long_read(&(l)->a)
79937+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
79938 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
79939+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
79940 #define local_inc(l) atomic_long_inc(&(l)->a)
79941+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
79942 #define local_dec(l) atomic_long_dec(&(l)->a)
79943+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
79944 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
79945+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
79946 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
79947+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
79948
79949 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
79950 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
79951 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
79952 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
79953 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
79954+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
79955 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
79956 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
79957+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
79958
79959 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
79960+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
79961 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
79962 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
79963 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
79964diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
79965index 725612b..9cc513a 100644
79966--- a/include/asm-generic/pgtable-nopmd.h
79967+++ b/include/asm-generic/pgtable-nopmd.h
79968@@ -1,14 +1,19 @@
79969 #ifndef _PGTABLE_NOPMD_H
79970 #define _PGTABLE_NOPMD_H
79971
79972-#ifndef __ASSEMBLY__
79973-
79974 #include <asm-generic/pgtable-nopud.h>
79975
79976-struct mm_struct;
79977-
79978 #define __PAGETABLE_PMD_FOLDED
79979
79980+#define PMD_SHIFT PUD_SHIFT
79981+#define PTRS_PER_PMD 1
79982+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
79983+#define PMD_MASK (~(PMD_SIZE-1))
79984+
79985+#ifndef __ASSEMBLY__
79986+
79987+struct mm_struct;
79988+
79989 /*
79990 * Having the pmd type consist of a pud gets the size right, and allows
79991 * us to conceptually access the pud entry that this pmd is folded into
79992@@ -16,11 +21,6 @@ struct mm_struct;
79993 */
79994 typedef struct { pud_t pud; } pmd_t;
79995
79996-#define PMD_SHIFT PUD_SHIFT
79997-#define PTRS_PER_PMD 1
79998-#define PMD_SIZE (1UL << PMD_SHIFT)
79999-#define PMD_MASK (~(PMD_SIZE-1))
80000-
80001 /*
80002 * The "pud_xxx()" functions here are trivial for a folded two-level
80003 * setup: the pmd is never bad, and a pmd always exists (as it's folded
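
Hoisting PMD_SHIFT/PMD_SIZE/PMD_MASK out of the #ifndef __ASSEMBLY__ region (the next hunk does the same for the PUD constants) lets assembly sources use them, which is why the bare 1UL becomes _AC(1,UL): the UL suffix is C-only and must disappear when the header is preprocessed for the assembler. For reference, _AC lives in include/uapi/linux/const.h and amounts to the following (paraphrased):

/* Essence of _AC from include/uapi/linux/const.h (paraphrased sketch). */
#include <stdio.h>

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler sees the bare constant */
#else
#define __AC(X, Y)	(X##Y)		/* C pastes the type suffix back on */
#define _AC(X, Y)	__AC(X, Y)
#endif

#define DEMO_SHIFT	21
#define DEMO_SIZE	(_AC(1, UL) << DEMO_SHIFT)	/* 1UL << 21 in C, 1 << 21 in .S */

int main(void)
{
	printf("DEMO_SIZE = %lu\n", (unsigned long)DEMO_SIZE);
	return 0;
}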
80004diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
80005index 810431d..0ec4804f 100644
80006--- a/include/asm-generic/pgtable-nopud.h
80007+++ b/include/asm-generic/pgtable-nopud.h
80008@@ -1,10 +1,15 @@
80009 #ifndef _PGTABLE_NOPUD_H
80010 #define _PGTABLE_NOPUD_H
80011
80012-#ifndef __ASSEMBLY__
80013-
80014 #define __PAGETABLE_PUD_FOLDED
80015
80016+#define PUD_SHIFT PGDIR_SHIFT
80017+#define PTRS_PER_PUD 1
80018+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
80019+#define PUD_MASK (~(PUD_SIZE-1))
80020+
80021+#ifndef __ASSEMBLY__
80022+
80023 /*
80024 * Having the pud type consist of a pgd gets the size right, and allows
80025 * us to conceptually access the pgd entry that this pud is folded into
80026@@ -12,11 +17,6 @@
80027 */
80028 typedef struct { pgd_t pgd; } pud_t;
80029
80030-#define PUD_SHIFT PGDIR_SHIFT
80031-#define PTRS_PER_PUD 1
80032-#define PUD_SIZE (1UL << PUD_SHIFT)
80033-#define PUD_MASK (~(PUD_SIZE-1))
80034-
80035 /*
80036 * The "pgd_xxx()" functions here are trivial for a folded two-level
80037 * setup: the pud is never bad, and a pud always exists (as it's folded
80038@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
80039 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
80040
80041 #define pgd_populate(mm, pgd, pud) do { } while (0)
80042+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
80043 /*
80044 * (puds are folded into pgds so this doesn't get actually called,
80045 * but the define is needed for a generic inline function.)
80046diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
80047index 177d597..2826237 100644
80048--- a/include/asm-generic/pgtable.h
80049+++ b/include/asm-generic/pgtable.h
80050@@ -839,6 +839,22 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
80051 }
80052 #endif /* CONFIG_NUMA_BALANCING */
80053
80054+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
80055+#ifdef CONFIG_PAX_KERNEXEC
80056+#error KERNEXEC requires pax_open_kernel
80057+#else
80058+static inline unsigned long pax_open_kernel(void) { return 0; }
80059+#endif
80060+#endif
80061+
80062+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
80063+#ifdef CONFIG_PAX_KERNEXEC
80064+#error KERNEXEC requires pax_close_kernel
80065+#else
80066+static inline unsigned long pax_close_kernel(void) { return 0; }
80067+#endif
80068+#endif
80069+
80070 #endif /* CONFIG_MMU */
80071
80072 #endif /* !__ASSEMBLY__ */
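
pax_open_kernel()/pax_close_kernel() bracket writes to memory that KERNEXEC keeps read-only (on x86, historically by toggling CR0.WP). The stubs added here are the fallback for architectures without the feature, and the #error lines make a KERNEXEC build fail loudly rather than silently ship no-op protection. The expected calling pattern, sketched standalone with the same no-op stubs:

/* Calling-pattern sketch; the stubs mirror the !KERNEXEC fallbacks above. */
static unsigned long pax_open_kernel(void) { return 0; }
static unsigned long pax_close_kernel(void) { return 0; }

static void set_readonly_word(unsigned long *target, unsigned long val)
{
	pax_open_kernel();	/* lift kernel write protection */
	*target = val;		/* keep the writable window as small as possible */
	pax_close_kernel();	/* re-arm write protection immediately */
}

int main(void)
{
	unsigned long word = 0;

	set_readonly_word(&word, 42);
	return word == 42 ? 0 : 1;
}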
80073diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
80074index 72d8803..cb9749c 100644
80075--- a/include/asm-generic/uaccess.h
80076+++ b/include/asm-generic/uaccess.h
80077@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
80078 return __clear_user(to, n);
80079 }
80080
80081+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
80082+#ifdef CONFIG_PAX_MEMORY_UDEREF
80083+#error UDEREF requires pax_open_userland
80084+#else
80085+static inline unsigned long pax_open_userland(void) { return 0; }
80086+#endif
80087+#endif
80088+
80089+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
80090+#ifdef CONFIG_PAX_MEMORY_UDEREF
80091+#error UDEREF requires pax_close_userland
80092+#else
80093+static inline unsigned long pax_close_userland(void) { return 0; }
80094+#endif
80095+#endif
80096+
80097 #endif /* __ASM_GENERIC_UACCESS_H */
80098diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
80099index bee5d68..8d362d1 100644
80100--- a/include/asm-generic/vmlinux.lds.h
80101+++ b/include/asm-generic/vmlinux.lds.h
80102@@ -234,6 +234,7 @@
80103 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
80104 VMLINUX_SYMBOL(__start_rodata) = .; \
80105 *(.rodata) *(.rodata.*) \
80106+ *(.data..read_only) \
80107 *(__vermagic) /* Kernel version magic */ \
80108 . = ALIGN(8); \
80109 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
80110@@ -726,17 +727,18 @@
80111 * section in the linker script will go there too. @phdr should have
80112 * a leading colon.
80113 *
80114- * Note that this macros defines __per_cpu_load as an absolute symbol.
80115+ * Note that this macro defines per_cpu_load as an absolute symbol.
80116 * If there is no need to put the percpu section at a predetermined
80117 * address, use PERCPU_SECTION.
80118 */
80119 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
80120- VMLINUX_SYMBOL(__per_cpu_load) = .; \
80121- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
80122+ per_cpu_load = .; \
80123+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
80124 - LOAD_OFFSET) { \
80125+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
80126 PERCPU_INPUT(cacheline) \
80127 } phdr \
80128- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
80129+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
80130
80131 /**
80132 * PERCPU_SECTION - define output section for percpu area, simple version
80133diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
80134index 623a59c..1e79ab9 100644
80135--- a/include/crypto/algapi.h
80136+++ b/include/crypto/algapi.h
80137@@ -34,7 +34,7 @@ struct crypto_type {
80138 unsigned int maskclear;
80139 unsigned int maskset;
80140 unsigned int tfmsize;
80141-};
80142+} __do_const;
80143
80144 struct crypto_instance {
80145 struct crypto_alg alg;
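
This is the first of many hunks that tag ops structures with __do_const: under the constify GCC plugin, structures composed of function pointers are made const everywhere and moved into read-only memory, so an attacker with a kernel write primitive cannot redirect them; __no_const marks the minority of ops structures that genuinely are written at runtime, and the typedef'd *_no_const variants seen later serve the same purpose for individual declarations. Without the plugin both attributes expand to nothing. What constification amounts to, in a standalone sketch:

/* Sketch of the constify idea: an ops table never written after build time
 * can live in .rodata, taking function-pointer overwrites off the table. */
#include <stdio.h>

struct demo_ops {
	int (*show)(const char *msg);
};

static int demo_show(const char *msg)
{
	return printf("%s\n", msg);
}

/* With the plugin, __do_const structs behave as if declared const like this
 * at every definition site. */
static const struct demo_ops ops = { .show = demo_show };

int main(void)
{
	return ops.show("ops table in read-only memory") < 0;
}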
80146diff --git a/include/drm/drmP.h b/include/drm/drmP.h
80147index e1b2e8b..2697bd2 100644
80148--- a/include/drm/drmP.h
80149+++ b/include/drm/drmP.h
80150@@ -59,6 +59,7 @@
80151
80152 #include <asm/mman.h>
80153 #include <asm/pgalloc.h>
80154+#include <asm/local.h>
80155 #include <asm/uaccess.h>
80156
80157 #include <uapi/drm/drm.h>
80158@@ -223,10 +224,12 @@ void drm_err(const char *format, ...);
80159 * \param cmd command.
80160 * \param arg argument.
80161 */
80162-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
80163+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
80164+ struct drm_file *file_priv);
80165+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
80166 struct drm_file *file_priv);
80167
80168-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
80169+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
80170 unsigned long arg);
80171
80172 #define DRM_IOCTL_NR(n) _IOC_NR(n)
80173@@ -242,10 +245,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
80174 struct drm_ioctl_desc {
80175 unsigned int cmd;
80176 int flags;
80177- drm_ioctl_t *func;
80178+ drm_ioctl_t func;
80179 unsigned int cmd_drv;
80180 const char *name;
80181-};
80182+} __do_const;
80183
80184 /**
80185 * Creates a driver or general drm_ioctl_desc array entry for the given
80186@@ -629,7 +632,8 @@ struct drm_info_list {
80187 int (*show)(struct seq_file*, void*); /** show callback */
80188 u32 driver_features; /**< Required driver features for this entry */
80189 void *data;
80190-};
80191+} __do_const;
80192+typedef struct drm_info_list __no_const drm_info_list_no_const;
80193
80194 /**
80195 * debugfs node structure. This structure represents a debugfs file.
80196@@ -713,7 +717,7 @@ struct drm_device {
80197
80198 /** \name Usage Counters */
80199 /*@{ */
80200- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
80201+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
80202 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
80203 int buf_use; /**< Buffers in use -- cannot alloc */
80204 atomic_t buf_alloc; /**< Buffer allocation in progress */
80205diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
80206index 7adbb65..2a1eb1f 100644
80207--- a/include/drm/drm_crtc_helper.h
80208+++ b/include/drm/drm_crtc_helper.h
80209@@ -116,7 +116,7 @@ struct drm_encoder_helper_funcs {
80210 struct drm_connector *connector);
80211 /* disable encoder when not in use - more explicit than dpms off */
80212 void (*disable)(struct drm_encoder *encoder);
80213-};
80214+} __no_const;
80215
80216 /**
80217 * drm_connector_helper_funcs - helper operations for connectors
80218diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
80219index d016dc5..3951fe0 100644
80220--- a/include/drm/i915_pciids.h
80221+++ b/include/drm/i915_pciids.h
80222@@ -37,7 +37,7 @@
80223 */
80224 #define INTEL_VGA_DEVICE(id, info) { \
80225 0x8086, id, \
80226- ~0, ~0, \
80227+ PCI_ANY_ID, PCI_ANY_ID, \
80228 0x030000, 0xff0000, \
80229 (unsigned long) info }
80230
80231diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
80232index 72dcbe8..8db58d7 100644
80233--- a/include/drm/ttm/ttm_memory.h
80234+++ b/include/drm/ttm/ttm_memory.h
80235@@ -48,7 +48,7 @@
80236
80237 struct ttm_mem_shrink {
80238 int (*do_shrink) (struct ttm_mem_shrink *);
80239-};
80240+} __no_const;
80241
80242 /**
80243 * struct ttm_mem_global - Global memory accounting structure.
80244diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
80245index 49a8284..9643967 100644
80246--- a/include/drm/ttm/ttm_page_alloc.h
80247+++ b/include/drm/ttm/ttm_page_alloc.h
80248@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
80249 */
80250 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
80251
80252+struct device;
80253 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
80254 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
80255
80256diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
80257index 4b840e8..155d235 100644
80258--- a/include/keys/asymmetric-subtype.h
80259+++ b/include/keys/asymmetric-subtype.h
80260@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
80261 /* Verify the signature on a key of this subtype (optional) */
80262 int (*verify_signature)(const struct key *key,
80263 const struct public_key_signature *sig);
80264-};
80265+} __do_const;
80266
80267 /**
80268 * asymmetric_key_subtype - Get the subtype from an asymmetric key
80269diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
80270index c1da539..1dcec55 100644
80271--- a/include/linux/atmdev.h
80272+++ b/include/linux/atmdev.h
80273@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
80274 #endif
80275
80276 struct k_atm_aal_stats {
80277-#define __HANDLE_ITEM(i) atomic_t i
80278+#define __HANDLE_ITEM(i) atomic_unchecked_t i
80279 __AAL_STAT_ITEMS
80280 #undef __HANDLE_ITEM
80281 };
80282@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
80283 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
80284 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
80285 struct module *owner;
80286-};
80287+} __do_const;
80288
80289 struct atmphy_ops {
80290 int (*start)(struct atm_dev *dev);
80291diff --git a/include/linux/atomic.h b/include/linux/atomic.h
80292index 5b08a85..60922fb 100644
80293--- a/include/linux/atomic.h
80294+++ b/include/linux/atomic.h
80295@@ -12,7 +12,7 @@
80296 * Atomically adds @a to @v, so long as @v was not already @u.
80297 * Returns non-zero if @v was not @u, and zero otherwise.
80298 */
80299-static inline int atomic_add_unless(atomic_t *v, int a, int u)
80300+static inline int __intentional_overflow(-1) atomic_add_unless(atomic_t *v, int a, int u)
80301 {
80302 return __atomic_add_unless(v, a, u) != u;
80303 }
80304diff --git a/include/linux/audit.h b/include/linux/audit.h
80305index af84234..4177a40 100644
80306--- a/include/linux/audit.h
80307+++ b/include/linux/audit.h
80308@@ -225,7 +225,7 @@ static inline void audit_ptrace(struct task_struct *t)
80309 extern unsigned int audit_serial(void);
80310 extern int auditsc_get_stamp(struct audit_context *ctx,
80311 struct timespec *t, unsigned int *serial);
80312-extern int audit_set_loginuid(kuid_t loginuid);
80313+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
80314
80315 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
80316 {
80317diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
80318index 576e463..28fd926 100644
80319--- a/include/linux/binfmts.h
80320+++ b/include/linux/binfmts.h
80321@@ -44,7 +44,7 @@ struct linux_binprm {
80322 unsigned interp_flags;
80323 unsigned interp_data;
80324 unsigned long loader, exec;
80325-};
80326+} __randomize_layout;
80327
80328 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
80329 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
80330@@ -77,8 +77,10 @@ struct linux_binfmt {
80331 int (*load_binary)(struct linux_binprm *);
80332 int (*load_shlib)(struct file *);
80333 int (*core_dump)(struct coredump_params *cprm);
80334+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
80335+ void (*handle_mmap)(struct file *);
80336 unsigned long min_coredump; /* minimal dump size */
80337-};
80338+} __do_const __randomize_layout;
80339
80340 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
80341
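
The two hooks added to struct linux_binfmt let the format handler participate in memory-management decisions: handle_mmap is called when a file of that format is mapped, and handle_mprotect when a mapping's protection is about to change; PaX's ELF loader uses them to honor per-binary PT_PAX_FLAGS/xattr markings and to enforce MPROTECT. A standalone stub of the dispatch shape (the struct and handler names below are illustrative, not the kernel definitions):

/* Illustrative stub of the new binfmt hook dispatch (not kernel code). */
#include <stdio.h>

struct file;				/* opaque here, as in the kernel */

struct binfmt_demo {
	void (*handle_mmap)(struct file *);
	void (*handle_mprotect)(void *vma, unsigned long newflags);
};

static void elf_handle_mmap(struct file *f)
{
	(void)f;
	printf("consult PT_PAX_FLAGS / xattr marking for this mapping\n");
}

static struct binfmt_demo elf_fmt = {
	.handle_mmap = elf_handle_mmap,
	/* handle_mprotect left NULL: callers must tolerate absent hooks */
};

int main(void)
{
	if (elf_fmt.handle_mmap)
		elf_fmt.handle_mmap(NULL);
	return 0;
}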
80342diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
80343index 202e403..16e6617 100644
80344--- a/include/linux/bitmap.h
80345+++ b/include/linux/bitmap.h
80346@@ -302,7 +302,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
80347 return __bitmap_full(src, nbits);
80348 }
80349
80350-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
80351+static inline int __intentional_overflow(-1) bitmap_weight(const unsigned long *src, unsigned int nbits)
80352 {
80353 if (small_const_nbits(nbits))
80354 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
80355diff --git a/include/linux/bitops.h b/include/linux/bitops.h
80356index 5d858e0..336c1d9 100644
80357--- a/include/linux/bitops.h
80358+++ b/include/linux/bitops.h
80359@@ -105,7 +105,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
80360 * @word: value to rotate
80361 * @shift: bits to roll
80362 */
80363-static inline __u32 rol32(__u32 word, unsigned int shift)
80364+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
80365 {
80366 return (word << shift) | (word >> (32 - shift));
80367 }
80368@@ -115,7 +115,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
80369 * @word: value to rotate
80370 * @shift: bits to roll
80371 */
80372-static inline __u32 ror32(__u32 word, unsigned int shift)
80373+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
80374 {
80375 return (word >> shift) | (word << (32 - shift));
80376 }
80377@@ -171,7 +171,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
80378 return (__s32)(value << shift) >> shift;
80379 }
80380
80381-static inline unsigned fls_long(unsigned long l)
80382+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
80383 {
80384 if (sizeof(l) == 4)
80385 return fls(l);
80386diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
80387index 92f4b4b..483d537 100644
80388--- a/include/linux/blkdev.h
80389+++ b/include/linux/blkdev.h
80390@@ -1613,7 +1613,7 @@ struct block_device_operations {
80391 /* this callback is with swap_lock and sometimes page table lock held */
80392 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
80393 struct module *owner;
80394-};
80395+} __do_const;
80396
80397 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
80398 unsigned long);
80399diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
80400index afc1343..9735539 100644
80401--- a/include/linux/blktrace_api.h
80402+++ b/include/linux/blktrace_api.h
80403@@ -25,7 +25,7 @@ struct blk_trace {
80404 struct dentry *dropped_file;
80405 struct dentry *msg_file;
80406 struct list_head running_list;
80407- atomic_t dropped;
80408+ atomic_unchecked_t dropped;
80409 };
80410
80411 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
80412diff --git a/include/linux/cache.h b/include/linux/cache.h
80413index 17e7e82..1d7da26 100644
80414--- a/include/linux/cache.h
80415+++ b/include/linux/cache.h
80416@@ -16,6 +16,14 @@
80417 #define __read_mostly
80418 #endif
80419
80420+#ifndef __read_only
80421+#ifdef CONFIG_PAX_KERNEXEC
80422+#error KERNEXEC requires __read_only
80423+#else
80424+#define __read_only __read_mostly
80425+#endif
80426+#endif
80427+
80428 #ifndef ____cacheline_aligned
80429 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
80430 #endif
80431diff --git a/include/linux/capability.h b/include/linux/capability.h
80432index aa93e5e..985a1b0 100644
80433--- a/include/linux/capability.h
80434+++ b/include/linux/capability.h
80435@@ -214,9 +214,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
80436 extern bool capable(int cap);
80437 extern bool ns_capable(struct user_namespace *ns, int cap);
80438 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
80439+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
80440 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
80441+extern bool capable_nolog(int cap);
80442+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
80443
80444 /* audit system wants to get cap info from files as well */
80445 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
80446
80447+extern int is_privileged_binary(const struct dentry *dentry);
80448+
80449 #endif /* !_LINUX_CAPABILITY_H */
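
The _nolog variants behave like capable()/ns_capable() but suppress grsecurity's capability audit logging; they suit call sites that probe a capability speculatively, where denial is the normal case and a log entry per probe would be noise, while real enforcement points keep the logging. is_privileged_binary() reports whether a dentry is setuid/setgid or carries file capabilities. The intended call-site split, sketched standalone with stub predicates:

/* Standalone sketch of when to prefer the _nolog probe (stubs stand in
 * for the kernel implementations). */
#include <stdio.h>

#define CAP_SYS_ADMIN 21

static int capable(int cap)		/* enforcement: denial gets logged */
{
	printf("audit: capability %d check recorded\n", cap);
	return 0;
}

static int capable_nolog(int cap)	/* probe: same check, no audit record */
{
	(void)cap;
	return 0;
}

int main(void)
{
	if (!capable(CAP_SYS_ADMIN))		/* a denial here is a security event */
		printf("operation denied\n");

	if (capable_nolog(CAP_SYS_ADMIN))	/* failure is expected: stay quiet */
		printf("optional fast path enabled\n");
	return 0;
}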
80450diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
80451index 8609d57..86e4d79 100644
80452--- a/include/linux/cdrom.h
80453+++ b/include/linux/cdrom.h
80454@@ -87,7 +87,6 @@ struct cdrom_device_ops {
80455
80456 /* driver specifications */
80457 const int capability; /* capability flags */
80458- int n_minors; /* number of active minor devices */
80459 /* handle uniform packets for scsi type devices (scsi,atapi) */
80460 int (*generic_packet) (struct cdrom_device_info *,
80461 struct packet_command *);
80462diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
80463index 4ce9056..86caac6 100644
80464--- a/include/linux/cleancache.h
80465+++ b/include/linux/cleancache.h
80466@@ -31,7 +31,7 @@ struct cleancache_ops {
80467 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
80468 void (*invalidate_inode)(int, struct cleancache_filekey);
80469 void (*invalidate_fs)(int);
80470-};
80471+} __no_const;
80472
80473 extern struct cleancache_ops *
80474 cleancache_register_ops(struct cleancache_ops *ops);
80475diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
80476index d936409..ce9f842 100644
80477--- a/include/linux/clk-provider.h
80478+++ b/include/linux/clk-provider.h
80479@@ -191,6 +191,7 @@ struct clk_ops {
80480 void (*init)(struct clk_hw *hw);
80481 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
80482 };
80483+typedef struct clk_ops __no_const clk_ops_no_const;
80484
80485 /**
80486 * struct clk_init_data - holds init data that's common to all clocks and is
80487diff --git a/include/linux/compat.h b/include/linux/compat.h
80488index 7450ca2..a824b81 100644
80489--- a/include/linux/compat.h
80490+++ b/include/linux/compat.h
80491@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
80492 compat_size_t __user *len_ptr);
80493
80494 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
80495-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
80496+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
80497 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
80498 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
80499 compat_ssize_t msgsz, int msgflg);
80500@@ -439,7 +439,7 @@ extern int compat_ptrace_request(struct task_struct *child,
80501 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
80502 compat_ulong_t addr, compat_ulong_t data);
80503 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
80504- compat_long_t addr, compat_long_t data);
80505+ compat_ulong_t addr, compat_ulong_t data);
80506
80507 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
80508 /*
80509diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
80510index d1a5582..4424efa 100644
80511--- a/include/linux/compiler-gcc4.h
80512+++ b/include/linux/compiler-gcc4.h
80513@@ -39,9 +39,34 @@
80514 # define __compiletime_warning(message) __attribute__((warning(message)))
80515 # define __compiletime_error(message) __attribute__((error(message)))
80516 #endif /* __CHECKER__ */
80517+
80518+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
80519+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
80520+#define __bos0(ptr) __bos((ptr), 0)
80521+#define __bos1(ptr) __bos((ptr), 1)
80522 #endif /* GCC_VERSION >= 40300 */
80523
80524 #if GCC_VERSION >= 40500
80525+
80526+#ifdef RANDSTRUCT_PLUGIN
80527+#define __randomize_layout __attribute__((randomize_layout))
80528+#define __no_randomize_layout __attribute__((no_randomize_layout))
80529+#endif
80530+
80531+#ifdef CONSTIFY_PLUGIN
80532+#define __no_const __attribute__((no_const))
80533+#define __do_const __attribute__((do_const))
80534+#endif
80535+
80536+#ifdef SIZE_OVERFLOW_PLUGIN
80537+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
80538+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
80539+#endif
80540+
80541+#ifdef LATENT_ENTROPY_PLUGIN
80542+#define __latent_entropy __attribute__((latent_entropy))
80543+#endif
80544+
80545 /*
80546 * Mark a position in code as unreachable. This can be used to
80547 * suppress control flow warnings after asm blocks that transfer
80548diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
80549index c8c5659..2401b2e 100644
80550--- a/include/linux/compiler-gcc5.h
80551+++ b/include/linux/compiler-gcc5.h
80552@@ -28,6 +28,26 @@
80553 # define __compiletime_error(message) __attribute__((error(message)))
80554 #endif /* __CHECKER__ */
80555
80556+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
80557+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
80558+#define __bos0(ptr) __bos((ptr), 0)
80559+#define __bos1(ptr) __bos((ptr), 1)
80560+
80561+#ifdef CONSTIFY_PLUGIN
80562+#define __no_const __attribute__((no_const))
80563+#define __do_const __attribute__((do_const))
80564+#endif
80565+
80566+#ifdef SIZE_OVERFLOW_PLUGIN
80567+#error not yet
80568+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
80569+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
80570+#endif
80571+
80572+#ifdef LATENT_ENTROPY_PLUGIN
80573+#define __latent_entropy __attribute__((latent_entropy))
80574+#endif
80575+
80576 /*
80577 * Mark a position in code as unreachable. This can be used to
80578 * suppress control flow warnings after asm blocks that transfer
80579diff --git a/include/linux/compiler.h b/include/linux/compiler.h
80580index fa6a314..a1b01ad 100644
80581--- a/include/linux/compiler.h
80582+++ b/include/linux/compiler.h
80583@@ -5,11 +5,14 @@
80584
80585 #ifdef __CHECKER__
80586 # define __user __attribute__((noderef, address_space(1)))
80587+# define __force_user __force __user
80588 # define __kernel __attribute__((address_space(0)))
80589+# define __force_kernel __force __kernel
80590 # define __safe __attribute__((safe))
80591 # define __force __attribute__((force))
80592 # define __nocast __attribute__((nocast))
80593 # define __iomem __attribute__((noderef, address_space(2)))
80594+# define __force_iomem __force __iomem
80595 # define __must_hold(x) __attribute__((context(x,1,1)))
80596 # define __acquires(x) __attribute__((context(x,0,1)))
80597 # define __releases(x) __attribute__((context(x,1,0)))
80598@@ -17,20 +20,37 @@
80599 # define __release(x) __context__(x,-1)
80600 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
80601 # define __percpu __attribute__((noderef, address_space(3)))
80602+# define __force_percpu __force __percpu
80603 #ifdef CONFIG_SPARSE_RCU_POINTER
80604 # define __rcu __attribute__((noderef, address_space(4)))
80605+# define __force_rcu __force __rcu
80606 #else
80607 # define __rcu
80608+# define __force_rcu
80609 #endif
80610 extern void __chk_user_ptr(const volatile void __user *);
80611 extern void __chk_io_ptr(const volatile void __iomem *);
80612 #else
80613-# define __user
80614-# define __kernel
80615+# ifdef CHECKER_PLUGIN
80616+//# define __user
80617+//# define __force_user
80618+//# define __kernel
80619+//# define __force_kernel
80620+# else
80621+# ifdef STRUCTLEAK_PLUGIN
80622+# define __user __attribute__((user))
80623+# else
80624+# define __user
80625+# endif
80626+# define __force_user
80627+# define __kernel
80628+# define __force_kernel
80629+# endif
80630 # define __safe
80631 # define __force
80632 # define __nocast
80633 # define __iomem
80634+# define __force_iomem
80635 # define __chk_user_ptr(x) (void)0
80636 # define __chk_io_ptr(x) (void)0
80637 # define __builtin_warning(x, y...) (1)
80638@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
80639 # define __release(x) (void)0
80640 # define __cond_lock(x,c) (c)
80641 # define __percpu
80642+# define __force_percpu
80643 # define __rcu
80644+# define __force_rcu
80645 #endif
80646
80647 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
80648@@ -201,32 +223,32 @@ static __always_inline void data_access_exceeds_word_size(void)
80649 static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
80650 {
80651 switch (size) {
80652- case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
80653- case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
80654- case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
80655+ case 1: *(__u8 *)res = *(const volatile __u8 *)p; break;
80656+ case 2: *(__u16 *)res = *(const volatile __u16 *)p; break;
80657+ case 4: *(__u32 *)res = *(const volatile __u32 *)p; break;
80658 #ifdef CONFIG_64BIT
80659- case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
80660+ case 8: *(__u64 *)res = *(const volatile __u64 *)p; break;
80661 #endif
80662 default:
80663 barrier();
80664- __builtin_memcpy((void *)res, (const void *)p, size);
80665+ __builtin_memcpy(res, (const void *)p, size);
80666 data_access_exceeds_word_size();
80667 barrier();
80668 }
80669 }
80670
80671-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
80672+static __always_inline void __write_once_size(volatile void *p, const void *res, int size)
80673 {
80674 switch (size) {
80675- case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
80676- case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
80677- case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
80678+ case 1: *(volatile __u8 *)p = *(const __u8 *)res; break;
80679+ case 2: *(volatile __u16 *)p = *(const __u16 *)res; break;
80680+ case 4: *(volatile __u32 *)p = *(const __u32 *)res; break;
80681 #ifdef CONFIG_64BIT
80682- case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
80683+ case 8: *(volatile __u64 *)p = *(const __u64 *)res; break;
80684 #endif
80685 default:
80686 barrier();
80687- __builtin_memcpy((void *)p, (const void *)res, size);
80688+ __builtin_memcpy((void *)p, res, size);
80689 data_access_exceeds_word_size();
80690 barrier();
80691 }
80692@@ -360,6 +382,34 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80693 # define __attribute_const__ /* unimplemented */
80694 #endif
80695
80696+#ifndef __randomize_layout
80697+# define __randomize_layout
80698+#endif
80699+
80700+#ifndef __no_randomize_layout
80701+# define __no_randomize_layout
80702+#endif
80703+
80704+#ifndef __no_const
80705+# define __no_const
80706+#endif
80707+
80708+#ifndef __do_const
80709+# define __do_const
80710+#endif
80711+
80712+#ifndef __size_overflow
80713+# define __size_overflow(...)
80714+#endif
80715+
80716+#ifndef __intentional_overflow
80717+# define __intentional_overflow(...)
80718+#endif
80719+
80720+#ifndef __latent_entropy
80721+# define __latent_entropy
80722+#endif
80723+
80724 /*
80725 * Tell gcc if a function is cold. The compiler will assume any path
80726 * directly leading to the call is unlikely.
80727@@ -369,6 +419,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80728 #define __cold
80729 #endif
80730
80731+#ifndef __alloc_size
80732+#define __alloc_size(...)
80733+#endif
80734+
80735+#ifndef __bos
80736+#define __bos(ptr, arg)
80737+#endif
80738+
80739+#ifndef __bos0
80740+#define __bos0(ptr)
80741+#endif
80742+
80743+#ifndef __bos1
80744+#define __bos1(ptr)
80745+#endif
80746+
80747 /* Simple shorthand for a section definition */
80748 #ifndef __section
80749 # define __section(S) __attribute__ ((__section__(#S)))
80750@@ -383,6 +449,8 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80751 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
80752 #endif
80753
80754+#define __type_is_unsigned(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
80755+
80756 /* Is this type a native word size -- useful for atomic operations */
80757 #ifndef __native_word
80758 # define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
80759@@ -462,8 +530,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80760 */
80761 #define __ACCESS_ONCE(x) ({ \
80762 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
80763- (volatile typeof(x) *)&(x); })
80764+ (volatile const typeof(x) *)&(x); })
80765 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
80766+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
80767
80768 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
80769 #ifdef CONFIG_KPROBES
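
The reworked __ACCESS_ONCE casts through volatile const, so a plain ACCESS_ONCE(x) can only be read; any site that legitimately writes through the macro has to migrate to the new ACCESS_ONCE_RW, exactly as the barrier.h hunk earlier in this patch does for smp_store_release(). A minimal demonstration of the split (the macros below are simplified from the statement-expression originals):

/* Read-only ACCESS_ONCE vs ACCESS_ONCE_RW, simplified for illustration. */
#include <stdio.h>

#define ACCESS_ONCE(x)		(*(volatile const __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x)	(*(volatile __typeof__(x) *)&(x))

int main(void)
{
	int flag = 0;

	ACCESS_ONCE_RW(flag) = 1;		/* writes need the RW variant */
	printf("%d\n", ACCESS_ONCE(flag));	/* reads are fine const-qualified */
	/* ACCESS_ONCE(flag) = 2;  -- rejected: assignment to const lvalue */
	return 0;
}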
80770diff --git a/include/linux/completion.h b/include/linux/completion.h
80771index 5d5aaae..0ea9b84 100644
80772--- a/include/linux/completion.h
80773+++ b/include/linux/completion.h
80774@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
80775
80776 extern void wait_for_completion(struct completion *);
80777 extern void wait_for_completion_io(struct completion *);
80778-extern int wait_for_completion_interruptible(struct completion *x);
80779-extern int wait_for_completion_killable(struct completion *x);
80780+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
80781+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
80782 extern unsigned long wait_for_completion_timeout(struct completion *x,
80783- unsigned long timeout);
80784+ unsigned long timeout) __intentional_overflow(-1);
80785 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
80786- unsigned long timeout);
80787+ unsigned long timeout) __intentional_overflow(-1);
80788 extern long wait_for_completion_interruptible_timeout(
80789- struct completion *x, unsigned long timeout);
80790+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
80791 extern long wait_for_completion_killable_timeout(
80792- struct completion *x, unsigned long timeout);
80793+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
80794 extern bool try_wait_for_completion(struct completion *x);
80795 extern bool completion_done(struct completion *x);
80796
80797diff --git a/include/linux/configfs.h b/include/linux/configfs.h
80798index 34025df..d94bbbc 100644
80799--- a/include/linux/configfs.h
80800+++ b/include/linux/configfs.h
80801@@ -125,7 +125,7 @@ struct configfs_attribute {
80802 const char *ca_name;
80803 struct module *ca_owner;
80804 umode_t ca_mode;
80805-};
80806+} __do_const;
80807
80808 /*
80809 * Users often need to create attribute structures for their configurable
80810diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
80811index 4d078ce..c970f4d 100644
80812--- a/include/linux/cpufreq.h
80813+++ b/include/linux/cpufreq.h
80814@@ -206,6 +206,7 @@ struct global_attr {
80815 ssize_t (*store)(struct kobject *a, struct attribute *b,
80816 const char *c, size_t count);
80817 };
80818+typedef struct global_attr __no_const global_attr_no_const;
80819
80820 #define define_one_global_ro(_name) \
80821 static struct global_attr _name = \
80822@@ -277,7 +278,7 @@ struct cpufreq_driver {
80823 bool boost_supported;
80824 bool boost_enabled;
80825 int (*set_boost)(int state);
80826-};
80827+} __do_const;
80828
80829 /* flags */
80830 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
80831diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
80832index 948df62..4602717 100644
80833--- a/include/linux/cpuidle.h
80834+++ b/include/linux/cpuidle.h
80835@@ -50,7 +50,8 @@ struct cpuidle_state {
80836 int index);
80837
80838 int (*enter_dead) (struct cpuidle_device *dev, int index);
80839-};
80840+} __do_const;
80841+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
80842
80843 /* Idle State Flags */
80844 #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
80845@@ -205,7 +206,7 @@ struct cpuidle_governor {
80846 void (*reflect) (struct cpuidle_device *dev, int index);
80847
80848 struct module *owner;
80849-};
80850+} __do_const;
80851
80852 #ifdef CONFIG_CPU_IDLE
80853 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
80854diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
80855index b950e9d..63810aa 100644
80856--- a/include/linux/cpumask.h
80857+++ b/include/linux/cpumask.h
80858@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
80859 }
80860
80861 /* Valid inputs for n are -1 and 0. */
80862-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80863+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
80864 {
80865 return n+1;
80866 }
80867
80868-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80869+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
80870 {
80871 return n+1;
80872 }
80873
80874-static inline unsigned int cpumask_next_and(int n,
80875+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
80876 const struct cpumask *srcp,
80877 const struct cpumask *andp)
80878 {
80879@@ -174,7 +174,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
80880 *
80881 * Returns >= nr_cpu_ids if no further cpus set.
80882 */
80883-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80884+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
80885 {
80886 /* -1 is a legal arg here. */
80887 if (n != -1)
80888@@ -189,7 +189,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80889 *
80890 * Returns >= nr_cpu_ids if no further cpus unset.
80891 */
80892-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80893+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
80894 {
80895 /* -1 is a legal arg here. */
80896 if (n != -1)
80897@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80898 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
80899 }
80900
80901-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
80902+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
80903 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
80904 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
80905
80906@@ -464,7 +464,7 @@ static inline bool cpumask_full(const struct cpumask *srcp)
80907 * cpumask_weight - Count of bits in *srcp
80908 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
80909 */
80910-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
80911+static inline unsigned int __intentional_overflow(-1) cpumask_weight(const struct cpumask *srcp)
80912 {
80913 return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
80914 }
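The __intentional_overflow(-1) annotations added above are consumed by the size_overflow GCC plugin: they whitelist functions whose arithmetic deliberately wraps or mixes signedness, such as cpumask_next(), where the "-1 means before the first bit" convention makes n+1 the first index to test. A minimal userspace sketch of that convention (hypothetical names, not the kernel API):

#include <stdio.h>

#define NBITS 8

/* Sketch of the cpumask_next() convention: n == -1 means "before the
 * first bit", so n+1 is the first index to test.  The int -> unsigned
 * conversion is deliberate, which is what __intentional_overflow(-1)
 * tells the size_overflow plugin in the real header. */
static unsigned int next_set_bit(int n, unsigned char mask)
{
    for (unsigned int i = n + 1; i < NBITS; i++)
        if (mask & (1u << i))
            return i;
    return NBITS; /* >= NBITS means no further bits set */
}

int main(void)
{
    unsigned char mask = 0x2c; /* bits 2, 3 and 5 set */

    /* iterate the way for_each_cpu() does, starting from -1 */
    for (unsigned int i = next_set_bit(-1, mask); i < NBITS;
         i = next_set_bit(i, mask))
        printf("bit %u set\n", i);
    return 0;
}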
80915diff --git a/include/linux/cred.h b/include/linux/cred.h
80916index 2fb2ca2..d6a3340 100644
80917--- a/include/linux/cred.h
80918+++ b/include/linux/cred.h
80919@@ -35,7 +35,7 @@ struct group_info {
80920 int nblocks;
80921 kgid_t small_block[NGROUPS_SMALL];
80922 kgid_t *blocks[0];
80923-};
80924+} __randomize_layout;
80925
80926 /**
80927 * get_group_info - Get a reference to a group info structure
80928@@ -137,7 +137,7 @@ struct cred {
80929 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
80930 struct group_info *group_info; /* supplementary groups for euid/fsgid */
80931 struct rcu_head rcu; /* RCU deletion hook */
80932-};
80933+} __randomize_layout;
80934
80935 extern void __put_cred(struct cred *);
80936 extern void exit_creds(struct task_struct *);
80937@@ -195,6 +195,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
80938 static inline void validate_process_creds(void)
80939 {
80940 }
80941+static inline void validate_task_creds(struct task_struct *task)
80942+{
80943+}
80944 #endif
80945
80946 /**
80947@@ -332,6 +335,7 @@ static inline void put_cred(const struct cred *_cred)
80948
80949 #define task_uid(task) (task_cred_xxx((task), uid))
80950 #define task_euid(task) (task_cred_xxx((task), euid))
80951+#define task_securebits(task) (task_cred_xxx((task), securebits))
80952
80953 #define current_cred_xxx(xxx) \
80954 ({ \
80955diff --git a/include/linux/crypto.h b/include/linux/crypto.h
80956index 9c8776d..8c526c2 100644
80957--- a/include/linux/crypto.h
80958+++ b/include/linux/crypto.h
80959@@ -626,7 +626,7 @@ struct cipher_tfm {
80960 const u8 *key, unsigned int keylen);
80961 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
80962 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
80963-};
80964+} __no_const;
80965
80966 struct hash_tfm {
80967 int (*init)(struct hash_desc *desc);
80968@@ -647,13 +647,13 @@ struct compress_tfm {
80969 int (*cot_decompress)(struct crypto_tfm *tfm,
80970 const u8 *src, unsigned int slen,
80971 u8 *dst, unsigned int *dlen);
80972-};
80973+} __no_const;
80974
80975 struct rng_tfm {
80976 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
80977 unsigned int dlen);
80978 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
80979-};
80980+} __no_const;
80981
80982 #define crt_ablkcipher crt_u.ablkcipher
80983 #define crt_aead crt_u.aead
80984diff --git a/include/linux/ctype.h b/include/linux/ctype.h
80985index 653589e..4ef254a 100644
80986--- a/include/linux/ctype.h
80987+++ b/include/linux/ctype.h
80988@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
80989 * Fast implementation of tolower() for internal usage. Do not use in your
80990 * code.
80991 */
80992-static inline char _tolower(const char c)
80993+static inline unsigned char _tolower(const unsigned char c)
80994 {
80995 return c | 0x20;
80996 }
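Widening _tolower() from char to unsigned char sidesteps the signed-char trap: on ABIs where plain char is signed, bytes at or above 0x80 are negative, so c | 0x20 operates on a negative value and any caller that indexes a table with the result is broken. A small sketch of the hazard, assuming a signed-char platform:

#include <stdio.h>

/* The pre-patch shape: plain char may be signed */
static char tolower_signed(const char c)
{
    return c | 0x20;
}

/* The post-patch shape: byte values stay in 0..255 */
static unsigned char tolower_unsigned(const unsigned char c)
{
    return c | 0x20;
}

int main(void)
{
    char byte = (char)0xC4; /* e.g. Latin-1 'Ä'; negative if char is signed */

    /* On a signed-char ABI this prints a negative value... */
    printf("signed   version: %d\n", tolower_signed(byte));
    /* ...while the unsigned version stays a sane table index (0..255) */
    printf("unsigned version: %d\n", tolower_unsigned((unsigned char)byte));
    return 0;
}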
80997diff --git a/include/linux/dcache.h b/include/linux/dcache.h
80998index 5a81398..6bbee30 100644
80999--- a/include/linux/dcache.h
81000+++ b/include/linux/dcache.h
81001@@ -123,6 +123,9 @@ struct dentry {
81002 unsigned long d_time; /* used by d_revalidate */
81003 void *d_fsdata; /* fs-specific data */
81004
81005+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
81006+ atomic_t chroot_refcnt; /* tracks use of directory in chroot */
81007+#endif
81008 struct list_head d_lru; /* LRU list */
81009 struct list_head d_child; /* child of parent list */
81010 struct list_head d_subdirs; /* our children */
81011@@ -133,7 +136,7 @@ struct dentry {
81012 struct hlist_node d_alias; /* inode alias list */
81013 struct rcu_head d_rcu;
81014 } d_u;
81015-};
81016+} __randomize_layout;
81017
81018 /*
81019 * dentry->d_lock spinlock nesting subclasses:
81020diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
81021index 7925bf0..d5143d2 100644
81022--- a/include/linux/decompress/mm.h
81023+++ b/include/linux/decompress/mm.h
81024@@ -77,7 +77,7 @@ static void free(void *where)
81025 * warnings when not needed (indeed large_malloc / large_free are not
81026 * needed by inflate */
81027
81028-#define malloc(a) kmalloc(a, GFP_KERNEL)
81029+#define malloc(a) kmalloc((a), GFP_KERNEL)
81030 #define free(a) kfree(a)
81031
81032 #define large_malloc(a) vmalloc(a)
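The malloc(a) -> kmalloc((a), GFP_KERNEL) change is routine macro hygiene: kernel style parenthesizes macro arguments so an argument containing a low-precedence operator cannot expand into something other than what the caller wrote. Here the argument already sits in a safe position, so the change is purely defensive; the general failure mode the convention guards against looks like this (userspace illustration):

#include <stdio.h>

/* Unhygienic: the argument is pasted in verbatim */
#define BYTES_BAD(n)  n * 2
/* Hygienic, matching the kernel's (a) style */
#define BYTES_GOOD(n) ((n) * 2)

int main(void)
{
    int headers = 3, payload = 5;

    /* BAD expands to: headers + payload * 2  ==  13 */
    printf("bad : %d\n", BYTES_BAD(headers + payload));
    /* GOOD expands to: ((headers + payload) * 2)  ==  16 */
    printf("good: %d\n", BYTES_GOOD(headers + payload));
    return 0;
}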
81033diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
81034index ce447f0..83c66bd 100644
81035--- a/include/linux/devfreq.h
81036+++ b/include/linux/devfreq.h
81037@@ -114,7 +114,7 @@ struct devfreq_governor {
81038 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
81039 int (*event_handler)(struct devfreq *devfreq,
81040 unsigned int event, void *data);
81041-};
81042+} __do_const;
81043
81044 /**
81045 * struct devfreq - Device devfreq structure
81046diff --git a/include/linux/device.h b/include/linux/device.h
81047index fb50673..ec0b35b 100644
81048--- a/include/linux/device.h
81049+++ b/include/linux/device.h
81050@@ -311,7 +311,7 @@ struct subsys_interface {
81051 struct list_head node;
81052 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
81053 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
81054-};
81055+} __do_const;
81056
81057 int subsys_interface_register(struct subsys_interface *sif);
81058 void subsys_interface_unregister(struct subsys_interface *sif);
81059@@ -507,7 +507,7 @@ struct device_type {
81060 void (*release)(struct device *dev);
81061
81062 const struct dev_pm_ops *pm;
81063-};
81064+} __do_const;
81065
81066 /* interface for exporting device attributes */
81067 struct device_attribute {
81068@@ -517,11 +517,12 @@ struct device_attribute {
81069 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
81070 const char *buf, size_t count);
81071 };
81072+typedef struct device_attribute __no_const device_attribute_no_const;
81073
81074 struct dev_ext_attribute {
81075 struct device_attribute attr;
81076 void *var;
81077-};
81078+} __do_const;
81079
81080 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
81081 char *buf);
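This hunk shows both halves of the constification pattern that recurs throughout the patch: __do_const (enforced by the PaX constify GCC plugin) moves function-pointer-bearing structures such as device_type and dev_ext_attribute into read-only memory, while a *_no_const typedef (here device_attribute_no_const) is the escape hatch for the few instances that legitimately must be written at runtime, as the extcon hunk below does. A plain-C sketch of the idea, with ordinary const standing in for what the plugin enforces:

#include <stdio.h>

struct ops {
    int (*show)(void);
};

static int show_a(void) { return 1; }
static int show_b(void) { return 2; }

/* The common case: ops tables are immutable, so an attacker with a
 * kernel write primitive cannot redirect the function pointers.  This
 * is what __do_const achieves for whole struct types. */
static const struct ops fixed_ops = { .show = show_a };

/* The *_no_const escape hatch: a writable instance for code that
 * really does fill the table in at runtime. */
typedef struct ops ops_no_const;

int main(void)
{
    ops_no_const runtime_ops;

    runtime_ops.show = show_b;        /* fine: this one is writable */
    /* fixed_ops.show = show_b; */    /* would not compile: read-only */

    printf("%d %d\n", fixed_ops.show(), runtime_ops.show());
    return 0;
}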
81082diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
81083index c3007cb..43efc8c 100644
81084--- a/include/linux/dma-mapping.h
81085+++ b/include/linux/dma-mapping.h
81086@@ -60,7 +60,7 @@ struct dma_map_ops {
81087 u64 (*get_required_mask)(struct device *dev);
81088 #endif
81089 int is_phys;
81090-};
81091+} __do_const;
81092
81093 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
81094
81095diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
81096index 40cd75e..38572a9 100644
81097--- a/include/linux/dmaengine.h
81098+++ b/include/linux/dmaengine.h
81099@@ -1137,9 +1137,9 @@ struct dma_pinned_list {
81100 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
81101 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
81102
81103-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
81104+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
81105 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
81106-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
81107+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
81108 struct dma_pinned_list *pinned_list, struct page *page,
81109 unsigned int offset, size_t len);
81110
81111diff --git a/include/linux/efi.h b/include/linux/efi.h
81112index 0238d61..34a758f 100644
81113--- a/include/linux/efi.h
81114+++ b/include/linux/efi.h
81115@@ -1054,6 +1054,7 @@ struct efivar_operations {
81116 efi_set_variable_nonblocking_t *set_variable_nonblocking;
81117 efi_query_variable_store_t *query_variable_store;
81118 };
81119+typedef struct efivar_operations __no_const efivar_operations_no_const;
81120
81121 struct efivars {
81122 /*
81123diff --git a/include/linux/elf.h b/include/linux/elf.h
81124index 20fa8d8..3d0dd18 100644
81125--- a/include/linux/elf.h
81126+++ b/include/linux/elf.h
81127@@ -29,6 +29,7 @@ extern Elf32_Dyn _DYNAMIC [];
81128 #define elf_note elf32_note
81129 #define elf_addr_t Elf32_Off
81130 #define Elf_Half Elf32_Half
81131+#define elf_dyn Elf32_Dyn
81132
81133 #else
81134
81135@@ -39,6 +40,7 @@ extern Elf64_Dyn _DYNAMIC [];
81136 #define elf_note elf64_note
81137 #define elf_addr_t Elf64_Off
81138 #define Elf_Half Elf64_Half
81139+#define elf_dyn Elf64_Dyn
81140
81141 #endif
81142
81143diff --git a/include/linux/err.h b/include/linux/err.h
81144index a729120..6ede2c9 100644
81145--- a/include/linux/err.h
81146+++ b/include/linux/err.h
81147@@ -20,12 +20,12 @@
81148
81149 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
81150
81151-static inline void * __must_check ERR_PTR(long error)
81152+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
81153 {
81154 return (void *) error;
81155 }
81156
81157-static inline long __must_check PTR_ERR(__force const void *ptr)
81158+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
81159 {
81160 return (long) ptr;
81161 }
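ERR_PTR()/PTR_ERR() smuggle a negative errno through a pointer return value: addresses at or above (unsigned long)-MAX_ERRNO never correspond to valid kernel objects, so that range can carry -1..-MAX_ERRNO instead, and the added __intentional_overflow(-1) tells the size_overflow plugin these long/pointer casts wrap on purpose. A userspace sketch of the encoding, assuming the kernel's MAX_ERRNO of 4095:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
    return IS_ERR_VALUE((unsigned long)ptr);
}

/* A lookup that returns either a real pointer or an encoded errno */
static void *lookup(int ok)
{
    static int object = 42;
    return ok ? (void *)&object : ERR_PTR(-ENOENT);
}

int main(void)
{
    void *p = lookup(0);

    if (IS_ERR(p))
        printf("lookup failed: errno %ld\n", -PTR_ERR(p));
    else
        printf("got object: %d\n", *(int *)p);
    return 0;
}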
81162diff --git a/include/linux/extcon.h b/include/linux/extcon.h
81163index 36f49c4..a2a1f4c 100644
81164--- a/include/linux/extcon.h
81165+++ b/include/linux/extcon.h
81166@@ -135,7 +135,7 @@ struct extcon_dev {
81167 /* /sys/class/extcon/.../mutually_exclusive/... */
81168 struct attribute_group attr_g_muex;
81169 struct attribute **attrs_muex;
81170- struct device_attribute *d_attrs_muex;
81171+ device_attribute_no_const *d_attrs_muex;
81172 };
81173
81174 /**
81175diff --git a/include/linux/fb.h b/include/linux/fb.h
81176index 09bb7a1..d98870a 100644
81177--- a/include/linux/fb.h
81178+++ b/include/linux/fb.h
81179@@ -305,7 +305,7 @@ struct fb_ops {
81180 /* called at KDB enter and leave time to prepare the console */
81181 int (*fb_debug_enter)(struct fb_info *info);
81182 int (*fb_debug_leave)(struct fb_info *info);
81183-};
81184+} __do_const;
81185
81186 #ifdef CONFIG_FB_TILEBLITTING
81187 #define FB_TILE_CURSOR_NONE 0
81188diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
81189index 230f87b..1fd0485 100644
81190--- a/include/linux/fdtable.h
81191+++ b/include/linux/fdtable.h
81192@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
81193 void put_files_struct(struct files_struct *fs);
81194 void reset_files_struct(struct files_struct *);
81195 int unshare_files(struct files_struct **);
81196-struct files_struct *dup_fd(struct files_struct *, int *);
81197+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
81198 void do_close_on_exec(struct files_struct *);
81199 int iterate_fd(struct files_struct *, unsigned,
81200 int (*)(const void *, struct file *, unsigned),
81201diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
81202index 8293262..2b3b8bd 100644
81203--- a/include/linux/frontswap.h
81204+++ b/include/linux/frontswap.h
81205@@ -11,7 +11,7 @@ struct frontswap_ops {
81206 int (*load)(unsigned, pgoff_t, struct page *);
81207 void (*invalidate_page)(unsigned, pgoff_t);
81208 void (*invalidate_area)(unsigned);
81209-};
81210+} __no_const;
81211
81212 extern bool frontswap_enabled;
81213 extern struct frontswap_ops *
81214diff --git a/include/linux/fs.h b/include/linux/fs.h
81215index 42efe13..72d42ee 100644
81216--- a/include/linux/fs.h
81217+++ b/include/linux/fs.h
81218@@ -413,7 +413,7 @@ struct address_space {
81219 spinlock_t private_lock; /* for use by the address_space */
81220 struct list_head private_list; /* ditto */
81221 void *private_data; /* ditto */
81222-} __attribute__((aligned(sizeof(long))));
81223+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
81224 /*
81225 * On most architectures that alignment is already the case; but
81226 * must be enforced here for CRIS, to let the least significant bit
81227@@ -456,7 +456,7 @@ struct block_device {
81228 int bd_fsfreeze_count;
81229 /* Mutex for freeze */
81230 struct mutex bd_fsfreeze_mutex;
81231-};
81232+} __randomize_layout;
81233
81234 /*
81235 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
81236@@ -642,7 +642,7 @@ struct inode {
81237 #endif
81238
81239 void *i_private; /* fs or device private pointer */
81240-};
81241+} __randomize_layout;
81242
81243 static inline int inode_unhashed(struct inode *inode)
81244 {
81245@@ -837,7 +837,7 @@ struct file {
81246 struct list_head f_tfile_llink;
81247 #endif /* #ifdef CONFIG_EPOLL */
81248 struct address_space *f_mapping;
81249-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
81250+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
81251
81252 struct file_handle {
81253 __u32 handle_bytes;
81254@@ -962,7 +962,7 @@ struct file_lock {
81255 int state; /* state of grant or error if -ve */
81256 } afs;
81257 } fl_u;
81258-};
81259+} __randomize_layout;
81260
81261 /* The following constant reflects the upper bound of the file/locking space */
81262 #ifndef OFFSET_MAX
81263@@ -1305,7 +1305,7 @@ struct super_block {
81264 * Indicates how deep in a filesystem stack this SB is
81265 */
81266 int s_stack_depth;
81267-};
81268+} __randomize_layout;
81269
81270 extern struct timespec current_fs_time(struct super_block *sb);
81271
81272@@ -1536,7 +1536,8 @@ struct file_operations {
81273 long (*fallocate)(struct file *file, int mode, loff_t offset,
81274 loff_t len);
81275 void (*show_fdinfo)(struct seq_file *m, struct file *f);
81276-};
81277+} __do_const __randomize_layout;
81278+typedef struct file_operations __no_const file_operations_no_const;
81279
81280 struct inode_operations {
81281 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
81282@@ -2854,4 +2855,14 @@ static inline bool dir_relax(struct inode *inode)
81283 return !IS_DEADDIR(inode);
81284 }
81285
81286+static inline bool is_sidechannel_device(const struct inode *inode)
81287+{
81288+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
81289+ umode_t mode = inode->i_mode;
81290+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
81291+#else
81292+ return false;
81293+#endif
81294+}
81295+
81296 #endif /* _LINUX_FS_H */
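is_sidechannel_device() flags character and block device nodes that are world-readable or world-writable; GRKERNSEC_DEVICE_SIDECHANNEL uses it (see the fsnotify hunks below) to keep unprivileged watchers from observing other users' accesses to devices such as ttys. The same mode test in userspace via stat(2), as a hypothetical helper:

#include <stdio.h>
#include <sys/stat.h>

/* Mirror of the kernel predicate: a char or block device node that
 * "other" can read or write is a potential side channel. */
static int is_sidechannel_device(mode_t mode)
{
    return (S_ISCHR(mode) || S_ISBLK(mode)) &&
           (mode & (S_IROTH | S_IWOTH));
}

int main(int argc, char **argv)
{
    struct stat st;
    const char *path = argc > 1 ? argv[1] : "/dev/tty";

    if (stat(path, &st) != 0) {
        perror("stat");
        return 1;
    }
    printf("%s: %s\n", path,
           is_sidechannel_device(st.st_mode) ? "side-channel device"
                                             : "not flagged");
    return 0;
}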
81297diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
81298index 0efc3e6..fd23610 100644
81299--- a/include/linux/fs_struct.h
81300+++ b/include/linux/fs_struct.h
81301@@ -6,13 +6,13 @@
81302 #include <linux/seqlock.h>
81303
81304 struct fs_struct {
81305- int users;
81306+ atomic_t users;
81307 spinlock_t lock;
81308 seqcount_t seq;
81309 int umask;
81310 int in_exec;
81311 struct path root, pwd;
81312-};
81313+} __randomize_layout;
81314
81315 extern struct kmem_cache *fs_cachep;
81316
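Converting fs_struct.users from int to atomic_t closes a refcount race: two tasks bumping a plain int concurrently can lose an update, and a miscounted reference leads straight to use-after-free. A userspace sketch of the difference using C11 atomics (build with -pthread):

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static int        plain_users  = 0;   /* racy */
static atomic_int atomic_users = 0;   /* safe */

static void *grab(void *arg)
{
    (void)arg;
    for (int i = 0; i < 100000; i++) {
        plain_users++;                      /* load+add+store: a data race */
        atomic_fetch_add(&atomic_users, 1); /* single atomic RMW */
    }
    return NULL;
}

int main(void)
{
    pthread_t a, b;

    pthread_create(&a, NULL, grab, NULL);
    pthread_create(&b, NULL, grab, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);

    /* the plain count typically falls short of 200000; the atomic one
     * is always exact */
    printf("plain: %d  atomic: %d (expected 200000)\n",
           plain_users, atomic_load(&atomic_users));
    return 0;
}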
81317diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
81318index 7714849..a4a5c7a 100644
81319--- a/include/linux/fscache-cache.h
81320+++ b/include/linux/fscache-cache.h
81321@@ -113,7 +113,7 @@ struct fscache_operation {
81322 fscache_operation_release_t release;
81323 };
81324
81325-extern atomic_t fscache_op_debug_id;
81326+extern atomic_unchecked_t fscache_op_debug_id;
81327 extern void fscache_op_work_func(struct work_struct *work);
81328
81329 extern void fscache_enqueue_operation(struct fscache_operation *);
81330@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
81331 INIT_WORK(&op->work, fscache_op_work_func);
81332 atomic_set(&op->usage, 1);
81333 op->state = FSCACHE_OP_ST_INITIALISED;
81334- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
81335+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
81336 op->processor = processor;
81337 op->release = release;
81338 INIT_LIST_HEAD(&op->pend_link);
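atomic_unchecked_t is the PaX REFCOUNT escape hatch: under that feature, atomic_t increments trap on overflow to kill refcount-overflow exploits, so counters where wraparound is harmless, like fscache_op_debug_id here (it only labels operations for debugging), are switched to the unchecked type. A sketch of the distinction, modeling the checked increment with a GCC/Clang overflow builtin:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* Model of a PaX REFCOUNT-style checked increment: overflow is treated
 * as a fatal event instead of wrapping into a dangling-reference bug. */
static int checked_inc(int *v)
{
    int next;

    if (__builtin_add_overflow(*v, 1, &next)) { /* GCC/Clang builtin */
        fprintf(stderr, "refcount overflow detected, aborting\n");
        abort();
    }
    return *v = next;
}

/* Model of atomic_unchecked_t: a counter where wrap is harmless,
 * e.g. a debug id that only needs to be distinct-ish. */
static unsigned int unchecked_inc(unsigned int *v)
{
    return ++*v; /* unsigned wraparound is well-defined and acceptable */
}

int main(void)
{
    unsigned int debug_id = UINT_MAX; /* about to wrap: fine for an id */
    int refcount = INT_MAX;           /* about to wrap: a real bug */

    printf("next debug id: %u\n", unchecked_inc(&debug_id)); /* prints 0 */
    checked_inc(&refcount);                                  /* aborts */
    return 0;
}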
81339diff --git a/include/linux/fscache.h b/include/linux/fscache.h
81340index 115bb81..e7b812b 100644
81341--- a/include/linux/fscache.h
81342+++ b/include/linux/fscache.h
81343@@ -152,7 +152,7 @@ struct fscache_cookie_def {
81344 * - this is mandatory for any object that may have data
81345 */
81346 void (*now_uncached)(void *cookie_netfs_data);
81347-};
81348+} __do_const;
81349
81350 /*
81351 * fscache cached network filesystem type
81352diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
81353index 7ee1774..72505b8 100644
81354--- a/include/linux/fsnotify.h
81355+++ b/include/linux/fsnotify.h
81356@@ -197,6 +197,9 @@ static inline void fsnotify_access(struct file *file)
81357 struct inode *inode = file_inode(file);
81358 __u32 mask = FS_ACCESS;
81359
81360+ if (is_sidechannel_device(inode))
81361+ return;
81362+
81363 if (S_ISDIR(inode->i_mode))
81364 mask |= FS_ISDIR;
81365
81366@@ -215,6 +218,9 @@ static inline void fsnotify_modify(struct file *file)
81367 struct inode *inode = file_inode(file);
81368 __u32 mask = FS_MODIFY;
81369
81370+ if (is_sidechannel_device(inode))
81371+ return;
81372+
81373 if (S_ISDIR(inode->i_mode))
81374 mask |= FS_ISDIR;
81375
81376@@ -317,7 +323,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
81377 */
81378 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
81379 {
81380- return kstrdup(name, GFP_KERNEL);
81381+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
81382 }
81383
81384 /*
81385diff --git a/include/linux/genhd.h b/include/linux/genhd.h
81386index ec274e0..e678159 100644
81387--- a/include/linux/genhd.h
81388+++ b/include/linux/genhd.h
81389@@ -194,7 +194,7 @@ struct gendisk {
81390 struct kobject *slave_dir;
81391
81392 struct timer_rand_state *random;
81393- atomic_t sync_io; /* RAID */
81394+ atomic_unchecked_t sync_io; /* RAID */
81395 struct disk_events *ev;
81396 #ifdef CONFIG_BLK_DEV_INTEGRITY
81397 struct blk_integrity *integrity;
81398@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
81399 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
81400
81401 /* drivers/char/random.c */
81402-extern void add_disk_randomness(struct gendisk *disk);
81403+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
81404 extern void rand_initialize_disk(struct gendisk *disk);
81405
81406 static inline sector_t get_start_sect(struct block_device *bdev)
81407diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
81408index 667c311..abac2a7 100644
81409--- a/include/linux/genl_magic_func.h
81410+++ b/include/linux/genl_magic_func.h
81411@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
81412 },
81413
81414 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
81415-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
81416+static struct genl_ops ZZZ_genl_ops[] = {
81417 #include GENL_MAGIC_INCLUDE_FILE
81418 };
81419
81420diff --git a/include/linux/gfp.h b/include/linux/gfp.h
81421index b840e3b..aeaeef9 100644
81422--- a/include/linux/gfp.h
81423+++ b/include/linux/gfp.h
81424@@ -34,6 +34,13 @@ struct vm_area_struct;
81425 #define ___GFP_NO_KSWAPD 0x400000u
81426 #define ___GFP_OTHER_NODE 0x800000u
81427 #define ___GFP_WRITE 0x1000000u
81428+
81429+#ifdef CONFIG_PAX_USERCOPY_SLABS
81430+#define ___GFP_USERCOPY 0x2000000u
81431+#else
81432+#define ___GFP_USERCOPY 0
81433+#endif
81434+
81435 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
81436
81437 /*
81438@@ -90,6 +97,7 @@ struct vm_area_struct;
81439 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
81440 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
81441 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
81442+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
81443
81444 /*
81445 * This may seem redundant, but it's a way of annotating false positives vs.
81446@@ -97,7 +105,7 @@ struct vm_area_struct;
81447 */
81448 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
81449
81450-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
81451+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
81452 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
81453
81454 /* This equals 0, but use constants in case they ever change */
81455@@ -152,6 +160,8 @@ struct vm_area_struct;
81456 /* 4GB DMA on some platforms */
81457 #define GFP_DMA32 __GFP_DMA32
81458
81459+#define GFP_USERCOPY __GFP_USERCOPY
81460+
81461 /* Convert GFP flags to their corresponding migrate type */
81462 static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
81463 {
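Adding ___GFP_USERCOPY as bit 25 (0x2000000) is why __GFP_BITS_SHIFT moves from 25 to 26: __GFP_BITS_MASK is computed as (1 << shift) - 1, so the shift must count every allocated flag bit for the mask to cover the new flag. A quick arithmetic check:

#include <stdio.h>

#define ___GFP_USERCOPY 0x2000000u /* bit 25, the new flag */
#define OLD_SHIFT 25
#define NEW_SHIFT 26

int main(void)
{
    unsigned int old_mask = (1u << OLD_SHIFT) - 1; /* 0x1ffffff: bits 0..24 */
    unsigned int new_mask = (1u << NEW_SHIFT) - 1; /* 0x3ffffff: bits 0..25 */

    printf("old mask covers new flag? %s\n",
           (old_mask & ___GFP_USERCOPY) ? "yes" : "no"); /* no */
    printf("new mask covers new flag? %s\n",
           (new_mask & ___GFP_USERCOPY) ? "yes" : "no"); /* yes */
    return 0;
}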
81464diff --git a/include/linux/gracl.h b/include/linux/gracl.h
81465new file mode 100644
81466index 0000000..91858e4
81467--- /dev/null
81468+++ b/include/linux/gracl.h
81469@@ -0,0 +1,342 @@
81470+#ifndef GR_ACL_H
81471+#define GR_ACL_H
81472+
81473+#include <linux/grdefs.h>
81474+#include <linux/resource.h>
81475+#include <linux/capability.h>
81476+#include <linux/dcache.h>
81477+#include <asm/resource.h>
81478+
81479+/* Major status information */
81480+
81481+#define GR_VERSION "grsecurity 3.1"
81482+#define GRSECURITY_VERSION 0x3100
81483+
81484+enum {
81485+ GR_SHUTDOWN = 0,
81486+ GR_ENABLE = 1,
81487+ GR_SPROLE = 2,
81488+ GR_OLDRELOAD = 3,
81489+ GR_SEGVMOD = 4,
81490+ GR_STATUS = 5,
81491+ GR_UNSPROLE = 6,
81492+ GR_PASSSET = 7,
81493+ GR_SPROLEPAM = 8,
81494+ GR_RELOAD = 9,
81495+};
81496+
81497+/* Password setup definitions
81498+ * kernel/grhash.c */
81499+enum {
81500+ GR_PW_LEN = 128,
81501+ GR_SALT_LEN = 16,
81502+ GR_SHA_LEN = 32,
81503+};
81504+
81505+enum {
81506+ GR_SPROLE_LEN = 64,
81507+};
81508+
81509+enum {
81510+ GR_NO_GLOB = 0,
81511+ GR_REG_GLOB,
81512+ GR_CREATE_GLOB
81513+};
81514+
81515+#define GR_NLIMITS 32
81516+
81517+/* Begin Data Structures */
81518+
81519+struct sprole_pw {
81520+ unsigned char *rolename;
81521+ unsigned char salt[GR_SALT_LEN];
81522+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
81523+};
81524+
81525+struct name_entry {
81526+ __u32 key;
81527+ u64 inode;
81528+ dev_t device;
81529+ char *name;
81530+ __u16 len;
81531+ __u8 deleted;
81532+ struct name_entry *prev;
81533+ struct name_entry *next;
81534+};
81535+
81536+struct inodev_entry {
81537+ struct name_entry *nentry;
81538+ struct inodev_entry *prev;
81539+ struct inodev_entry *next;
81540+};
81541+
81542+struct acl_role_db {
81543+ struct acl_role_label **r_hash;
81544+ __u32 r_size;
81545+};
81546+
81547+struct inodev_db {
81548+ struct inodev_entry **i_hash;
81549+ __u32 i_size;
81550+};
81551+
81552+struct name_db {
81553+ struct name_entry **n_hash;
81554+ __u32 n_size;
81555+};
81556+
81557+struct crash_uid {
81558+ uid_t uid;
81559+ unsigned long expires;
81560+};
81561+
81562+struct gr_hash_struct {
81563+ void **table;
81564+ void **nametable;
81565+ void *first;
81566+ __u32 table_size;
81567+ __u32 used_size;
81568+ int type;
81569+};
81570+
81571+/* Userspace Grsecurity ACL data structures */
81572+
81573+struct acl_subject_label {
81574+ char *filename;
81575+ u64 inode;
81576+ dev_t device;
81577+ __u32 mode;
81578+ kernel_cap_t cap_mask;
81579+ kernel_cap_t cap_lower;
81580+ kernel_cap_t cap_invert_audit;
81581+
81582+ struct rlimit res[GR_NLIMITS];
81583+ __u32 resmask;
81584+
81585+ __u8 user_trans_type;
81586+ __u8 group_trans_type;
81587+ uid_t *user_transitions;
81588+ gid_t *group_transitions;
81589+ __u16 user_trans_num;
81590+ __u16 group_trans_num;
81591+
81592+ __u32 sock_families[2];
81593+ __u32 ip_proto[8];
81594+ __u32 ip_type;
81595+ struct acl_ip_label **ips;
81596+ __u32 ip_num;
81597+ __u32 inaddr_any_override;
81598+
81599+ __u32 crashes;
81600+ unsigned long expires;
81601+
81602+ struct acl_subject_label *parent_subject;
81603+ struct gr_hash_struct *hash;
81604+ struct acl_subject_label *prev;
81605+ struct acl_subject_label *next;
81606+
81607+ struct acl_object_label **obj_hash;
81608+ __u32 obj_hash_size;
81609+ __u16 pax_flags;
81610+};
81611+
81612+struct role_allowed_ip {
81613+ __u32 addr;
81614+ __u32 netmask;
81615+
81616+ struct role_allowed_ip *prev;
81617+ struct role_allowed_ip *next;
81618+};
81619+
81620+struct role_transition {
81621+ char *rolename;
81622+
81623+ struct role_transition *prev;
81624+ struct role_transition *next;
81625+};
81626+
81627+struct acl_role_label {
81628+ char *rolename;
81629+ uid_t uidgid;
81630+ __u16 roletype;
81631+
81632+ __u16 auth_attempts;
81633+ unsigned long expires;
81634+
81635+ struct acl_subject_label *root_label;
81636+ struct gr_hash_struct *hash;
81637+
81638+ struct acl_role_label *prev;
81639+ struct acl_role_label *next;
81640+
81641+ struct role_transition *transitions;
81642+ struct role_allowed_ip *allowed_ips;
81643+ uid_t *domain_children;
81644+ __u16 domain_child_num;
81645+
81646+ umode_t umask;
81647+
81648+ struct acl_subject_label **subj_hash;
81649+ __u32 subj_hash_size;
81650+};
81651+
81652+struct user_acl_role_db {
81653+ struct acl_role_label **r_table;
81654+ __u32 num_pointers; /* Number of allocations to track */
81655+ __u32 num_roles; /* Number of roles */
81656+ __u32 num_domain_children; /* Number of domain children */
81657+ __u32 num_subjects; /* Number of subjects */
81658+ __u32 num_objects; /* Number of objects */
81659+};
81660+
81661+struct acl_object_label {
81662+ char *filename;
81663+ u64 inode;
81664+ dev_t device;
81665+ __u32 mode;
81666+
81667+ struct acl_subject_label *nested;
81668+ struct acl_object_label *globbed;
81669+
81670+	/* next two pointers not used */
81671+
81672+ struct acl_object_label *prev;
81673+ struct acl_object_label *next;
81674+};
81675+
81676+struct acl_ip_label {
81677+ char *iface;
81678+ __u32 addr;
81679+ __u32 netmask;
81680+ __u16 low, high;
81681+ __u8 mode;
81682+ __u32 type;
81683+ __u32 proto[8];
81684+
81685+	/* next two pointers not used */
81686+
81687+ struct acl_ip_label *prev;
81688+ struct acl_ip_label *next;
81689+};
81690+
81691+struct gr_arg {
81692+ struct user_acl_role_db role_db;
81693+ unsigned char pw[GR_PW_LEN];
81694+ unsigned char salt[GR_SALT_LEN];
81695+ unsigned char sum[GR_SHA_LEN];
81696+ unsigned char sp_role[GR_SPROLE_LEN];
81697+ struct sprole_pw *sprole_pws;
81698+ dev_t segv_device;
81699+ u64 segv_inode;
81700+ uid_t segv_uid;
81701+ __u16 num_sprole_pws;
81702+ __u16 mode;
81703+};
81704+
81705+struct gr_arg_wrapper {
81706+ struct gr_arg *arg;
81707+ __u32 version;
81708+ __u32 size;
81709+};
81710+
81711+struct subject_map {
81712+ struct acl_subject_label *user;
81713+ struct acl_subject_label *kernel;
81714+ struct subject_map *prev;
81715+ struct subject_map *next;
81716+};
81717+
81718+struct acl_subj_map_db {
81719+ struct subject_map **s_hash;
81720+ __u32 s_size;
81721+};
81722+
81723+struct gr_policy_state {
81724+ struct sprole_pw **acl_special_roles;
81725+ __u16 num_sprole_pws;
81726+ struct acl_role_label *kernel_role;
81727+ struct acl_role_label *role_list;
81728+ struct acl_role_label *default_role;
81729+ struct acl_role_db acl_role_set;
81730+ struct acl_subj_map_db subj_map_set;
81731+ struct name_db name_set;
81732+ struct inodev_db inodev_set;
81733+};
81734+
81735+struct gr_alloc_state {
81736+ unsigned long alloc_stack_next;
81737+ unsigned long alloc_stack_size;
81738+ void **alloc_stack;
81739+};
81740+
81741+struct gr_reload_state {
81742+ struct gr_policy_state oldpolicy;
81743+ struct gr_alloc_state oldalloc;
81744+ struct gr_policy_state newpolicy;
81745+ struct gr_alloc_state newalloc;
81746+ struct gr_policy_state *oldpolicy_ptr;
81747+ struct gr_alloc_state *oldalloc_ptr;
81748+ unsigned char oldmode;
81749+};
81750+
81751+/* End Data Structures Section */
81752+
81753+/* Hash functions generated by empirical testing by Brad Spengler.
81754+   Makes good use of the low bits of the inode.  Generally 0-1 iterations
81755+   of the loop for a successful match, 0-3 for an unsuccessful match.
81756+   Shift/add algorithm with modulus of table size and an XOR. */
81757+
81758+static __inline__ unsigned int
81759+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
81760+{
81761+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
81762+}
81763+
81764+static __inline__ unsigned int
81765+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
81766+{
81767+ return ((const unsigned long)userp % sz);
81768+}
81769+
81770+static __inline__ unsigned int
81771+gr_fhash(const u64 ino, const dev_t dev, const unsigned int sz)
81772+{
81773+ unsigned int rem;
81774+ div_u64_rem((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9)), sz, &rem);
81775+ return rem;
81776+}
81777+
81778+static __inline__ unsigned int
81779+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
81780+{
81781+ return full_name_hash((const unsigned char *)name, len) % sz;
81782+}
81783+
81784+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
81785+ subj = NULL; \
81786+ iter = 0; \
81787+ while (iter < role->subj_hash_size) { \
81788+ if (subj == NULL) \
81789+ subj = role->subj_hash[iter]; \
81790+ if (subj == NULL) { \
81791+ iter++; \
81792+ continue; \
81793+ }
81794+
81795+#define FOR_EACH_SUBJECT_END(subj,iter) \
81796+ subj = subj->next; \
81797+ if (subj == NULL) \
81798+ iter++; \
81799+ }
81800+
81801+
81802+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
81803+ subj = role->hash->first; \
81804+ while (subj != NULL) {
81805+
81806+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
81807+ subj = subj->next; \
81808+ }
81809+
81810+#endif
81811+
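The hash functions above drive the RBAC lookup tables: each reduces its key to a bucket index modulo the table size, and the FOR_EACH_* macros then walk the chained entries in each slot. A standalone sketch evaluating gr_rhash() by hand, with a table size and type values chosen arbitrarily for the demo:

#include <stdio.h>

typedef unsigned int uid_t_demo;

/* Copy of gr_rhash() from gracl.h: shift/add, an XOR, then modulus of
 * the table size. */
static unsigned int gr_rhash(const uid_t_demo uid, const unsigned short type,
                             const unsigned int sz)
{
    return ((((uid + type) << (16 + type)) ^ uid) % sz);
}

int main(void)
{
    const unsigned int table_size = 256; /* arbitrary for the demo */

    /* Hashing the same uid with two different type values lands in
     * different buckets, so the keyed namespaces don't collide. */
    printf("uid 1000, type 1 -> bucket %u\n", gr_rhash(1000, 1, table_size));
    printf("uid 1000, type 2 -> bucket %u\n", gr_rhash(1000, 2, table_size));
    printf("uid 1001, type 1 -> bucket %u\n", gr_rhash(1001, 1, table_size));
    return 0;
}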
81812diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
81813new file mode 100644
81814index 0000000..af64092
81815--- /dev/null
81816+++ b/include/linux/gracl_compat.h
81817@@ -0,0 +1,156 @@
81818+#ifndef GR_ACL_COMPAT_H
81819+#define GR_ACL_COMPAT_H
81820+
81821+#include <linux/resource.h>
81822+#include <asm/resource.h>
81823+
81824+struct sprole_pw_compat {
81825+ compat_uptr_t rolename;
81826+ unsigned char salt[GR_SALT_LEN];
81827+ unsigned char sum[GR_SHA_LEN];
81828+};
81829+
81830+struct gr_hash_struct_compat {
81831+ compat_uptr_t table;
81832+ compat_uptr_t nametable;
81833+ compat_uptr_t first;
81834+ __u32 table_size;
81835+ __u32 used_size;
81836+ int type;
81837+};
81838+
81839+struct acl_subject_label_compat {
81840+ compat_uptr_t filename;
81841+ compat_u64 inode;
81842+ __u32 device;
81843+ __u32 mode;
81844+ kernel_cap_t cap_mask;
81845+ kernel_cap_t cap_lower;
81846+ kernel_cap_t cap_invert_audit;
81847+
81848+ struct compat_rlimit res[GR_NLIMITS];
81849+ __u32 resmask;
81850+
81851+ __u8 user_trans_type;
81852+ __u8 group_trans_type;
81853+ compat_uptr_t user_transitions;
81854+ compat_uptr_t group_transitions;
81855+ __u16 user_trans_num;
81856+ __u16 group_trans_num;
81857+
81858+ __u32 sock_families[2];
81859+ __u32 ip_proto[8];
81860+ __u32 ip_type;
81861+ compat_uptr_t ips;
81862+ __u32 ip_num;
81863+ __u32 inaddr_any_override;
81864+
81865+ __u32 crashes;
81866+ compat_ulong_t expires;
81867+
81868+ compat_uptr_t parent_subject;
81869+ compat_uptr_t hash;
81870+ compat_uptr_t prev;
81871+ compat_uptr_t next;
81872+
81873+ compat_uptr_t obj_hash;
81874+ __u32 obj_hash_size;
81875+ __u16 pax_flags;
81876+};
81877+
81878+struct role_allowed_ip_compat {
81879+ __u32 addr;
81880+ __u32 netmask;
81881+
81882+ compat_uptr_t prev;
81883+ compat_uptr_t next;
81884+};
81885+
81886+struct role_transition_compat {
81887+ compat_uptr_t rolename;
81888+
81889+ compat_uptr_t prev;
81890+ compat_uptr_t next;
81891+};
81892+
81893+struct acl_role_label_compat {
81894+ compat_uptr_t rolename;
81895+ uid_t uidgid;
81896+ __u16 roletype;
81897+
81898+ __u16 auth_attempts;
81899+ compat_ulong_t expires;
81900+
81901+ compat_uptr_t root_label;
81902+ compat_uptr_t hash;
81903+
81904+ compat_uptr_t prev;
81905+ compat_uptr_t next;
81906+
81907+ compat_uptr_t transitions;
81908+ compat_uptr_t allowed_ips;
81909+ compat_uptr_t domain_children;
81910+ __u16 domain_child_num;
81911+
81912+ umode_t umask;
81913+
81914+ compat_uptr_t subj_hash;
81915+ __u32 subj_hash_size;
81916+};
81917+
81918+struct user_acl_role_db_compat {
81919+ compat_uptr_t r_table;
81920+ __u32 num_pointers;
81921+ __u32 num_roles;
81922+ __u32 num_domain_children;
81923+ __u32 num_subjects;
81924+ __u32 num_objects;
81925+};
81926+
81927+struct acl_object_label_compat {
81928+ compat_uptr_t filename;
81929+ compat_u64 inode;
81930+ __u32 device;
81931+ __u32 mode;
81932+
81933+ compat_uptr_t nested;
81934+ compat_uptr_t globbed;
81935+
81936+ compat_uptr_t prev;
81937+ compat_uptr_t next;
81938+};
81939+
81940+struct acl_ip_label_compat {
81941+ compat_uptr_t iface;
81942+ __u32 addr;
81943+ __u32 netmask;
81944+ __u16 low, high;
81945+ __u8 mode;
81946+ __u32 type;
81947+ __u32 proto[8];
81948+
81949+ compat_uptr_t prev;
81950+ compat_uptr_t next;
81951+};
81952+
81953+struct gr_arg_compat {
81954+ struct user_acl_role_db_compat role_db;
81955+ unsigned char pw[GR_PW_LEN];
81956+ unsigned char salt[GR_SALT_LEN];
81957+ unsigned char sum[GR_SHA_LEN];
81958+ unsigned char sp_role[GR_SPROLE_LEN];
81959+ compat_uptr_t sprole_pws;
81960+ __u32 segv_device;
81961+ compat_u64 segv_inode;
81962+ uid_t segv_uid;
81963+ __u16 num_sprole_pws;
81964+ __u16 mode;
81965+};
81966+
81967+struct gr_arg_wrapper_compat {
81968+ compat_uptr_t arg;
81969+ __u32 version;
81970+ __u32 size;
81971+};
81972+
81973+#endif
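The *_compat mirrors exist because a 64-bit kernel accepting policy from a 32-bit userspace daemon must parse structures laid out with 32-bit pointers; compat_uptr_t is a 32-bit integer that carries a user pointer, so the compat layout matches what the 32-bit tool wrote. A sketch of the size mismatch that makes the mirrors necessary (illustrative types, not the kernel's):

#include <stdio.h>
#include <stdint.h>

/* Native 64-bit layout: pointers are 8 bytes */
struct wrapper {
    void    *arg;
    uint32_t version;
    uint32_t size;
};

/* What a 32-bit userspace wrote: pointers are 4-byte integers,
 * modeled after compat_uptr_t. */
struct wrapper_compat {
    uint32_t arg; /* compat_uptr_t */
    uint32_t version;
    uint32_t size;
};

int main(void)
{
    /* On x86_64 these differ (16 vs 12 bytes), so reading a compat
     * blob through the native struct would misplace every field
     * after the pointer. */
    printf("native: %zu bytes, compat: %zu bytes\n",
           sizeof(struct wrapper), sizeof(struct wrapper_compat));
    return 0;
}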
81974diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
81975new file mode 100644
81976index 0000000..323ecf2
81977--- /dev/null
81978+++ b/include/linux/gralloc.h
81979@@ -0,0 +1,9 @@
81980+#ifndef __GRALLOC_H
81981+#define __GRALLOC_H
81982+
81983+void acl_free_all(void);
81984+int acl_alloc_stack_init(unsigned long size);
81985+void *acl_alloc(unsigned long len);
81986+void *acl_alloc_num(unsigned long num, unsigned long len);
81987+
81988+#endif
81989diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
81990new file mode 100644
81991index 0000000..be66033
81992--- /dev/null
81993+++ b/include/linux/grdefs.h
81994@@ -0,0 +1,140 @@
81995+#ifndef GRDEFS_H
81996+#define GRDEFS_H
81997+
81998+/* Begin grsecurity status declarations */
81999+
82000+enum {
82001+ GR_READY = 0x01,
82002+ GR_STATUS_INIT = 0x00 // disabled state
82003+};
82004+
82005+/* Begin ACL declarations */
82006+
82007+/* Role flags */
82008+
82009+enum {
82010+ GR_ROLE_USER = 0x0001,
82011+ GR_ROLE_GROUP = 0x0002,
82012+ GR_ROLE_DEFAULT = 0x0004,
82013+ GR_ROLE_SPECIAL = 0x0008,
82014+ GR_ROLE_AUTH = 0x0010,
82015+ GR_ROLE_NOPW = 0x0020,
82016+ GR_ROLE_GOD = 0x0040,
82017+ GR_ROLE_LEARN = 0x0080,
82018+ GR_ROLE_TPE = 0x0100,
82019+ GR_ROLE_DOMAIN = 0x0200,
82020+ GR_ROLE_PAM = 0x0400,
82021+ GR_ROLE_PERSIST = 0x0800
82022+};
82023+
82024+/* ACL Subject and Object mode flags */
82025+enum {
82026+ GR_DELETED = 0x80000000
82027+};
82028+
82029+/* ACL Object-only mode flags */
82030+enum {
82031+ GR_READ = 0x00000001,
82032+ GR_APPEND = 0x00000002,
82033+ GR_WRITE = 0x00000004,
82034+ GR_EXEC = 0x00000008,
82035+ GR_FIND = 0x00000010,
82036+ GR_INHERIT = 0x00000020,
82037+ GR_SETID = 0x00000040,
82038+ GR_CREATE = 0x00000080,
82039+ GR_DELETE = 0x00000100,
82040+ GR_LINK = 0x00000200,
82041+ GR_AUDIT_READ = 0x00000400,
82042+ GR_AUDIT_APPEND = 0x00000800,
82043+ GR_AUDIT_WRITE = 0x00001000,
82044+ GR_AUDIT_EXEC = 0x00002000,
82045+ GR_AUDIT_FIND = 0x00004000,
82046+ GR_AUDIT_INHERIT= 0x00008000,
82047+ GR_AUDIT_SETID = 0x00010000,
82048+ GR_AUDIT_CREATE = 0x00020000,
82049+ GR_AUDIT_DELETE = 0x00040000,
82050+ GR_AUDIT_LINK = 0x00080000,
82051+ GR_PTRACERD = 0x00100000,
82052+ GR_NOPTRACE = 0x00200000,
82053+ GR_SUPPRESS = 0x00400000,
82054+ GR_NOLEARN = 0x00800000,
82055+ GR_INIT_TRANSFER= 0x01000000
82056+};
82057+
82058+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
82059+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
82060+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
82061+
82062+/* ACL subject-only mode flags */
82063+enum {
82064+ GR_KILL = 0x00000001,
82065+ GR_VIEW = 0x00000002,
82066+ GR_PROTECTED = 0x00000004,
82067+ GR_LEARN = 0x00000008,
82068+ GR_OVERRIDE = 0x00000010,
82069+ /* just a placeholder, this mode is only used in userspace */
82070+ GR_DUMMY = 0x00000020,
82071+ GR_PROTSHM = 0x00000040,
82072+ GR_KILLPROC = 0x00000080,
82073+ GR_KILLIPPROC = 0x00000100,
82074+ /* just a placeholder, this mode is only used in userspace */
82075+ GR_NOTROJAN = 0x00000200,
82076+ GR_PROTPROCFD = 0x00000400,
82077+ GR_PROCACCT = 0x00000800,
82078+ GR_RELAXPTRACE = 0x00001000,
82079+ //GR_NESTED = 0x00002000,
82080+ GR_INHERITLEARN = 0x00004000,
82081+ GR_PROCFIND = 0x00008000,
82082+ GR_POVERRIDE = 0x00010000,
82083+ GR_KERNELAUTH = 0x00020000,
82084+ GR_ATSECURE = 0x00040000,
82085+ GR_SHMEXEC = 0x00080000
82086+};
82087+
82088+enum {
82089+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
82090+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
82091+ GR_PAX_ENABLE_MPROTECT = 0x0004,
82092+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
82093+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
82094+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
82095+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
82096+ GR_PAX_DISABLE_MPROTECT = 0x0400,
82097+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
82098+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
82099+};
82100+
82101+enum {
82102+ GR_ID_USER = 0x01,
82103+ GR_ID_GROUP = 0x02,
82104+};
82105+
82106+enum {
82107+ GR_ID_ALLOW = 0x01,
82108+ GR_ID_DENY = 0x02,
82109+};
82110+
82111+#define GR_CRASH_RES 31
82112+#define GR_UIDTABLE_MAX 500
82113+
82114+/* begin resource learning section */
82115+enum {
82116+ GR_RLIM_CPU_BUMP = 60,
82117+ GR_RLIM_FSIZE_BUMP = 50000,
82118+ GR_RLIM_DATA_BUMP = 10000,
82119+ GR_RLIM_STACK_BUMP = 1000,
82120+ GR_RLIM_CORE_BUMP = 10000,
82121+ GR_RLIM_RSS_BUMP = 500000,
82122+ GR_RLIM_NPROC_BUMP = 1,
82123+ GR_RLIM_NOFILE_BUMP = 5,
82124+ GR_RLIM_MEMLOCK_BUMP = 50000,
82125+ GR_RLIM_AS_BUMP = 500000,
82126+ GR_RLIM_LOCKS_BUMP = 2,
82127+ GR_RLIM_SIGPENDING_BUMP = 5,
82128+ GR_RLIM_MSGQUEUE_BUMP = 10000,
82129+ GR_RLIM_NICE_BUMP = 1,
82130+ GR_RLIM_RTPRIO_BUMP = 1,
82131+ GR_RLIM_RTTIME_BUMP = 1000000
82132+};
82133+
82134+#endif
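These enums are bitmasks: a role or object carries several properties in one word, membership tests are single AND operations, and composite masks such as GR_AUDITS above are built by ORing the member bits. A minimal sketch of the pattern:

#include <stdio.h>

enum {
    GR_ROLE_USER    = 0x0001,
    GR_ROLE_SPECIAL = 0x0008,
    GR_ROLE_NOPW    = 0x0020,
};

int main(void)
{
    /* a special role that skips password auth */
    unsigned short roletype = GR_ROLE_SPECIAL | GR_ROLE_NOPW;

    if (roletype & GR_ROLE_SPECIAL)
        printf("special role\n");
    if (!(roletype & GR_ROLE_USER))
        printf("not bound to a specific user\n");
    return 0;
}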
82135diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
82136new file mode 100644
82137index 0000000..fb1de5d
82138--- /dev/null
82139+++ b/include/linux/grinternal.h
82140@@ -0,0 +1,230 @@
82141+#ifndef __GRINTERNAL_H
82142+#define __GRINTERNAL_H
82143+
82144+#ifdef CONFIG_GRKERNSEC
82145+
82146+#include <linux/fs.h>
82147+#include <linux/mnt_namespace.h>
82148+#include <linux/nsproxy.h>
82149+#include <linux/gracl.h>
82150+#include <linux/grdefs.h>
82151+#include <linux/grmsg.h>
82152+
82153+void gr_add_learn_entry(const char *fmt, ...)
82154+ __attribute__ ((format (printf, 1, 2)));
82155+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
82156+ const struct vfsmount *mnt);
82157+__u32 gr_check_create(const struct dentry *new_dentry,
82158+ const struct dentry *parent,
82159+ const struct vfsmount *mnt, const __u32 mode);
82160+int gr_check_protected_task(const struct task_struct *task);
82161+__u32 to_gr_audit(const __u32 reqmode);
82162+int gr_set_acls(const int type);
82163+int gr_acl_is_enabled(void);
82164+char gr_roletype_to_char(void);
82165+
82166+void gr_handle_alertkill(struct task_struct *task);
82167+char *gr_to_filename(const struct dentry *dentry,
82168+ const struct vfsmount *mnt);
82169+char *gr_to_filename1(const struct dentry *dentry,
82170+ const struct vfsmount *mnt);
82171+char *gr_to_filename2(const struct dentry *dentry,
82172+ const struct vfsmount *mnt);
82173+char *gr_to_filename3(const struct dentry *dentry,
82174+ const struct vfsmount *mnt);
82175+
82176+extern int grsec_enable_ptrace_readexec;
82177+extern int grsec_enable_harden_ptrace;
82178+extern int grsec_enable_link;
82179+extern int grsec_enable_fifo;
82180+extern int grsec_enable_execve;
82181+extern int grsec_enable_shm;
82182+extern int grsec_enable_execlog;
82183+extern int grsec_enable_signal;
82184+extern int grsec_enable_audit_ptrace;
82185+extern int grsec_enable_forkfail;
82186+extern int grsec_enable_time;
82187+extern int grsec_enable_rofs;
82188+extern int grsec_deny_new_usb;
82189+extern int grsec_enable_chroot_shmat;
82190+extern int grsec_enable_chroot_mount;
82191+extern int grsec_enable_chroot_double;
82192+extern int grsec_enable_chroot_pivot;
82193+extern int grsec_enable_chroot_chdir;
82194+extern int grsec_enable_chroot_chmod;
82195+extern int grsec_enable_chroot_mknod;
82196+extern int grsec_enable_chroot_fchdir;
82197+extern int grsec_enable_chroot_nice;
82198+extern int grsec_enable_chroot_execlog;
82199+extern int grsec_enable_chroot_caps;
82200+extern int grsec_enable_chroot_rename;
82201+extern int grsec_enable_chroot_sysctl;
82202+extern int grsec_enable_chroot_unix;
82203+extern int grsec_enable_symlinkown;
82204+extern kgid_t grsec_symlinkown_gid;
82205+extern int grsec_enable_tpe;
82206+extern kgid_t grsec_tpe_gid;
82207+extern int grsec_enable_tpe_all;
82208+extern int grsec_enable_tpe_invert;
82209+extern int grsec_enable_socket_all;
82210+extern kgid_t grsec_socket_all_gid;
82211+extern int grsec_enable_socket_client;
82212+extern kgid_t grsec_socket_client_gid;
82213+extern int grsec_enable_socket_server;
82214+extern kgid_t grsec_socket_server_gid;
82215+extern kgid_t grsec_audit_gid;
82216+extern int grsec_enable_group;
82217+extern int grsec_enable_log_rwxmaps;
82218+extern int grsec_enable_mount;
82219+extern int grsec_enable_chdir;
82220+extern int grsec_resource_logging;
82221+extern int grsec_enable_blackhole;
82222+extern int grsec_lastack_retries;
82223+extern int grsec_enable_brute;
82224+extern int grsec_enable_harden_ipc;
82225+extern int grsec_lock;
82226+
82227+extern spinlock_t grsec_alert_lock;
82228+extern unsigned long grsec_alert_wtime;
82229+extern unsigned long grsec_alert_fyet;
82230+
82231+extern spinlock_t grsec_audit_lock;
82232+
82233+extern rwlock_t grsec_exec_file_lock;
82234+
82235+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
82236+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
82237+ (tsk)->exec_file->f_path.mnt) : "/")
82238+
82239+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
82240+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
82241+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
82242+
82243+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
82244+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
82245+ (tsk)->exec_file->f_path.mnt) : "/")
82246+
82247+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
82248+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
82249+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
82250+
82251+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
82252+
82253+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
82254+
82255+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
82256+{
82257+ if (file1 && file2) {
82258+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
82259+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
82260+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
82261+ return true;
82262+ }
82263+
82264+ return false;
82265+}
82266+
82267+#define GR_CHROOT_CAPS {{ \
82268+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
82269+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
82270+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
82271+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
82272+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
82273+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
82274+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
82275+
82276+#define security_learn(normal_msg,args...) \
82277+({ \
82278+ read_lock(&grsec_exec_file_lock); \
82279+ gr_add_learn_entry(normal_msg "\n", ## args); \
82280+ read_unlock(&grsec_exec_file_lock); \
82281+})
82282+
82283+enum {
82284+ GR_DO_AUDIT,
82285+ GR_DONT_AUDIT,
82286+ /* used for non-audit messages that we shouldn't kill the task on */
82287+ GR_DONT_AUDIT_GOOD
82288+};
82289+
82290+enum {
82291+ GR_TTYSNIFF,
82292+ GR_RBAC,
82293+ GR_RBAC_STR,
82294+ GR_STR_RBAC,
82295+ GR_RBAC_MODE2,
82296+ GR_RBAC_MODE3,
82297+ GR_FILENAME,
82298+ GR_SYSCTL_HIDDEN,
82299+ GR_NOARGS,
82300+ GR_ONE_INT,
82301+ GR_ONE_INT_TWO_STR,
82302+ GR_ONE_STR,
82303+ GR_STR_INT,
82304+ GR_TWO_STR_INT,
82305+ GR_TWO_INT,
82306+ GR_TWO_U64,
82307+ GR_THREE_INT,
82308+ GR_FIVE_INT_TWO_STR,
82309+ GR_TWO_STR,
82310+ GR_THREE_STR,
82311+ GR_FOUR_STR,
82312+ GR_STR_FILENAME,
82313+ GR_FILENAME_STR,
82314+ GR_FILENAME_TWO_INT,
82315+ GR_FILENAME_TWO_INT_STR,
82316+ GR_TEXTREL,
82317+ GR_PTRACE,
82318+ GR_RESOURCE,
82319+ GR_CAP,
82320+ GR_SIG,
82321+ GR_SIG2,
82322+ GR_CRASH1,
82323+ GR_CRASH2,
82324+ GR_PSACCT,
82325+ GR_RWXMAP,
82326+ GR_RWXMAPVMA
82327+};
82328+
82329+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
82330+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
82331+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
82332+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
82333+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
82334+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
82335+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
82336+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
82337+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
82338+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
82339+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
82340+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
82341+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
82342+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
82343+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
82344+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
82345+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
82346+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
82347+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
82348+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
82349+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
82350+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
82351+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
82352+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
82353+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
82354+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
82355+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
82356+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
82357+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
82358+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
82359+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
82360+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
82361+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
82362+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
82363+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
82364+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
82365+
82366+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
82367+
82368+#endif
82369+
82370+#endif
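gr_is_same_file() treats two struct files as the same object when both the inode number and the superblock's device match, the standard identity test for files reachable through different paths (hard links, bind mounts). The same check in userspace via stat(2):

#include <stdio.h>
#include <sys/stat.h>

/* Userspace analogue of gr_is_same_file(): same inode on the same
 * device means the same underlying file. */
static int is_same_file(const char *a, const char *b)
{
    struct stat sa, sb;

    if (stat(a, &sa) != 0 || stat(b, &sb) != 0)
        return 0;
    return sa.st_ino == sb.st_ino && sa.st_dev == sb.st_dev;
}

int main(void)
{
    /* /dev/stdin usually resolves into /proc/self/fd, so both names
     * typically name the same open file. */
    printf("same file: %s\n",
           is_same_file("/dev/stdin", "/proc/self/fd/0") ? "yes" : "no");
    return 0;
}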
82371diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
82372new file mode 100644
82373index 0000000..26ef560
82374--- /dev/null
82375+++ b/include/linux/grmsg.h
82376@@ -0,0 +1,118 @@
82377+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
82378+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
82379+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
82380+#define GR_STOPMOD_MSG "denied modification of module state by "
82381+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
82382+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
82383+#define GR_IOPERM_MSG "denied use of ioperm() by "
82384+#define GR_IOPL_MSG "denied use of iopl() by "
82385+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
82386+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
82387+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
82388+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
82389+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
82390+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
82391+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
82392+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
82393+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
82394+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
82395+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
82396+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
82397+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
82398+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
82399+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
82400+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
82401+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
82402+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
82403+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
82404+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
82405+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
82406+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
82407+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
82408+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
82409+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
82410+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
82411+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
82412+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
82413+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
82414+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
82415+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
82416+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
82417+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
82418+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
82419+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
82420+#define GR_CHROOT_RENAME_MSG "denied bad rename of %.950s out of a chroot by "
82421+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
82422+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
82423+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
82424+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
82425+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
82426+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
82427+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
82428+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
82429+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
82430+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
82431+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
82432+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
82433+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
82434+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
82435+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
82436+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
82437+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
82438+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
82439+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
82440+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
82441+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
82442+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
82443+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
82444+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
82445+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
82446+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
82447+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
82448+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
82449+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
82450+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
82451+#define GR_FAILFORK_MSG "failed fork with errno %s by "
82452+#define GR_NICE_CHROOT_MSG "denied priority change by "
82453+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
82454+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
82455+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
82456+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
82457+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
82458+#define GR_TIME_MSG "time set by "
82459+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
82460+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
82461+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
82462+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
82463+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
82464+#define GR_BIND_MSG "denied bind() by "
82465+#define GR_CONNECT_MSG "denied connect() by "
82466+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
82467+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
82468+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
82469+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
82470+#define GR_CAP_ACL_MSG "use of %s denied for "
82471+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
82472+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
82473+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
82474+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
82475+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
82476+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
82477+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
82478+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
82479+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
82480+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
82481+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
82482+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
82483+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
82484+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
82485+#define GR_VM86_MSG "denied use of vm86 by "
82486+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
82487+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
82488+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
82489+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
82490+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
82491+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
82492+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
82493+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
82494+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
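Note the convention in the message table above: nearly every GR_*_MSG ends with "by " (or embeds DEFAULTSECMSG) so the logging layer can finish the sentence with the acting task's identity. A minimal sketch of how such a format string could be consumed — example_gr_log, the buffer size, and the hard-coded task fields are illustrative assumptions, not this patch's real logger:

/* Illustrative sketch only -- not the patch's actual logging code.  The
 * trailing "by " in the message is completed with the caller's identity. */
#include <linux/printk.h>

static void example_gr_log(const char *msg_fmt, const char *path, int uid, int gid)
{
	char buf[1024];
	int len;

	/* msg_fmt matching GR_SYMLINK_MSG's conversions:
	 * "not following symlink %.950s owned by %d.%d by " */
	len = snprintf(buf, sizeof(buf), msg_fmt, path, uid, gid);
	if (len > 0 && (size_t)len < sizeof(buf))
		snprintf(buf + len, sizeof(buf) - len, "%s[%.16s:%d]",
			 "/usr/bin/example", "example", 1234);
	printk(KERN_ALERT "grsec: %s\n", buf);
}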
82495diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
82496new file mode 100644
82497index 0000000..63c1850
82498--- /dev/null
82499+++ b/include/linux/grsecurity.h
82500@@ -0,0 +1,250 @@
82501+#ifndef GR_SECURITY_H
82502+#define GR_SECURITY_H
82503+#include <linux/fs.h>
82504+#include <linux/fs_struct.h>
82505+#include <linux/binfmts.h>
82506+#include <linux/gracl.h>
82507+
82508+/* notify of brain-dead configs */
82509+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82510+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
82511+#endif
82512+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82513+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
82514+#endif
82515+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
82516+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
82517+#endif
82518+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
82519+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
82520+#endif
82521+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
82522+#error "CONFIG_PAX enabled, but no PaX options are enabled."
82523+#endif
82524+
82525+int gr_handle_new_usb(void);
82526+
82527+void gr_handle_brute_attach(int dumpable);
82528+void gr_handle_brute_check(void);
82529+void gr_handle_kernel_exploit(void);
82530+
82531+char gr_roletype_to_char(void);
82532+
82533+int gr_proc_is_restricted(void);
82534+
82535+int gr_acl_enable_at_secure(void);
82536+
82537+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
82538+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
82539+
82540+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
82541+
82542+void gr_del_task_from_ip_table(struct task_struct *p);
82543+
82544+int gr_pid_is_chrooted(struct task_struct *p);
82545+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
82546+int gr_handle_chroot_nice(void);
82547+int gr_handle_chroot_sysctl(const int op);
82548+int gr_handle_chroot_setpriority(struct task_struct *p,
82549+ const int niceval);
82550+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
82551+int gr_chroot_fhandle(void);
82552+int gr_handle_chroot_chroot(const struct dentry *dentry,
82553+ const struct vfsmount *mnt);
82554+void gr_handle_chroot_chdir(const struct path *path);
82555+int gr_handle_chroot_chmod(const struct dentry *dentry,
82556+ const struct vfsmount *mnt, const int mode);
82557+int gr_handle_chroot_mknod(const struct dentry *dentry,
82558+ const struct vfsmount *mnt, const int mode);
82559+int gr_handle_chroot_mount(const struct dentry *dentry,
82560+ const struct vfsmount *mnt,
82561+ const char *dev_name);
82562+int gr_handle_chroot_pivot(void);
82563+int gr_handle_chroot_unix(const pid_t pid);
82564+
82565+int gr_handle_rawio(const struct inode *inode);
82566+
82567+void gr_handle_ioperm(void);
82568+void gr_handle_iopl(void);
82569+void gr_handle_msr_write(void);
82570+
82571+umode_t gr_acl_umask(void);
82572+
82573+int gr_tpe_allow(const struct file *file);
82574+
82575+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
82576+void gr_clear_chroot_entries(struct task_struct *task);
82577+
82578+void gr_log_forkfail(const int retval);
82579+void gr_log_timechange(void);
82580+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
82581+void gr_log_chdir(const struct dentry *dentry,
82582+ const struct vfsmount *mnt);
82583+void gr_log_chroot_exec(const struct dentry *dentry,
82584+ const struct vfsmount *mnt);
82585+void gr_log_remount(const char *devname, const int retval);
82586+void gr_log_unmount(const char *devname, const int retval);
82587+void gr_log_mount(const char *from, struct path *to, const int retval);
82588+void gr_log_textrel(struct vm_area_struct *vma);
82589+void gr_log_ptgnustack(struct file *file);
82590+void gr_log_rwxmmap(struct file *file);
82591+void gr_log_rwxmprotect(struct vm_area_struct *vma);
82592+
82593+int gr_handle_follow_link(const struct inode *parent,
82594+ const struct inode *inode,
82595+ const struct dentry *dentry,
82596+ const struct vfsmount *mnt);
82597+int gr_handle_fifo(const struct dentry *dentry,
82598+ const struct vfsmount *mnt,
82599+ const struct dentry *dir, const int flag,
82600+ const int acc_mode);
82601+int gr_handle_hardlink(const struct dentry *dentry,
82602+ const struct vfsmount *mnt,
82603+ struct inode *inode,
82604+ const int mode, const struct filename *to);
82605+
82606+int gr_is_capable(const int cap);
82607+int gr_is_capable_nolog(const int cap);
82608+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
82609+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
82610+
82611+void gr_copy_label(struct task_struct *tsk);
82612+void gr_handle_crash(struct task_struct *task, const int sig);
82613+int gr_handle_signal(const struct task_struct *p, const int sig);
82614+int gr_check_crash_uid(const kuid_t uid);
82615+int gr_check_protected_task(const struct task_struct *task);
82616+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
82617+int gr_acl_handle_mmap(const struct file *file,
82618+ const unsigned long prot);
82619+int gr_acl_handle_mprotect(const struct file *file,
82620+ const unsigned long prot);
82621+int gr_check_hidden_task(const struct task_struct *tsk);
82622+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
82623+ const struct vfsmount *mnt);
82624+__u32 gr_acl_handle_utime(const struct dentry *dentry,
82625+ const struct vfsmount *mnt);
82626+__u32 gr_acl_handle_access(const struct dentry *dentry,
82627+ const struct vfsmount *mnt, const int fmode);
82628+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
82629+ const struct vfsmount *mnt, umode_t *mode);
82630+__u32 gr_acl_handle_chown(const struct dentry *dentry,
82631+ const struct vfsmount *mnt);
82632+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
82633+ const struct vfsmount *mnt);
82634+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
82635+ const struct vfsmount *mnt);
82636+int gr_handle_ptrace(struct task_struct *task, const long request);
82637+int gr_handle_proc_ptrace(struct task_struct *task);
82638+__u32 gr_acl_handle_execve(const struct dentry *dentry,
82639+ const struct vfsmount *mnt);
82640+int gr_check_crash_exec(const struct file *filp);
82641+int gr_acl_is_enabled(void);
82642+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
82643+ const kgid_t gid);
82644+int gr_set_proc_label(const struct dentry *dentry,
82645+ const struct vfsmount *mnt,
82646+ const int unsafe_flags);
82647+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
82648+ const struct vfsmount *mnt);
82649+__u32 gr_acl_handle_open(const struct dentry *dentry,
82650+ const struct vfsmount *mnt, int acc_mode);
82651+__u32 gr_acl_handle_creat(const struct dentry *dentry,
82652+ const struct dentry *p_dentry,
82653+ const struct vfsmount *p_mnt,
82654+ int open_flags, int acc_mode, const int imode);
82655+void gr_handle_create(const struct dentry *dentry,
82656+ const struct vfsmount *mnt);
82657+void gr_handle_proc_create(const struct dentry *dentry,
82658+ const struct inode *inode);
82659+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
82660+ const struct dentry *parent_dentry,
82661+ const struct vfsmount *parent_mnt,
82662+ const int mode);
82663+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
82664+ const struct dentry *parent_dentry,
82665+ const struct vfsmount *parent_mnt);
82666+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
82667+ const struct vfsmount *mnt);
82668+void gr_handle_delete(const u64 ino, const dev_t dev);
82669+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
82670+ const struct vfsmount *mnt);
82671+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
82672+ const struct dentry *parent_dentry,
82673+ const struct vfsmount *parent_mnt,
82674+ const struct filename *from);
82675+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
82676+ const struct dentry *parent_dentry,
82677+ const struct vfsmount *parent_mnt,
82678+ const struct dentry *old_dentry,
82679+ const struct vfsmount *old_mnt, const struct filename *to);
82680+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
82681+int gr_acl_handle_rename(struct dentry *new_dentry,
82682+ struct dentry *parent_dentry,
82683+ const struct vfsmount *parent_mnt,
82684+ struct dentry *old_dentry,
82685+ struct inode *old_parent_inode,
82686+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
82687+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
82688+ struct dentry *old_dentry,
82689+ struct dentry *new_dentry,
82690+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
82691+__u32 gr_check_link(const struct dentry *new_dentry,
82692+ const struct dentry *parent_dentry,
82693+ const struct vfsmount *parent_mnt,
82694+ const struct dentry *old_dentry,
82695+ const struct vfsmount *old_mnt);
82696+int gr_acl_handle_filldir(const struct file *file, const char *name,
82697+ const unsigned int namelen, const u64 ino);
82698+
82699+__u32 gr_acl_handle_unix(const struct dentry *dentry,
82700+ const struct vfsmount *mnt);
82701+void gr_acl_handle_exit(void);
82702+void gr_acl_handle_psacct(struct task_struct *task, const long code);
82703+int gr_acl_handle_procpidmem(const struct task_struct *task);
82704+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
82705+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
82706+void gr_audit_ptrace(struct task_struct *task);
82707+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
82708+u64 gr_get_ino_from_dentry(struct dentry *dentry);
82709+void gr_put_exec_file(struct task_struct *task);
82710+
82711+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
82712+
82713+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
82714+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
82715+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
82716+ struct dentry *newdentry, struct vfsmount *newmnt);
82717+
82718+#ifdef CONFIG_GRKERNSEC_RESLOG
82719+extern void gr_log_resource(const struct task_struct *task, const int res,
82720+ const unsigned long wanted, const int gt);
82721+#else
82722+static inline void gr_log_resource(const struct task_struct *task, const int res,
82723+ const unsigned long wanted, const int gt)
82724+{
82725+}
82726+#endif
82727+
82728+#ifdef CONFIG_GRKERNSEC
82729+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
82730+void gr_handle_vm86(void);
82731+void gr_handle_mem_readwrite(u64 from, u64 to);
82732+
82733+void gr_log_badprocpid(const char *entry);
82734+
82735+extern int grsec_enable_dmesg;
82736+extern int grsec_disable_privio;
82737+
82738+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
82739+extern kgid_t grsec_proc_gid;
82740+#endif
82741+
82742+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
82743+extern int grsec_enable_chroot_findtask;
82744+#endif
82745+#ifdef CONFIG_GRKERNSEC_SETXID
82746+extern int grsec_enable_setxid;
82747+#endif
82748+#endif
82749+
82750+#endif
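The gr_log_resource declaration just above uses the standard kernel idiom for compile-time optional hooks: when the feature is configured in, callers see an extern function; when it is not, an empty static inline takes its place, so call sites need no #ifdef and the compiler discards the call entirely. The pattern, reduced to its essentials (feature_hook and CONFIG_MY_FEATURE are made-up names):

struct task_struct;	/* only a pointer is needed here */

#ifdef CONFIG_MY_FEATURE
extern void feature_hook(struct task_struct *task, int res);
#else
static inline void feature_hook(struct task_struct *task, int res)
{
}
#endif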
82751diff --git a/include/linux/grsock.h b/include/linux/grsock.h
82752new file mode 100644
82753index 0000000..e7ffaaf
82754--- /dev/null
82755+++ b/include/linux/grsock.h
82756@@ -0,0 +1,19 @@
82757+#ifndef __GRSOCK_H
82758+#define __GRSOCK_H
82759+
82760+extern void gr_attach_curr_ip(const struct sock *sk);
82761+extern int gr_handle_sock_all(const int family, const int type,
82762+ const int protocol);
82763+extern int gr_handle_sock_server(const struct sockaddr *sck);
82764+extern int gr_handle_sock_server_other(const struct sock *sck);
82765+extern int gr_handle_sock_client(const struct sockaddr *sck);
82766+extern int gr_search_connect(struct socket * sock,
82767+ struct sockaddr_in * addr);
82768+extern int gr_search_bind(struct socket * sock,
82769+ struct sockaddr_in * addr);
82770+extern int gr_search_listen(struct socket * sock);
82771+extern int gr_search_accept(struct socket * sock);
82772+extern int gr_search_socket(const int domain, const int type,
82773+ const int protocol);
82774+
82775+#endif
82776diff --git a/include/linux/highmem.h b/include/linux/highmem.h
82777index 9286a46..373f27f 100644
82778--- a/include/linux/highmem.h
82779+++ b/include/linux/highmem.h
82780@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
82781 kunmap_atomic(kaddr);
82782 }
82783
82784+static inline void sanitize_highpage(struct page *page)
82785+{
82786+ void *kaddr;
82787+ unsigned long flags;
82788+
82789+ local_irq_save(flags);
82790+ kaddr = kmap_atomic(page);
82791+ clear_page(kaddr);
82792+ kunmap_atomic(kaddr);
82793+ local_irq_restore(flags);
82794+}
82795+
82796 static inline void zero_user_segments(struct page *page,
82797 unsigned start1, unsigned end1,
82798 unsigned start2, unsigned end2)
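sanitize_highpage() above is clear_highpage() with the atomic kmap wrapped in local_irq_save()/restore(); it exists to support clearing page contents on the free path (the PAX_MEMORY_SANITIZE idea), where interleaving with interrupt-context work on the same CPU must be avoided. A hedged sketch of the kind of free-path helper that would call it — the function name and hook point below are assumptions, not code from this hunk:

/* Sketch, assuming a PAX_MEMORY_SANITIZE-style option: zero pages as they
 * are freed so stale data cannot leak into the next allocation. */
static void sanitize_freed_pages(struct page *page, unsigned int order)
{
	unsigned int i;

	for (i = 0; i < (1U << order); i++)
		sanitize_highpage(page + i);
}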
82799diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
82800index 1c7b89a..7dda400 100644
82801--- a/include/linux/hwmon-sysfs.h
82802+++ b/include/linux/hwmon-sysfs.h
82803@@ -25,7 +25,8 @@
82804 struct sensor_device_attribute{
82805 struct device_attribute dev_attr;
82806 int index;
82807-};
82808+} __do_const;
82809+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
82810 #define to_sensor_dev_attr(_dev_attr) \
82811 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
82812
82813@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
82814 struct device_attribute dev_attr;
82815 u8 index;
82816 u8 nr;
82817-};
82818+} __do_const;
82819+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
82820 #define to_sensor_dev_attr_2(_dev_attr) \
82821 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
82822
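__do_const and the *_no_const typedef appear as a pair throughout this patch: __do_const is an attribute consumed by the constify GCC plugin (expanding to nothing without it) that moves instances of the struct into read-only memory, while the __no_const typedef gives the few drivers that must fill such a struct in at runtime an explicitly writable variant. A sketch of how the pair is then used — the driver names are invented:

static sensor_device_attribute_no_const my_temp_attr;	/* built at probe time */

static int my_probe(void)
{
	/* legal: the no_const typedef opts this instance out of constification */
	my_temp_attr.index = 3;
	return 0;
}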
82823diff --git a/include/linux/i2c.h b/include/linux/i2c.h
82824index 7c76959..153e597 100644
82825--- a/include/linux/i2c.h
82826+++ b/include/linux/i2c.h
82827@@ -413,6 +413,7 @@ struct i2c_algorithm {
82828 int (*unreg_slave)(struct i2c_client *client);
82829 #endif
82830 };
82831+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
82832
82833 /**
82834 * struct i2c_bus_recovery_info - I2C bus recovery information
82835diff --git a/include/linux/i2o.h b/include/linux/i2o.h
82836index d23c3c2..eb63c81 100644
82837--- a/include/linux/i2o.h
82838+++ b/include/linux/i2o.h
82839@@ -565,7 +565,7 @@ struct i2o_controller {
82840 struct i2o_device *exec; /* Executive */
82841 #if BITS_PER_LONG == 64
82842 spinlock_t context_list_lock; /* lock for context_list */
82843- atomic_t context_list_counter; /* needed for unique contexts */
82844+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
82845 struct list_head context_list; /* list of context id's
82846 and pointers */
82847 #endif
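context_list_counter only hands out unique context ids, so wraparound is harmless; switching it to atomic_unchecked_t exempts it from the overflow detection that PAX_REFCOUNT adds to ordinary atomic_t operations. Roughly the shape of the type under that scheme — a sketch, since the real definitions live in the per-arch atomic headers of this patch:

typedef struct {
	int counter;
} atomic_unchecked_t;

/* Same behaviour as atomic_inc_return(), minus the PAX_REFCOUNT overflow
 * check; acceptable for counters where wraparound is not a security issue.
 * (Illustrative builtin -- the real implementation is per-arch assembly.) */
static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
	return __sync_add_and_fetch(&v->counter, 1);
}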
82848diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
82849index aff7ad8..3942bbd 100644
82850--- a/include/linux/if_pppox.h
82851+++ b/include/linux/if_pppox.h
82852@@ -76,7 +76,7 @@ struct pppox_proto {
82853 int (*ioctl)(struct socket *sock, unsigned int cmd,
82854 unsigned long arg);
82855 struct module *owner;
82856-};
82857+} __do_const;
82858
82859 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
82860 extern void unregister_pppox_proto(int proto_num);
82861diff --git a/include/linux/init.h b/include/linux/init.h
82862index 2df8e8d..3e1280d 100644
82863--- a/include/linux/init.h
82864+++ b/include/linux/init.h
82865@@ -37,9 +37,17 @@
82866 * section.
82867 */
82868
82869+#define add_init_latent_entropy __latent_entropy
82870+
82871+#ifdef CONFIG_MEMORY_HOTPLUG
82872+#define add_meminit_latent_entropy
82873+#else
82874+#define add_meminit_latent_entropy __latent_entropy
82875+#endif
82876+
82877 /* These are for everybody (although not all archs will actually
82878 discard it in modules) */
82879-#define __init __section(.init.text) __cold notrace
82880+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
82881 #define __initdata __section(.init.data)
82882 #define __initconst __constsection(.init.rodata)
82883 #define __exitdata __section(.exit.data)
82884@@ -100,7 +108,7 @@
82885 #define __cpuexitconst
82886
82887 /* Used for MEMORY_HOTPLUG */
82888-#define __meminit __section(.meminit.text) __cold notrace
82889+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
82890 #define __meminitdata __section(.meminit.data)
82891 #define __meminitconst __constsection(.meminit.rodata)
82892 #define __memexit __section(.memexit.text) __exitused __cold notrace
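__init (and __meminit, when memory hotplug is off and .meminit text really is boot-only) now also carries __latent_entropy, the attribute of the latent_entropy GCC plugin, which instruments each marked function to mix compile-time-random values and local state into a global entropy word during boot; with CONFIG_MEMORY_HOTPLUG the .meminit code can run long after boot, so it is left uninstrumented. The attribute plausibly resolves to something like the following — an assumption based on the plugin convention, not shown in this hunk:

#ifdef LATENT_ENTROPY_PLUGIN
#define __latent_entropy __attribute__((latent_entropy))
#else
#define __latent_entropy
#endif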
82893diff --git a/include/linux/init_task.h b/include/linux/init_task.h
82894index 3037fc0..c6527ce 100644
82895--- a/include/linux/init_task.h
82896+++ b/include/linux/init_task.h
82897@@ -158,6 +158,12 @@ extern struct task_group root_task_group;
82898
82899 #define INIT_TASK_COMM "swapper"
82900
82901+#ifdef CONFIG_X86
82902+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
82903+#else
82904+#define INIT_TASK_THREAD_INFO
82905+#endif
82906+
82907 #ifdef CONFIG_RT_MUTEXES
82908 # define INIT_RT_MUTEXES(tsk) \
82909 .pi_waiters = RB_ROOT, \
82910@@ -214,6 +220,7 @@ extern struct task_group root_task_group;
82911 RCU_POINTER_INITIALIZER(cred, &init_cred), \
82912 .comm = INIT_TASK_COMM, \
82913 .thread = INIT_THREAD, \
82914+ INIT_TASK_THREAD_INFO \
82915 .fs = &init_fs, \
82916 .files = &init_files, \
82917 .signal = &init_signals, \
82918diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
82919index d9b05b5..e5f5b7b 100644
82920--- a/include/linux/interrupt.h
82921+++ b/include/linux/interrupt.h
82922@@ -413,8 +413,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
82923
82924 struct softirq_action
82925 {
82926- void (*action)(struct softirq_action *);
82927-};
82928+ void (*action)(void);
82929+} __no_const;
82930
82931 asmlinkage void do_softirq(void);
82932 asmlinkage void __do_softirq(void);
82933@@ -428,7 +428,7 @@ static inline void do_softirq_own_stack(void)
82934 }
82935 #endif
82936
82937-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
82938+extern void open_softirq(int nr, void (*action)(void));
82939 extern void softirq_init(void);
82940 extern void __raise_softirq_irqoff(unsigned int nr);
82941
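Since no softirq action ever uses its struct softirq_action * argument, the hunk above drops it from the function pointer and from open_softirq(), letting handlers take no parameters at all. A handler registered under the patched signature would look like this — example_softirq_action is invented, and TASKLET_SOFTIRQ is used purely as an illustrative slot number:

static void example_softirq_action(void)	/* patched signature: no unused arg */
{
	/* run deferred per-cpu work */
}

static int __init example_init(void)
{
	open_softirq(TASKLET_SOFTIRQ, example_softirq_action);
	return 0;
}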
82942diff --git a/include/linux/iommu.h b/include/linux/iommu.h
82943index 38daa45..4de4317 100644
82944--- a/include/linux/iommu.h
82945+++ b/include/linux/iommu.h
82946@@ -147,7 +147,7 @@ struct iommu_ops {
82947
82948 unsigned long pgsize_bitmap;
82949 void *priv;
82950-};
82951+} __do_const;
82952
82953 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
82954 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
82955diff --git a/include/linux/ioport.h b/include/linux/ioport.h
82956index 2c525022..345b106 100644
82957--- a/include/linux/ioport.h
82958+++ b/include/linux/ioport.h
82959@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
82960 int adjust_resource(struct resource *res, resource_size_t start,
82961 resource_size_t size);
82962 resource_size_t resource_alignment(struct resource *res);
82963-static inline resource_size_t resource_size(const struct resource *res)
82964+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
82965 {
82966 return res->end - res->start + 1;
82967 }
82968diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
82969index 1eee6bc..9cf4912 100644
82970--- a/include/linux/ipc_namespace.h
82971+++ b/include/linux/ipc_namespace.h
82972@@ -60,7 +60,7 @@ struct ipc_namespace {
82973 struct user_namespace *user_ns;
82974
82975 struct ns_common ns;
82976-};
82977+} __randomize_layout;
82978
82979 extern struct ipc_namespace init_ipc_ns;
82980 extern atomic_t nr_ipc_ns;
82981diff --git a/include/linux/irq.h b/include/linux/irq.h
82982index d09ec7a..f373eb5 100644
82983--- a/include/linux/irq.h
82984+++ b/include/linux/irq.h
82985@@ -364,7 +364,8 @@ struct irq_chip {
82986 void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
82987
82988 unsigned long flags;
82989-};
82990+} __do_const;
82991+typedef struct irq_chip __no_const irq_chip_no_const;
82992
82993 /*
82994 * irq_chip specific flags
82995diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
82996index 71d706d..817cdec 100644
82997--- a/include/linux/irqchip/arm-gic.h
82998+++ b/include/linux/irqchip/arm-gic.h
82999@@ -95,7 +95,7 @@
83000
83001 struct device_node;
83002
83003-extern struct irq_chip gic_arch_extn;
83004+extern irq_chip_no_const gic_arch_extn;
83005
83006 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
83007 u32 offset, struct device_node *);
83008diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
83009index faf433a..7dcb186 100644
83010--- a/include/linux/irqdesc.h
83011+++ b/include/linux/irqdesc.h
83012@@ -61,7 +61,7 @@ struct irq_desc {
83013 unsigned int irq_count; /* For detecting broken IRQs */
83014 unsigned long last_unhandled; /* Aging timer for unhandled count */
83015 unsigned int irqs_unhandled;
83016- atomic_t threads_handled;
83017+ atomic_unchecked_t threads_handled;
83018 int threads_handled_last;
83019 raw_spinlock_t lock;
83020 struct cpumask *percpu_enabled;
83021diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
83022index c367cbd..c9b79e6 100644
83023--- a/include/linux/jiffies.h
83024+++ b/include/linux/jiffies.h
83025@@ -280,20 +280,20 @@ extern unsigned long preset_lpj;
83026 /*
83027 * Convert various time units to each other:
83028 */
83029-extern unsigned int jiffies_to_msecs(const unsigned long j);
83030-extern unsigned int jiffies_to_usecs(const unsigned long j);
83031+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
83032+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
83033
83034-static inline u64 jiffies_to_nsecs(const unsigned long j)
83035+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
83036 {
83037 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
83038 }
83039
83040-extern unsigned long msecs_to_jiffies(const unsigned int m);
83041-extern unsigned long usecs_to_jiffies(const unsigned int u);
83042+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
83043+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
83044 extern unsigned long timespec_to_jiffies(const struct timespec *value);
83045 extern void jiffies_to_timespec(const unsigned long jiffies,
83046- struct timespec *value);
83047-extern unsigned long timeval_to_jiffies(const struct timeval *value);
83048+ struct timespec *value) __intentional_overflow(-1);
83049+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
83050 extern void jiffies_to_timeval(const unsigned long jiffies,
83051 struct timeval *value);
83052
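__intentional_overflow(-1), sprinkled over the jiffies conversions above, is an annotation for the size_overflow GCC plugin; the -1 marks the return value and every argument as deliberately allowed to wrap, so the plugin does not instrument these conversions as overflow errors. It presumably expands along these lines — an assumption about the companion compiler header, not something shown in this hunk:

#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif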
83053diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
83054index 6883e19..e854fcb 100644
83055--- a/include/linux/kallsyms.h
83056+++ b/include/linux/kallsyms.h
83057@@ -15,7 +15,8 @@
83058
83059 struct module;
83060
83061-#ifdef CONFIG_KALLSYMS
83062+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
83063+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
83064 /* Lookup the address for a symbol. Returns 0 if not found. */
83065 unsigned long kallsyms_lookup_name(const char *name);
83066
83067@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
83068 /* Stupid that this does nothing, but I didn't create this mess. */
83069 #define __print_symbol(fmt, addr)
83070 #endif /*CONFIG_KALLSYMS*/
83071+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
83072+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
83073+extern unsigned long kallsyms_lookup_name(const char *name);
83074+extern void __print_symbol(const char *fmt, unsigned long address);
83075+extern int sprint_backtrace(char *buffer, unsigned long address);
83076+extern int sprint_symbol(char *buffer, unsigned long address);
83077+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
83078+const char *kallsyms_lookup(unsigned long addr,
83079+ unsigned long *symbolsize,
83080+ unsigned long *offset,
83081+ char **modname, char *namebuf);
83082+extern int kallsyms_lookup_size_offset(unsigned long addr,
83083+ unsigned long *symbolsize,
83084+ unsigned long *offset);
83085+#endif
83086
83087 /* This macro allows us to keep printk typechecking */
83088 static __printf(1, 2)
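The doubled #if above implements GRKERNSEC_HIDESYM's symbol hiding: for ordinary includers, enabling HIDESYM makes the kallsyms lookup helpers compile down to the same no-op stubs used when CONFIG_KALLSYMS is off, so kernel symbol addresses cannot leak through casual users; the few core files named in the comment (kallsyms.c, vsnprintf.c, kprobes.c, dumpstack.c) opt back in to the real prototypes. Presumably they do so like this — a sketch inferred from the comment, not shown in this hunk:

/* e.g. near the top of arch/x86/kernel/dumpstack.c in this patch: */
#define __INCLUDED_BY_HIDESYM 1
#include <linux/kallsyms.h>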
83089diff --git a/include/linux/kernel.h b/include/linux/kernel.h
83090index 64ce58b..6bcdbfa 100644
83091--- a/include/linux/kernel.h
83092+++ b/include/linux/kernel.h
83093@@ -378,7 +378,7 @@ static inline int __must_check kstrtos32_from_user(const char __user *s, size_t
83094 /* Obsolete, do not use. Use kstrto<foo> instead */
83095
83096 extern unsigned long simple_strtoul(const char *,char **,unsigned int);
83097-extern long simple_strtol(const char *,char **,unsigned int);
83098+extern long simple_strtol(const char *,char **,unsigned int) __intentional_overflow(-1);
83099 extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
83100 extern long long simple_strtoll(const char *,char **,unsigned int);
83101
83102diff --git a/include/linux/key-type.h b/include/linux/key-type.h
83103index ff9f1d3..6712be5 100644
83104--- a/include/linux/key-type.h
83105+++ b/include/linux/key-type.h
83106@@ -152,7 +152,7 @@ struct key_type {
83107 /* internal fields */
83108 struct list_head link; /* link in types list */
83109 struct lock_class_key lock_class; /* key->sem lock class */
83110-};
83111+} __do_const;
83112
83113 extern struct key_type key_type_keyring;
83114
83115diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
83116index e465bb1..19f605fd 100644
83117--- a/include/linux/kgdb.h
83118+++ b/include/linux/kgdb.h
83119@@ -52,7 +52,7 @@ extern int kgdb_connected;
83120 extern int kgdb_io_module_registered;
83121
83122 extern atomic_t kgdb_setting_breakpoint;
83123-extern atomic_t kgdb_cpu_doing_single_step;
83124+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
83125
83126 extern struct task_struct *kgdb_usethread;
83127 extern struct task_struct *kgdb_contthread;
83128@@ -254,7 +254,7 @@ struct kgdb_arch {
83129 void (*correct_hw_break)(void);
83130
83131 void (*enable_nmi)(bool on);
83132-};
83133+} __do_const;
83134
83135 /**
83136 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
83137@@ -279,7 +279,7 @@ struct kgdb_io {
83138 void (*pre_exception) (void);
83139 void (*post_exception) (void);
83140 int is_console;
83141-};
83142+} __do_const;
83143
83144 extern struct kgdb_arch arch_kgdb_ops;
83145
83146diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
83147index e705467..a92471d 100644
83148--- a/include/linux/kmemleak.h
83149+++ b/include/linux/kmemleak.h
83150@@ -27,7 +27,7 @@
83151
83152 extern void kmemleak_init(void) __ref;
83153 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
83154- gfp_t gfp) __ref;
83155+ gfp_t gfp) __ref __size_overflow(2);
83156 extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
83157 extern void kmemleak_free(const void *ptr) __ref;
83158 extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
83159@@ -62,7 +62,7 @@ static inline void kmemleak_erase(void **ptr)
83160 static inline void kmemleak_init(void)
83161 {
83162 }
83163-static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
83164+static inline void __size_overflow(2) kmemleak_alloc(const void *ptr, size_t size, int min_count,
83165 gfp_t gfp)
83166 {
83167 }
83168diff --git a/include/linux/kmod.h b/include/linux/kmod.h
83169index 0555cc6..40116ce 100644
83170--- a/include/linux/kmod.h
83171+++ b/include/linux/kmod.h
83172@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
83173 * usually useless though. */
83174 extern __printf(2, 3)
83175 int __request_module(bool wait, const char *name, ...);
83176+extern __printf(3, 4)
83177+int ___request_module(bool wait, char *param_name, const char *name, ...);
83178 #define request_module(mod...) __request_module(true, mod)
83179 #define request_module_nowait(mod...) __request_module(false, mod)
83180 #define try_then_request_module(x, mod...) \
83181@@ -57,6 +59,9 @@ struct subprocess_info {
83182 struct work_struct work;
83183 struct completion *complete;
83184 char *path;
83185+#ifdef CONFIG_GRKERNSEC
83186+ char *origpath;
83187+#endif
83188 char **argv;
83189 char **envp;
83190 int wait;
83191diff --git a/include/linux/kobject.h b/include/linux/kobject.h
83192index 2d61b90..a1d0a13 100644
83193--- a/include/linux/kobject.h
83194+++ b/include/linux/kobject.h
83195@@ -118,7 +118,7 @@ struct kobj_type {
83196 struct attribute **default_attrs;
83197 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
83198 const void *(*namespace)(struct kobject *kobj);
83199-};
83200+} __do_const;
83201
83202 struct kobj_uevent_env {
83203 char *argv[3];
83204@@ -142,6 +142,7 @@ struct kobj_attribute {
83205 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
83206 const char *buf, size_t count);
83207 };
83208+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
83209
83210 extern const struct sysfs_ops kobj_sysfs_ops;
83211
83212@@ -169,7 +170,7 @@ struct kset {
83213 spinlock_t list_lock;
83214 struct kobject kobj;
83215 const struct kset_uevent_ops *uevent_ops;
83216-};
83217+} __randomize_layout;
83218
83219 extern void kset_init(struct kset *kset);
83220 extern int __must_check kset_register(struct kset *kset);
83221diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
83222index df32d25..fb52e27 100644
83223--- a/include/linux/kobject_ns.h
83224+++ b/include/linux/kobject_ns.h
83225@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
83226 const void *(*netlink_ns)(struct sock *sk);
83227 const void *(*initial_ns)(void);
83228 void (*drop_ns)(void *);
83229-};
83230+} __do_const;
83231
83232 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
83233 int kobj_ns_type_registered(enum kobj_ns_type type);
83234diff --git a/include/linux/kref.h b/include/linux/kref.h
83235index 484604d..0f6c5b6 100644
83236--- a/include/linux/kref.h
83237+++ b/include/linux/kref.h
83238@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
83239 static inline int kref_sub(struct kref *kref, unsigned int count,
83240 void (*release)(struct kref *kref))
83241 {
83242- WARN_ON(release == NULL);
83243+ BUG_ON(release == NULL);
83244
83245 if (atomic_sub_and_test((int) count, &kref->refcount)) {
83246 release(kref);
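Promoting the NULL-release check in kref_sub() from WARN_ON to BUG_ON makes the failure fatal: with only a warning, a kref that hits zero without a release callback silently leaks the object (or leaves it to be freed elsewhere while still reachable), exactly the class of refcount misuse this patch prefers to stop hard. For reference, correct kref use always supplies a release function:

#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref ref;
};

static void foo_release(struct kref *kref)
{
	kfree(container_of(kref, struct foo, ref));
}

static void foo_drop(struct foo *f)
{
	kref_put(&f->ref, foo_release);	/* release must never be NULL */
}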
83247diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
83248index 26f1060..bafc04a 100644
83249--- a/include/linux/kvm_host.h
83250+++ b/include/linux/kvm_host.h
83251@@ -470,7 +470,7 @@ static inline void kvm_irqfd_exit(void)
83252 {
83253 }
83254 #endif
83255-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
83256+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
83257 struct module *module);
83258 void kvm_exit(void);
83259
83260@@ -639,7 +639,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
83261 struct kvm_guest_debug *dbg);
83262 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
83263
83264-int kvm_arch_init(void *opaque);
83265+int kvm_arch_init(const void *opaque);
83266 void kvm_arch_exit(void);
83267
83268 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
83269diff --git a/include/linux/libata.h b/include/linux/libata.h
83270index 91f705d..24be831 100644
83271--- a/include/linux/libata.h
83272+++ b/include/linux/libata.h
83273@@ -979,7 +979,7 @@ struct ata_port_operations {
83274 * fields must be pointers.
83275 */
83276 const struct ata_port_operations *inherits;
83277-};
83278+} __do_const;
83279
83280 struct ata_port_info {
83281 unsigned long flags;
83282diff --git a/include/linux/linkage.h b/include/linux/linkage.h
83283index a6a42dd..6c5ebce 100644
83284--- a/include/linux/linkage.h
83285+++ b/include/linux/linkage.h
83286@@ -36,6 +36,7 @@
83287 #endif
83288
83289 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
83290+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
83291 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
83292
83293 /*
83294diff --git a/include/linux/list.h b/include/linux/list.h
83295index feb773c..98f3075 100644
83296--- a/include/linux/list.h
83297+++ b/include/linux/list.h
83298@@ -113,6 +113,19 @@ extern void __list_del_entry(struct list_head *entry);
83299 extern void list_del(struct list_head *entry);
83300 #endif
83301
83302+extern void __pax_list_add(struct list_head *new,
83303+ struct list_head *prev,
83304+ struct list_head *next);
83305+static inline void pax_list_add(struct list_head *new, struct list_head *head)
83306+{
83307+ __pax_list_add(new, head, head->next);
83308+}
83309+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
83310+{
83311+ __pax_list_add(new, head->prev, head);
83312+}
83313+extern void pax_list_del(struct list_head *entry);
83314+
83315 /**
83316 * list_replace - replace old entry by new one
83317 * @old : the element to be replaced
83318@@ -146,6 +159,8 @@ static inline void list_del_init(struct list_head *entry)
83319 INIT_LIST_HEAD(entry);
83320 }
83321
83322+extern void pax_list_del_init(struct list_head *entry);
83323+
83324 /**
83325 * list_move - delete from one list and add as another's head
83326 * @list: the entry to move
83327diff --git a/include/linux/lockref.h b/include/linux/lockref.h
83328index 4bfde0e..d6e2e09 100644
83329--- a/include/linux/lockref.h
83330+++ b/include/linux/lockref.h
83331@@ -47,4 +47,36 @@ static inline int __lockref_is_dead(const struct lockref *l)
83332 return ((int)l->count < 0);
83333 }
83334
83335+static inline unsigned int __lockref_read(struct lockref *lockref)
83336+{
83337+ return lockref->count;
83338+}
83339+
83340+static inline void __lockref_set(struct lockref *lockref, unsigned int count)
83341+{
83342+ lockref->count = count;
83343+}
83344+
83345+static inline void __lockref_inc(struct lockref *lockref)
83346+{
83347+
83348+#ifdef CONFIG_PAX_REFCOUNT
83349+ atomic_inc((atomic_t *)&lockref->count);
83350+#else
83351+ lockref->count++;
83352+#endif
83353+
83354+}
83355+
83356+static inline void __lockref_dec(struct lockref *lockref)
83357+{
83358+
83359+#ifdef CONFIG_PAX_REFCOUNT
83360+ atomic_dec((atomic_t *)&lockref->count);
83361+#else
83362+ lockref->count--;
83363+#endif
83364+
83365+}
83366+
83367 #endif /* __LINUX_LOCKREF_H */
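The new __lockref_read/set/inc/dec helpers funnel every direct touch of lockref->count through one place so that, under PAX_REFCOUNT, the increment and decrement are redirected to the overflow-checked atomic_t operations via the cast. Callers elsewhere in the patch then use the wrappers instead of bare ++/--; for example (a dentry-style user, sketched):

/* instead of dentry->d_lockref.count++ under d_lock: */
__lockref_inc(&dentry->d_lockref);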
83368diff --git a/include/linux/math64.h b/include/linux/math64.h
83369index c45c089..298841c 100644
83370--- a/include/linux/math64.h
83371+++ b/include/linux/math64.h
83372@@ -15,7 +15,7 @@
83373 * This is commonly provided by 32bit archs to provide an optimized 64bit
83374 * divide.
83375 */
83376-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83377+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83378 {
83379 *remainder = dividend % divisor;
83380 return dividend / divisor;
83381@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
83382 /**
83383 * div64_u64 - unsigned 64bit divide with 64bit divisor
83384 */
83385-static inline u64 div64_u64(u64 dividend, u64 divisor)
83386+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
83387 {
83388 return dividend / divisor;
83389 }
83390@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
83391 #define div64_ul(x, y) div_u64((x), (y))
83392
83393 #ifndef div_u64_rem
83394-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83395+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83396 {
83397 *remainder = do_div(dividend, divisor);
83398 return dividend;
83399@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
83400 #endif
83401
83402 #ifndef div64_u64
83403-extern u64 div64_u64(u64 dividend, u64 divisor);
83404+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
83405 #endif
83406
83407 #ifndef div64_s64
83408@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
83409 * divide.
83410 */
83411 #ifndef div_u64
83412-static inline u64 div_u64(u64 dividend, u32 divisor)
83413+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
83414 {
83415 u32 remainder;
83416 return div_u64_rem(dividend, divisor, &remainder);
83417diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
83418index 3d385c8..deacb6a 100644
83419--- a/include/linux/mempolicy.h
83420+++ b/include/linux/mempolicy.h
83421@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
83422 }
83423
83424 #define vma_policy(vma) ((vma)->vm_policy)
83425+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83426+{
83427+ vma->vm_policy = pol;
83428+}
83429
83430 static inline void mpol_get(struct mempolicy *pol)
83431 {
83432@@ -229,6 +233,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
83433 }
83434
83435 #define vma_policy(vma) NULL
83436+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83437+{
83438+}
83439
83440 static inline int
83441 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
83442diff --git a/include/linux/mm.h b/include/linux/mm.h
83443index dd5ea30..cf81cd1 100644
83444--- a/include/linux/mm.h
83445+++ b/include/linux/mm.h
83446@@ -135,6 +135,11 @@ extern unsigned int kobjsize(const void *objp);
83447
83448 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
83449 #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
83450+
83451+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
83452+#define VM_PAGEEXEC 0x00080000 /* vma->vm_page_prot needs special handling */
83453+#endif
83454+
83455 #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
83456 #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
83457 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
83458@@ -256,8 +261,8 @@ struct vm_operations_struct {
83459 /* called by access_process_vm when get_user_pages() fails, typically
83460 * for use by special VMAs that can switch between memory and hardware
83461 */
83462- int (*access)(struct vm_area_struct *vma, unsigned long addr,
83463- void *buf, int len, int write);
83464+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
83465+ void *buf, size_t len, int write);
83466
83467 /* Called by the /proc/PID/maps code to ask the vma whether it
83468 * has a special name. Returning non-NULL will also cause this
83469@@ -291,6 +296,7 @@ struct vm_operations_struct {
83470 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
83471 unsigned long size, pgoff_t pgoff);
83472 };
83473+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
83474
83475 struct mmu_gather;
83476 struct inode;
83477@@ -1183,8 +1189,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
83478 unsigned long *pfn);
83479 int follow_phys(struct vm_area_struct *vma, unsigned long address,
83480 unsigned int flags, unsigned long *prot, resource_size_t *phys);
83481-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83482- void *buf, int len, int write);
83483+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83484+ void *buf, size_t len, int write);
83485
83486 static inline void unmap_shared_mapping_range(struct address_space *mapping,
83487 loff_t const holebegin, loff_t const holelen)
83488@@ -1224,9 +1230,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
83489 }
83490 #endif
83491
83492-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
83493-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
83494- void *buf, int len, int write);
83495+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
83496+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
83497+ void *buf, size_t len, int write);
83498
83499 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
83500 unsigned long start, unsigned long nr_pages,
83501@@ -1258,34 +1264,6 @@ int set_page_dirty_lock(struct page *page);
83502 int clear_page_dirty_for_io(struct page *page);
83503 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
83504
83505-/* Is the vma a continuation of the stack vma above it? */
83506-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
83507-{
83508- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
83509-}
83510-
83511-static inline int stack_guard_page_start(struct vm_area_struct *vma,
83512- unsigned long addr)
83513-{
83514- return (vma->vm_flags & VM_GROWSDOWN) &&
83515- (vma->vm_start == addr) &&
83516- !vma_growsdown(vma->vm_prev, addr);
83517-}
83518-
83519-/* Is the vma a continuation of the stack vma below it? */
83520-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
83521-{
83522- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
83523-}
83524-
83525-static inline int stack_guard_page_end(struct vm_area_struct *vma,
83526- unsigned long addr)
83527-{
83528- return (vma->vm_flags & VM_GROWSUP) &&
83529- (vma->vm_end == addr) &&
83530- !vma_growsup(vma->vm_next, addr);
83531-}
83532-
83533 extern struct task_struct *task_of_stack(struct task_struct *task,
83534 struct vm_area_struct *vma, bool in_group);
83535
83536@@ -1403,8 +1381,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
83537 {
83538 return 0;
83539 }
83540+
83541+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
83542+ unsigned long address)
83543+{
83544+ return 0;
83545+}
83546 #else
83547 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83548+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83549 #endif
83550
83551 #ifdef __PAGETABLE_PMD_FOLDED
83552@@ -1413,8 +1398,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
83553 {
83554 return 0;
83555 }
83556+
83557+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
83558+ unsigned long address)
83559+{
83560+ return 0;
83561+}
83562 #else
83563 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
83564+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
83565 #endif
83566
83567 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
83568@@ -1432,11 +1424,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
83569 NULL: pud_offset(pgd, address);
83570 }
83571
83572+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
83573+{
83574+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
83575+ NULL: pud_offset(pgd, address);
83576+}
83577+
83578 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
83579 {
83580 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
83581 NULL: pmd_offset(pud, address);
83582 }
83583+
83584+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
83585+{
83586+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
83587+ NULL: pmd_offset(pud, address);
83588+}
83589 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
83590
83591 #if USE_SPLIT_PTE_PTLOCKS
83592@@ -1819,12 +1823,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
83593 bool *need_rmap_locks);
83594 extern void exit_mmap(struct mm_struct *);
83595
83596+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
83597+extern void gr_learn_resource(const struct task_struct *task, const int res,
83598+ const unsigned long wanted, const int gt);
83599+#else
83600+static inline void gr_learn_resource(const struct task_struct *task, const int res,
83601+ const unsigned long wanted, const int gt)
83602+{
83603+}
83604+#endif
83605+
83606 static inline int check_data_rlimit(unsigned long rlim,
83607 unsigned long new,
83608 unsigned long start,
83609 unsigned long end_data,
83610 unsigned long start_data)
83611 {
83612+ gr_learn_resource(current, RLIMIT_DATA, (new - start) + (end_data - start_data), 1);
83613 if (rlim < RLIM_INFINITY) {
83614 if (((new - start) + (end_data - start_data)) > rlim)
83615 return -ENOSPC;
83616@@ -1849,7 +1864,7 @@ extern int install_special_mapping(struct mm_struct *mm,
83617 unsigned long addr, unsigned long len,
83618 unsigned long flags, struct page **pages);
83619
83620-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
83621+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
83622
83623 extern unsigned long mmap_region(struct file *file, unsigned long addr,
83624 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
83625@@ -1857,6 +1872,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
83626 unsigned long len, unsigned long prot, unsigned long flags,
83627 unsigned long pgoff, unsigned long *populate);
83628 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
83629+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
83630
83631 #ifdef CONFIG_MMU
83632 extern int __mm_populate(unsigned long addr, unsigned long len,
83633@@ -1885,10 +1901,11 @@ struct vm_unmapped_area_info {
83634 unsigned long high_limit;
83635 unsigned long align_mask;
83636 unsigned long align_offset;
83637+ unsigned long threadstack_offset;
83638 };
83639
83640-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
83641-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83642+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
83643+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
83644
83645 /*
83646 * Search for an unmapped address range.
83647@@ -1900,7 +1917,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83648 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
83649 */
83650 static inline unsigned long
83651-vm_unmapped_area(struct vm_unmapped_area_info *info)
83652+vm_unmapped_area(const struct vm_unmapped_area_info *info)
83653 {
83654 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
83655 return unmapped_area(info);
83656@@ -1962,6 +1979,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
83657 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
83658 struct vm_area_struct **pprev);
83659
83660+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
83661+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
83662+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
83663+
83664 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
83665 NULL if none. Assume start_addr < end_addr. */
83666 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
83667@@ -1991,10 +2012,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
83668 }
83669
83670 #ifdef CONFIG_MMU
83671-pgprot_t vm_get_page_prot(unsigned long vm_flags);
83672+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
83673 void vma_set_page_prot(struct vm_area_struct *vma);
83674 #else
83675-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
83676+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
83677 {
83678 return __pgprot(0);
83679 }
83680@@ -2056,6 +2077,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
83681 static inline void vm_stat_account(struct mm_struct *mm,
83682 unsigned long flags, struct file *file, long pages)
83683 {
83684+
83685+#ifdef CONFIG_PAX_RANDMMAP
83686+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
83687+#endif
83688+
83689 mm->total_vm += pages;
83690 }
83691 #endif /* CONFIG_PROC_FS */
83692@@ -2159,7 +2185,7 @@ extern int unpoison_memory(unsigned long pfn);
83693 extern int sysctl_memory_failure_early_kill;
83694 extern int sysctl_memory_failure_recovery;
83695 extern void shake_page(struct page *p, int access);
83696-extern atomic_long_t num_poisoned_pages;
83697+extern atomic_long_unchecked_t num_poisoned_pages;
83698 extern int soft_offline_page(struct page *page, int flags);
83699
83700 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
83701@@ -2210,5 +2236,11 @@ void __init setup_nr_node_ids(void);
83702 static inline void setup_nr_node_ids(void) {}
83703 #endif
83704
83705+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
83706+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
83707+#else
83708+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
83709+#endif
83710+
83711 #endif /* __KERNEL__ */
83712 #endif /* _LINUX_MM_H */
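check_data_rlimit() above now reports the requested data-segment size to gr_learn_resource() before enforcing the limit, so RBAC learning mode records the true high-water mark even for requests that fail. Callers keep the existing brk-style shape; roughly (a sketch mirroring the sys_brk usage pattern, with example_do_brk invented):

static unsigned long example_do_brk(struct mm_struct *mm, unsigned long newbrk)
{
	/* the hook inside check_data_rlimit() logs the requested size
	 * before the RLIMIT_DATA check is applied */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), newbrk, mm->start_brk,
			      mm->end_data, mm->start_data))
		return mm->brk;		/* refused: over RLIMIT_DATA */
	return newbrk;
}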
83713diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
83714index 6d34aa2..d73d848 100644
83715--- a/include/linux/mm_types.h
83716+++ b/include/linux/mm_types.h
83717@@ -309,7 +309,9 @@ struct vm_area_struct {
83718 #ifdef CONFIG_NUMA
83719 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
83720 #endif
83721-};
83722+
83723+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
83724+} __randomize_layout;
83725
83726 struct core_thread {
83727 struct task_struct *task;
83728@@ -459,7 +461,25 @@ struct mm_struct {
83729 /* address of the bounds directory */
83730 void __user *bd_addr;
83731 #endif
83732-};
83733+
83734+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
83735+ unsigned long pax_flags;
83736+#endif
83737+
83738+#ifdef CONFIG_PAX_DLRESOLVE
83739+ unsigned long call_dl_resolve;
83740+#endif
83741+
83742+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
83743+ unsigned long call_syscall;
83744+#endif
83745+
83746+#ifdef CONFIG_PAX_ASLR
83747+ unsigned long delta_mmap; /* randomized offset */
83748+ unsigned long delta_stack; /* randomized offset */
83749+#endif
83750+
83751+} __randomize_layout;
83752
83753 static inline void mm_init_cpumask(struct mm_struct *mm)
83754 {
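
The mm_struct hunk above adds the per-mm PaX state (pax_flags plus the ASLR deltas) and marks the struct for layout randomization. A hedged sketch of how the new fields might be consumed; the real randomization lives in the per-arch mmap layout code, and the helper name here is ours:

static unsigned long sketch_mmap_base(struct mm_struct *mm, unsigned long base)
{
#ifdef CONFIG_PAX_ASLR
	if (mm->pax_flags & MF_PAX_RANDMMAP)
		base += mm->delta_mmap;	/* randomized at exec time */
#endif
	return base;
}
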
83755diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
83756index cb2b040..f3c9f5c 100644
83757--- a/include/linux/mmc/core.h
83758+++ b/include/linux/mmc/core.h
83759@@ -79,7 +79,7 @@ struct mmc_command {
83760 #define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_MASK)
83761
83762 unsigned int retries; /* max number of retries */
83763- unsigned int error; /* command error */
83764+ int error; /* command error */
83765
83766 /*
83767 * Standard errno values are used for errors, but some have specific
83768diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
83769index c5d5278..f0b68c8 100644
83770--- a/include/linux/mmiotrace.h
83771+++ b/include/linux/mmiotrace.h
83772@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
83773 /* Called from ioremap.c */
83774 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
83775 void __iomem *addr);
83776-extern void mmiotrace_iounmap(volatile void __iomem *addr);
83777+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
83778
83779 /* For anyone to insert markers. Remember trailing newline. */
83780 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
83781@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
83782 {
83783 }
83784
83785-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
83786+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
83787 {
83788 }
83789
83790diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
83791index 2f0856d..5a4bc1e 100644
83792--- a/include/linux/mmzone.h
83793+++ b/include/linux/mmzone.h
83794@@ -527,7 +527,7 @@ struct zone {
83795
83796 ZONE_PADDING(_pad3_)
83797 /* Zone statistics */
83798- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
83799+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
83800 } ____cacheline_internodealigned_in_smp;
83801
83802 enum zone_flags {
83803diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
83804index 745def8..08a820b 100644
83805--- a/include/linux/mod_devicetable.h
83806+++ b/include/linux/mod_devicetable.h
83807@@ -139,7 +139,7 @@ struct usb_device_id {
83808 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
83809 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
83810
83811-#define HID_ANY_ID (~0)
83812+#define HID_ANY_ID (~0U)
83813 #define HID_BUS_ANY 0xffff
83814 #define HID_GROUP_ANY 0x0000
83815
83816@@ -475,7 +475,7 @@ struct dmi_system_id {
83817 const char *ident;
83818 struct dmi_strmatch matches[4];
83819 void *driver_data;
83820-};
83821+} __do_const;
83822 /*
83823 * struct dmi_device_id appears during expansion of
83824 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
83825diff --git a/include/linux/module.h b/include/linux/module.h
83826index b653d7c..22a238f 100644
83827--- a/include/linux/module.h
83828+++ b/include/linux/module.h
83829@@ -17,9 +17,11 @@
83830 #include <linux/moduleparam.h>
83831 #include <linux/jump_label.h>
83832 #include <linux/export.h>
83833+#include <linux/fs.h>
83834
83835 #include <linux/percpu.h>
83836 #include <asm/module.h>
83837+#include <asm/pgtable.h>
83838
83839 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
83840 #define MODULE_SIG_STRING "~Module signature appended~\n"
83841@@ -42,7 +44,7 @@ struct module_kobject {
83842 struct kobject *drivers_dir;
83843 struct module_param_attrs *mp;
83844 struct completion *kobj_completion;
83845-};
83846+} __randomize_layout;
83847
83848 struct module_attribute {
83849 struct attribute attr;
83850@@ -54,12 +56,13 @@ struct module_attribute {
83851 int (*test)(struct module *);
83852 void (*free)(struct module *);
83853 };
83854+typedef struct module_attribute __no_const module_attribute_no_const;
83855
83856 struct module_version_attribute {
83857 struct module_attribute mattr;
83858 const char *module_name;
83859 const char *version;
83860-} __attribute__ ((__aligned__(sizeof(void *))));
83861+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
83862
83863 extern ssize_t __modver_version_show(struct module_attribute *,
83864 struct module_kobject *, char *);
83865@@ -221,7 +224,7 @@ struct module {
83866
83867 /* Sysfs stuff. */
83868 struct module_kobject mkobj;
83869- struct module_attribute *modinfo_attrs;
83870+ module_attribute_no_const *modinfo_attrs;
83871 const char *version;
83872 const char *srcversion;
83873 struct kobject *holders_dir;
83874@@ -270,19 +273,16 @@ struct module {
83875 int (*init)(void);
83876
83877 /* If this is non-NULL, vfree after init() returns */
83878- void *module_init;
83879+ void *module_init_rx, *module_init_rw;
83880
83881 /* Here is the actual code + data, vfree'd on unload. */
83882- void *module_core;
83883+ void *module_core_rx, *module_core_rw;
83884
83885 /* Here are the sizes of the init and core sections */
83886- unsigned int init_size, core_size;
83887+ unsigned int init_size_rw, core_size_rw;
83888
83889 /* The size of the executable code in each section. */
83890- unsigned int init_text_size, core_text_size;
83891-
83892- /* Size of RO sections of the module (text+rodata) */
83893- unsigned int init_ro_size, core_ro_size;
83894+ unsigned int init_size_rx, core_size_rx;
83895
83896 /* Arch-specific module values */
83897 struct mod_arch_specific arch;
83898@@ -338,6 +338,10 @@ struct module {
83899 #ifdef CONFIG_EVENT_TRACING
83900 struct ftrace_event_call **trace_events;
83901 unsigned int num_trace_events;
83902+ struct file_operations trace_id;
83903+ struct file_operations trace_enable;
83904+ struct file_operations trace_format;
83905+ struct file_operations trace_filter;
83906 #endif
83907 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
83908 unsigned int num_ftrace_callsites;
83909@@ -361,7 +365,7 @@ struct module {
83910 ctor_fn_t *ctors;
83911 unsigned int num_ctors;
83912 #endif
83913-};
83914+} __randomize_layout;
83915 #ifndef MODULE_ARCH_INIT
83916 #define MODULE_ARCH_INIT {}
83917 #endif
83918@@ -382,18 +386,48 @@ bool is_module_address(unsigned long addr);
83919 bool is_module_percpu_address(unsigned long addr);
83920 bool is_module_text_address(unsigned long addr);
83921
83922+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
83923+{
83924+
83925+#ifdef CONFIG_PAX_KERNEXEC
83926+ if (ktla_ktva(addr) >= (unsigned long)start &&
83927+ ktla_ktva(addr) < (unsigned long)start + size)
83928+ return 1;
83929+#endif
83930+
83931+ return ((void *)addr >= start && (void *)addr < start + size);
83932+}
83933+
83934+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
83935+{
83936+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
83937+}
83938+
83939+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
83940+{
83941+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
83942+}
83943+
83944+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
83945+{
83946+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
83947+}
83948+
83949+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
83950+{
83951+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
83952+}
83953+
83954 static inline bool within_module_core(unsigned long addr,
83955 const struct module *mod)
83956 {
83957- return (unsigned long)mod->module_core <= addr &&
83958- addr < (unsigned long)mod->module_core + mod->core_size;
83959+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
83960 }
83961
83962 static inline bool within_module_init(unsigned long addr,
83963 const struct module *mod)
83964 {
83965- return (unsigned long)mod->module_init <= addr &&
83966- addr < (unsigned long)mod->module_init + mod->init_size;
83967+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
83968 }
83969
83970 static inline bool within_module(unsigned long addr, const struct module *mod)
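
The module.h hunk above splits the single module_core/module_init ranges into separate RX and RW mappings and rebuilds within_module_core()/within_module_init() from the four new range helpers. A usage sketch composed only of those helpers (the classifier name is ours):

static const char *sketch_classify(unsigned long addr, const struct module *mod)
{
	if (within_module_core_rx(addr, mod))
		return "core RX (text/rodata)";
	if (within_module_core_rw(addr, mod))
		return "core RW (data/bss)";
	if (within_module_init_rx(addr, mod))
		return "init RX";
	if (within_module_init_rw(addr, mod))
		return "init RW";
	return "outside this module";
}
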
83971diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
83972index f755626..641f822 100644
83973--- a/include/linux/moduleloader.h
83974+++ b/include/linux/moduleloader.h
83975@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
83976 sections. Returns NULL on failure. */
83977 void *module_alloc(unsigned long size);
83978
83979+#ifdef CONFIG_PAX_KERNEXEC
83980+void *module_alloc_exec(unsigned long size);
83981+#else
83982+#define module_alloc_exec(x) module_alloc(x)
83983+#endif
83984+
83985 /* Free memory returned from module_alloc. */
83986 void module_memfree(void *module_region);
83987
83988+#ifdef CONFIG_PAX_KERNEXEC
83989+void module_memfree_exec(void *module_region);
83990+#else
83991+#define module_memfree_exec(x) module_memfree((x))
83992+#endif
83993+
83994 /*
83995 * Apply the given relocation to the (simplified) ELF. Return -error
83996 * or 0.
83997@@ -45,8 +57,10 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
83998 unsigned int relsec,
83999 struct module *me)
84000 {
84001+#ifdef CONFIG_MODULES
84002 printk(KERN_ERR "module %s: REL relocation unsupported\n",
84003 module_name(me));
84004+#endif
84005 return -ENOEXEC;
84006 }
84007 #endif
84008@@ -68,8 +82,10 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
84009 unsigned int relsec,
84010 struct module *me)
84011 {
84012+#ifdef CONFIG_MODULES
84013 	printk(KERN_ERR "module %s: RELA relocation unsupported\n",
84014 module_name(me));
84015+#endif
84016 return -ENOEXEC;
84017 }
84018 #endif
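
Pairing sketch for the KERNEXEC allocator hooks declared above; when CONFIG_PAX_KERNEXEC is off, the #define fallbacks make both calls collapse to the stock module_alloc()/module_memfree(), so callers never need their own #ifdefs:

static void *sketch_alloc_module_text(unsigned long size)
{
	return module_alloc_exec(size);	/* RX pool under KERNEXEC */
}

static void sketch_free_module_text(void *text)
{
	module_memfree_exec(text);
}
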
84019diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
84020index 1c9effa..1160bdd 100644
84021--- a/include/linux/moduleparam.h
84022+++ b/include/linux/moduleparam.h
84023@@ -323,7 +323,7 @@ static inline void __kernel_param_unlock(void)
84024 * @len is usually just sizeof(string).
84025 */
84026 #define module_param_string(name, string, len, perm) \
84027- static const struct kparam_string __param_string_##name \
84028+ static const struct kparam_string __param_string_##name __used \
84029 = { len, string }; \
84030 __module_param_call(MODULE_PARAM_PREFIX, name, \
84031 &param_ops_string, \
84032@@ -467,7 +467,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
84033 */
84034 #define module_param_array_named(name, array, type, nump, perm) \
84035 param_check_##type(name, &(array)[0]); \
84036- static const struct kparam_array __param_arr_##name \
84037+ static const struct kparam_array __param_arr_##name __used \
84038 = { .max = ARRAY_SIZE(array), .num = nump, \
84039 .ops = &param_ops_##type, \
84040 .elemsize = sizeof(array[0]), .elem = array }; \
84041diff --git a/include/linux/mount.h b/include/linux/mount.h
84042index c2c561d..a5f2a8c 100644
84043--- a/include/linux/mount.h
84044+++ b/include/linux/mount.h
84045@@ -66,7 +66,7 @@ struct vfsmount {
84046 struct dentry *mnt_root; /* root of the mounted tree */
84047 struct super_block *mnt_sb; /* pointer to superblock */
84048 int mnt_flags;
84049-};
84050+} __randomize_layout;
84051
84052 struct file; /* forward dec */
84053 struct path;
84054diff --git a/include/linux/namei.h b/include/linux/namei.h
84055index c899077..b9a2010 100644
84056--- a/include/linux/namei.h
84057+++ b/include/linux/namei.h
84058@@ -71,8 +71,8 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
84059 extern void unlock_rename(struct dentry *, struct dentry *);
84060
84061 extern void nd_jump_link(struct nameidata *nd, struct path *path);
84062-extern void nd_set_link(struct nameidata *nd, char *path);
84063-extern char *nd_get_link(struct nameidata *nd);
84064+extern void nd_set_link(struct nameidata *nd, const char *path);
84065+extern const char *nd_get_link(const struct nameidata *nd);
84066
84067 static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
84068 {
84069diff --git a/include/linux/net.h b/include/linux/net.h
84070index 17d8339..81656c0 100644
84071--- a/include/linux/net.h
84072+++ b/include/linux/net.h
84073@@ -192,7 +192,7 @@ struct net_proto_family {
84074 int (*create)(struct net *net, struct socket *sock,
84075 int protocol, int kern);
84076 struct module *owner;
84077-};
84078+} __do_const;
84079
84080 struct iovec;
84081 struct kvec;
84082diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
84083index 52fd8e8..19430a1 100644
84084--- a/include/linux/netdevice.h
84085+++ b/include/linux/netdevice.h
84086@@ -1191,6 +1191,7 @@ struct net_device_ops {
84087 u8 state);
84088 #endif
84089 };
84090+typedef struct net_device_ops __no_const net_device_ops_no_const;
84091
84092 /**
84093 * enum net_device_priv_flags - &struct net_device priv_flags
84094@@ -1537,10 +1538,10 @@ struct net_device {
84095
84096 struct net_device_stats stats;
84097
84098- atomic_long_t rx_dropped;
84099- atomic_long_t tx_dropped;
84100+ atomic_long_unchecked_t rx_dropped;
84101+ atomic_long_unchecked_t tx_dropped;
84102
84103- atomic_t carrier_changes;
84104+ atomic_unchecked_t carrier_changes;
84105
84106 #ifdef CONFIG_WIRELESS_EXT
84107 const struct iw_handler_def * wireless_handlers;
84108diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
84109index 2517ece..0bbfcfb 100644
84110--- a/include/linux/netfilter.h
84111+++ b/include/linux/netfilter.h
84112@@ -85,7 +85,7 @@ struct nf_sockopt_ops {
84113 #endif
84114 /* Use the module struct to lock set/get code in place */
84115 struct module *owner;
84116-};
84117+} __do_const;
84118
84119 /* Function to register/unregister hook points. */
84120 int nf_register_hook(struct nf_hook_ops *reg);
84121diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
84122index e955d47..04a5338 100644
84123--- a/include/linux/netfilter/nfnetlink.h
84124+++ b/include/linux/netfilter/nfnetlink.h
84125@@ -19,7 +19,7 @@ struct nfnl_callback {
84126 const struct nlattr * const cda[]);
84127 const struct nla_policy *policy; /* netlink attribute policy */
84128 const u_int16_t attr_count; /* number of nlattr's */
84129-};
84130+} __do_const;
84131
84132 struct nfnetlink_subsystem {
84133 const char *name;
84134diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
84135new file mode 100644
84136index 0000000..33f4af8
84137--- /dev/null
84138+++ b/include/linux/netfilter/xt_gradm.h
84139@@ -0,0 +1,9 @@
84140+#ifndef _LINUX_NETFILTER_XT_GRADM_H
84141+#define _LINUX_NETFILTER_XT_GRADM_H 1
84142+
84143+struct xt_gradm_mtinfo {
84144+ __u16 flags;
84145+ __u16 invflags;
84146+};
84147+
84148+#endif
84149diff --git a/include/linux/nls.h b/include/linux/nls.h
84150index 520681b..2b7fabb 100644
84151--- a/include/linux/nls.h
84152+++ b/include/linux/nls.h
84153@@ -31,7 +31,7 @@ struct nls_table {
84154 const unsigned char *charset2upper;
84155 struct module *owner;
84156 struct nls_table *next;
84157-};
84158+} __do_const;
84159
84160 /* this value hold the maximum octet of charset */
84161 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
84162@@ -46,7 +46,7 @@ enum utf16_endian {
84163 /* nls_base.c */
84164 extern int __register_nls(struct nls_table *, struct module *);
84165 extern int unregister_nls(struct nls_table *);
84166-extern struct nls_table *load_nls(char *);
84167+extern struct nls_table *load_nls(const char *);
84168 extern void unload_nls(struct nls_table *);
84169 extern struct nls_table *load_nls_default(void);
84170 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
84171diff --git a/include/linux/notifier.h b/include/linux/notifier.h
84172index d14a4c3..a078786 100644
84173--- a/include/linux/notifier.h
84174+++ b/include/linux/notifier.h
84175@@ -54,7 +54,8 @@ struct notifier_block {
84176 notifier_fn_t notifier_call;
84177 struct notifier_block __rcu *next;
84178 int priority;
84179-};
84180+} __do_const;
84181+typedef struct notifier_block __no_const notifier_block_no_const;
84182
84183 struct atomic_notifier_head {
84184 spinlock_t lock;
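
notifier_block is a recurring example of this patch's constification pattern: __do_const lets the constify plugin place fully initialized ops-like structures in read-only memory, while the matching __no_const typedef opts out for objects that must be assigned at runtime. An illustrative sketch (the sketch_* names are not in the patch):

static int sketch_event(struct notifier_block *nb, unsigned long action,
			void *data)
{
	return NOTIFY_OK;
}

/* Fully initialized at build time: eligible for read-only placement. */
static struct notifier_block sketch_nb = {
	.notifier_call	= sketch_event,
};

/* Filled in at runtime, so it must use the writable typedef. */
static notifier_block_no_const sketch_runtime_nb;
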
84185diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
84186index b2a0f15..4d7da32 100644
84187--- a/include/linux/oprofile.h
84188+++ b/include/linux/oprofile.h
84189@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
84190 int oprofilefs_create_ro_ulong(struct dentry * root,
84191 char const * name, ulong * val);
84192
84193-/** Create a file for read-only access to an atomic_t. */
84194+/** Create a file for read-only access to an atomic_unchecked_t. */
84195 int oprofilefs_create_ro_atomic(struct dentry * root,
84196- char const * name, atomic_t * val);
84197+ char const * name, atomic_unchecked_t * val);
84198
84199 /** create a directory */
84200 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
84201diff --git a/include/linux/padata.h b/include/linux/padata.h
84202index 4386946..f50c615 100644
84203--- a/include/linux/padata.h
84204+++ b/include/linux/padata.h
84205@@ -129,7 +129,7 @@ struct parallel_data {
84206 struct padata_serial_queue __percpu *squeue;
84207 atomic_t reorder_objects;
84208 atomic_t refcnt;
84209- atomic_t seq_nr;
84210+ atomic_unchecked_t seq_nr;
84211 struct padata_cpumask cpumask;
84212 spinlock_t lock ____cacheline_aligned;
84213 unsigned int processed;
84214diff --git a/include/linux/path.h b/include/linux/path.h
84215index d137218..be0c176 100644
84216--- a/include/linux/path.h
84217+++ b/include/linux/path.h
84218@@ -1,13 +1,15 @@
84219 #ifndef _LINUX_PATH_H
84220 #define _LINUX_PATH_H
84221
84222+#include <linux/compiler.h>
84223+
84224 struct dentry;
84225 struct vfsmount;
84226
84227 struct path {
84228 struct vfsmount *mnt;
84229 struct dentry *dentry;
84230-};
84231+} __randomize_layout;
84232
84233 extern void path_get(const struct path *);
84234 extern void path_put(const struct path *);
84235diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
84236index 8c78950..0d74ed9 100644
84237--- a/include/linux/pci_hotplug.h
84238+++ b/include/linux/pci_hotplug.h
84239@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
84240 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
84241 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
84242 int (*reset_slot) (struct hotplug_slot *slot, int probe);
84243-};
84244+} __do_const;
84245+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
84246
84247 /**
84248 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
84249diff --git a/include/linux/percpu.h b/include/linux/percpu.h
84250index caebf2a..4c3ae9d 100644
84251--- a/include/linux/percpu.h
84252+++ b/include/linux/percpu.h
84253@@ -34,7 +34,7 @@
84254 * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
84255 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
84256 */
84257-#define PERCPU_DYNAMIC_EARLY_SLOTS 128
84258+#define PERCPU_DYNAMIC_EARLY_SLOTS 256
84259 #define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
84260
84261 /*
84262diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
84263index 664de5a..b3e1bf4 100644
84264--- a/include/linux/perf_event.h
84265+++ b/include/linux/perf_event.h
84266@@ -336,8 +336,8 @@ struct perf_event {
84267
84268 enum perf_event_active_state state;
84269 unsigned int attach_state;
84270- local64_t count;
84271- atomic64_t child_count;
84272+ local64_t count; /* PaX: fix it one day */
84273+ atomic64_unchecked_t child_count;
84274
84275 /*
84276 * These are the total time in nanoseconds that the event
84277@@ -388,8 +388,8 @@ struct perf_event {
84278 * These accumulate total time (in nanoseconds) that children
84279 * events have been enabled and running, respectively.
84280 */
84281- atomic64_t child_total_time_enabled;
84282- atomic64_t child_total_time_running;
84283+ atomic64_unchecked_t child_total_time_enabled;
84284+ atomic64_unchecked_t child_total_time_running;
84285
84286 /*
84287 * Protect attach/detach and child_list:
84288@@ -733,7 +733,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
84289 entry->ip[entry->nr++] = ip;
84290 }
84291
84292-extern int sysctl_perf_event_paranoid;
84293+extern int sysctl_perf_event_legitimately_concerned;
84294 extern int sysctl_perf_event_mlock;
84295 extern int sysctl_perf_event_sample_rate;
84296 extern int sysctl_perf_cpu_time_max_percent;
84297@@ -748,19 +748,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
84298 loff_t *ppos);
84299
84300
84301+static inline bool perf_paranoid_any(void)
84302+{
84303+ return sysctl_perf_event_legitimately_concerned > 2;
84304+}
84305+
84306 static inline bool perf_paranoid_tracepoint_raw(void)
84307 {
84308- return sysctl_perf_event_paranoid > -1;
84309+ return sysctl_perf_event_legitimately_concerned > -1;
84310 }
84311
84312 static inline bool perf_paranoid_cpu(void)
84313 {
84314- return sysctl_perf_event_paranoid > 0;
84315+ return sysctl_perf_event_legitimately_concerned > 0;
84316 }
84317
84318 static inline bool perf_paranoid_kernel(void)
84319 {
84320- return sysctl_perf_event_paranoid > 1;
84321+ return sysctl_perf_event_legitimately_concerned > 1;
84322 }
84323
84324 extern void perf_event_init(void);
84325@@ -891,7 +896,7 @@ struct perf_pmu_events_attr {
84326 struct device_attribute attr;
84327 u64 id;
84328 const char *event_str;
84329-};
84330+} __do_const;
84331
84332 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
84333 static struct perf_pmu_events_attr _var = { \
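
The perf hunk above renames the paranoia sysctl and adds a ">2" level via perf_paranoid_any(). A sketch of how the levels stack up; real callers also consult capabilities, which is omitted here, and the helper name is ours:

static bool sketch_perf_allowed(bool wants_kernel, bool wants_cpu_wide)
{
	if (perf_paranoid_any())			/* > 2: perf denied outright */
		return false;
	if (wants_kernel && perf_paranoid_kernel())	/* > 1: no kernel profiling */
		return false;
	if (wants_cpu_wide && perf_paranoid_cpu())	/* > 0: no CPU-wide events */
		return false;
	return true;
}
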
84334diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
84335index b9cf6c5..5462472 100644
84336--- a/include/linux/pid_namespace.h
84337+++ b/include/linux/pid_namespace.h
84338@@ -45,7 +45,7 @@ struct pid_namespace {
84339 int hide_pid;
84340 int reboot; /* group exit code if this pidns was rebooted */
84341 struct ns_common ns;
84342-};
84343+} __randomize_layout;
84344
84345 extern struct pid_namespace init_pid_ns;
84346
84347diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
84348index eb8b8ac..62649e1 100644
84349--- a/include/linux/pipe_fs_i.h
84350+++ b/include/linux/pipe_fs_i.h
84351@@ -47,10 +47,10 @@ struct pipe_inode_info {
84352 struct mutex mutex;
84353 wait_queue_head_t wait;
84354 unsigned int nrbufs, curbuf, buffers;
84355- unsigned int readers;
84356- unsigned int writers;
84357- unsigned int files;
84358- unsigned int waiting_writers;
84359+ atomic_t readers;
84360+ atomic_t writers;
84361+ atomic_t files;
84362+ atomic_t waiting_writers;
84363 unsigned int r_counter;
84364 unsigned int w_counter;
84365 struct page *tmp_page;
84366diff --git a/include/linux/pm.h b/include/linux/pm.h
84367index 8b59763..8a05939 100644
84368--- a/include/linux/pm.h
84369+++ b/include/linux/pm.h
84370@@ -608,6 +608,7 @@ struct dev_pm_domain {
84371 struct dev_pm_ops ops;
84372 void (*detach)(struct device *dev, bool power_off);
84373 };
84374+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
84375
84376 /*
84377 * The PM_EVENT_ messages are also used by drivers implementing the legacy
84378diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
84379index a9edab2..8bada56 100644
84380--- a/include/linux/pm_domain.h
84381+++ b/include/linux/pm_domain.h
84382@@ -39,11 +39,11 @@ struct gpd_dev_ops {
84383 int (*save_state)(struct device *dev);
84384 int (*restore_state)(struct device *dev);
84385 bool (*active_wakeup)(struct device *dev);
84386-};
84387+} __no_const;
84388
84389 struct gpd_cpuidle_data {
84390 unsigned int saved_exit_latency;
84391- struct cpuidle_state *idle_state;
84392+ cpuidle_state_no_const *idle_state;
84393 };
84394
84395 struct generic_pm_domain {
84396diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
84397index 30e84d4..22278b4 100644
84398--- a/include/linux/pm_runtime.h
84399+++ b/include/linux/pm_runtime.h
84400@@ -115,7 +115,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
84401
84402 static inline void pm_runtime_mark_last_busy(struct device *dev)
84403 {
84404- ACCESS_ONCE(dev->power.last_busy) = jiffies;
84405+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
84406 }
84407
84408 static inline bool pm_runtime_is_irq_safe(struct device *dev)
84409diff --git a/include/linux/pnp.h b/include/linux/pnp.h
84410index 195aafc..49a7bc2 100644
84411--- a/include/linux/pnp.h
84412+++ b/include/linux/pnp.h
84413@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
84414 struct pnp_fixup {
84415 char id[7];
84416 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
84417-};
84418+} __do_const;
84419
84420 /* config parameters */
84421 #define PNP_CONFIG_NORMAL 0x0001
84422diff --git a/include/linux/poison.h b/include/linux/poison.h
84423index 2110a81..13a11bb 100644
84424--- a/include/linux/poison.h
84425+++ b/include/linux/poison.h
84426@@ -19,8 +19,8 @@
84427 * under normal circumstances, used to verify that nobody uses
84428 * non-initialized list entries.
84429 */
84430-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
84431-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
84432+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
84433+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
84434
84435 /********** include/linux/timer.h **********/
84436 /*
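
The poison.h change moves the list poison constants out of the low, user-mappable range (on 32-bit, userland could map 0x00100100 and turn a stale dereference into controlled data) up into the kernel's own address range, where following a dangling pointer faults. For reference, the pre-existing list_del() poisoning this affects looks like the following sketch:

static inline void sketch_list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->next = LIST_POISON1;	/* now 0xFFFFFF01: kernel range */
	entry->prev = LIST_POISON2;	/* now 0xFFFFFF02 */
}
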
84437diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
84438index d8b187c3..9a9257a 100644
84439--- a/include/linux/power/smartreflex.h
84440+++ b/include/linux/power/smartreflex.h
84441@@ -238,7 +238,7 @@ struct omap_sr_class_data {
84442 int (*notify)(struct omap_sr *sr, u32 status);
84443 u8 notify_flags;
84444 u8 class_type;
84445-};
84446+} __do_const;
84447
84448 /**
84449 * struct omap_sr_nvalue_table - Smartreflex n-target value info
84450diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
84451index 4ea1d37..80f4b33 100644
84452--- a/include/linux/ppp-comp.h
84453+++ b/include/linux/ppp-comp.h
84454@@ -84,7 +84,7 @@ struct compressor {
84455 struct module *owner;
84456 /* Extra skb space needed by the compressor algorithm */
84457 unsigned int comp_extra;
84458-};
84459+} __do_const;
84460
84461 /*
84462 * The return value from decompress routine is the length of the
84463diff --git a/include/linux/preempt.h b/include/linux/preempt.h
84464index de83b4e..c4b997d 100644
84465--- a/include/linux/preempt.h
84466+++ b/include/linux/preempt.h
84467@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
84468 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
84469 #endif
84470
84471+#define raw_preempt_count_add(val) __preempt_count_add(val)
84472+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
84473+
84474 #define __preempt_count_inc() __preempt_count_add(1)
84475 #define __preempt_count_dec() __preempt_count_sub(1)
84476
84477 #define preempt_count_inc() preempt_count_add(1)
84478+#define raw_preempt_count_inc() raw_preempt_count_add(1)
84479 #define preempt_count_dec() preempt_count_sub(1)
84480+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
84481
84482 #ifdef CONFIG_PREEMPT_COUNT
84483
84484@@ -41,6 +46,12 @@ do { \
84485 barrier(); \
84486 } while (0)
84487
84488+#define raw_preempt_disable() \
84489+do { \
84490+ raw_preempt_count_inc(); \
84491+ barrier(); \
84492+} while (0)
84493+
84494 #define sched_preempt_enable_no_resched() \
84495 do { \
84496 barrier(); \
84497@@ -49,6 +60,12 @@ do { \
84498
84499 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
84500
84501+#define raw_preempt_enable_no_resched() \
84502+do { \
84503+ barrier(); \
84504+ raw_preempt_count_dec(); \
84505+} while (0)
84506+
84507 #ifdef CONFIG_PREEMPT
84508 #define preempt_enable() \
84509 do { \
84510@@ -113,8 +130,10 @@ do { \
84511 * region.
84512 */
84513 #define preempt_disable() barrier()
84514+#define raw_preempt_disable() barrier()
84515 #define sched_preempt_enable_no_resched() barrier()
84516 #define preempt_enable_no_resched() barrier()
84517+#define raw_preempt_enable_no_resched() barrier()
84518 #define preempt_enable() barrier()
84519 #define preempt_check_resched() do { } while (0)
84520
84521@@ -128,11 +147,13 @@ do { \
84522 /*
84523 * Modules have no business playing preemption tricks.
84524 */
84525+#ifndef CONFIG_PAX_KERNEXEC
84526 #undef sched_preempt_enable_no_resched
84527 #undef preempt_enable_no_resched
84528 #undef preempt_enable_no_resched_notrace
84529 #undef preempt_check_resched
84530 #endif
84531+#endif
84532
84533 #define preempt_set_need_resched() \
84534 do { \
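
The raw_preempt_* macros added above bypass the preempt_count_add()/preempt_count_sub() instrumentation, for paths where the instrumentation itself (tracing, debugging hooks) must not recurse. A pairing sketch:

static void sketch_touch_percpu_state(void)
{
	raw_preempt_disable();
	/* ... per-CPU work that must not trace or reschedule ... */
	raw_preempt_enable_no_resched();
}
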
84535diff --git a/include/linux/printk.h b/include/linux/printk.h
84536index 4d5bf57..d94eccf 100644
84537--- a/include/linux/printk.h
84538+++ b/include/linux/printk.h
84539@@ -121,6 +121,7 @@ void early_printk(const char *s, ...) { }
84540 #endif
84541
84542 typedef int(*printk_func_t)(const char *fmt, va_list args);
84543+extern int kptr_restrict;
84544
84545 #ifdef CONFIG_PRINTK
84546 asmlinkage __printf(5, 0)
84547@@ -156,7 +157,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
84548
84549 extern int printk_delay_msec;
84550 extern int dmesg_restrict;
84551-extern int kptr_restrict;
84552
84553 extern void wake_up_klogd(void);
84554
84555diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
84556index b97bf2e..f14c92d4 100644
84557--- a/include/linux/proc_fs.h
84558+++ b/include/linux/proc_fs.h
84559@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
84560 extern struct proc_dir_entry *proc_symlink(const char *,
84561 struct proc_dir_entry *, const char *);
84562 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
84563+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
84564 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
84565 struct proc_dir_entry *, void *);
84566+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
84567+ struct proc_dir_entry *, void *);
84568 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
84569 struct proc_dir_entry *);
84570
84571@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
84572 return proc_create_data(name, mode, parent, proc_fops, NULL);
84573 }
84574
84575+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
84576+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
84577+{
84578+#ifdef CONFIG_GRKERNSEC_PROC_USER
84579+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
84580+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
84581+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
84582+#else
84583+ return proc_create_data(name, mode, parent, proc_fops, NULL);
84584+#endif
84585+}
84586+
84587+
84588 extern void proc_set_size(struct proc_dir_entry *, loff_t);
84589 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
84590 extern void *PDE_DATA(const struct inode *);
84591@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
84592 struct proc_dir_entry *parent,const char *dest) { return NULL;}
84593 static inline struct proc_dir_entry *proc_mkdir(const char *name,
84594 struct proc_dir_entry *parent) {return NULL;}
84595+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
84596+ struct proc_dir_entry *parent) { return NULL; }
84597 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
84598 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84599+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
84600+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84601 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
84602 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
84603 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
84604@@ -79,7 +99,7 @@ struct net;
84605 static inline struct proc_dir_entry *proc_net_mkdir(
84606 struct net *net, const char *name, struct proc_dir_entry *parent)
84607 {
84608- return proc_mkdir_data(name, 0, parent, net);
84609+ return proc_mkdir_data_restrict(name, 0, parent, net);
84610 }
84611
84612 #endif /* _LINUX_PROC_FS_H */
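
proc_create_grsec() above tightens the caller's requested mode at compile time: 0400 under GRKERNSEC_PROC_USER, 0440 under GRKERNSEC_PROC_USERGROUP, otherwise the mode passes through unchanged. A hypothetical registration sketch (fops body elided):

static const struct file_operations sketch_fops;

static void sketch_register_proc(void)
{
	/* 0444 is silently reduced to 0400/0440 under the grsec options */
	proc_create_grsec("sketch", 0444, NULL, &sketch_fops);
}
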
84613diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
84614index 42dfc61..8113a99 100644
84615--- a/include/linux/proc_ns.h
84616+++ b/include/linux/proc_ns.h
84617@@ -16,7 +16,7 @@ struct proc_ns_operations {
84618 struct ns_common *(*get)(struct task_struct *task);
84619 void (*put)(struct ns_common *ns);
84620 int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
84621-};
84622+} __do_const __randomize_layout;
84623
84624 extern const struct proc_ns_operations netns_operations;
84625 extern const struct proc_ns_operations utsns_operations;
84626diff --git a/include/linux/quota.h b/include/linux/quota.h
84627index b86df49..8002997 100644
84628--- a/include/linux/quota.h
84629+++ b/include/linux/quota.h
84630@@ -75,7 +75,7 @@ struct kqid { /* Type in which we store the quota identifier */
84631
84632 extern bool qid_eq(struct kqid left, struct kqid right);
84633 extern bool qid_lt(struct kqid left, struct kqid right);
84634-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
84635+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
84636 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
84637 extern bool qid_valid(struct kqid qid);
84638
84639diff --git a/include/linux/random.h b/include/linux/random.h
84640index b05856e..0a9f14e 100644
84641--- a/include/linux/random.h
84642+++ b/include/linux/random.h
84643@@ -9,9 +9,19 @@
84644 #include <uapi/linux/random.h>
84645
84646 extern void add_device_randomness(const void *, unsigned int);
84647+
84648+static inline void add_latent_entropy(void)
84649+{
84650+
84651+#ifdef LATENT_ENTROPY_PLUGIN
84652+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
84653+#endif
84654+
84655+}
84656+
84657 extern void add_input_randomness(unsigned int type, unsigned int code,
84658- unsigned int value);
84659-extern void add_interrupt_randomness(int irq, int irq_flags);
84660+ unsigned int value) __latent_entropy;
84661+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
84662
84663 extern void get_random_bytes(void *buf, int nbytes);
84664 extern void get_random_bytes_arch(void *buf, int nbytes);
84665@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
84666 extern const struct file_operations random_fops, urandom_fops;
84667 #endif
84668
84669-unsigned int get_random_int(void);
84670+unsigned int __intentional_overflow(-1) get_random_int(void);
84671 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
84672
84673-u32 prandom_u32(void);
84674+u32 prandom_u32(void) __intentional_overflow(-1);
84675 void prandom_bytes(void *buf, size_t nbytes);
84676 void prandom_seed(u32 seed);
84677 void prandom_reseed_late(void);
84678@@ -37,6 +47,11 @@ struct rnd_state {
84679 u32 prandom_u32_state(struct rnd_state *state);
84680 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
84681
84682+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
84683+{
84684+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
84685+}
84686+
84687 /**
84688 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
84689 * @ep_ro: right open interval endpoint
84690@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
84691 *
84692 * Returns: pseudo-random number in interval [0, ep_ro)
84693 */
84694-static inline u32 prandom_u32_max(u32 ep_ro)
84695+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
84696 {
84697 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
84698 }
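
pax_get_random_long() above widens prandom_u32() to a full long on 64-bit by concatenating two draws (the second shifted into the high word). A usage sketch, with our own helper name and modulo bias ignored for brevity:

static unsigned long sketch_random_page_offset(unsigned long range_pages)
{
	return (pax_get_random_long() % range_pages) << PAGE_SHIFT;
}
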
84699diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
84700index 378c5ee..aa84a47 100644
84701--- a/include/linux/rbtree_augmented.h
84702+++ b/include/linux/rbtree_augmented.h
84703@@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
84704 old->rbaugmented = rbcompute(old); \
84705 } \
84706 rbstatic const struct rb_augment_callbacks rbname = { \
84707- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
84708+ .propagate = rbname ## _propagate, \
84709+ .copy = rbname ## _copy, \
84710+ .rotate = rbname ## _rotate \
84711 };
84712
84713
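
The rbtree hunk switches to designated initializers, which matters under structure-layout hardening: positional initializers silently bind to the wrong members if fields are ever reordered, while designated ones stay correct. A standalone illustration, independent of the rbtree code:

struct sketch_ops {
	void (*first)(void);
	void (*second)(void);
};

static void sketch_a(void) { }
static void sketch_b(void) { }

/* Positional: breaks silently if 'first' and 'second' swap places. */
static const struct sketch_ops sketch_positional = { sketch_a, sketch_b };

/* Designated: still correct after any reordering of the members. */
static const struct sketch_ops sketch_designated = {
	.first	= sketch_a,
	.second	= sketch_b,
};
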
84714diff --git a/include/linux/rculist.h b/include/linux/rculist.h
84715index 529bc94..82ce778 100644
84716--- a/include/linux/rculist.h
84717+++ b/include/linux/rculist.h
84718@@ -29,8 +29,8 @@
84719 */
84720 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
84721 {
84722- ACCESS_ONCE(list->next) = list;
84723- ACCESS_ONCE(list->prev) = list;
84724+ ACCESS_ONCE_RW(list->next) = list;
84725+ ACCESS_ONCE_RW(list->prev) = list;
84726 }
84727
84728 /*
84729@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
84730 struct list_head *prev, struct list_head *next);
84731 #endif
84732
84733+void __pax_list_add_rcu(struct list_head *new,
84734+ struct list_head *prev, struct list_head *next);
84735+
84736 /**
84737 * list_add_rcu - add a new entry to rcu-protected list
84738 * @new: new entry to be added
84739@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
84740 __list_add_rcu(new, head, head->next);
84741 }
84742
84743+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
84744+{
84745+ __pax_list_add_rcu(new, head, head->next);
84746+}
84747+
84748 /**
84749 * list_add_tail_rcu - add a new entry to rcu-protected list
84750 * @new: new entry to be added
84751@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
84752 __list_add_rcu(new, head->prev, head);
84753 }
84754
84755+static inline void pax_list_add_tail_rcu(struct list_head *new,
84756+ struct list_head *head)
84757+{
84758+ __pax_list_add_rcu(new, head->prev, head);
84759+}
84760+
84761 /**
84762 * list_del_rcu - deletes entry from list without re-initialization
84763 * @entry: the element to delete from the list.
84764@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
84765 entry->prev = LIST_POISON2;
84766 }
84767
84768+extern void pax_list_del_rcu(struct list_head *entry);
84769+
84770 /**
84771 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
84772 * @n: the element to delete from the hash list.
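
The pax_list_add_rcu()/pax_list_del_rcu() wrappers declared above mirror the stock RCU list API but route the writes through PaX helpers, so list heads embedded in constified (read-only after init) objects remain updatable. Usage sketch (sketch_* names are ours):

static LIST_HEAD(sketch_list);

static void sketch_add(struct list_head *node)
{
	pax_list_add_rcu(node, &sketch_list);
}

static void sketch_remove(struct list_head *node)
{
	pax_list_del_rcu(node);
}
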
84773diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
84774index ed4f593..8a51501 100644
84775--- a/include/linux/rcupdate.h
84776+++ b/include/linux/rcupdate.h
84777@@ -332,7 +332,7 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
84778 #define rcu_note_voluntary_context_switch(t) \
84779 do { \
84780 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
84781- ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
84782+ ACCESS_ONCE_RW((t)->rcu_tasks_holdout) = false; \
84783 } while (0)
84784 #else /* #ifdef CONFIG_TASKS_RCU */
84785 #define TASKS_RCU(x) do { } while (0)
84786diff --git a/include/linux/reboot.h b/include/linux/reboot.h
84787index 67fc8fc..a90f7d8 100644
84788--- a/include/linux/reboot.h
84789+++ b/include/linux/reboot.h
84790@@ -47,9 +47,9 @@ extern void do_kernel_restart(char *cmd);
84791 */
84792
84793 extern void migrate_to_reboot_cpu(void);
84794-extern void machine_restart(char *cmd);
84795-extern void machine_halt(void);
84796-extern void machine_power_off(void);
84797+extern void machine_restart(char *cmd) __noreturn;
84798+extern void machine_halt(void) __noreturn;
84799+extern void machine_power_off(void) __noreturn;
84800
84801 extern void machine_shutdown(void);
84802 struct pt_regs;
84803@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
84804 */
84805
84806 extern void kernel_restart_prepare(char *cmd);
84807-extern void kernel_restart(char *cmd);
84808-extern void kernel_halt(void);
84809-extern void kernel_power_off(void);
84810+extern void kernel_restart(char *cmd) __noreturn;
84811+extern void kernel_halt(void) __noreturn;
84812+extern void kernel_power_off(void) __noreturn;
84813
84814 extern int C_A_D; /* for sysctl */
84815 void ctrl_alt_del(void);
84816@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
84817 * Emergency restart, callable from an interrupt handler.
84818 */
84819
84820-extern void emergency_restart(void);
84821+extern void emergency_restart(void) __noreturn;
84822 #include <asm/emergency-restart.h>
84823
84824 #endif /* _LINUX_REBOOT_H */
84825diff --git a/include/linux/regset.h b/include/linux/regset.h
84826index 8e0c9fe..ac4d221 100644
84827--- a/include/linux/regset.h
84828+++ b/include/linux/regset.h
84829@@ -161,7 +161,8 @@ struct user_regset {
84830 unsigned int align;
84831 unsigned int bias;
84832 unsigned int core_note_type;
84833-};
84834+} __do_const;
84835+typedef struct user_regset __no_const user_regset_no_const;
84836
84837 /**
84838 * struct user_regset_view - available regsets
84839diff --git a/include/linux/relay.h b/include/linux/relay.h
84840index d7c8359..818daf5 100644
84841--- a/include/linux/relay.h
84842+++ b/include/linux/relay.h
84843@@ -157,7 +157,7 @@ struct rchan_callbacks
84844 * The callback should return 0 if successful, negative if not.
84845 */
84846 int (*remove_buf_file)(struct dentry *dentry);
84847-};
84848+} __no_const;
84849
84850 /*
84851 * CONFIG_RELAY kernel API, kernel/relay.c
84852diff --git a/include/linux/rio.h b/include/linux/rio.h
84853index 6bda06f..bf39a9b 100644
84854--- a/include/linux/rio.h
84855+++ b/include/linux/rio.h
84856@@ -358,7 +358,7 @@ struct rio_ops {
84857 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
84858 u64 rstart, u32 size, u32 flags);
84859 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
84860-};
84861+} __no_const;
84862
84863 #define RIO_RESOURCE_MEM 0x00000100
84864 #define RIO_RESOURCE_DOORBELL 0x00000200
84865diff --git a/include/linux/rmap.h b/include/linux/rmap.h
84866index d9d7e7e..86f47ac 100644
84867--- a/include/linux/rmap.h
84868+++ b/include/linux/rmap.h
84869@@ -154,8 +154,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
84870 void anon_vma_init(void); /* create anon_vma_cachep */
84871 int anon_vma_prepare(struct vm_area_struct *);
84872 void unlink_anon_vmas(struct vm_area_struct *);
84873-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
84874-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
84875+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
84876+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
84877
84878 static inline void anon_vma_merge(struct vm_area_struct *vma,
84879 struct vm_area_struct *next)
84880diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
84881index ed8f9e7..999bc96 100644
84882--- a/include/linux/scatterlist.h
84883+++ b/include/linux/scatterlist.h
84884@@ -1,6 +1,7 @@
84885 #ifndef _LINUX_SCATTERLIST_H
84886 #define _LINUX_SCATTERLIST_H
84887
84888+#include <linux/sched.h>
84889 #include <linux/string.h>
84890 #include <linux/bug.h>
84891 #include <linux/mm.h>
84892@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
84893 #ifdef CONFIG_DEBUG_SG
84894 BUG_ON(!virt_addr_valid(buf));
84895 #endif
84896+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84897+ if (object_starts_on_stack(buf)) {
84898+ void *adjbuf = buf - current->stack + current->lowmem_stack;
84899+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
84900+ } else
84901+#endif
84902 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
84903 }
84904
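
The sg_set_buf() hunk translates an on-stack buffer to its lowmem alias before virt_to_page(), which only makes sense under the KSTACKOVERFLOW vmapped-stack setup where virt_to_page() is not valid for vmalloc addresses. The same adjustment, factored into a sketch helper of our own naming:

static void *sketch_lowmem_alias(void *buf)
{
#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
	if (object_starts_on_stack(buf))
		return buf - current->stack + current->lowmem_stack;
#endif
	return buf;
}
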
84905diff --git a/include/linux/sched.h b/include/linux/sched.h
84906index 8db31ef..0af1f81 100644
84907--- a/include/linux/sched.h
84908+++ b/include/linux/sched.h
84909@@ -133,6 +133,7 @@ struct fs_struct;
84910 struct perf_event_context;
84911 struct blk_plug;
84912 struct filename;
84913+struct linux_binprm;
84914
84915 #define VMACACHE_BITS 2
84916 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
84917@@ -415,7 +416,7 @@ extern char __sched_text_start[], __sched_text_end[];
84918 extern int in_sched_functions(unsigned long addr);
84919
84920 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
84921-extern signed long schedule_timeout(signed long timeout);
84922+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
84923 extern signed long schedule_timeout_interruptible(signed long timeout);
84924 extern signed long schedule_timeout_killable(signed long timeout);
84925 extern signed long schedule_timeout_uninterruptible(signed long timeout);
84926@@ -426,6 +427,19 @@ struct nsproxy;
84927 struct user_namespace;
84928
84929 #ifdef CONFIG_MMU
84930+
84931+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
84932+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
84933+#else
84934+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
84935+{
84936+ return 0;
84937+}
84938+#endif
84939+
84940+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
84941+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
84942+
84943 extern void arch_pick_mmap_layout(struct mm_struct *mm);
84944 extern unsigned long
84945 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
84946@@ -724,6 +738,17 @@ struct signal_struct {
84947 #ifdef CONFIG_TASKSTATS
84948 struct taskstats *stats;
84949 #endif
84950+
84951+#ifdef CONFIG_GRKERNSEC
84952+ u32 curr_ip;
84953+ u32 saved_ip;
84954+ u32 gr_saddr;
84955+ u32 gr_daddr;
84956+ u16 gr_sport;
84957+ u16 gr_dport;
84958+ u8 used_accept:1;
84959+#endif
84960+
84961 #ifdef CONFIG_AUDIT
84962 unsigned audit_tty;
84963 unsigned audit_tty_log_passwd;
84964@@ -750,7 +775,7 @@ struct signal_struct {
84965 struct mutex cred_guard_mutex; /* guard against foreign influences on
84966 * credential calculations
84967 * (notably. ptrace) */
84968-};
84969+} __randomize_layout;
84970
84971 /*
84972 * Bits in flags field of signal_struct.
84973@@ -803,6 +828,14 @@ struct user_struct {
84974 struct key *session_keyring; /* UID's default session keyring */
84975 #endif
84976
84977+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
84978+ unsigned char kernel_banned;
84979+#endif
84980+#ifdef CONFIG_GRKERNSEC_BRUTE
84981+ unsigned char suid_banned;
84982+ unsigned long suid_ban_expires;
84983+#endif
84984+
84985 /* Hash table maintenance information */
84986 struct hlist_node uidhash_node;
84987 kuid_t uid;
84988@@ -810,7 +843,7 @@ struct user_struct {
84989 #ifdef CONFIG_PERF_EVENTS
84990 atomic_long_t locked_vm;
84991 #endif
84992-};
84993+} __randomize_layout;
84994
84995 extern int uids_sysfs_init(void);
84996
84997@@ -1274,6 +1307,9 @@ enum perf_event_task_context {
84998 struct task_struct {
84999 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
85000 void *stack;
85001+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85002+ void *lowmem_stack;
85003+#endif
85004 atomic_t usage;
85005 unsigned int flags; /* per process flags, defined below */
85006 unsigned int ptrace;
85007@@ -1405,8 +1441,8 @@ struct task_struct {
85008 struct list_head thread_node;
85009
85010 struct completion *vfork_done; /* for vfork() */
85011- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
85012- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
85013+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
85014+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
85015
85016 cputime_t utime, stime, utimescaled, stimescaled;
85017 cputime_t gtime;
85018@@ -1431,11 +1467,6 @@ struct task_struct {
85019 struct task_cputime cputime_expires;
85020 struct list_head cpu_timers[3];
85021
85022-/* process credentials */
85023- const struct cred __rcu *real_cred; /* objective and real subjective task
85024- * credentials (COW) */
85025- const struct cred __rcu *cred; /* effective (overridable) subjective task
85026- * credentials (COW) */
85027 char comm[TASK_COMM_LEN]; /* executable name excluding path
85028 - access with [gs]et_task_comm (which lock
85029 it with task_lock())
85030@@ -1453,6 +1484,10 @@ struct task_struct {
85031 #endif
85032 /* CPU-specific state of this task */
85033 struct thread_struct thread;
85034+/* thread_info moved to task_struct */
85035+#ifdef CONFIG_X86
85036+ struct thread_info tinfo;
85037+#endif
85038 /* filesystem information */
85039 struct fs_struct *fs;
85040 /* open file information */
85041@@ -1527,6 +1562,10 @@ struct task_struct {
85042 gfp_t lockdep_reclaim_gfp;
85043 #endif
85044
85045+/* process credentials */
85046+ const struct cred __rcu *real_cred; /* objective and real subjective task
85047+ * credentials (COW) */
85048+
85049 /* journalling filesystem info */
85050 void *journal_info;
85051
85052@@ -1565,6 +1604,10 @@ struct task_struct {
85053 /* cg_list protected by css_set_lock and tsk->alloc_lock */
85054 struct list_head cg_list;
85055 #endif
85056+
85057+ const struct cred __rcu *cred; /* effective (overridable) subjective task
85058+ * credentials (COW) */
85059+
85060 #ifdef CONFIG_FUTEX
85061 struct robust_list_head __user *robust_list;
85062 #ifdef CONFIG_COMPAT
85063@@ -1673,7 +1716,7 @@ struct task_struct {
85064 * Number of functions that haven't been traced
85065 * because of depth overrun.
85066 */
85067- atomic_t trace_overrun;
85068+ atomic_unchecked_t trace_overrun;
85069 /* Pause for the tracing */
85070 atomic_t tracing_graph_pause;
85071 #endif
85072@@ -1701,7 +1744,78 @@ struct task_struct {
85073 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
85074 unsigned long task_state_change;
85075 #endif
85076-};
85077+
85078+#ifdef CONFIG_GRKERNSEC
85079+ /* grsecurity */
85080+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
85081+ u64 exec_id;
85082+#endif
85083+#ifdef CONFIG_GRKERNSEC_SETXID
85084+ const struct cred *delayed_cred;
85085+#endif
85086+ struct dentry *gr_chroot_dentry;
85087+ struct acl_subject_label *acl;
85088+ struct acl_subject_label *tmpacl;
85089+ struct acl_role_label *role;
85090+ struct file *exec_file;
85091+ unsigned long brute_expires;
85092+ u16 acl_role_id;
85093+ u8 inherited;
85094+ /* is this the task that authenticated to the special role */
85095+ u8 acl_sp_role;
85096+ u8 is_writable;
85097+ u8 brute;
85098+ u8 gr_is_chrooted;
85099+#endif
85100+
85101+} __randomize_layout;
85102+
85103+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
85104+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
85105+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
85106+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
85107+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
85108+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
85109+
85110+#ifdef CONFIG_PAX_SOFTMODE
85111+extern int pax_softmode;
85112+#endif
85113+
85114+extern int pax_check_flags(unsigned long *);
85115+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
85116+
85117+/* if tsk != current then task_lock must be held on it */
85118+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
85119+static inline unsigned long pax_get_flags(struct task_struct *tsk)
85120+{
85121+ if (likely(tsk->mm))
85122+ return tsk->mm->pax_flags;
85123+ else
85124+ return 0UL;
85125+}
85126+
85127+/* if tsk != current then task_lock must be held on it */
85128+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
85129+{
85130+ if (likely(tsk->mm)) {
85131+ tsk->mm->pax_flags = flags;
85132+ return 0;
85133+ }
85134+ return -EINVAL;
85135+}
85136+#endif
85137+
85138+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
85139+extern void pax_set_initial_flags(struct linux_binprm *bprm);
85140+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
85141+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
85142+#endif
85143+
85144+struct path;
85145+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
85146+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
85147+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
85148+extern void pax_report_refcount_overflow(struct pt_regs *regs);
85149
85150 /* Future-safe accessor for struct task_struct's cpus_allowed. */
85151 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
85152@@ -1783,7 +1897,7 @@ struct pid_namespace;
85153 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
85154 struct pid_namespace *ns);
85155
85156-static inline pid_t task_pid_nr(struct task_struct *tsk)
85157+static inline pid_t task_pid_nr(const struct task_struct *tsk)
85158 {
85159 return tsk->pid;
85160 }
85161@@ -2150,6 +2264,25 @@ extern u64 sched_clock_cpu(int cpu);
85162
85163 extern void sched_clock_init(void);
85164
85165+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85166+static inline void populate_stack(void)
85167+{
85168+ struct task_struct *curtask = current;
85169+ int c;
85170+ int *ptr = curtask->stack;
85171+ int *end = curtask->stack + THREAD_SIZE;
85172+
85173+ while (ptr < end) {
85174+ c = *(volatile int *)ptr;
85175+ ptr += PAGE_SIZE/sizeof(int);
85176+ }
85177+}
85178+#else
85179+static inline void populate_stack(void)
85180+{
85181+}
85182+#endif
85183+
85184 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
85185 static inline void sched_clock_tick(void)
85186 {
85187@@ -2283,7 +2416,9 @@ void yield(void);
85188 extern struct exec_domain default_exec_domain;
85189
85190 union thread_union {
85191+#ifndef CONFIG_X86
85192 struct thread_info thread_info;
85193+#endif
85194 unsigned long stack[THREAD_SIZE/sizeof(long)];
85195 };
85196
85197@@ -2316,6 +2451,7 @@ extern struct pid_namespace init_pid_ns;
85198 */
85199
85200 extern struct task_struct *find_task_by_vpid(pid_t nr);
85201+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
85202 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
85203 struct pid_namespace *ns);
85204
85205@@ -2480,7 +2616,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
85206 extern void exit_itimers(struct signal_struct *);
85207 extern void flush_itimer_signals(void);
85208
85209-extern void do_group_exit(int);
85210+extern __noreturn void do_group_exit(int);
85211
85212 extern int do_execve(struct filename *,
85213 const char __user * const __user *,
85214@@ -2701,9 +2837,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
85215 #define task_stack_end_corrupted(task) \
85216 (*(end_of_stack(task)) != STACK_END_MAGIC)
85217
85218-static inline int object_is_on_stack(void *obj)
85219+static inline int object_starts_on_stack(const void *obj)
85220 {
85221- void *stack = task_stack_page(current);
85222+ const void *stack = task_stack_page(current);
85223
85224 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
85225 }
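
A small sketch exercising the per-mm PaX flag accessors defined in the sched.h hunk above (the helper name is ours; recall task_lock is required when tsk != current):

static inline bool sketch_mprotect_restricted(struct task_struct *tsk)
{
#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
	return (pax_get_flags(tsk) & MF_PAX_MPROTECT) != 0;
#else
	return false;
#endif
}
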
85226diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
85227index 596a0e0..bea77ec 100644
85228--- a/include/linux/sched/sysctl.h
85229+++ b/include/linux/sched/sysctl.h
85230@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
85231 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
85232
85233 extern int sysctl_max_map_count;
85234+extern unsigned long sysctl_heap_stack_gap;
85235
85236 extern unsigned int sysctl_sched_latency;
85237 extern unsigned int sysctl_sched_min_granularity;
85238diff --git a/include/linux/security.h b/include/linux/security.h
85239index ba96471..74fb3f6 100644
85240--- a/include/linux/security.h
85241+++ b/include/linux/security.h
85242@@ -27,6 +27,7 @@
85243 #include <linux/slab.h>
85244 #include <linux/err.h>
85245 #include <linux/string.h>
85246+#include <linux/grsecurity.h>
85247
85248 struct linux_binprm;
85249 struct cred;
85250@@ -116,8 +117,6 @@ struct seq_file;
85251
85252 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
85253
85254-void reset_security_ops(void);
85255-
85256 #ifdef CONFIG_MMU
85257 extern unsigned long mmap_min_addr;
85258 extern unsigned long dac_mmap_min_addr;
85259@@ -1729,7 +1728,7 @@ struct security_operations {
85260 struct audit_context *actx);
85261 void (*audit_rule_free) (void *lsmrule);
85262 #endif /* CONFIG_AUDIT */
85263-};
85264+} __randomize_layout;
85265
85266 /* prototypes */
85267 extern int security_init(void);
85268diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
85269index dc368b8..e895209 100644
85270--- a/include/linux/semaphore.h
85271+++ b/include/linux/semaphore.h
85272@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
85273 }
85274
85275 extern void down(struct semaphore *sem);
85276-extern int __must_check down_interruptible(struct semaphore *sem);
85277+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
85278 extern int __must_check down_killable(struct semaphore *sem);
85279 extern int __must_check down_trylock(struct semaphore *sem);
85280 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
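
down_interruptible() is the first declaration in this stretch tagged __intentional_overflow(). The attribute is consumed by the size_overflow gcc plugin; judging from its uses in this patch, -1 appears to exempt the function as a whole from overflow instrumentation, while non-negative numbers name specific positions (compare the (2,4) on skb_copy_datagram_msg() further down). In a build without the plugin the macro has to expand to nothing, which the stub below models — the stub is illustrative, not the patch's actual compiler.h definition:

#include <stdio.h>

#ifndef __intentional_overflow
#define __intentional_overflow(...)	/* no plugin: annotation vanishes */
#endif

/* wrapping here is deliberate, so the checker is told to look away */
static unsigned int __intentional_overflow(-1) wrapping_dec(unsigned int v)
{
	return v - 1;			/* 0 wraps to UINT_MAX on purpose */
}

int main(void)
{
	printf("%u\n", wrapping_dec(0));
	return 0;
}
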
85281diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
85282index cf6a9da..bd86b1f 100644
85283--- a/include/linux/seq_file.h
85284+++ b/include/linux/seq_file.h
85285@@ -27,6 +27,9 @@ struct seq_file {
85286 struct mutex lock;
85287 const struct seq_operations *op;
85288 int poll_event;
85289+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
85290+ u64 exec_id;
85291+#endif
85292 #ifdef CONFIG_USER_NS
85293 struct user_namespace *user_ns;
85294 #endif
85295@@ -39,6 +42,7 @@ struct seq_operations {
85296 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
85297 int (*show) (struct seq_file *m, void *v);
85298 };
85299+typedef struct seq_operations __no_const seq_operations_no_const;
85300
85301 #define SEQ_SKIP 1
85302
85303@@ -111,6 +115,7 @@ void seq_pad(struct seq_file *m, char c);
85304
85305 char *mangle_path(char *s, const char *p, const char *esc);
85306 int seq_open(struct file *, const struct seq_operations *);
85307+int seq_open_restrict(struct file *, const struct seq_operations *);
85308 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
85309 loff_t seq_lseek(struct file *, loff_t, int);
85310 int seq_release(struct inode *, struct file *);
85311@@ -153,6 +158,7 @@ static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
85312 }
85313
85314 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
85315+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
85316 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
85317 int single_release(struct inode *, struct file *);
85318 void *__seq_open_private(struct file *, const struct seq_operations *, int);
85319diff --git a/include/linux/shm.h b/include/linux/shm.h
85320index 6fb8016..ab4465e 100644
85321--- a/include/linux/shm.h
85322+++ b/include/linux/shm.h
85323@@ -22,6 +22,10 @@ struct shmid_kernel /* private to the kernel */
85324 /* The task created the shm object. NULL if the task is dead. */
85325 struct task_struct *shm_creator;
85326 struct list_head shm_clist; /* list by creator */
85327+#ifdef CONFIG_GRKERNSEC
85328+ u64 shm_createtime;
85329+ pid_t shm_lapid;
85330+#endif
85331 };
85332
85333 /* shm_mode upper byte flags */
85334diff --git a/include/linux/signal.h b/include/linux/signal.h
85335index ab1e039..ad4229e 100644
85336--- a/include/linux/signal.h
85337+++ b/include/linux/signal.h
85338@@ -289,7 +289,7 @@ static inline void allow_signal(int sig)
85339 * know it'll be handled, so that they don't get converted to
85340 * SIGKILL or just silently dropped.
85341 */
85342- kernel_sigaction(sig, (__force __sighandler_t)2);
85343+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
85344 }
85345
85346 static inline void disallow_signal(int sig)
85347diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
85348index 85ab7d7..eb1585a 100644
85349--- a/include/linux/skbuff.h
85350+++ b/include/linux/skbuff.h
85351@@ -763,7 +763,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
85352 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
85353 int node);
85354 struct sk_buff *build_skb(void *data, unsigned int frag_size);
85355-static inline struct sk_buff *alloc_skb(unsigned int size,
85356+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
85357 gfp_t priority)
85358 {
85359 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
85360@@ -1952,7 +1952,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
85361 return skb->inner_transport_header - skb->inner_network_header;
85362 }
85363
85364-static inline int skb_network_offset(const struct sk_buff *skb)
85365+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
85366 {
85367 return skb_network_header(skb) - skb->data;
85368 }
85369@@ -2012,7 +2012,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
85370 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
85371 */
85372 #ifndef NET_SKB_PAD
85373-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
85374+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
85375 #endif
85376
85377 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
85378@@ -2655,9 +2655,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
85379 int *err);
85380 unsigned int datagram_poll(struct file *file, struct socket *sock,
85381 struct poll_table_struct *wait);
85382-int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
85383+int __intentional_overflow(0) skb_copy_datagram_iter(const struct sk_buff *from, int offset,
85384 struct iov_iter *to, int size);
85385-static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
85386+static inline int __intentional_overflow(2,4) skb_copy_datagram_msg(const struct sk_buff *from, int offset,
85387 struct msghdr *msg, int size)
85388 {
85389 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
85390@@ -3131,6 +3131,9 @@ static inline void nf_reset(struct sk_buff *skb)
85391 nf_bridge_put(skb->nf_bridge);
85392 skb->nf_bridge = NULL;
85393 #endif
85394+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
85395+ skb->nf_trace = 0;
85396+#endif
85397 }
85398
85399 static inline void nf_reset_trace(struct sk_buff *skb)
85400diff --git a/include/linux/slab.h b/include/linux/slab.h
85401index 9a139b6..aab37b4 100644
85402--- a/include/linux/slab.h
85403+++ b/include/linux/slab.h
85404@@ -14,15 +14,29 @@
85405 #include <linux/gfp.h>
85406 #include <linux/types.h>
85407 #include <linux/workqueue.h>
85408-
85409+#include <linux/err.h>
85410
85411 /*
85412 * Flags to pass to kmem_cache_create().
85413 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
85414 */
85415 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
85416+
85417+#ifdef CONFIG_PAX_USERCOPY_SLABS
85418+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
85419+#else
85420+#define SLAB_USERCOPY 0x00000000UL
85421+#endif
85422+
85423 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
85424 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
85425+
85426+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85427+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
85428+#else
85429+#define SLAB_NO_SANITIZE 0x00000000UL
85430+#endif
85431+
85432 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
85433 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
85434 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
85435@@ -98,10 +112,13 @@
85436 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
85437 * Both make kfree a no-op.
85438 */
85439-#define ZERO_SIZE_PTR ((void *)16)
85440+#define ZERO_SIZE_PTR \
85441+({ \
85442+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
85443+ (void *)(-MAX_ERRNO-1L); \
85444+})
85445
85446-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
85447- (unsigned long)ZERO_SIZE_PTR)
85448+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
85449
85450 #include <linux/kmemleak.h>
85451
85452@@ -144,6 +161,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
85453 void kfree(const void *);
85454 void kzfree(const void *);
85455 size_t ksize(const void *);
85456+const char *check_heap_object(const void *ptr, unsigned long n);
85457+bool is_usercopy_object(const void *ptr);
85458
85459 /*
85460 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
85461@@ -236,6 +255,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
85462 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85463 #endif
85464
85465+#ifdef CONFIG_PAX_USERCOPY_SLABS
85466+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
85467+#endif
85468+
85469 /*
85470 * Figure out which kmalloc slab an allocation of a certain size
85471 * belongs to.
85472@@ -244,7 +267,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85473 * 2 = 120 .. 192 bytes
85474 * n = 2^(n-1) .. 2^n -1
85475 */
85476-static __always_inline int kmalloc_index(size_t size)
85477+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
85478 {
85479 if (!size)
85480 return 0;
85481@@ -287,14 +310,14 @@ static __always_inline int kmalloc_index(size_t size)
85482 }
85483 #endif /* !CONFIG_SLOB */
85484
85485-void *__kmalloc(size_t size, gfp_t flags);
85486+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
85487 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
85488
85489 #ifdef CONFIG_NUMA
85490-void *__kmalloc_node(size_t size, gfp_t flags, int node);
85491+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1) __size_overflow(1);
85492 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
85493 #else
85494-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
85495+static __always_inline void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
85496 {
85497 return __kmalloc(size, flags);
85498 }
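
The reworked ZERO_SIZE_PTR moves the kmalloc(0) sentinel from (void *)16 to just below the error-pointer range, and the new ZERO_OR_NULL_PTR folds the NULL test and the range test into one unsigned comparison: subtracting 1 wraps NULL around to ULONG_MAX, so a single compare catches NULL, ZERO_SIZE_PTR, and every ERR_PTR value above it — a kfree() of an overlooked error pointer becomes a no-op instead of a free of a wild address. A standalone sketch of the arithmetic (MAX_ERRNO copied from the kernel's 4095; the BUILD_BUG_ON guard is dropped here):

#include <stdio.h>

#define MAX_ERRNO	4095L
#define ZERO_SIZE_PTR	((void *)(-MAX_ERRNO - 1L))
#define ZERO_OR_NULL_PTR(x) \
	((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

int main(void)
{
	int obj;

	printf("NULL          -> %d\n", (int)ZERO_OR_NULL_PTR((void *)0));
	printf("ZERO_SIZE_PTR -> %d\n", (int)ZERO_OR_NULL_PTR(ZERO_SIZE_PTR));
	printf("ERR_PTR(-22)  -> %d\n", (int)ZERO_OR_NULL_PTR((void *)-22L));
	printf("real object   -> %d\n", (int)ZERO_OR_NULL_PTR(&obj));
	return 0;
}

The first three lines print 1 and the last prints 0.
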
85499diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
85500index b869d16..1453c73 100644
85501--- a/include/linux/slab_def.h
85502+++ b/include/linux/slab_def.h
85503@@ -40,7 +40,7 @@ struct kmem_cache {
85504 /* 4) cache creation/removal */
85505 const char *name;
85506 struct list_head list;
85507- int refcount;
85508+ atomic_t refcount;
85509 int object_size;
85510 int align;
85511
85512@@ -56,10 +56,14 @@ struct kmem_cache {
85513 unsigned long node_allocs;
85514 unsigned long node_frees;
85515 unsigned long node_overflow;
85516- atomic_t allochit;
85517- atomic_t allocmiss;
85518- atomic_t freehit;
85519- atomic_t freemiss;
85520+ atomic_unchecked_t allochit;
85521+ atomic_unchecked_t allocmiss;
85522+ atomic_unchecked_t freehit;
85523+ atomic_unchecked_t freemiss;
85524+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85525+ atomic_unchecked_t sanitized;
85526+ atomic_unchecked_t not_sanitized;
85527+#endif
85528
85529 /*
85530 * If debugging is enabled, then the allocator can add additional
85531diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
85532index d82abd4..408c3a0 100644
85533--- a/include/linux/slub_def.h
85534+++ b/include/linux/slub_def.h
85535@@ -74,7 +74,7 @@ struct kmem_cache {
85536 struct kmem_cache_order_objects max;
85537 struct kmem_cache_order_objects min;
85538 gfp_t allocflags; /* gfp flags to use on each alloc */
85539- int refcount; /* Refcount for slab cache destroy */
85540+ atomic_t refcount; /* Refcount for slab cache destroy */
85541 void (*ctor)(void *);
85542 int inuse; /* Offset to metadata */
85543 int align; /* Alignment */
85544diff --git a/include/linux/smp.h b/include/linux/smp.h
85545index 93dff5f..933c561 100644
85546--- a/include/linux/smp.h
85547+++ b/include/linux/smp.h
85548@@ -176,7 +176,9 @@ static inline void wake_up_all_idle_cpus(void) { }
85549 #endif
85550
85551 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
85552+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
85553 #define put_cpu() preempt_enable()
85554+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
85555
85556 /*
85557 * Callback to arch code if there's nosmp or maxcpus=0 on the
85558diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
85559index 46cca4c..3323536 100644
85560--- a/include/linux/sock_diag.h
85561+++ b/include/linux/sock_diag.h
85562@@ -11,7 +11,7 @@ struct sock;
85563 struct sock_diag_handler {
85564 __u8 family;
85565 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
85566-};
85567+} __do_const;
85568
85569 int sock_diag_register(const struct sock_diag_handler *h);
85570 void sock_diag_unregister(const struct sock_diag_handler *h);
85571diff --git a/include/linux/sonet.h b/include/linux/sonet.h
85572index 680f9a3..f13aeb0 100644
85573--- a/include/linux/sonet.h
85574+++ b/include/linux/sonet.h
85575@@ -7,7 +7,7 @@
85576 #include <uapi/linux/sonet.h>
85577
85578 struct k_sonet_stats {
85579-#define __HANDLE_ITEM(i) atomic_t i
85580+#define __HANDLE_ITEM(i) atomic_unchecked_t i
85581 __SONET_ITEMS
85582 #undef __HANDLE_ITEM
85583 };
85584diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
85585index 07d8e53..dc934c9 100644
85586--- a/include/linux/sunrpc/addr.h
85587+++ b/include/linux/sunrpc/addr.h
85588@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
85589 {
85590 switch (sap->sa_family) {
85591 case AF_INET:
85592- return ntohs(((struct sockaddr_in *)sap)->sin_port);
85593+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
85594 case AF_INET6:
85595- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
85596+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
85597 }
85598 return 0;
85599 }
85600@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
85601 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
85602 const struct sockaddr *src)
85603 {
85604- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
85605+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
85606 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
85607
85608 dsin->sin_family = ssin->sin_family;
85609@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
85610 if (sa->sa_family != AF_INET6)
85611 return 0;
85612
85613- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
85614+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
85615 }
85616
85617 #endif /* _LINUX_SUNRPC_ADDR_H */
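
The sunrpc/addr.h hunks are pure const-correctness: when the incoming sockaddr pointer is const, the family-specific downcast should go through a const-qualified pointer too, so the compiler keeps enforcing read-only access instead of having the qualifier silently cast away. A compilable userspace rendering of the patched rpc_get_port():

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>

static unsigned short rpc_get_port(const struct sockaddr *sap)
{
	switch (sap->sa_family) {
	case AF_INET:
		return ntohs(((const struct sockaddr_in *)sap)->sin_port);
	case AF_INET6:
		return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
	}
	return 0;
}

int main(void)
{
	struct sockaddr_in sin = {
		.sin_family = AF_INET,
		.sin_port = htons(2049),	/* NFS, just as sample data */
	};

	printf("port %u\n", rpc_get_port((const struct sockaddr *)&sin));
	return 0;
}
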
85618diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
85619index 598ba80..d90cba6 100644
85620--- a/include/linux/sunrpc/clnt.h
85621+++ b/include/linux/sunrpc/clnt.h
85622@@ -100,7 +100,7 @@ struct rpc_procinfo {
85623 unsigned int p_timer; /* Which RTT timer to use */
85624 u32 p_statidx; /* Which procedure to account */
85625 const char * p_name; /* name of procedure */
85626-};
85627+} __do_const;
85628
85629 #ifdef __KERNEL__
85630
85631diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
85632index 6f22cfe..9fd0909 100644
85633--- a/include/linux/sunrpc/svc.h
85634+++ b/include/linux/sunrpc/svc.h
85635@@ -420,7 +420,7 @@ struct svc_procedure {
85636 unsigned int pc_count; /* call count */
85637 unsigned int pc_cachetype; /* cache info (NFS) */
85638 unsigned int pc_xdrressize; /* maximum size of XDR reply */
85639-};
85640+} __do_const;
85641
85642 /*
85643 * Function prototypes.
85644diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
85645index 975da75..318c083 100644
85646--- a/include/linux/sunrpc/svc_rdma.h
85647+++ b/include/linux/sunrpc/svc_rdma.h
85648@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
85649 extern unsigned int svcrdma_max_requests;
85650 extern unsigned int svcrdma_max_req_size;
85651
85652-extern atomic_t rdma_stat_recv;
85653-extern atomic_t rdma_stat_read;
85654-extern atomic_t rdma_stat_write;
85655-extern atomic_t rdma_stat_sq_starve;
85656-extern atomic_t rdma_stat_rq_starve;
85657-extern atomic_t rdma_stat_rq_poll;
85658-extern atomic_t rdma_stat_rq_prod;
85659-extern atomic_t rdma_stat_sq_poll;
85660-extern atomic_t rdma_stat_sq_prod;
85661+extern atomic_unchecked_t rdma_stat_recv;
85662+extern atomic_unchecked_t rdma_stat_read;
85663+extern atomic_unchecked_t rdma_stat_write;
85664+extern atomic_unchecked_t rdma_stat_sq_starve;
85665+extern atomic_unchecked_t rdma_stat_rq_starve;
85666+extern atomic_unchecked_t rdma_stat_rq_poll;
85667+extern atomic_unchecked_t rdma_stat_rq_prod;
85668+extern atomic_unchecked_t rdma_stat_sq_poll;
85669+extern atomic_unchecked_t rdma_stat_sq_prod;
85670
85671 #define RPCRDMA_VERSION 1
85672
85673diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
85674index 8d71d65..f79586e 100644
85675--- a/include/linux/sunrpc/svcauth.h
85676+++ b/include/linux/sunrpc/svcauth.h
85677@@ -120,7 +120,7 @@ struct auth_ops {
85678 int (*release)(struct svc_rqst *rq);
85679 void (*domain_release)(struct auth_domain *);
85680 int (*set_client)(struct svc_rqst *rq);
85681-};
85682+} __do_const;
85683
85684 #define SVC_GARBAGE 1
85685 #define SVC_SYSERR 2
85686diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
85687index e7a018e..49f8b17 100644
85688--- a/include/linux/swiotlb.h
85689+++ b/include/linux/swiotlb.h
85690@@ -60,7 +60,8 @@ extern void
85691
85692 extern void
85693 swiotlb_free_coherent(struct device *hwdev, size_t size,
85694- void *vaddr, dma_addr_t dma_handle);
85695+ void *vaddr, dma_addr_t dma_handle,
85696+ struct dma_attrs *attrs);
85697
85698 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
85699 unsigned long offset, size_t size,
85700diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
85701index 85893d7..e78c660 100644
85702--- a/include/linux/syscalls.h
85703+++ b/include/linux/syscalls.h
85704@@ -102,7 +102,12 @@ union bpf_attr;
85705 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
85706 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
85707 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
85708-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
85709+#define __SC_LONG(t, a) __typeof__( \
85710+ __builtin_choose_expr( \
85711+ sizeof(t) > sizeof(int), \
85712+ (t) 0, \
85713+ __builtin_choose_expr(__type_is_unsigned(t), 0UL, 0L) \
85714+ )) a
85715 #define __SC_CAST(t, a) (t) a
85716 #define __SC_ARGS(t, a) a
85717 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
85718@@ -384,11 +389,11 @@ asmlinkage long sys_sync(void);
85719 asmlinkage long sys_fsync(unsigned int fd);
85720 asmlinkage long sys_fdatasync(unsigned int fd);
85721 asmlinkage long sys_bdflush(int func, long data);
85722-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
85723- char __user *type, unsigned long flags,
85724+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
85725+ const char __user *type, unsigned long flags,
85726 void __user *data);
85727-asmlinkage long sys_umount(char __user *name, int flags);
85728-asmlinkage long sys_oldumount(char __user *name);
85729+asmlinkage long sys_umount(const char __user *name, int flags);
85730+asmlinkage long sys_oldumount(const char __user *name);
85731 asmlinkage long sys_truncate(const char __user *path, long length);
85732 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
85733 asmlinkage long sys_stat(const char __user *filename,
85734@@ -600,7 +605,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
85735 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
85736 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
85737 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
85738- struct sockaddr __user *, int);
85739+ struct sockaddr __user *, int) __intentional_overflow(0);
85740 asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
85741 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
85742 unsigned int vlen, unsigned flags);
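
The rewritten __SC_LONG chooses the register-width shadow type for a syscall argument by the argument's own signedness instead of funnelling everything through a signed long, so a 32-bit unsigned argument gets an unsigned long shadow rather than a sign-extending signed one. __type_is_unsigned() is defined elsewhere in the patch; the sketch below models it with a (t)~0 >= 0 test and is GCC-specific (__builtin_choose_expr):

#include <stdio.h>

/* modelled here; the patch defines the real __type_is_unsigned elsewhere */
#define __type_is_unsigned(t)	((t)~0 >= 0)

#define __SC_LONG(t, a) __typeof__(				\
	__builtin_choose_expr(					\
		sizeof(t) > sizeof(int),			\
		(t) 0,						\
		__builtin_choose_expr(__type_is_unsigned(t), 0UL, 0L) \
	)) a

#define IS_UNSIGNED_VAR(v)	((__typeof__(v))-1 > 0)

int main(void)
{
	__SC_LONG(unsigned int, uarg);	/* shadow type: unsigned long */
	__SC_LONG(int, sarg);		/* shadow type: long */

	uarg = 0; sarg = 0;
	printf("uarg: %s\n", IS_UNSIGNED_VAR(uarg) ? "unsigned long" : "long");
	printf("sarg: %s\n", IS_UNSIGNED_VAR(sarg) ? "unsigned long" : "long");
	return 0;
}

Under the old definition both shadow variables would have collapsed to plain long.
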
85743diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
85744index 27b3b0b..e093dd9 100644
85745--- a/include/linux/syscore_ops.h
85746+++ b/include/linux/syscore_ops.h
85747@@ -16,7 +16,7 @@ struct syscore_ops {
85748 int (*suspend)(void);
85749 void (*resume)(void);
85750 void (*shutdown)(void);
85751-};
85752+} __do_const;
85753
85754 extern void register_syscore_ops(struct syscore_ops *ops);
85755 extern void unregister_syscore_ops(struct syscore_ops *ops);
85756diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
85757index b7361f8..341a15a 100644
85758--- a/include/linux/sysctl.h
85759+++ b/include/linux/sysctl.h
85760@@ -39,6 +39,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
85761
85762 extern int proc_dostring(struct ctl_table *, int,
85763 void __user *, size_t *, loff_t *);
85764+extern int proc_dostring_modpriv(struct ctl_table *, int,
85765+ void __user *, size_t *, loff_t *);
85766 extern int proc_dointvec(struct ctl_table *, int,
85767 void __user *, size_t *, loff_t *);
85768 extern int proc_dointvec_minmax(struct ctl_table *, int,
85769@@ -113,7 +115,8 @@ struct ctl_table
85770 struct ctl_table_poll *poll;
85771 void *extra1;
85772 void *extra2;
85773-};
85774+} __do_const __randomize_layout;
85775+typedef struct ctl_table __no_const ctl_table_no_const;
85776
85777 struct ctl_node {
85778 struct rb_node node;
85779diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
85780index ddad161..a3efd26 100644
85781--- a/include/linux/sysfs.h
85782+++ b/include/linux/sysfs.h
85783@@ -34,7 +34,8 @@ struct attribute {
85784 struct lock_class_key *key;
85785 struct lock_class_key skey;
85786 #endif
85787-};
85788+} __do_const;
85789+typedef struct attribute __no_const attribute_no_const;
85790
85791 /**
85792 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
85793@@ -63,7 +64,8 @@ struct attribute_group {
85794 struct attribute *, int);
85795 struct attribute **attrs;
85796 struct bin_attribute **bin_attrs;
85797-};
85798+} __do_const;
85799+typedef struct attribute_group __no_const attribute_group_no_const;
85800
85801 /**
85802 * Use these macros to make defining attributes easier. See include/linux/device.h
85803@@ -137,7 +139,8 @@ struct bin_attribute {
85804 char *, loff_t, size_t);
85805 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
85806 struct vm_area_struct *vma);
85807-};
85808+} __do_const;
85809+typedef struct bin_attribute __no_const bin_attribute_no_const;
85810
85811 /**
85812 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
85813diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
85814index 387fa7d..3fcde6b 100644
85815--- a/include/linux/sysrq.h
85816+++ b/include/linux/sysrq.h
85817@@ -16,6 +16,7 @@
85818
85819 #include <linux/errno.h>
85820 #include <linux/types.h>
85821+#include <linux/compiler.h>
85822
85823 /* Possible values of bitmask for enabling sysrq functions */
85824 /* 0x0001 is reserved for enable everything */
85825@@ -33,7 +34,7 @@ struct sysrq_key_op {
85826 char *help_msg;
85827 char *action_msg;
85828 int enable_mask;
85829-};
85830+} __do_const;
85831
85832 #ifdef CONFIG_MAGIC_SYSRQ
85833
85834diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
85835index ff307b5..f1a4468 100644
85836--- a/include/linux/thread_info.h
85837+++ b/include/linux/thread_info.h
85838@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
85839 #error "no set_restore_sigmask() provided and default one won't work"
85840 #endif
85841
85842+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
85843+
85844+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
85845+{
85846+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
85847+}
85848+
85849 #endif /* __KERNEL__ */
85850
85851 #endif /* _LINUX_THREAD_INFO_H */
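
check_object_size() is the PAX_USERCOPY entry point: the copy_to_user/copy_from_user paths funnel through it, and the inline wrapper forwards whether the length was a compile-time constant so the out-of-line checker can treat constant-size copies differently from attacker-influenced runtime lengths. A userspace model of just that dispatch — the checker body is a printf stand-in, not the kernel's slab and stack walking:

#include <stdbool.h>
#include <stdio.h>

/* stand-in body: the real one validates ptr/n against heap and stack bounds */
static void __check_object_size(const void *ptr, unsigned long n,
				bool to_user, bool const_size)
{
	printf("usercopy check: %p len=%lu dir=%s len-is-constant=%d\n",
	       ptr, n, to_user ? "to user" : "from user", (int)const_size);
}

static inline void check_object_size(const void *ptr, unsigned long n,
				     bool to_user)
{
	/* __builtin_constant_p resolves once this is inlined (build with -O2) */
	__check_object_size(ptr, n, to_user, __builtin_constant_p(n));
}

int main(void)
{
	char buf[64];
	unsigned long runtime_len = (unsigned long)buf % 32;

	check_object_size(buf, sizeof(buf), true);	/* constant length */
	check_object_size(buf, runtime_len, false);	/* runtime length */
	return 0;
}
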
85852diff --git a/include/linux/tty.h b/include/linux/tty.h
85853index 7d66ae5..0327149 100644
85854--- a/include/linux/tty.h
85855+++ b/include/linux/tty.h
85856@@ -202,7 +202,7 @@ struct tty_port {
85857 const struct tty_port_operations *ops; /* Port operations */
85858 spinlock_t lock; /* Lock protecting tty field */
85859 int blocked_open; /* Waiting to open */
85860- int count; /* Usage count */
85861+ atomic_t count; /* Usage count */
85862 wait_queue_head_t open_wait; /* Open waiters */
85863 wait_queue_head_t close_wait; /* Close waiters */
85864 wait_queue_head_t delta_msr_wait; /* Modem status change */
85865@@ -290,7 +290,7 @@ struct tty_struct {
85866 /* If the tty has a pending do_SAK, queue it here - akpm */
85867 struct work_struct SAK_work;
85868 struct tty_port *port;
85869-};
85870+} __randomize_layout;
85871
85872 /* Each of a tty's open files has private_data pointing to tty_file_private */
85873 struct tty_file_private {
85874@@ -549,7 +549,7 @@ extern int tty_port_open(struct tty_port *port,
85875 struct tty_struct *tty, struct file *filp);
85876 static inline int tty_port_users(struct tty_port *port)
85877 {
85878- return port->count + port->blocked_open;
85879+ return atomic_read(&port->count) + port->blocked_open;
85880 }
85881
85882 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
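
The tty_port change is representative of dozens of hunks in this patch: a plain int usage counter becomes atomic_t so that increments and decrements from concurrent open/close paths are atomic and lockless readers such as tty_port_users() see whole values. A C11 rendering of the before/after shape, with atomic_int standing in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

struct tty_port_model {
	atomic_int count;		/* was: int count */
	int blocked_open;
};

static int tty_port_users(struct tty_port_model *port)
{
	/* was: port->count + port->blocked_open */
	return atomic_load(&port->count) + port->blocked_open;
}

int main(void)
{
	struct tty_port_model port = { .blocked_open = 0 };

	atomic_fetch_add(&port.count, 1);	/* open() path */
	printf("users=%d\n", tty_port_users(&port));
	atomic_fetch_sub(&port.count, 1);	/* close() path */
	printf("users=%d\n", tty_port_users(&port));
	return 0;
}
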
85883diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
85884index 92e337c..f46757b 100644
85885--- a/include/linux/tty_driver.h
85886+++ b/include/linux/tty_driver.h
85887@@ -291,7 +291,7 @@ struct tty_operations {
85888 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
85889 #endif
85890 const struct file_operations *proc_fops;
85891-};
85892+} __do_const __randomize_layout;
85893
85894 struct tty_driver {
85895 int magic; /* magic number for this structure */
85896@@ -325,7 +325,7 @@ struct tty_driver {
85897
85898 const struct tty_operations *ops;
85899 struct list_head tty_drivers;
85900-};
85901+} __randomize_layout;
85902
85903 extern struct list_head tty_drivers;
85904
85905diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
85906index 00c9d68..bc0188b 100644
85907--- a/include/linux/tty_ldisc.h
85908+++ b/include/linux/tty_ldisc.h
85909@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
85910
85911 struct module *owner;
85912
85913- int refcount;
85914+ atomic_t refcount;
85915 };
85916
85917 struct tty_ldisc {
85918diff --git a/include/linux/types.h b/include/linux/types.h
85919index a0bb704..f511c77 100644
85920--- a/include/linux/types.h
85921+++ b/include/linux/types.h
85922@@ -177,10 +177,26 @@ typedef struct {
85923 int counter;
85924 } atomic_t;
85925
85926+#ifdef CONFIG_PAX_REFCOUNT
85927+typedef struct {
85928+ int counter;
85929+} atomic_unchecked_t;
85930+#else
85931+typedef atomic_t atomic_unchecked_t;
85932+#endif
85933+
85934 #ifdef CONFIG_64BIT
85935 typedef struct {
85936 long counter;
85937 } atomic64_t;
85938+
85939+#ifdef CONFIG_PAX_REFCOUNT
85940+typedef struct {
85941+ long counter;
85942+} atomic64_unchecked_t;
85943+#else
85944+typedef atomic64_t atomic64_unchecked_t;
85945+#endif
85946 #endif
85947
85948 struct list_head {
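
atomic_unchecked_t is the escape hatch for PAX_REFCOUNT: with that option on, every ordinary atomic_t operation gains overflow detection, so counters that are expected to wrap — the statistics and sequence numbers converted throughout this section — move to the unchecked type; with it off, the typedef collapses back to atomic_t and nothing changes. A compilable sketch of the alias pattern, treating CONFIG_PAX_REFCOUNT as an ordinary preprocessor switch:

#include <stdio.h>

typedef struct { int counter; } atomic_t;

#ifdef CONFIG_PAX_REFCOUNT
/* distinct struct: operations on it skip the overflow trap */
typedef struct { int counter; } atomic_unchecked_t;
#else
/* hardening off: same type, zero cost */
typedef atomic_t atomic_unchecked_t;
#endif

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

int main(void)
{
	atomic_unchecked_t stat = { 0 };

	atomic_inc_unchecked(&stat);
	printf("counter=%d\n", stat.counter);
	return 0;
}

Making the checked and unchecked variants distinct struct types is what lets the compiler reject an atomic_inc() on an unchecked counter, and vice versa, when the option is enabled.
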
85949diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
85950index ecd3319..8a36ded 100644
85951--- a/include/linux/uaccess.h
85952+++ b/include/linux/uaccess.h
85953@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
85954 long ret; \
85955 mm_segment_t old_fs = get_fs(); \
85956 \
85957- set_fs(KERNEL_DS); \
85958 pagefault_disable(); \
85959- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
85960- pagefault_enable(); \
85961+ set_fs(KERNEL_DS); \
85962+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
85963 set_fs(old_fs); \
85964+ pagefault_enable(); \
85965 ret; \
85966 })
85967
85968diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
85969index 2d1f9b6..d7a9fce 100644
85970--- a/include/linux/uidgid.h
85971+++ b/include/linux/uidgid.h
85972@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
85973
85974 #endif /* CONFIG_USER_NS */
85975
85976+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
85977+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
85978+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
85979+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
85980+
85981 #endif /* _LINUX_UIDGID_H */
85982diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
85983index 32c0e83..671eb35 100644
85984--- a/include/linux/uio_driver.h
85985+++ b/include/linux/uio_driver.h
85986@@ -67,7 +67,7 @@ struct uio_device {
85987 struct module *owner;
85988 struct device *dev;
85989 int minor;
85990- atomic_t event;
85991+ atomic_unchecked_t event;
85992 struct fasync_struct *async_queue;
85993 wait_queue_head_t wait;
85994 struct uio_info *info;
85995diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
85996index 99c1b4d..562e6f3 100644
85997--- a/include/linux/unaligned/access_ok.h
85998+++ b/include/linux/unaligned/access_ok.h
85999@@ -4,34 +4,34 @@
86000 #include <linux/kernel.h>
86001 #include <asm/byteorder.h>
86002
86003-static inline u16 get_unaligned_le16(const void *p)
86004+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
86005 {
86006- return le16_to_cpup((__le16 *)p);
86007+ return le16_to_cpup((const __le16 *)p);
86008 }
86009
86010-static inline u32 get_unaligned_le32(const void *p)
86011+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
86012 {
86013- return le32_to_cpup((__le32 *)p);
86014+ return le32_to_cpup((const __le32 *)p);
86015 }
86016
86017-static inline u64 get_unaligned_le64(const void *p)
86018+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
86019 {
86020- return le64_to_cpup((__le64 *)p);
86021+ return le64_to_cpup((const __le64 *)p);
86022 }
86023
86024-static inline u16 get_unaligned_be16(const void *p)
86025+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
86026 {
86027- return be16_to_cpup((__be16 *)p);
86028+ return be16_to_cpup((const __be16 *)p);
86029 }
86030
86031-static inline u32 get_unaligned_be32(const void *p)
86032+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
86033 {
86034- return be32_to_cpup((__be32 *)p);
86035+ return be32_to_cpup((const __be32 *)p);
86036 }
86037
86038-static inline u64 get_unaligned_be64(const void *p)
86039+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
86040 {
86041- return be64_to_cpup((__be64 *)p);
86042+ return be64_to_cpup((const __be64 *)p);
86043 }
86044
86045 static inline void put_unaligned_le16(u16 val, void *p)
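
Two independent things happen in access_ok.h: each getter is tagged __intentional_overflow(-1), since the loaded values are arbitrary wire data and arithmetic on them may wrap legitimately, and each cast gains a const qualifier so the accessors no longer cast constness away. A hedged userspace equivalent of get_unaligned_le16 — explicit byte assembly instead of the kernel's cast-and-dereference, which sidesteps both alignment and host endianness:

#include <stdint.h>
#include <stdio.h>

static inline uint16_t get_unaligned_le16(const void *p)
{
	const uint8_t *b = p;

	/* assemble explicitly: alignment- and endian-independent */
	return (uint16_t)(b[0] | (uint16_t)b[1] << 8);
}

int main(void)
{
	uint8_t buf[3] = { 0xff, 0x34, 0x12 };

	/* deliberately misaligned source address */
	printf("0x%04x\n", get_unaligned_le16(buf + 1));	/* 0x1234 */
	return 0;
}
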
86046diff --git a/include/linux/usb.h b/include/linux/usb.h
86047index 058a769..c17a1c2c 100644
86048--- a/include/linux/usb.h
86049+++ b/include/linux/usb.h
86050@@ -566,7 +566,7 @@ struct usb_device {
86051 int maxchild;
86052
86053 u32 quirks;
86054- atomic_t urbnum;
86055+ atomic_unchecked_t urbnum;
86056
86057 unsigned long active_duration;
86058
86059@@ -1650,7 +1650,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
86060
86061 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
86062 __u8 request, __u8 requesttype, __u16 value, __u16 index,
86063- void *data, __u16 size, int timeout);
86064+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
86065 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
86066 void *data, int len, int *actual_length, int timeout);
86067 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
86068diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
86069index 9fd9e48..e2c5f35 100644
86070--- a/include/linux/usb/renesas_usbhs.h
86071+++ b/include/linux/usb/renesas_usbhs.h
86072@@ -39,7 +39,7 @@ enum {
86073 */
86074 struct renesas_usbhs_driver_callback {
86075 int (*notify_hotplug)(struct platform_device *pdev);
86076-};
86077+} __no_const;
86078
86079 /*
86080 * callback functions for platform
86081diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
86082index 8297e5b..0dfae27 100644
86083--- a/include/linux/user_namespace.h
86084+++ b/include/linux/user_namespace.h
86085@@ -39,7 +39,7 @@ struct user_namespace {
86086 struct key *persistent_keyring_register;
86087 struct rw_semaphore persistent_keyring_register_sem;
86088 #endif
86089-};
86090+} __randomize_layout;
86091
86092 extern struct user_namespace init_user_ns;
86093
86094diff --git a/include/linux/utsname.h b/include/linux/utsname.h
86095index 5093f58..c103e58 100644
86096--- a/include/linux/utsname.h
86097+++ b/include/linux/utsname.h
86098@@ -25,7 +25,7 @@ struct uts_namespace {
86099 struct new_utsname name;
86100 struct user_namespace *user_ns;
86101 struct ns_common ns;
86102-};
86103+} __randomize_layout;
86104 extern struct uts_namespace init_uts_ns;
86105
86106 #ifdef CONFIG_UTS_NS
86107diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
86108index 6f8fbcf..4efc177 100644
86109--- a/include/linux/vermagic.h
86110+++ b/include/linux/vermagic.h
86111@@ -25,9 +25,42 @@
86112 #define MODULE_ARCH_VERMAGIC ""
86113 #endif
86114
86115+#ifdef CONFIG_PAX_REFCOUNT
86116+#define MODULE_PAX_REFCOUNT "REFCOUNT "
86117+#else
86118+#define MODULE_PAX_REFCOUNT ""
86119+#endif
86120+
86121+#ifdef CONSTIFY_PLUGIN
86122+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
86123+#else
86124+#define MODULE_CONSTIFY_PLUGIN ""
86125+#endif
86126+
86127+#ifdef STACKLEAK_PLUGIN
86128+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
86129+#else
86130+#define MODULE_STACKLEAK_PLUGIN ""
86131+#endif
86132+
86133+#ifdef RANDSTRUCT_PLUGIN
86134+#include <generated/randomize_layout_hash.h>
86135+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
86136+#else
86137+#define MODULE_RANDSTRUCT_PLUGIN
86138+#endif
86139+
86140+#ifdef CONFIG_GRKERNSEC
86141+#define MODULE_GRSEC "GRSEC "
86142+#else
86143+#define MODULE_GRSEC ""
86144+#endif
86145+
86146 #define VERMAGIC_STRING \
86147 UTS_RELEASE " " \
86148 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
86149 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
86150- MODULE_ARCH_VERMAGIC
86151+ MODULE_ARCH_VERMAGIC \
86152+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
86153+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
86154
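
The vermagic extension rides on string-literal concatenation: each hardening feature contributes either a tagged token or an empty string, the pieces are glued into VERMAGIC_STRING, and the result is baked into every module, so a module built without, say, REFCOUNT simply fails the magic comparison on a kernel built with it. A sketch of the composition with a placeholder release string and two of the feature switches:

#include <stdio.h>

#define UTS_RELEASE "3.19.5-grsec"	/* placeholder */

#ifdef CONFIG_PAX_REFCOUNT
#define MODULE_PAX_REFCOUNT "REFCOUNT "
#else
#define MODULE_PAX_REFCOUNT ""
#endif

#ifdef CONFIG_GRKERNSEC
#define MODULE_GRSEC "GRSEC "
#else
#define MODULE_GRSEC ""
#endif

#define VERMAGIC_STRING \
	UTS_RELEASE " " MODULE_PAX_REFCOUNT MODULE_GRSEC

int main(void)
{
	puts(VERMAGIC_STRING);	/* adjacent literals concatenate at compile time */
	return 0;
}

The RANDSTRUCT component goes one step further and embeds a hash of the randomization seed, so only modules built from the same seed can load into a layout-randomized kernel.
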
86155diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
86156index b483abd..af305ad 100644
86157--- a/include/linux/vga_switcheroo.h
86158+++ b/include/linux/vga_switcheroo.h
86159@@ -63,9 +63,9 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
86160
86161 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
86162
86163-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
86164+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
86165 void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
86166-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
86167+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
86168 #else
86169
86170 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
86171@@ -82,9 +82,9 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
86172
86173 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
86174
86175-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
86176+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
86177 static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
86178-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
86179+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
86180
86181 #endif
86182 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
86183diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
86184index b87696f..1d11de7 100644
86185--- a/include/linux/vmalloc.h
86186+++ b/include/linux/vmalloc.h
86187@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
86188 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
86189 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
86190 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
86191+
86192+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
86193+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
86194+#endif
86195+
86196 /* bits [20..32] reserved for arch specific ioremap internals */
86197
86198 /*
86199@@ -82,6 +87,10 @@ extern void *vmap(struct page **pages, unsigned int count,
86200 unsigned long flags, pgprot_t prot);
86201 extern void vunmap(const void *addr);
86202
86203+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86204+extern void unmap_process_stacks(struct task_struct *task);
86205+#endif
86206+
86207 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
86208 unsigned long uaddr, void *kaddr,
86209 unsigned long size);
86210@@ -142,7 +151,7 @@ extern void free_vm_area(struct vm_struct *area);
86211
86212 /* for /dev/kmem */
86213 extern long vread(char *buf, char *addr, unsigned long count);
86214-extern long vwrite(char *buf, char *addr, unsigned long count);
86215+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
86216
86217 /*
86218 * Internals. Dont't use..
86219diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
86220index 82e7db7..f8ce3d0 100644
86221--- a/include/linux/vmstat.h
86222+++ b/include/linux/vmstat.h
86223@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
86224 /*
86225 * Zone based page accounting with per cpu differentials.
86226 */
86227-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86228+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86229
86230 static inline void zone_page_state_add(long x, struct zone *zone,
86231 enum zone_stat_item item)
86232 {
86233- atomic_long_add(x, &zone->vm_stat[item]);
86234- atomic_long_add(x, &vm_stat[item]);
86235+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
86236+ atomic_long_add_unchecked(x, &vm_stat[item]);
86237 }
86238
86239-static inline unsigned long global_page_state(enum zone_stat_item item)
86240+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
86241 {
86242- long x = atomic_long_read(&vm_stat[item]);
86243+ long x = atomic_long_read_unchecked(&vm_stat[item]);
86244 #ifdef CONFIG_SMP
86245 if (x < 0)
86246 x = 0;
86247@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
86248 return x;
86249 }
86250
86251-static inline unsigned long zone_page_state(struct zone *zone,
86252+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
86253 enum zone_stat_item item)
86254 {
86255- long x = atomic_long_read(&zone->vm_stat[item]);
86256+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
86257 #ifdef CONFIG_SMP
86258 if (x < 0)
86259 x = 0;
86260@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
86261 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
86262 enum zone_stat_item item)
86263 {
86264- long x = atomic_long_read(&zone->vm_stat[item]);
86265+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
86266
86267 #ifdef CONFIG_SMP
86268 int cpu;
86269@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
86270
86271 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
86272 {
86273- atomic_long_inc(&zone->vm_stat[item]);
86274- atomic_long_inc(&vm_stat[item]);
86275+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
86276+ atomic_long_inc_unchecked(&vm_stat[item]);
86277 }
86278
86279 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
86280 {
86281- atomic_long_dec(&zone->vm_stat[item]);
86282- atomic_long_dec(&vm_stat[item]);
86283+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
86284+ atomic_long_dec_unchecked(&vm_stat[item]);
86285 }
86286
86287 static inline void __inc_zone_page_state(struct page *page,
86288diff --git a/include/linux/xattr.h b/include/linux/xattr.h
86289index 91b0a68..0e9adf6 100644
86290--- a/include/linux/xattr.h
86291+++ b/include/linux/xattr.h
86292@@ -28,7 +28,7 @@ struct xattr_handler {
86293 size_t size, int handler_flags);
86294 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
86295 size_t size, int flags, int handler_flags);
86296-};
86297+} __do_const;
86298
86299 struct xattr {
86300 const char *name;
86301@@ -37,6 +37,9 @@ struct xattr {
86302 };
86303
86304 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
86305+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
86306+ssize_t pax_getxattr(struct dentry *, void *, size_t);
86307+#endif
86308 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
86309 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
86310 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
86311diff --git a/include/linux/zlib.h b/include/linux/zlib.h
86312index 92dbbd3..13ab0b3 100644
86313--- a/include/linux/zlib.h
86314+++ b/include/linux/zlib.h
86315@@ -31,6 +31,7 @@
86316 #define _ZLIB_H
86317
86318 #include <linux/zconf.h>
86319+#include <linux/compiler.h>
86320
86321 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
86322 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
86323@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
86324
86325 /* basic functions */
86326
86327-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
86328+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
86329 /*
86330 Returns the number of bytes that needs to be allocated for a per-
86331 stream workspace with the specified parameters. A pointer to this
86332diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
86333index eb76cfd..9fd0e7c 100644
86334--- a/include/media/v4l2-dev.h
86335+++ b/include/media/v4l2-dev.h
86336@@ -75,7 +75,7 @@ struct v4l2_file_operations {
86337 int (*mmap) (struct file *, struct vm_area_struct *);
86338 int (*open) (struct file *);
86339 int (*release) (struct file *);
86340-};
86341+} __do_const;
86342
86343 /*
86344 * Newer version of video_device, handled by videodev2.c
86345diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
86346index ffb69da..040393e 100644
86347--- a/include/media/v4l2-device.h
86348+++ b/include/media/v4l2-device.h
86349@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
86350 this function returns 0. If the name ends with a digit (e.g. cx18),
86351 then the name will be set to cx18-0 since cx180 looks really odd. */
86352 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
86353- atomic_t *instance);
86354+ atomic_unchecked_t *instance);
86355
86356 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
86357 Since the parent disappears this ensures that v4l2_dev doesn't have an
86358diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
86359index 2a25dec..bf6dd8a 100644
86360--- a/include/net/9p/transport.h
86361+++ b/include/net/9p/transport.h
86362@@ -62,7 +62,7 @@ struct p9_trans_module {
86363 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
86364 int (*zc_request)(struct p9_client *, struct p9_req_t *,
86365 char *, char *, int , int, int, int);
86366-};
86367+} __do_const;
86368
86369 void v9fs_register_trans(struct p9_trans_module *m);
86370 void v9fs_unregister_trans(struct p9_trans_module *m);
86371diff --git a/include/net/af_unix.h b/include/net/af_unix.h
86372index a175ba4..196eb8242 100644
86373--- a/include/net/af_unix.h
86374+++ b/include/net/af_unix.h
86375@@ -36,7 +36,7 @@ struct unix_skb_parms {
86376 u32 secid; /* Security ID */
86377 #endif
86378 u32 consumed;
86379-};
86380+} __randomize_layout;
86381
86382 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
86383 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
86384diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
86385index d1bb342..e12f7d2 100644
86386--- a/include/net/bluetooth/l2cap.h
86387+++ b/include/net/bluetooth/l2cap.h
86388@@ -608,7 +608,7 @@ struct l2cap_ops {
86389 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
86390 unsigned long hdr_len,
86391 unsigned long len, int nb);
86392-};
86393+} __do_const;
86394
86395 struct l2cap_conn {
86396 struct hci_conn *hcon;
86397diff --git a/include/net/bonding.h b/include/net/bonding.h
86398index 983a94b..7aa9b16 100644
86399--- a/include/net/bonding.h
86400+++ b/include/net/bonding.h
86401@@ -647,7 +647,7 @@ extern struct rtnl_link_ops bond_link_ops;
86402
86403 static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
86404 {
86405- atomic_long_inc(&dev->tx_dropped);
86406+ atomic_long_inc_unchecked(&dev->tx_dropped);
86407 dev_kfree_skb_any(skb);
86408 }
86409
86410diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
86411index f2ae33d..c457cf0 100644
86412--- a/include/net/caif/cfctrl.h
86413+++ b/include/net/caif/cfctrl.h
86414@@ -52,7 +52,7 @@ struct cfctrl_rsp {
86415 void (*radioset_rsp)(void);
86416 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
86417 struct cflayer *client_layer);
86418-};
86419+} __no_const;
86420
86421 /* Link Setup Parameters for CAIF-Links. */
86422 struct cfctrl_link_param {
86423@@ -101,8 +101,8 @@ struct cfctrl_request_info {
86424 struct cfctrl {
86425 struct cfsrvl serv;
86426 struct cfctrl_rsp res;
86427- atomic_t req_seq_no;
86428- atomic_t rsp_seq_no;
86429+ atomic_unchecked_t req_seq_no;
86430+ atomic_unchecked_t rsp_seq_no;
86431 struct list_head list;
86432 /* Protects from simultaneous access to first_req list */
86433 spinlock_t info_list_lock;
86434diff --git a/include/net/flow.h b/include/net/flow.h
86435index 8109a15..504466d 100644
86436--- a/include/net/flow.h
86437+++ b/include/net/flow.h
86438@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
86439
86440 void flow_cache_flush(struct net *net);
86441 void flow_cache_flush_deferred(struct net *net);
86442-extern atomic_t flow_cache_genid;
86443+extern atomic_unchecked_t flow_cache_genid;
86444
86445 #endif
86446diff --git a/include/net/genetlink.h b/include/net/genetlink.h
86447index 6c92415..3a352d8 100644
86448--- a/include/net/genetlink.h
86449+++ b/include/net/genetlink.h
86450@@ -130,7 +130,7 @@ struct genl_ops {
86451 u8 cmd;
86452 u8 internal_flags;
86453 u8 flags;
86454-};
86455+} __do_const;
86456
86457 int __genl_register_family(struct genl_family *family);
86458
86459diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
86460index 734d9b5..48a9a4b 100644
86461--- a/include/net/gro_cells.h
86462+++ b/include/net/gro_cells.h
86463@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
86464 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
86465
86466 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
86467- atomic_long_inc(&dev->rx_dropped);
86468+ atomic_long_inc_unchecked(&dev->rx_dropped);
86469 kfree_skb(skb);
86470 return;
86471 }
86472diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
86473index 848e85c..051c7de 100644
86474--- a/include/net/inet_connection_sock.h
86475+++ b/include/net/inet_connection_sock.h
86476@@ -63,7 +63,7 @@ struct inet_connection_sock_af_ops {
86477 int (*bind_conflict)(const struct sock *sk,
86478 const struct inet_bind_bucket *tb, bool relax);
86479 void (*mtu_reduced)(struct sock *sk);
86480-};
86481+} __do_const;
86482
86483 /** inet_connection_sock - INET connection oriented sock
86484 *
86485diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
86486index 80479ab..0c3f647 100644
86487--- a/include/net/inetpeer.h
86488+++ b/include/net/inetpeer.h
86489@@ -47,7 +47,7 @@ struct inet_peer {
86490 */
86491 union {
86492 struct {
86493- atomic_t rid; /* Frag reception counter */
86494+ atomic_unchecked_t rid; /* Frag reception counter */
86495 };
86496 struct rcu_head rcu;
86497 struct inet_peer *gc_next;
86498diff --git a/include/net/ip.h b/include/net/ip.h
86499index 09cf5ae..ab62fcf 100644
86500--- a/include/net/ip.h
86501+++ b/include/net/ip.h
86502@@ -317,7 +317,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
86503 }
86504 }
86505
86506-u32 ip_idents_reserve(u32 hash, int segs);
86507+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
86508 void __ip_select_ident(struct iphdr *iph, int segs);
86509
86510 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
86511diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
86512index 09a819e..3ab9e14 100644
86513--- a/include/net/ip_fib.h
86514+++ b/include/net/ip_fib.h
86515@@ -170,7 +170,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
86516
86517 #define FIB_RES_SADDR(net, res) \
86518 ((FIB_RES_NH(res).nh_saddr_genid == \
86519- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
86520+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
86521 FIB_RES_NH(res).nh_saddr : \
86522 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
86523 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
86524diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
86525index 615b20b..fd4cbd8 100644
86526--- a/include/net/ip_vs.h
86527+++ b/include/net/ip_vs.h
86528@@ -534,7 +534,7 @@ struct ip_vs_conn {
86529 struct ip_vs_conn *control; /* Master control connection */
86530 atomic_t n_control; /* Number of controlled ones */
86531 struct ip_vs_dest *dest; /* real server */
86532- atomic_t in_pkts; /* incoming packet counter */
86533+ atomic_unchecked_t in_pkts; /* incoming packet counter */
86534
86535 /* Packet transmitter for different forwarding methods. If it
86536 * mangles the packet, it must return NF_DROP or better NF_STOLEN,
86537@@ -682,7 +682,7 @@ struct ip_vs_dest {
86538 __be16 port; /* port number of the server */
86539 union nf_inet_addr addr; /* IP address of the server */
86540 volatile unsigned int flags; /* dest status flags */
86541- atomic_t conn_flags; /* flags to copy to conn */
86542+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
86543 atomic_t weight; /* server weight */
86544
86545 atomic_t refcnt; /* reference counter */
86546@@ -928,11 +928,11 @@ struct netns_ipvs {
86547 /* ip_vs_lblc */
86548 int sysctl_lblc_expiration;
86549 struct ctl_table_header *lblc_ctl_header;
86550- struct ctl_table *lblc_ctl_table;
86551+ ctl_table_no_const *lblc_ctl_table;
86552 /* ip_vs_lblcr */
86553 int sysctl_lblcr_expiration;
86554 struct ctl_table_header *lblcr_ctl_header;
86555- struct ctl_table *lblcr_ctl_table;
86556+ ctl_table_no_const *lblcr_ctl_table;
86557 /* ip_vs_est */
86558 struct list_head est_list; /* estimator list */
86559 spinlock_t est_lock;
86560diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
86561index 8d4f588..2e37ad2 100644
86562--- a/include/net/irda/ircomm_tty.h
86563+++ b/include/net/irda/ircomm_tty.h
86564@@ -33,6 +33,7 @@
86565 #include <linux/termios.h>
86566 #include <linux/timer.h>
86567 #include <linux/tty.h> /* struct tty_struct */
86568+#include <asm/local.h>
86569
86570 #include <net/irda/irias_object.h>
86571 #include <net/irda/ircomm_core.h>
86572diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
86573index 714cc9a..ea05f3e 100644
86574--- a/include/net/iucv/af_iucv.h
86575+++ b/include/net/iucv/af_iucv.h
86576@@ -149,7 +149,7 @@ struct iucv_skb_cb {
86577 struct iucv_sock_list {
86578 struct hlist_head head;
86579 rwlock_t lock;
86580- atomic_t autobind_name;
86581+ atomic_unchecked_t autobind_name;
86582 };
86583
86584 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
86585diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
86586index f3be818..bf46196 100644
86587--- a/include/net/llc_c_ac.h
86588+++ b/include/net/llc_c_ac.h
86589@@ -87,7 +87,7 @@
86590 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
86591 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
86592
86593-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86594+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86595
86596 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
86597 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
86598diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
86599index 3948cf1..83b28c4 100644
86600--- a/include/net/llc_c_ev.h
86601+++ b/include/net/llc_c_ev.h
86602@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
86603 return (struct llc_conn_state_ev *)skb->cb;
86604 }
86605
86606-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86607-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86608+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86609+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86610
86611 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
86612 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
86613diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
86614index 48f3f89..0e92c50 100644
86615--- a/include/net/llc_c_st.h
86616+++ b/include/net/llc_c_st.h
86617@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
86618 u8 next_state;
86619 const llc_conn_ev_qfyr_t *ev_qualifiers;
86620 const llc_conn_action_t *ev_actions;
86621-};
86622+} __do_const;
86623
86624 struct llc_conn_state {
86625 u8 current_state;
86626diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
86627index a61b98c..aade1eb 100644
86628--- a/include/net/llc_s_ac.h
86629+++ b/include/net/llc_s_ac.h
86630@@ -23,7 +23,7 @@
86631 #define SAP_ACT_TEST_IND 9
86632
86633 /* All action functions must look like this */
86634-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86635+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86636
86637 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
86638 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
86639diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
86640index c4359e2..76dbc4a 100644
86641--- a/include/net/llc_s_st.h
86642+++ b/include/net/llc_s_st.h
86643@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
86644 llc_sap_ev_t ev;
86645 u8 next_state;
86646 const llc_sap_action_t *ev_actions;
86647-};
86648+} __do_const;
86649
86650 struct llc_sap_state {
86651 u8 curr_state;
86652diff --git a/include/net/mac80211.h b/include/net/mac80211.h
86653index 29c7be8..746bd73 100644
86654--- a/include/net/mac80211.h
86655+++ b/include/net/mac80211.h
86656@@ -4869,7 +4869,7 @@ struct rate_control_ops {
86657 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
86658
86659 u32 (*get_expected_throughput)(void *priv_sta);
86660-};
86661+} __do_const;
86662
86663 static inline int rate_supported(struct ieee80211_sta *sta,
86664 enum ieee80211_band band,
86665diff --git a/include/net/neighbour.h b/include/net/neighbour.h
86666index 76f7084..8f36e39 100644
86667--- a/include/net/neighbour.h
86668+++ b/include/net/neighbour.h
86669@@ -163,7 +163,7 @@ struct neigh_ops {
86670 void (*error_report)(struct neighbour *, struct sk_buff *);
86671 int (*output)(struct neighbour *, struct sk_buff *);
86672 int (*connected_output)(struct neighbour *, struct sk_buff *);
86673-};
86674+} __do_const;
86675
86676 struct pneigh_entry {
86677 struct pneigh_entry *next;
86678@@ -217,7 +217,7 @@ struct neigh_table {
86679 struct neigh_statistics __percpu *stats;
86680 struct neigh_hash_table __rcu *nht;
86681 struct pneigh_entry **phash_buckets;
86682-};
86683+} __randomize_layout;
86684
86685 enum {
86686 NEIGH_ARP_TABLE = 0,
86687diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
86688index 2e8756b8..0bd0083 100644
86689--- a/include/net/net_namespace.h
86690+++ b/include/net/net_namespace.h
86691@@ -130,8 +130,8 @@ struct net {
86692 struct netns_ipvs *ipvs;
86693 #endif
86694 struct sock *diag_nlsk;
86695- atomic_t fnhe_genid;
86696-};
86697+ atomic_unchecked_t fnhe_genid;
86698+} __randomize_layout;
86699
86700 #include <linux/seq_file_net.h>
86701
86702@@ -287,7 +287,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
86703 #define __net_init __init
86704 #define __net_exit __exit_refok
86705 #define __net_initdata __initdata
86706+#ifdef CONSTIFY_PLUGIN
86707 #define __net_initconst __initconst
86708+#else
86709+#define __net_initconst __initdata
86710+#endif
86711 #endif
86712
86713 struct pernet_operations {
86714@@ -297,7 +301,7 @@ struct pernet_operations {
86715 void (*exit_batch)(struct list_head *net_exit_list);
86716 int *id;
86717 size_t size;
86718-};
86719+} __do_const;
86720
86721 /*
86722 * Use these carefully. If you implement a network device and it
86723@@ -345,12 +349,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
86724
86725 static inline int rt_genid_ipv4(struct net *net)
86726 {
86727- return atomic_read(&net->ipv4.rt_genid);
86728+ return atomic_read_unchecked(&net->ipv4.rt_genid);
86729 }
86730
86731 static inline void rt_genid_bump_ipv4(struct net *net)
86732 {
86733- atomic_inc(&net->ipv4.rt_genid);
86734+ atomic_inc_unchecked(&net->ipv4.rt_genid);
86735 }
86736
86737 extern void (*__fib6_flush_trees)(struct net *net);
86738@@ -377,12 +381,12 @@ static inline void rt_genid_bump_all(struct net *net)
86739
86740 static inline int fnhe_genid(struct net *net)
86741 {
86742- return atomic_read(&net->fnhe_genid);
86743+ return atomic_read_unchecked(&net->fnhe_genid);
86744 }
86745
86746 static inline void fnhe_genid_bump(struct net *net)
86747 {
86748- atomic_inc(&net->fnhe_genid);
86749+ atomic_inc_unchecked(&net->fnhe_genid);
86750 }
86751
86752 #endif /* __NET_NET_NAMESPACE_H */
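
The `atomic_t` to `atomic_unchecked_t` conversions above follow the PaX REFCOUNT design: with that feature enabled, an ordinary `atomic_inc()` traps on overflow to catch reference-count bugs, so counters that are supposed to wrap (generation ids like `rt_genid` and `fnhe_genid`, statistics) must move to a parallel unchecked type. A rough userspace model of the distinction (not kernel code; the real type lives in the arch atomic headers):

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdio.h>

    typedef struct { atomic_int counter; } atomic_unchecked_t;

    /* wraps silently by design -- no overflow trap, unlike a checked atomic_inc */
    static void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        atomic_fetch_add_explicit(&v->counter, 1, memory_order_relaxed);
    }

    int main(void)
    {
        atomic_unchecked_t genid = { INT_MAX };
        atomic_inc_unchecked(&genid);   /* INT_MAX -> INT_MIN, intentionally */
        printf("%d\n", atomic_load(&genid.counter));
        return 0;
    }
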
86753diff --git a/include/net/netlink.h b/include/net/netlink.h
86754index 6415835..ab96d87 100644
86755--- a/include/net/netlink.h
86756+++ b/include/net/netlink.h
86757@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
86758 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
86759 {
86760 if (mark)
86761- skb_trim(skb, (unsigned char *) mark - skb->data);
86762+ skb_trim(skb, (const unsigned char *) mark - skb->data);
86763 }
86764
86765 /**
86766diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
86767index 29d6a94..235d3d84 100644
86768--- a/include/net/netns/conntrack.h
86769+++ b/include/net/netns/conntrack.h
86770@@ -14,10 +14,10 @@ struct nf_conntrack_ecache;
86771 struct nf_proto_net {
86772 #ifdef CONFIG_SYSCTL
86773 struct ctl_table_header *ctl_table_header;
86774- struct ctl_table *ctl_table;
86775+ ctl_table_no_const *ctl_table;
86776 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
86777 struct ctl_table_header *ctl_compat_header;
86778- struct ctl_table *ctl_compat_table;
86779+ ctl_table_no_const *ctl_compat_table;
86780 #endif
86781 #endif
86782 unsigned int users;
86783@@ -60,7 +60,7 @@ struct nf_ip_net {
86784 struct nf_icmp_net icmpv6;
86785 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
86786 struct ctl_table_header *ctl_table_header;
86787- struct ctl_table *ctl_table;
86788+ ctl_table_no_const *ctl_table;
86789 #endif
86790 };
86791
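
`ctl_table_no_const` exists because the constify plugin turns `struct ctl_table` into a read-only type tree-wide, while the per-namespace code above (and the ipc/mq sysctl handlers further down in this section) must build a table copy at runtime and patch its `.data` pointer. A sketch of the pattern in plain C, with hypothetical names:

    #include <string.h>

    struct opts { const char *name; void *data; };   /* constified tree-wide */
    typedef struct opts opts_no_const;               /* writable twin type */

    static const struct opts ipc_template = { "shmmax", 0 };

    static opts_no_const make_runtime_copy(void *per_ns_data)
    {
        opts_no_const tmp;                /* like 'ctl_table_no_const ipc_table' */
        memcpy(&tmp, &ipc_template, sizeof(tmp));
        tmp.data = per_ns_data;           /* only legal on the non-const type */
        return tmp;
    }

    int main(void)
    {
        int ns_value = 0;
        return make_runtime_copy(&ns_value).data != &ns_value;
    }
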
86792diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
86793index 0ffef1a..2ce1ceb 100644
86794--- a/include/net/netns/ipv4.h
86795+++ b/include/net/netns/ipv4.h
86796@@ -84,7 +84,7 @@ struct netns_ipv4 {
86797
86798 struct ping_group_range ping_group_range;
86799
86800- atomic_t dev_addr_genid;
86801+ atomic_unchecked_t dev_addr_genid;
86802
86803 #ifdef CONFIG_SYSCTL
86804 unsigned long *sysctl_local_reserved_ports;
86805@@ -98,6 +98,6 @@ struct netns_ipv4 {
86806 struct fib_rules_ops *mr_rules_ops;
86807 #endif
86808 #endif
86809- atomic_t rt_genid;
86810+ atomic_unchecked_t rt_genid;
86811 };
86812 #endif
86813diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
86814index 69ae41f..4f94868 100644
86815--- a/include/net/netns/ipv6.h
86816+++ b/include/net/netns/ipv6.h
86817@@ -75,8 +75,8 @@ struct netns_ipv6 {
86818 struct fib_rules_ops *mr6_rules_ops;
86819 #endif
86820 #endif
86821- atomic_t dev_addr_genid;
86822- atomic_t fib6_sernum;
86823+ atomic_unchecked_t dev_addr_genid;
86824+ atomic_unchecked_t fib6_sernum;
86825 };
86826
86827 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
86828diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
86829index 730d82a..045f2c4 100644
86830--- a/include/net/netns/xfrm.h
86831+++ b/include/net/netns/xfrm.h
86832@@ -78,7 +78,7 @@ struct netns_xfrm {
86833
86834 /* flow cache part */
86835 struct flow_cache flow_cache_global;
86836- atomic_t flow_cache_genid;
86837+ atomic_unchecked_t flow_cache_genid;
86838 struct list_head flow_cache_gc_list;
86839 spinlock_t flow_cache_gc_lock;
86840 struct work_struct flow_cache_gc_work;
86841diff --git a/include/net/ping.h b/include/net/ping.h
86842index f074060..830fba0 100644
86843--- a/include/net/ping.h
86844+++ b/include/net/ping.h
86845@@ -54,7 +54,7 @@ struct ping_iter_state {
86846
86847 extern struct proto ping_prot;
86848 #if IS_ENABLED(CONFIG_IPV6)
86849-extern struct pingv6_ops pingv6_ops;
86850+extern struct pingv6_ops *pingv6_ops;
86851 #endif
86852
86853 struct pingfakehdr {
86854diff --git a/include/net/protocol.h b/include/net/protocol.h
86855index d6fcc1f..ca277058 100644
86856--- a/include/net/protocol.h
86857+++ b/include/net/protocol.h
86858@@ -49,7 +49,7 @@ struct net_protocol {
86859 * socket lookup?
86860 */
86861 icmp_strict_tag_validation:1;
86862-};
86863+} __do_const;
86864
86865 #if IS_ENABLED(CONFIG_IPV6)
86866 struct inet6_protocol {
86867@@ -62,7 +62,7 @@ struct inet6_protocol {
86868 u8 type, u8 code, int offset,
86869 __be32 info);
86870 unsigned int flags; /* INET6_PROTO_xxx */
86871-};
86872+} __do_const;
86873
86874 #define INET6_PROTO_NOPOLICY 0x1
86875 #define INET6_PROTO_FINAL 0x2
86876diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
86877index e21b9f9..0191ef0 100644
86878--- a/include/net/rtnetlink.h
86879+++ b/include/net/rtnetlink.h
86880@@ -93,7 +93,7 @@ struct rtnl_link_ops {
86881 int (*fill_slave_info)(struct sk_buff *skb,
86882 const struct net_device *dev,
86883 const struct net_device *slave_dev);
86884-};
86885+} __do_const;
86886
86887 int __rtnl_link_register(struct rtnl_link_ops *ops);
86888 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
86889diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
86890index 4a5b9a3..ca27d73 100644
86891--- a/include/net/sctp/checksum.h
86892+++ b/include/net/sctp/checksum.h
86893@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
86894 unsigned int offset)
86895 {
86896 struct sctphdr *sh = sctp_hdr(skb);
86897- __le32 ret, old = sh->checksum;
86898- const struct skb_checksum_ops ops = {
86899+ __le32 ret, old = sh->checksum;
86900+ static const struct skb_checksum_ops ops = {
86901 .update = sctp_csum_update,
86902 .combine = sctp_csum_combine,
86903 };
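
The sctp/checksum.h hunk is worth a close look: besides re-indenting the first line, it adds `static` to the `const` ops struct. Without `static`, a fresh copy of the structure is materialized on the stack on every call; `static const` yields a single instance the compiler can place in .rodata, which is both cheaper and what the constify work expects of function-pointer tables. A standalone illustration:

    #include <stdio.h>

    struct ops { int (*update)(int); };

    static int add_one(int x) { return x + 1; }

    static int compute(int x)
    {
        /* one read-only instance for the whole program, not a stack copy */
        static const struct ops ops = { .update = add_one };
        return ops.update(x);
    }

    int main(void) { printf("%d\n", compute(41)); return 0; }
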
86904diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
86905index 487ef34..d457f98 100644
86906--- a/include/net/sctp/sm.h
86907+++ b/include/net/sctp/sm.h
86908@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
86909 typedef struct {
86910 sctp_state_fn_t *fn;
86911 const char *name;
86912-} sctp_sm_table_entry_t;
86913+} __do_const sctp_sm_table_entry_t;
86914
86915 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
86916 * currently in use.
86917@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
86918 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
86919
86920 /* Extern declarations for major data structures. */
86921-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
86922+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
86923
86924
86925 /* Get the size of a DATA chunk payload. */
86926diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
86927index 2bb2fcf..d17c291 100644
86928--- a/include/net/sctp/structs.h
86929+++ b/include/net/sctp/structs.h
86930@@ -509,7 +509,7 @@ struct sctp_pf {
86931 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
86932 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
86933 struct sctp_af *af;
86934-};
86935+} __do_const;
86936
86937
86938 /* Structure to track chunk fragments that have been acked, but peer
86939diff --git a/include/net/sock.h b/include/net/sock.h
86940index 2210fec..2249ad0 100644
86941--- a/include/net/sock.h
86942+++ b/include/net/sock.h
86943@@ -362,7 +362,7 @@ struct sock {
86944 unsigned int sk_napi_id;
86945 unsigned int sk_ll_usec;
86946 #endif
86947- atomic_t sk_drops;
86948+ atomic_unchecked_t sk_drops;
86949 int sk_rcvbuf;
86950
86951 struct sk_filter __rcu *sk_filter;
86952@@ -1061,7 +1061,7 @@ struct proto {
86953 void (*destroy_cgroup)(struct mem_cgroup *memcg);
86954 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
86955 #endif
86956-};
86957+} __randomize_layout;
86958
86959 /*
86960 * Bits in struct cg_proto.flags
86961@@ -1239,7 +1239,7 @@ static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
86962 page_counter_uncharge(&prot->memory_allocated, amt);
86963 }
86964
86965-static inline long
86966+static inline long __intentional_overflow(-1)
86967 sk_memory_allocated(const struct sock *sk)
86968 {
86969 struct proto *prot = sk->sk_prot;
86970@@ -1385,7 +1385,7 @@ struct sock_iocb {
86971 struct scm_cookie *scm;
86972 struct msghdr *msg, async_msg;
86973 struct kiocb *kiocb;
86974-};
86975+} __randomize_layout;
86976
86977 static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
86978 {
86979@@ -1826,7 +1826,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
86980 }
86981
86982 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
86983- char __user *from, char *to,
86984+ char __user *from, unsigned char *to,
86985 int copy, int offset)
86986 {
86987 if (skb->ip_summed == CHECKSUM_NONE) {
86988@@ -2075,7 +2075,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
86989 }
86990 }
86991
86992-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
86993+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
86994
86995 /**
86996 * sk_page_frag - return an appropriate page_frag
86997diff --git a/include/net/tcp.h b/include/net/tcp.h
86998index 9d9111e..349c847 100644
86999--- a/include/net/tcp.h
87000+++ b/include/net/tcp.h
87001@@ -516,7 +516,7 @@ void tcp_retransmit_timer(struct sock *sk);
87002 void tcp_xmit_retransmit_queue(struct sock *);
87003 void tcp_simple_retransmit(struct sock *);
87004 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
87005-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
87006+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
87007
87008 void tcp_send_probe0(struct sock *);
87009 void tcp_send_partial(struct sock *);
87010@@ -689,8 +689,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
87011 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
87012 */
87013 struct tcp_skb_cb {
87014- __u32 seq; /* Starting sequence number */
87015- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
87016+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
87017+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
87018 union {
87019 /* Note : tcp_tw_isn is used in input path only
87020 * (isn chosen by tcp_timewait_state_process())
87021@@ -715,7 +715,7 @@ struct tcp_skb_cb {
87022
87023 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
87024 /* 1 byte hole */
87025- __u32 ack_seq; /* Sequence number ACK'd */
87026+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
87027 union {
87028 struct inet_skb_parm h4;
87029 #if IS_ENABLED(CONFIG_IPV6)
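
The `__intentional_overflow()` annotations sprinkled through tcp.h (and through sock.h above and bcache.h/swab.h below) feed PaX's size_overflow gcc plugin, which instruments integer arithmetic and traps on unexpected wrap. TCP sequence numbers are modular by definition, so their fields and helpers are whitelisted: `-1` whitelists the function or field wholesale, while other arguments single out the return value or a specific parameter. The guard is roughly this (the exact definition sits in include/linux/compiler.h of the patched tree):

    #ifdef SIZE_OVERFLOW_PLUGIN
    # define __intentional_overflow(...) \
             __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    # define __intentional_overflow(...)   /* no plugin: no-op */
    #endif

    /* sequence-number compare that relies on modular wrap-around */
    static inline int __intentional_overflow(-1)
    seq_before(unsigned int seq1, unsigned int seq2)
    {
        return (int)(seq1 - seq2) < 0;
    }

    int main(void)
    {
        return !seq_before(0u, 1u);   /* 0 precedes 1 even across a wrap */
    }
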
87030diff --git a/include/net/xfrm.h b/include/net/xfrm.h
87031index dc4865e..152ee4c 100644
87032--- a/include/net/xfrm.h
87033+++ b/include/net/xfrm.h
87034@@ -285,7 +285,6 @@ struct xfrm_dst;
87035 struct xfrm_policy_afinfo {
87036 unsigned short family;
87037 struct dst_ops *dst_ops;
87038- void (*garbage_collect)(struct net *net);
87039 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
87040 const xfrm_address_t *saddr,
87041 const xfrm_address_t *daddr);
87042@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
87043 struct net_device *dev,
87044 const struct flowi *fl);
87045 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
87046-};
87047+} __do_const;
87048
87049 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
87050 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
87051@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
87052 int (*transport_finish)(struct sk_buff *skb,
87053 int async);
87054 void (*local_error)(struct sk_buff *skb, u32 mtu);
87055-};
87056+} __do_const;
87057
87058 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
87059 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
87060@@ -437,7 +436,7 @@ struct xfrm_mode {
87061 struct module *owner;
87062 unsigned int encap;
87063 int flags;
87064-};
87065+} __do_const;
87066
87067 /* Flags for xfrm_mode. */
87068 enum {
87069@@ -534,7 +533,7 @@ struct xfrm_policy {
87070 struct timer_list timer;
87071
87072 struct flow_cache_object flo;
87073- atomic_t genid;
87074+ atomic_unchecked_t genid;
87075 u32 priority;
87076 u32 index;
87077 struct xfrm_mark mark;
87078@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
87079 }
87080
87081 void xfrm_garbage_collect(struct net *net);
87082+void xfrm_garbage_collect_deferred(struct net *net);
87083
87084 #else
87085
87086@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
87087 static inline void xfrm_garbage_collect(struct net *net)
87088 {
87089 }
87090+static inline void xfrm_garbage_collect_deferred(struct net *net)
87091+{
87092+}
87093 #endif
87094
87095 static __inline__
87096diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
87097index 1017e0b..227aa4d 100644
87098--- a/include/rdma/iw_cm.h
87099+++ b/include/rdma/iw_cm.h
87100@@ -122,7 +122,7 @@ struct iw_cm_verbs {
87101 int backlog);
87102
87103 int (*destroy_listen)(struct iw_cm_id *cm_id);
87104-};
87105+} __no_const;
87106
87107 /**
87108 * iw_create_cm_id - Create an IW CM identifier.
87109diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
87110index 93d14da..734b3d8 100644
87111--- a/include/scsi/libfc.h
87112+++ b/include/scsi/libfc.h
87113@@ -771,6 +771,7 @@ struct libfc_function_template {
87114 */
87115 void (*disc_stop_final) (struct fc_lport *);
87116 };
87117+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
87118
87119 /**
87120 * struct fc_disc - Discovery context
87121@@ -875,7 +876,7 @@ struct fc_lport {
87122 struct fc_vport *vport;
87123
87124 /* Operational Information */
87125- struct libfc_function_template tt;
87126+ libfc_function_template_no_const tt;
87127 u8 link_up;
87128 u8 qfull;
87129 enum fc_lport_state state;
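
libfc is the mirror image of the `__do_const` hunks: `struct fc_lport` embeds a copy of the function template and fills in missing entries at runtime, so the template type cannot simply be constified. The patch instead introduces a writable `__no_const` typedef for the embedded copy, and scsi_transport_fc.h below does both at once (constify the driver-supplied template, keep a writable typedef for code that edits its own). The expansions are approximately these, per the compiler.h changes elsewhere in this patch:

    #ifdef CONSTIFY_PLUGIN
    # define __do_const __attribute__((do_const))   /* force read-only */
    # define __no_const __attribute__((no_const))   /* opt out of constify */
    #else
    # define __do_const
    # define __no_const
    #endif
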
87130diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
87131index 3a4edd1..feb2e3e 100644
87132--- a/include/scsi/scsi_device.h
87133+++ b/include/scsi/scsi_device.h
87134@@ -185,9 +185,9 @@ struct scsi_device {
87135 unsigned int max_device_blocked; /* what device_blocked counts down from */
87136 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
87137
87138- atomic_t iorequest_cnt;
87139- atomic_t iodone_cnt;
87140- atomic_t ioerr_cnt;
87141+ atomic_unchecked_t iorequest_cnt;
87142+ atomic_unchecked_t iodone_cnt;
87143+ atomic_unchecked_t ioerr_cnt;
87144
87145 struct device sdev_gendev,
87146 sdev_dev;
87147diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
87148index 007a0bc..7188db8 100644
87149--- a/include/scsi/scsi_transport_fc.h
87150+++ b/include/scsi/scsi_transport_fc.h
87151@@ -756,7 +756,8 @@ struct fc_function_template {
87152 unsigned long show_host_system_hostname:1;
87153
87154 unsigned long disable_target_scan:1;
87155-};
87156+} __do_const;
87157+typedef struct fc_function_template __no_const fc_function_template_no_const;
87158
87159
87160 /**
87161diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
87162index 396e8f7..b037e89 100644
87163--- a/include/sound/compress_driver.h
87164+++ b/include/sound/compress_driver.h
87165@@ -129,7 +129,7 @@ struct snd_compr_ops {
87166 struct snd_compr_caps *caps);
87167 int (*get_codec_caps) (struct snd_compr_stream *stream,
87168 struct snd_compr_codec_caps *codec);
87169-};
87170+} __no_const;
87171
87172 /**
87173 * struct snd_compr: Compressed device
87174diff --git a/include/sound/soc.h b/include/sound/soc.h
87175index ac8b333..59c3692 100644
87176--- a/include/sound/soc.h
87177+++ b/include/sound/soc.h
87178@@ -853,7 +853,7 @@ struct snd_soc_codec_driver {
87179 enum snd_soc_dapm_type, int);
87180
87181 bool ignore_pmdown_time; /* Doesn't benefit from pmdown delay */
87182-};
87183+} __do_const;
87184
87185 /* SoC platform interface */
87186 struct snd_soc_platform_driver {
87187@@ -880,7 +880,7 @@ struct snd_soc_platform_driver {
87188 const struct snd_compr_ops *compr_ops;
87189
87190 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
87191-};
87192+} __do_const;
87193
87194 struct snd_soc_dai_link_component {
87195 const char *name;
87196diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
87197index 672150b..9d4bec4 100644
87198--- a/include/target/target_core_base.h
87199+++ b/include/target/target_core_base.h
87200@@ -767,7 +767,7 @@ struct se_device {
87201 atomic_long_t write_bytes;
87202 /* Active commands on this virtual SE device */
87203 atomic_t simple_cmds;
87204- atomic_t dev_ordered_id;
87205+ atomic_unchecked_t dev_ordered_id;
87206 atomic_t dev_ordered_sync;
87207 atomic_t dev_qf_count;
87208 int export_count;
87209diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
87210new file mode 100644
87211index 0000000..fb634b7
87212--- /dev/null
87213+++ b/include/trace/events/fs.h
87214@@ -0,0 +1,53 @@
87215+#undef TRACE_SYSTEM
87216+#define TRACE_SYSTEM fs
87217+
87218+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
87219+#define _TRACE_FS_H
87220+
87221+#include <linux/fs.h>
87222+#include <linux/tracepoint.h>
87223+
87224+TRACE_EVENT(do_sys_open,
87225+
87226+ TP_PROTO(const char *filename, int flags, int mode),
87227+
87228+ TP_ARGS(filename, flags, mode),
87229+
87230+ TP_STRUCT__entry(
87231+ __string( filename, filename )
87232+ __field( int, flags )
87233+ __field( int, mode )
87234+ ),
87235+
87236+ TP_fast_assign(
87237+ __assign_str(filename, filename);
87238+ __entry->flags = flags;
87239+ __entry->mode = mode;
87240+ ),
87241+
87242+ TP_printk("\"%s\" %x %o",
87243+ __get_str(filename), __entry->flags, __entry->mode)
87244+);
87245+
87246+TRACE_EVENT(open_exec,
87247+
87248+ TP_PROTO(const char *filename),
87249+
87250+ TP_ARGS(filename),
87251+
87252+ TP_STRUCT__entry(
87253+ __string( filename, filename )
87254+ ),
87255+
87256+ TP_fast_assign(
87257+ __assign_str(filename, filename);
87258+ ),
87259+
87260+ TP_printk("\"%s\"",
87261+ __get_str(filename))
87262+);
87263+
87264+#endif /* _TRACE_FS_H */
87265+
87266+/* This part must be outside protection */
87267+#include <trace/define_trace.h>
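
This new header only declares the two events; the TRACE_EVENT machinery also needs exactly one translation unit to define `CREATE_TRACE_POINTS` before including it, and callers to fire the generated stubs. The matching hooks are added elsewhere in this patch; schematically the call sites look like this (a fragment, not compilable on its own):

    /* in one .c file only, before the include: */
    #define CREATE_TRACE_POINTS
    #include <trace/events/fs.h>

        /* ...inside the open path, once the name is resolved: */
        trace_do_sys_open(tmp->name, flags, mode);

    /* other files just include the header and call the stub: */
    #include <trace/events/fs.h>

        trace_open_exec(name);
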
87268diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
87269index 3608beb..df39d8a 100644
87270--- a/include/trace/events/irq.h
87271+++ b/include/trace/events/irq.h
87272@@ -36,7 +36,7 @@ struct softirq_action;
87273 */
87274 TRACE_EVENT(irq_handler_entry,
87275
87276- TP_PROTO(int irq, struct irqaction *action),
87277+ TP_PROTO(int irq, const struct irqaction *action),
87278
87279 TP_ARGS(irq, action),
87280
87281@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
87282 */
87283 TRACE_EVENT(irq_handler_exit,
87284
87285- TP_PROTO(int irq, struct irqaction *action, int ret),
87286+ TP_PROTO(int irq, const struct irqaction *action, int ret),
87287
87288 TP_ARGS(irq, action, ret),
87289
87290diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
87291index 7caf44c..23c6f27 100644
87292--- a/include/uapi/linux/a.out.h
87293+++ b/include/uapi/linux/a.out.h
87294@@ -39,6 +39,14 @@ enum machine_type {
87295 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
87296 };
87297
87298+/* Constants for the N_FLAGS field */
87299+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
87300+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
87301+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
87302+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
87303+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
87304+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
87305+
87306 #if !defined (N_MAGIC)
87307 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
87308 #endif
87309diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
87310index 22b6ad3..aeba37e 100644
87311--- a/include/uapi/linux/bcache.h
87312+++ b/include/uapi/linux/bcache.h
87313@@ -5,6 +5,7 @@
87314 * Bcache on disk data structures
87315 */
87316
87317+#include <linux/compiler.h>
87318 #include <asm/types.h>
87319
87320 #define BITMASK(name, type, field, offset, size) \
87321@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
87322 /* Btree keys - all units are in sectors */
87323
87324 struct bkey {
87325- __u64 high;
87326- __u64 low;
87327+ __u64 high __intentional_overflow(-1);
87328+ __u64 low __intentional_overflow(-1);
87329 __u64 ptr[];
87330 };
87331
87332diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
87333index d876736..ccce5c0 100644
87334--- a/include/uapi/linux/byteorder/little_endian.h
87335+++ b/include/uapi/linux/byteorder/little_endian.h
87336@@ -42,51 +42,51 @@
87337
87338 static inline __le64 __cpu_to_le64p(const __u64 *p)
87339 {
87340- return (__force __le64)*p;
87341+ return (__force const __le64)*p;
87342 }
87343-static inline __u64 __le64_to_cpup(const __le64 *p)
87344+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
87345 {
87346- return (__force __u64)*p;
87347+ return (__force const __u64)*p;
87348 }
87349 static inline __le32 __cpu_to_le32p(const __u32 *p)
87350 {
87351- return (__force __le32)*p;
87352+ return (__force const __le32)*p;
87353 }
87354 static inline __u32 __le32_to_cpup(const __le32 *p)
87355 {
87356- return (__force __u32)*p;
87357+ return (__force const __u32)*p;
87358 }
87359 static inline __le16 __cpu_to_le16p(const __u16 *p)
87360 {
87361- return (__force __le16)*p;
87362+ return (__force const __le16)*p;
87363 }
87364 static inline __u16 __le16_to_cpup(const __le16 *p)
87365 {
87366- return (__force __u16)*p;
87367+ return (__force const __u16)*p;
87368 }
87369 static inline __be64 __cpu_to_be64p(const __u64 *p)
87370 {
87371- return (__force __be64)__swab64p(p);
87372+ return (__force const __be64)__swab64p(p);
87373 }
87374 static inline __u64 __be64_to_cpup(const __be64 *p)
87375 {
87376- return __swab64p((__u64 *)p);
87377+ return __swab64p((const __u64 *)p);
87378 }
87379 static inline __be32 __cpu_to_be32p(const __u32 *p)
87380 {
87381- return (__force __be32)__swab32p(p);
87382+ return (__force const __be32)__swab32p(p);
87383 }
87384-static inline __u32 __be32_to_cpup(const __be32 *p)
87385+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
87386 {
87387- return __swab32p((__u32 *)p);
87388+ return __swab32p((const __u32 *)p);
87389 }
87390 static inline __be16 __cpu_to_be16p(const __u16 *p)
87391 {
87392- return (__force __be16)__swab16p(p);
87393+ return (__force const __be16)__swab16p(p);
87394 }
87395 static inline __u16 __be16_to_cpup(const __be16 *p)
87396 {
87397- return __swab16p((__u16 *)p);
87398+ return __swab16p((const __u16 *)p);
87399 }
87400 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
87401 #define __le64_to_cpus(x) do { (void)(x); } while (0)
87402diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
87403index 71e1d0e..6cc9caf 100644
87404--- a/include/uapi/linux/elf.h
87405+++ b/include/uapi/linux/elf.h
87406@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
87407 #define PT_GNU_EH_FRAME 0x6474e550
87408
87409 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
87410+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
87411+
87412+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
87413+
87414+/* Constants for the e_flags field */
87415+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
87416+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
87417+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
87418+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
87419+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
87420+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
87421
87422 /*
87423 * Extended Numbering
87424@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
87425 #define DT_DEBUG 21
87426 #define DT_TEXTREL 22
87427 #define DT_JMPREL 23
87428+#define DT_FLAGS 30
87429+ #define DF_TEXTREL 0x00000004
87430 #define DT_ENCODING 32
87431 #define OLD_DT_LOOS 0x60000000
87432 #define DT_LOOS 0x6000000d
87433@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
87434 #define PF_W 0x2
87435 #define PF_X 0x1
87436
87437+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
87438+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
87439+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
87440+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
87441+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
87442+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
87443+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
87444+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
87445+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
87446+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
87447+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
87448+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
87449+
87450 typedef struct elf32_phdr{
87451 Elf32_Word p_type;
87452 Elf32_Off p_offset;
87453@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
87454 #define EI_OSABI 7
87455 #define EI_PAD 8
87456
87457+#define EI_PAX 14
87458+
87459 #define ELFMAG0 0x7f /* EI_MAG */
87460 #define ELFMAG1 'E'
87461 #define ELFMAG2 'L'
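
`PT_PAX_FLAGS` is `PT_LOOS + 0x5041580`, i.e. the bytes 'P' 'A' 'X' (0x50 0x41 0x58) packed into the OS-specific range, and the `PF_*`/`PF_NO*` pairs above live in that program header's `p_flags`. A minimal userspace reader as a sketch: it assumes a native-endian 64-bit binary and the simple convention that a feature is active unless its PF_NO* bit is set, glossing over the real enable/disable/softmode logic:

    #include <elf.h>
    #include <stdio.h>

    #define MY_PT_PAX_FLAGS 0x65041580U   /* PT_LOOS + 0x5041580 */

    int main(int argc, char **argv)
    {
        Elf64_Ehdr eh;
        Elf64_Phdr ph;
        FILE *f;

        if (argc < 2 || !(f = fopen(argv[1], "rb")))
            return 1;
        if (fread(&eh, sizeof(eh), 1, f) != 1)
            return 1;
        for (int i = 0; i < eh.e_phnum; i++) {
            fseek(f, (long)(eh.e_phoff + (Elf64_Off)i * eh.e_phentsize), SEEK_SET);
            if (fread(&ph, sizeof(ph), 1, f) != 1)
                break;
            if (ph.p_type == MY_PT_PAX_FLAGS)
                printf("PAGEEXEC:%s MPROTECT:%s RANDMMAP:%s\n",
                       ph.p_flags & (1U << 5)  ? "off" : "on",  /* PF_NOPAGEEXEC */
                       ph.p_flags & (1U << 9)  ? "off" : "on",  /* PF_NOMPROTECT */
                       ph.p_flags & (1U << 15) ? "off" : "on"); /* PF_NORANDMMAP */
        }
        fclose(f);
        return 0;
    }
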
87462diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
87463index aa169c4..6a2771d 100644
87464--- a/include/uapi/linux/personality.h
87465+++ b/include/uapi/linux/personality.h
87466@@ -30,6 +30,7 @@ enum {
87467 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
87468 ADDR_NO_RANDOMIZE | \
87469 ADDR_COMPAT_LAYOUT | \
87470+ ADDR_LIMIT_3GB | \
87471 MMAP_PAGE_ZERO)
87472
87473 /*
87474diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
87475index 7530e74..e714828 100644
87476--- a/include/uapi/linux/screen_info.h
87477+++ b/include/uapi/linux/screen_info.h
87478@@ -43,7 +43,8 @@ struct screen_info {
87479 __u16 pages; /* 0x32 */
87480 __u16 vesa_attributes; /* 0x34 */
87481 __u32 capabilities; /* 0x36 */
87482- __u8 _reserved[6]; /* 0x3a */
87483+ __u16 vesapm_size; /* 0x3a */
87484+ __u8 _reserved[4]; /* 0x3c */
87485 } __attribute__((packed));
87486
87487 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
87488diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
87489index 0e011eb..82681b1 100644
87490--- a/include/uapi/linux/swab.h
87491+++ b/include/uapi/linux/swab.h
87492@@ -43,7 +43,7 @@
87493 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
87494 */
87495
87496-static inline __attribute_const__ __u16 __fswab16(__u16 val)
87497+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
87498 {
87499 #ifdef __HAVE_BUILTIN_BSWAP16__
87500 return __builtin_bswap16(val);
87501@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
87502 #endif
87503 }
87504
87505-static inline __attribute_const__ __u32 __fswab32(__u32 val)
87506+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
87507 {
87508 #ifdef __HAVE_BUILTIN_BSWAP32__
87509 return __builtin_bswap32(val);
87510@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
87511 #endif
87512 }
87513
87514-static inline __attribute_const__ __u64 __fswab64(__u64 val)
87515+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
87516 {
87517 #ifdef __HAVE_BUILTIN_BSWAP64__
87518 return __builtin_bswap64(val);
87519diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
87520index 1590c49..5eab462 100644
87521--- a/include/uapi/linux/xattr.h
87522+++ b/include/uapi/linux/xattr.h
87523@@ -73,5 +73,9 @@
87524 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
87525 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
87526
87527+/* User namespace */
87528+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
87529+#define XATTR_PAX_FLAGS_SUFFIX "flags"
87530+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
87531
87532 #endif /* _UAPI_LINUX_XATTR_H */
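
The `user.pax.flags` xattr gives PaX a second, filesystem-level channel for per-binary flags, useful where editing ELF headers is impractical. Setting it from userspace is a plain setxattr(2) call; the flag string below ("me") is purely illustrative, since its grammar is defined by the PaX parser in the patched kernel, not by this header:

    #include <stdio.h>
    #include <sys/xattr.h>

    int main(int argc, char **argv)
    {
        if (argc < 2)
            return 1;
        /* XATTR_NAME_PAX_FLAGS expands to "user.pax.flags" per the hunk above */
        if (setxattr(argv[1], "user.pax.flags", "me", 2, 0) != 0) {
            perror("setxattr");
            return 1;
        }
        return 0;
    }
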
87533diff --git a/include/video/udlfb.h b/include/video/udlfb.h
87534index f9466fa..f4e2b81 100644
87535--- a/include/video/udlfb.h
87536+++ b/include/video/udlfb.h
87537@@ -53,10 +53,10 @@ struct dlfb_data {
87538 u32 pseudo_palette[256];
87539 int blank_mode; /*one of FB_BLANK_ */
87540 /* blit-only rendering path metrics, exposed through sysfs */
87541- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87542- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
87543- atomic_t bytes_sent; /* to usb, after compression including overhead */
87544- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
87545+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87546+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
87547+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
87548+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
87549 };
87550
87551 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
87552diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
87553index 30f5362..8ed8ac9 100644
87554--- a/include/video/uvesafb.h
87555+++ b/include/video/uvesafb.h
87556@@ -122,6 +122,7 @@ struct uvesafb_par {
87557 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
87558 u8 pmi_setpal; /* PMI for palette changes */
87559 u16 *pmi_base; /* protected mode interface location */
87560+ u8 *pmi_code; /* protected mode code location */
87561 void *pmi_start;
87562 void *pmi_pal;
87563 u8 *vbe_state_orig; /*
87564diff --git a/init/Kconfig b/init/Kconfig
87565index 9afb971..27d6fca 100644
87566--- a/init/Kconfig
87567+++ b/init/Kconfig
87568@@ -1129,6 +1129,7 @@ endif # CGROUPS
87569
87570 config CHECKPOINT_RESTORE
87571 bool "Checkpoint/restore support" if EXPERT
87572+ depends on !GRKERNSEC
87573 default n
87574 help
87575 Enables additional kernel features in a sake of checkpoint/restore.
87576@@ -1654,7 +1655,7 @@ config SLUB_DEBUG
87577
87578 config COMPAT_BRK
87579 bool "Disable heap randomization"
87580- default y
87581+ default n
87582 help
87583 Randomizing heap placement makes heap exploits harder, but it
87584 also breaks ancient binaries (including anything libc5 based).
87585@@ -1985,7 +1986,7 @@ config INIT_ALL_POSSIBLE
87586 config STOP_MACHINE
87587 bool
87588 default y
87589- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
87590+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
87591 help
87592 Need stop_machine() primitive.
87593
87594diff --git a/init/Makefile b/init/Makefile
87595index 7bc47ee..6da2dc7 100644
87596--- a/init/Makefile
87597+++ b/init/Makefile
87598@@ -2,6 +2,9 @@
87599 # Makefile for the linux kernel.
87600 #
87601
87602+ccflags-y := $(GCC_PLUGINS_CFLAGS)
87603+asflags-y := $(GCC_PLUGINS_AFLAGS)
87604+
87605 obj-y := main.o version.o mounts.o
87606 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
87607 obj-y += noinitramfs.o
87608diff --git a/init/do_mounts.c b/init/do_mounts.c
87609index eb41008..f5dbbf9 100644
87610--- a/init/do_mounts.c
87611+++ b/init/do_mounts.c
87612@@ -360,11 +360,11 @@ static void __init get_fs_names(char *page)
87613 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
87614 {
87615 struct super_block *s;
87616- int err = sys_mount(name, "/root", fs, flags, data);
87617+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
87618 if (err)
87619 return err;
87620
87621- sys_chdir("/root");
87622+ sys_chdir((const char __force_user *)"/root");
87623 s = current->fs->pwd.dentry->d_sb;
87624 ROOT_DEV = s->s_dev;
87625 printk(KERN_INFO
87626@@ -487,18 +487,18 @@ void __init change_floppy(char *fmt, ...)
87627 va_start(args, fmt);
87628 vsprintf(buf, fmt, args);
87629 va_end(args);
87630- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
87631+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
87632 if (fd >= 0) {
87633 sys_ioctl(fd, FDEJECT, 0);
87634 sys_close(fd);
87635 }
87636 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
87637- fd = sys_open("/dev/console", O_RDWR, 0);
87638+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
87639 if (fd >= 0) {
87640 sys_ioctl(fd, TCGETS, (long)&termios);
87641 termios.c_lflag &= ~ICANON;
87642 sys_ioctl(fd, TCSETSF, (long)&termios);
87643- sys_read(fd, &c, 1);
87644+ sys_read(fd, (char __user *)&c, 1);
87645 termios.c_lflag |= ICANON;
87646 sys_ioctl(fd, TCSETSF, (long)&termios);
87647 sys_close(fd);
87648@@ -592,8 +592,8 @@ void __init prepare_namespace(void)
87649 mount_root();
87650 out:
87651 devtmpfs_mount("dev");
87652- sys_mount(".", "/", NULL, MS_MOVE, NULL);
87653- sys_chroot(".");
87654+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
87655+ sys_chroot((const char __force_user *)".");
87656 }
87657
87658 static bool is_tmpfs;
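
All of these init/ hunks are the same fix: the sys_* entry points are declared with `__user` pointers, and early-boot code passes kernel strings to them — harmless at this stage, but a type violation under sparse and under PaX UDEREF's stricter pointer typing, hence the `__force_user` casts. A self-contained sketch of the machinery (the `__user`/`__force` definitions mirror mainline include/linux/compiler.h; `__force_user` is the shorthand this patch adds there, and sys_chdir is stubbed out here):

    #include <stdio.h>

    #ifdef __CHECKER__                    /* i.e. building under sparse */
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif

    #define __force_user __force __user   /* deliberate kernel->user pointer cast */

    static long sys_chdir(const char __user *path)   /* stand-in for the syscall */
    {
        return printf("chdir to %s\n", path) < 0;
    }

    int main(void)
    {
        /* a kernel string knowingly passed where a __user pointer is expected */
        return (int)sys_chdir((const char __force_user *)"/root");
    }
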
87659diff --git a/init/do_mounts.h b/init/do_mounts.h
87660index f5b978a..69dbfe8 100644
87661--- a/init/do_mounts.h
87662+++ b/init/do_mounts.h
87663@@ -15,15 +15,15 @@ extern int root_mountflags;
87664
87665 static inline int create_dev(char *name, dev_t dev)
87666 {
87667- sys_unlink(name);
87668- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
87669+ sys_unlink((char __force_user *)name);
87670+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
87671 }
87672
87673 #if BITS_PER_LONG == 32
87674 static inline u32 bstat(char *name)
87675 {
87676 struct stat64 stat;
87677- if (sys_stat64(name, &stat) != 0)
87678+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
87679 return 0;
87680 if (!S_ISBLK(stat.st_mode))
87681 return 0;
87682@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
87683 static inline u32 bstat(char *name)
87684 {
87685 struct stat stat;
87686- if (sys_newstat(name, &stat) != 0)
87687+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
87688 return 0;
87689 if (!S_ISBLK(stat.st_mode))
87690 return 0;
87691diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
87692index 3e0878e..8a9d7a0 100644
87693--- a/init/do_mounts_initrd.c
87694+++ b/init/do_mounts_initrd.c
87695@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
87696 {
87697 sys_unshare(CLONE_FS | CLONE_FILES);
87698 /* stdin/stdout/stderr for /linuxrc */
87699- sys_open("/dev/console", O_RDWR, 0);
87700+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
87701 sys_dup(0);
87702 sys_dup(0);
87703 /* move initrd over / and chdir/chroot in initrd root */
87704- sys_chdir("/root");
87705- sys_mount(".", "/", NULL, MS_MOVE, NULL);
87706- sys_chroot(".");
87707+ sys_chdir((const char __force_user *)"/root");
87708+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
87709+ sys_chroot((const char __force_user *)".");
87710 sys_setsid();
87711 return 0;
87712 }
87713@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
87714 create_dev("/dev/root.old", Root_RAM0);
87715 /* mount initrd on rootfs' /root */
87716 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
87717- sys_mkdir("/old", 0700);
87718- sys_chdir("/old");
87719+ sys_mkdir((const char __force_user *)"/old", 0700);
87720+ sys_chdir((const char __force_user *)"/old");
87721
87722 /* try loading default modules from initrd */
87723 load_default_modules();
87724@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
87725 current->flags &= ~PF_FREEZER_SKIP;
87726
87727 /* move initrd to rootfs' /old */
87728- sys_mount("..", ".", NULL, MS_MOVE, NULL);
87729+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
87730 /* switch root and cwd back to / of rootfs */
87731- sys_chroot("..");
87732+ sys_chroot((const char __force_user *)"..");
87733
87734 if (new_decode_dev(real_root_dev) == Root_RAM0) {
87735- sys_chdir("/old");
87736+ sys_chdir((const char __force_user *)"/old");
87737 return;
87738 }
87739
87740- sys_chdir("/");
87741+ sys_chdir((const char __force_user *)"/");
87742 ROOT_DEV = new_decode_dev(real_root_dev);
87743 mount_root();
87744
87745 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
87746- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
87747+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
87748 if (!error)
87749 printk("okay\n");
87750 else {
87751- int fd = sys_open("/dev/root.old", O_RDWR, 0);
87752+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
87753 if (error == -ENOENT)
87754 printk("/initrd does not exist. Ignored.\n");
87755 else
87756 printk("failed\n");
87757 printk(KERN_NOTICE "Unmounting old root\n");
87758- sys_umount("/old", MNT_DETACH);
87759+ sys_umount((char __force_user *)"/old", MNT_DETACH);
87760 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
87761 if (fd < 0) {
87762 error = fd;
87763@@ -127,11 +127,11 @@ int __init initrd_load(void)
87764 * mounted in the normal path.
87765 */
87766 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
87767- sys_unlink("/initrd.image");
87768+ sys_unlink((const char __force_user *)"/initrd.image");
87769 handle_initrd();
87770 return 1;
87771 }
87772 }
87773- sys_unlink("/initrd.image");
87774+ sys_unlink((const char __force_user *)"/initrd.image");
87775 return 0;
87776 }
87777diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
87778index 8cb6db5..d729f50 100644
87779--- a/init/do_mounts_md.c
87780+++ b/init/do_mounts_md.c
87781@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
87782 partitioned ? "_d" : "", minor,
87783 md_setup_args[ent].device_names);
87784
87785- fd = sys_open(name, 0, 0);
87786+ fd = sys_open((char __force_user *)name, 0, 0);
87787 if (fd < 0) {
87788 printk(KERN_ERR "md: open failed - cannot start "
87789 "array %s\n", name);
87790@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
87791 * array without it
87792 */
87793 sys_close(fd);
87794- fd = sys_open(name, 0, 0);
87795+ fd = sys_open((char __force_user *)name, 0, 0);
87796 sys_ioctl(fd, BLKRRPART, 0);
87797 }
87798 sys_close(fd);
87799@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
87800
87801 wait_for_device_probe();
87802
87803- fd = sys_open("/dev/md0", 0, 0);
87804+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
87805 if (fd >= 0) {
87806 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
87807 sys_close(fd);
87808diff --git a/init/init_task.c b/init/init_task.c
87809index ba0a7f36..2bcf1d5 100644
87810--- a/init/init_task.c
87811+++ b/init/init_task.c
87812@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
87813 * Initial thread structure. Alignment of this is handled by a special
87814 * linker map entry.
87815 */
87816+#ifdef CONFIG_X86
87817+union thread_union init_thread_union __init_task_data;
87818+#else
87819 union thread_union init_thread_union __init_task_data =
87820 { INIT_THREAD_INFO(init_task) };
87821+#endif
87822diff --git a/init/initramfs.c b/init/initramfs.c
87823index ad1bd77..dca2c1b 100644
87824--- a/init/initramfs.c
87825+++ b/init/initramfs.c
87826@@ -25,7 +25,7 @@ static ssize_t __init xwrite(int fd, const char *p, size_t count)
87827
87828 /* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
87829 while (count) {
87830- ssize_t rv = sys_write(fd, p, count);
87831+ ssize_t rv = sys_write(fd, (char __force_user *)p, count);
87832
87833 if (rv < 0) {
87834 if (rv == -EINTR || rv == -EAGAIN)
87835@@ -107,7 +107,7 @@ static void __init free_hash(void)
87836 }
87837 }
87838
87839-static long __init do_utime(char *filename, time_t mtime)
87840+static long __init do_utime(char __force_user *filename, time_t mtime)
87841 {
87842 struct timespec t[2];
87843
87844@@ -142,7 +142,7 @@ static void __init dir_utime(void)
87845 struct dir_entry *de, *tmp;
87846 list_for_each_entry_safe(de, tmp, &dir_list, list) {
87847 list_del(&de->list);
87848- do_utime(de->name, de->mtime);
87849+ do_utime((char __force_user *)de->name, de->mtime);
87850 kfree(de->name);
87851 kfree(de);
87852 }
87853@@ -304,7 +304,7 @@ static int __init maybe_link(void)
87854 if (nlink >= 2) {
87855 char *old = find_link(major, minor, ino, mode, collected);
87856 if (old)
87857- return (sys_link(old, collected) < 0) ? -1 : 1;
87858+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
87859 }
87860 return 0;
87861 }
87862@@ -313,11 +313,11 @@ static void __init clean_path(char *path, umode_t fmode)
87863 {
87864 struct stat st;
87865
87866- if (!sys_newlstat(path, &st) && (st.st_mode ^ fmode) & S_IFMT) {
87867+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode ^ fmode) & S_IFMT) {
87868 if (S_ISDIR(st.st_mode))
87869- sys_rmdir(path);
87870+ sys_rmdir((char __force_user *)path);
87871 else
87872- sys_unlink(path);
87873+ sys_unlink((char __force_user *)path);
87874 }
87875 }
87876
87877@@ -338,7 +338,7 @@ static int __init do_name(void)
87878 int openflags = O_WRONLY|O_CREAT;
87879 if (ml != 1)
87880 openflags |= O_TRUNC;
87881- wfd = sys_open(collected, openflags, mode);
87882+ wfd = sys_open((char __force_user *)collected, openflags, mode);
87883
87884 if (wfd >= 0) {
87885 sys_fchown(wfd, uid, gid);
87886@@ -350,17 +350,17 @@ static int __init do_name(void)
87887 }
87888 }
87889 } else if (S_ISDIR(mode)) {
87890- sys_mkdir(collected, mode);
87891- sys_chown(collected, uid, gid);
87892- sys_chmod(collected, mode);
87893+ sys_mkdir((char __force_user *)collected, mode);
87894+ sys_chown((char __force_user *)collected, uid, gid);
87895+ sys_chmod((char __force_user *)collected, mode);
87896 dir_add(collected, mtime);
87897 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
87898 S_ISFIFO(mode) || S_ISSOCK(mode)) {
87899 if (maybe_link() == 0) {
87900- sys_mknod(collected, mode, rdev);
87901- sys_chown(collected, uid, gid);
87902- sys_chmod(collected, mode);
87903- do_utime(collected, mtime);
87904+ sys_mknod((char __force_user *)collected, mode, rdev);
87905+ sys_chown((char __force_user *)collected, uid, gid);
87906+ sys_chmod((char __force_user *)collected, mode);
87907+ do_utime((char __force_user *)collected, mtime);
87908 }
87909 }
87910 return 0;
87911@@ -372,7 +372,7 @@ static int __init do_copy(void)
87912 if (xwrite(wfd, victim, body_len) != body_len)
87913 error("write error");
87914 sys_close(wfd);
87915- do_utime(vcollected, mtime);
87916+ do_utime((char __force_user *)vcollected, mtime);
87917 kfree(vcollected);
87918 eat(body_len);
87919 state = SkipIt;
87920@@ -390,9 +390,9 @@ static int __init do_symlink(void)
87921 {
87922 collected[N_ALIGN(name_len) + body_len] = '\0';
87923 clean_path(collected, 0);
87924- sys_symlink(collected + N_ALIGN(name_len), collected);
87925- sys_lchown(collected, uid, gid);
87926- do_utime(collected, mtime);
87927+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
87928+ sys_lchown((char __force_user *)collected, uid, gid);
87929+ do_utime((char __force_user *)collected, mtime);
87930 state = SkipIt;
87931 next_state = Reset;
87932 return 0;
87933diff --git a/init/main.c b/init/main.c
87934index 61b99376..1e346cb 100644
87935--- a/init/main.c
87936+++ b/init/main.c
87937@@ -100,6 +100,8 @@ extern void radix_tree_init(void);
87938 static inline void mark_rodata_ro(void) { }
87939 #endif
87940
87941+extern void grsecurity_init(void);
87942+
87943 /*
87944 * Debug helper: via this flag we know that we are in 'early bootup code'
87945 * where only the boot processor is running with IRQ disabled. This means
87946@@ -161,6 +163,85 @@ static int __init set_reset_devices(char *str)
87947
87948 __setup("reset_devices", set_reset_devices);
87949
87950+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
87951+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
87952+static int __init setup_grsec_proc_gid(char *str)
87953+{
87954+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
87955+ return 1;
87956+}
87957+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
87958+#endif
87959+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
87960+int grsec_enable_sysfs_restrict = 1;
87961+static int __init setup_grsec_sysfs_restrict(char *str)
87962+{
87963+ if (!simple_strtol(str, NULL, 0))
87964+ grsec_enable_sysfs_restrict = 0;
87965+ return 1;
87966+}
87967+__setup("grsec_sysfs_restrict", setup_grsec_sysfs_restrict);
87968+#endif
87969+
87970+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
87971+unsigned long pax_user_shadow_base __read_only;
87972+EXPORT_SYMBOL(pax_user_shadow_base);
87973+extern char pax_enter_kernel_user[];
87974+extern char pax_exit_kernel_user[];
87975+#endif
87976+
87977+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
87978+static int __init setup_pax_nouderef(char *str)
87979+{
87980+#ifdef CONFIG_X86_32
87981+ unsigned int cpu;
87982+ struct desc_struct *gdt;
87983+
87984+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
87985+ gdt = get_cpu_gdt_table(cpu);
87986+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
87987+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
87988+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
87989+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
87990+ }
87991+ loadsegment(ds, __KERNEL_DS);
87992+ loadsegment(es, __KERNEL_DS);
87993+ loadsegment(ss, __KERNEL_DS);
87994+#else
87995+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
87996+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
87997+ clone_pgd_mask = ~(pgdval_t)0UL;
87998+ pax_user_shadow_base = 0UL;
87999+ setup_clear_cpu_cap(X86_FEATURE_PCID);
88000+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
88001+#endif
88002+
88003+ return 0;
88004+}
88005+early_param("pax_nouderef", setup_pax_nouderef);
88006+
88007+#ifdef CONFIG_X86_64
88008+static int __init setup_pax_weakuderef(char *str)
88009+{
88010+ if (clone_pgd_mask != ~(pgdval_t)0UL)
88011+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
88012+ return 1;
88013+}
88014+__setup("pax_weakuderef", setup_pax_weakuderef);
88015+#endif
88016+#endif
88017+
88018+#ifdef CONFIG_PAX_SOFTMODE
88019+int pax_softmode;
88020+
88021+static int __init setup_pax_softmode(char *str)
88022+{
88023+ get_option(&str, &pax_softmode);
88024+ return 1;
88025+}
88026+__setup("pax_softmode=", setup_pax_softmode);
88027+#endif
88028+
88029 static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
88030 const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
88031 static const char *panic_later, *panic_param;
88032@@ -735,7 +816,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
88033 struct blacklist_entry *entry;
88034 char *fn_name;
88035
88036- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
88037+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
88038 if (!fn_name)
88039 return false;
88040
88041@@ -787,7 +868,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
88042 {
88043 int count = preempt_count();
88044 int ret;
88045- char msgbuf[64];
88046+ const char *msg1 = "", *msg2 = "";
88047
88048 if (initcall_blacklisted(fn))
88049 return -EPERM;
88050@@ -797,18 +878,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
88051 else
88052 ret = fn();
88053
88054- msgbuf[0] = 0;
88055-
88056 if (preempt_count() != count) {
88057- sprintf(msgbuf, "preemption imbalance ");
88058+ msg1 = " preemption imbalance";
88059 preempt_count_set(count);
88060 }
88061 if (irqs_disabled()) {
88062- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
88063+ msg2 = " disabled interrupts";
88064 local_irq_enable();
88065 }
88066- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
88067+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
88068
88069+ add_latent_entropy();
88070 return ret;
88071 }
88072
88073@@ -914,8 +994,8 @@ static int run_init_process(const char *init_filename)
88074 {
88075 argv_init[0] = init_filename;
88076 return do_execve(getname_kernel(init_filename),
88077- (const char __user *const __user *)argv_init,
88078- (const char __user *const __user *)envp_init);
88079+ (const char __user *const __force_user *)argv_init,
88080+ (const char __user *const __force_user *)envp_init);
88081 }
88082
88083 static int try_to_run_init_process(const char *init_filename)
88084@@ -932,6 +1012,10 @@ static int try_to_run_init_process(const char *init_filename)
88085 return ret;
88086 }
88087
88088+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
88089+extern int gr_init_ran;
88090+#endif
88091+
88092 static noinline void __init kernel_init_freeable(void);
88093
88094 static int __ref kernel_init(void *unused)
88095@@ -956,6 +1040,11 @@ static int __ref kernel_init(void *unused)
88096 ramdisk_execute_command, ret);
88097 }
88098
88099+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
88100+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
88101+ gr_init_ran = 1;
88102+#endif
88103+
88104 /*
88105 * We try each of these until one succeeds.
88106 *
88107@@ -1016,7 +1105,7 @@ static noinline void __init kernel_init_freeable(void)
88108 do_basic_setup();
88109
88110 /* Open the /dev/console on the rootfs, this should never fail */
88111- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
88112+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
88113 pr_err("Warning: unable to open an initial console.\n");
88114
88115 (void) sys_dup(0);
88116@@ -1029,11 +1118,13 @@ static noinline void __init kernel_init_freeable(void)
88117 if (!ramdisk_execute_command)
88118 ramdisk_execute_command = "/init";
88119
88120- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
88121+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
88122 ramdisk_execute_command = NULL;
88123 prepare_namespace();
88124 }
88125
88126+ grsecurity_init();
88127+
88128 /*
88129 * Ok, we have completed the initial bootup, and
88130 * we're essentially up and running. Get rid of the
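
The handlers added to init/main.c above all hang new knobs off the kernel command line: `pax_nouderef` and `pax_weakuderef` take no value, `pax_softmode=` and `grsec_proc_gid=` take one, and `grsec_sysfs_restrict` is parsed with simple_strtol, so appending `=0` (or any non-numeric value) switches the restriction off. An illustrative boot line exercising them (example values, not recommendations, and each option depends on its config symbol being enabled):

    linux /vmlinuz root=/dev/sda1 ro pax_softmode=1 pax_weakuderef \
          grsec_proc_gid=1001 grsec_sysfs_restrict=0
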
88131diff --git a/ipc/compat.c b/ipc/compat.c
88132index 9b3c85f..1c4d897 100644
88133--- a/ipc/compat.c
88134+++ b/ipc/compat.c
88135@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
88136 COMPAT_SHMLBA);
88137 if (err < 0)
88138 return err;
88139- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
88140+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
88141 }
88142 case SHMDT:
88143 return sys_shmdt(compat_ptr(ptr));
88144diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
88145index 8ad93c2..efd80f8 100644
88146--- a/ipc/ipc_sysctl.c
88147+++ b/ipc/ipc_sysctl.c
88148@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
88149 static int proc_ipc_dointvec(struct ctl_table *table, int write,
88150 void __user *buffer, size_t *lenp, loff_t *ppos)
88151 {
88152- struct ctl_table ipc_table;
88153+ ctl_table_no_const ipc_table;
88154
88155 memcpy(&ipc_table, table, sizeof(ipc_table));
88156 ipc_table.data = get_ipc(table);
88157@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
88158 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
88159 void __user *buffer, size_t *lenp, loff_t *ppos)
88160 {
88161- struct ctl_table ipc_table;
88162+ ctl_table_no_const ipc_table;
88163
88164 memcpy(&ipc_table, table, sizeof(ipc_table));
88165 ipc_table.data = get_ipc(table);
88166@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
88167 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
88168 void __user *buffer, size_t *lenp, loff_t *ppos)
88169 {
88170- struct ctl_table ipc_table;
88171+ ctl_table_no_const ipc_table;
88172 memcpy(&ipc_table, table, sizeof(ipc_table));
88173 ipc_table.data = get_ipc(table);
88174
88175@@ -76,7 +76,7 @@ static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
88176 static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
88177 void __user *buffer, size_t *lenp, loff_t *ppos)
88178 {
88179- struct ctl_table ipc_table;
88180+ ctl_table_no_const ipc_table;
88181 int dummy = 0;
88182
88183 memcpy(&ipc_table, table, sizeof(ipc_table));
88184diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
88185index 68d4e95..1477ded 100644
88186--- a/ipc/mq_sysctl.c
88187+++ b/ipc/mq_sysctl.c
88188@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
88189 static int proc_mq_dointvec(struct ctl_table *table, int write,
88190 void __user *buffer, size_t *lenp, loff_t *ppos)
88191 {
88192- struct ctl_table mq_table;
88193+ ctl_table_no_const mq_table;
88194 memcpy(&mq_table, table, sizeof(mq_table));
88195 mq_table.data = get_mq(table);
88196
88197@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
88198 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
88199 void __user *buffer, size_t *lenp, loff_t *ppos)
88200 {
88201- struct ctl_table mq_table;
88202+ ctl_table_no_const mq_table;
88203 memcpy(&mq_table, table, sizeof(mq_table));
88204 mq_table.data = get_mq(table);
88205
88206diff --git a/ipc/mqueue.c b/ipc/mqueue.c
88207index 7635a1c..7432cb6 100644
88208--- a/ipc/mqueue.c
88209+++ b/ipc/mqueue.c
88210@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
88211 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
88212 info->attr.mq_msgsize);
88213
88214+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
88215 spin_lock(&mq_lock);
88216 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
88217 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
88218diff --git a/ipc/shm.c b/ipc/shm.c
88219index 19633b4..d454904 100644
88220--- a/ipc/shm.c
88221+++ b/ipc/shm.c
88222@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
88223 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
88224 #endif
88225
88226+#ifdef CONFIG_GRKERNSEC
88227+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
88228+ const u64 shm_createtime, const kuid_t cuid,
88229+ const int shmid);
88230+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
88231+ const u64 shm_createtime);
88232+#endif
88233+
88234 void shm_init_ns(struct ipc_namespace *ns)
88235 {
88236 ns->shm_ctlmax = SHMMAX;
88237@@ -560,6 +568,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
88238 shp->shm_lprid = 0;
88239 shp->shm_atim = shp->shm_dtim = 0;
88240 shp->shm_ctim = get_seconds();
88241+#ifdef CONFIG_GRKERNSEC
88242+ shp->shm_createtime = ktime_get_ns();
88243+#endif
88244 shp->shm_segsz = size;
88245 shp->shm_nattch = 0;
88246 shp->shm_file = file;
88247@@ -1096,6 +1107,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88248 f_mode = FMODE_READ | FMODE_WRITE;
88249 }
88250 if (shmflg & SHM_EXEC) {
88251+
88252+#ifdef CONFIG_PAX_MPROTECT
88253+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
88254+ goto out;
88255+#endif
88256+
88257 prot |= PROT_EXEC;
88258 acc_mode |= S_IXUGO;
88259 }
88260@@ -1120,6 +1137,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88261 if (err)
88262 goto out_unlock;
88263
88264+#ifdef CONFIG_GRKERNSEC
88265+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
88266+ shp->shm_perm.cuid, shmid) ||
88267+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
88268+ err = -EACCES;
88269+ goto out_unlock;
88270+ }
88271+#endif
88272+
88273 ipc_lock_object(&shp->shm_perm);
88274
88275 /* check if shm_destroy() is tearing down shp */
88276@@ -1132,6 +1158,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88277 path = shp->shm_file->f_path;
88278 path_get(&path);
88279 shp->shm_nattch++;
88280+#ifdef CONFIG_GRKERNSEC
88281+ shp->shm_lapid = current->pid;
88282+#endif
88283 size = i_size_read(path.dentry->d_inode);
88284 ipc_unlock_object(&shp->shm_perm);
88285 rcu_read_unlock();
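The do_shmat() hooks above give grsecurity a veto over attaches: gr_handle_shmat() enforces RBAC policy using the creator PID, last-attach PID, creation time, and creator UID now tracked per segment, and gr_chroot_shmat() refuses attaches that cross a chroot boundary. A hedged user-space illustration of the access this is meant to stop; the key and size are arbitrary, and the EACCES outcome assumes a kernel built with the corresponding GRKERNSEC chroot option and a caller that entered its chroot after the segment was created:

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	/* segment created outside the chroot (or by another session) */
	int id = shmget(0x6772, 4096, IPC_CREAT | 0666);
	if (id < 0) { perror("shmget"); return 1; }

	/* on a hardened kernel this attach is expected to fail with
	 * EACCES when performed from inside a later-entered chroot */
	void *p = shmat(id, NULL, 0);
	if (p == (void *)-1) { perror("shmat"); return 1; }

	shmdt(p);
	return 0;
}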
88286diff --git a/ipc/util.c b/ipc/util.c
88287index 106bed0..f851429 100644
88288--- a/ipc/util.c
88289+++ b/ipc/util.c
88290@@ -71,6 +71,8 @@ struct ipc_proc_iface {
88291 int (*show)(struct seq_file *, void *);
88292 };
88293
88294+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
88295+
88296 /**
88297 * ipc_init - initialise ipc subsystem
88298 *
88299@@ -497,6 +499,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
88300 granted_mode >>= 6;
88301 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
88302 granted_mode >>= 3;
88303+
88304+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
88305+ return -1;
88306+
88307 /* is there some bit set in requested_mode but not in granted_mode? */
88308 if ((requested_mode & ~granted_mode & 0007) &&
88309 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
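ipcperms() now consults grsecurity after the classic owner/group/other resolution: if gr_ipc_permitted() returns zero, the access fails with -1 even when the SysV mode bits would allow it. The helper itself lives in the grsecurity/ directory added elsewhere by this patch; the stub below only illustrates the call-site contract (nonzero means "no objection"), and its policy line is an assumption, not the real implementation:

/* Contract sketch only; the real gr_ipc_permitted() is not shown here. */
int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp,
		     int requested_mode, int granted_mode)
{
	/* assumed policy flavour: veto world-granted access to IPC
	 * objects created by a different user */
	if ((requested_mode & granted_mode & 0007) &&
	    !uid_eq(current_euid(), ipcp->cuid))
		return 0;	/* deny: ipcperms() returns -1 */

	return 1;		/* no objection: normal checks proceed */
}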
88310diff --git a/kernel/audit.c b/kernel/audit.c
88311index 72ab759..757deba 100644
88312--- a/kernel/audit.c
88313+++ b/kernel/audit.c
88314@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
88315 3) suppressed due to audit_rate_limit
88316 4) suppressed due to audit_backlog_limit
88317 */
88318-static atomic_t audit_lost = ATOMIC_INIT(0);
88319+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
88320
88321 /* The netlink socket. */
88322 static struct sock *audit_sock;
88323@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
88324 unsigned long now;
88325 int print;
88326
88327- atomic_inc(&audit_lost);
88328+ atomic_inc_unchecked(&audit_lost);
88329
88330 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
88331
88332@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
88333 if (print) {
88334 if (printk_ratelimit())
88335 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
88336- atomic_read(&audit_lost),
88337+ atomic_read_unchecked(&audit_lost),
88338 audit_rate_limit,
88339 audit_backlog_limit);
88340 audit_panic(message);
88341@@ -831,7 +831,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
88342 s.pid = audit_pid;
88343 s.rate_limit = audit_rate_limit;
88344 s.backlog_limit = audit_backlog_limit;
88345- s.lost = atomic_read(&audit_lost);
88346+ s.lost = atomic_read_unchecked(&audit_lost);
88347 s.backlog = skb_queue_len(&audit_skb_queue);
88348 s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL;
88349 s.backlog_wait_time = audit_backlog_wait_time;
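audit_lost here (and session_id in auditsc.c below) are plain statistics, so wrapping is harmless; converting them to atomic_unchecked_t opts them out of the PaX REFCOUNT hardening, under which ordinary atomic_t increments saturate instead of overflowing so that reference-count overflow bugs cannot be turned into use-after-free. The type and the *_unchecked operations are defined earlier in this patch (include/linux/atomic.h and the arch headers); the pairing in sketch form:

static atomic_t refs = ATOMIC_INIT(0);		/* checked: saturates on
						   overflow under REFCOUNT */
static atomic_unchecked_t stats = ATOMIC_INIT(0);	/* wraps silently */

static void example(void)
{
	atomic_inc(&refs);		/* protected reference count */
	atomic_inc_unchecked(&stats);	/* counter where wrap is harmless */
}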
88350diff --git a/kernel/auditsc.c b/kernel/auditsc.c
88351index 072566d..1190489 100644
88352--- a/kernel/auditsc.c
88353+++ b/kernel/auditsc.c
88354@@ -2056,7 +2056,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
88355 }
88356
88357 /* global counter which is incremented every time something logs in */
88358-static atomic_t session_id = ATOMIC_INIT(0);
88359+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
88360
88361 static int audit_set_loginuid_perm(kuid_t loginuid)
88362 {
88363@@ -2123,7 +2123,7 @@ int audit_set_loginuid(kuid_t loginuid)
88364
88365 /* are we setting or clearing? */
88366 if (uid_valid(loginuid))
88367- sessionid = (unsigned int)atomic_inc_return(&session_id);
88368+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
88369
88370 task->sessionid = sessionid;
88371 task->loginuid = loginuid;
88372diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
88373index a64e7a2..2e69448 100644
88374--- a/kernel/bpf/core.c
88375+++ b/kernel/bpf/core.c
88376@@ -143,14 +143,17 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
88377 * random section of illegal instructions.
88378 */
88379 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
88380- hdr = module_alloc(size);
88381+ hdr = module_alloc_exec(size);
88382 if (hdr == NULL)
88383 return NULL;
88384
88385 /* Fill space with illegal/arch-dep instructions. */
88386 bpf_fill_ill_insns(hdr, size);
88387
88388+ pax_open_kernel();
88389 hdr->pages = size / PAGE_SIZE;
88390+ pax_close_kernel();
88391+
88392 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
88393 PAGE_SIZE - sizeof(*hdr));
88394 start = (prandom_u32() % hole) & ~(alignment - 1);
88395@@ -163,7 +166,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
88396
88397 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
88398 {
88399- module_memfree(hdr);
88400+ module_memfree_exec(hdr);
88401 }
88402 #endif /* CONFIG_BPF_JIT */
88403
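Two KERNEXEC details in the bpf/core.c hunk: JIT images must come from the executable half of the split module area (module_alloc_exec/module_memfree_exec, introduced elsewhere in this patch), and because that memory is mapped read-only plus execute, the store to hdr->pages has to sit inside a pax_open_kernel()/pax_close_kernel() bracket. A plausible x86 sketch of what that bracket does, assumed from PaX's usual CR0.WP approach rather than quoted from this patch:

/* Plausible mechanism sketch (x86): briefly lift the CPU's write
 * protection of read-only pages, for the current CPU only. */
static inline unsigned long pax_open_kernel(void)
{
	unsigned long cr0;

	preempt_disable();
	barrier();
	cr0 = read_cr0() ^ X86_CR0_WP;	/* clear CR0.WP */
	BUG_ON(cr0 & X86_CR0_WP);
	write_cr0(cr0);
	barrier();
	return cr0 ^ X86_CR0_WP;
}

static inline unsigned long pax_close_kernel(void)
{
	unsigned long cr0;

	barrier();
	cr0 = read_cr0() ^ X86_CR0_WP;	/* set CR0.WP again */
	BUG_ON(!(cr0 & X86_CR0_WP));
	write_cr0(cr0);
	barrier();
	preempt_enable();
	return cr0 ^ X86_CR0_WP;
}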
88404diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
88405index 536edc2..d28c85d 100644
88406--- a/kernel/bpf/syscall.c
88407+++ b/kernel/bpf/syscall.c
88408@@ -548,11 +548,15 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
88409 int err;
88410
88411 /* the syscall is limited to root temporarily. This restriction will be
88412- * lifted when security audit is clean. Note that eBPF+tracing must have
88413- * this restriction, since it may pass kernel data to user space
88414+ * lifted by upstream when a half-assed security audit is clean. Note
88415+ * that eBPF+tracing must have this restriction, since it may pass
88416+ * kernel data to user space
88417 */
88418 if (!capable(CAP_SYS_ADMIN))
88419 return -EPERM;
88420+#ifdef CONFIG_GRKERNSEC
88421+ return -EPERM;
88422+#endif
88423
88424 if (!access_ok(VERIFY_READ, uattr, 1))
88425 return -EFAULT;
88426diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
88427index a28e09c..36508e6 100644
88428--- a/kernel/bpf/verifier.c
88429+++ b/kernel/bpf/verifier.c
88430@@ -1380,7 +1380,8 @@ peek_stack:
88431 /* tell verifier to check for equivalent states
88432 * after every call and jump
88433 */
88434- env->explored_states[t + 1] = STATE_LIST_MARK;
88435+ if (t + 1 < insn_cnt)
88436+ env->explored_states[t + 1] = STATE_LIST_MARK;
88437 } else {
88438 /* conditional jump with two edges */
88439 ret = push_insn(t, t + 1, FALLTHROUGH, env);
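The verifier change is a straight bounds fix: explored_states[] has insn_cnt entries, and when the unconditional-jump path processes the last instruction, t + 1 == insn_cnt, so the unguarded store wrote one element past the array. A minimal model of the off-by-one, with sizes invented for illustration:

enum { INSN_CNT = 4 };			/* valid indices are 0..3 */
static void *explored_states[INSN_CNT];

static void mark_next(int t)
{
	if (t + 1 < INSN_CNT)		/* the added guard */
		explored_states[t + 1] = (void *)1;
	/* without it, t == 3 writes explored_states[4] */
}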
88440diff --git a/kernel/capability.c b/kernel/capability.c
88441index 989f5bf..d317ca0 100644
88442--- a/kernel/capability.c
88443+++ b/kernel/capability.c
88444@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
88445 * before modification is attempted and the application
88446 * fails.
88447 */
88448+ if (tocopy > ARRAY_SIZE(kdata))
88449+ return -EFAULT;
88450+
88451 if (copy_to_user(dataptr, kdata, tocopy
88452 * sizeof(struct __user_cap_data_struct))) {
88453 return -EFAULT;
88454@@ -297,10 +300,11 @@ bool has_ns_capability(struct task_struct *t,
88455 int ret;
88456
88457 rcu_read_lock();
88458- ret = security_capable(__task_cred(t), ns, cap);
88459+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
88460+ gr_task_is_capable(t, __task_cred(t), cap);
88461 rcu_read_unlock();
88462
88463- return (ret == 0);
88464+ return ret;
88465 }
88466
88467 /**
88468@@ -337,10 +341,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
88469 int ret;
88470
88471 rcu_read_lock();
88472- ret = security_capable_noaudit(__task_cred(t), ns, cap);
88473+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
88474 rcu_read_unlock();
88475
88476- return (ret == 0);
88477+ return ret;
88478 }
88479
88480 /**
88481@@ -378,7 +382,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
88482 BUG();
88483 }
88484
88485- if (security_capable(current_cred(), ns, cap) == 0) {
88486+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
88487 current->flags |= PF_SUPERPRIV;
88488 return true;
88489 }
88490@@ -386,6 +390,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
88491 }
88492 EXPORT_SYMBOL(ns_capable);
88493
88494+bool ns_capable_nolog(struct user_namespace *ns, int cap)
88495+{
88496+ if (unlikely(!cap_valid(cap))) {
88497+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
88498+ BUG();
88499+ }
88500+
88501+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
88502+ current->flags |= PF_SUPERPRIV;
88503+ return true;
88504+ }
88505+ return false;
88506+}
88507+EXPORT_SYMBOL(ns_capable_nolog);
88508+
88509 /**
88510 * file_ns_capable - Determine if the file's opener had a capability in effect
88511 * @file: The file we want to check
88512@@ -427,6 +446,12 @@ bool capable(int cap)
88513 }
88514 EXPORT_SYMBOL(capable);
88515
88516+bool capable_nolog(int cap)
88517+{
88518+ return ns_capable_nolog(&init_user_ns, cap);
88519+}
88520+EXPORT_SYMBOL(capable_nolog);
88521+
88522 /**
88523 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
88524 * @inode: The inode in question
88525@@ -444,3 +469,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
88526 kgid_has_mapping(ns, inode->i_gid);
88527 }
88528 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
88529+
88530+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
88531+{
88532+ struct user_namespace *ns = current_user_ns();
88533+
88534+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
88535+ kgid_has_mapping(ns, inode->i_gid);
88536+}
88537+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
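Two separable changes in capability.c: a bounds check in capget() so a tocopy value derived from the header version can never exceed the on-stack kdata[] array, and *_nolog variants of the capability predicates so grsecurity's RBAC layer (gr_is_capable*, gr_task_is_capable*) is consulted everywhere without flooding the log for denials that are expected. A sketch of the capget() guard with the sizes written out; the constants are the upstream ones, the surrounding code is condensed:

/* kdata holds _KERNEL_CAPABILITY_U32S (== 2) blocks; a 64-bit
 * capability set fits in two u32 words. */
struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
unsigned tocopy;	/* 1 for VERSION_1 headers, 2 for VERSION_2/3 */

if (tocopy > ARRAY_SIZE(kdata))	/* defensive: malformed version mapping */
	return -EFAULT;
if (copy_to_user(dataptr, kdata, tocopy * sizeof(kdata[0])))
	return -EFAULT;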
88538diff --git a/kernel/cgroup.c b/kernel/cgroup.c
88539index 04cfe8a..adadcc0 100644
88540--- a/kernel/cgroup.c
88541+++ b/kernel/cgroup.c
88542@@ -5343,6 +5343,9 @@ static void cgroup_release_agent(struct work_struct *work)
88543 if (!pathbuf || !agentbuf)
88544 goto out;
88545
88546+ if (agentbuf[0] == '\0')
88547+ goto out;
88548+
88549 path = cgroup_path(cgrp, pathbuf, PATH_MAX);
88550 if (!path)
88551 goto out;
88552@@ -5528,7 +5531,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
88553 struct task_struct *task;
88554 int count = 0;
88555
88556- seq_printf(seq, "css_set %p\n", cset);
88557+ seq_printf(seq, "css_set %pK\n", cset);
88558
88559 list_for_each_entry(task, &cset->tasks, cg_list) {
88560 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
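The seq_printf change swaps %p for %pK: in this kernel %pK prints all zeroes to readers who lack the needed privilege when kptr_restrict is set (and GRKERNSEC_HIDESYM tightens this further elsewhere in the patch), whereas a bare %p hands any reader of the debug file a raw css_set address usable against KASLR and heap layout randomization. The pattern:

/* %p leaks the raw pointer; %pK respects kptr_restrict and shows
 * 0000000000000000 to unprivileged readers. */
seq_printf(seq, "css_set %pK\n", cset);

Typical deployments pair this with sysctl kernel.kptr_restrict=1 or 2.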
88561diff --git a/kernel/compat.c b/kernel/compat.c
88562index ebb3c36..1df606e 100644
88563--- a/kernel/compat.c
88564+++ b/kernel/compat.c
88565@@ -13,6 +13,7 @@
88566
88567 #include <linux/linkage.h>
88568 #include <linux/compat.h>
88569+#include <linux/module.h>
88570 #include <linux/errno.h>
88571 #include <linux/time.h>
88572 #include <linux/signal.h>
88573@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
88574 mm_segment_t oldfs;
88575 long ret;
88576
88577- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
88578+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
88579 oldfs = get_fs();
88580 set_fs(KERNEL_DS);
88581 ret = hrtimer_nanosleep_restart(restart);
88582@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
88583 oldfs = get_fs();
88584 set_fs(KERNEL_DS);
88585 ret = hrtimer_nanosleep(&tu,
88586- rmtp ? (struct timespec __user *)&rmt : NULL,
88587+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
88588 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
88589 set_fs(oldfs);
88590
88591@@ -379,7 +380,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
88592 mm_segment_t old_fs = get_fs();
88593
88594 set_fs(KERNEL_DS);
88595- ret = sys_sigpending((old_sigset_t __user *) &s);
88596+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
88597 set_fs(old_fs);
88598 if (ret == 0)
88599 ret = put_user(s, set);
88600@@ -469,7 +470,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
88601 mm_segment_t old_fs = get_fs();
88602
88603 set_fs(KERNEL_DS);
88604- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
88605+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
88606 set_fs(old_fs);
88607
88608 if (!ret) {
88609@@ -551,8 +552,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
88610 set_fs (KERNEL_DS);
88611 ret = sys_wait4(pid,
88612 (stat_addr ?
88613- (unsigned int __user *) &status : NULL),
88614- options, (struct rusage __user *) &r);
88615+ (unsigned int __force_user *) &status : NULL),
88616+ options, (struct rusage __force_user *) &r);
88617 set_fs (old_fs);
88618
88619 if (ret > 0) {
88620@@ -578,8 +579,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
88621 memset(&info, 0, sizeof(info));
88622
88623 set_fs(KERNEL_DS);
88624- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
88625- uru ? (struct rusage __user *)&ru : NULL);
88626+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
88627+ uru ? (struct rusage __force_user *)&ru : NULL);
88628 set_fs(old_fs);
88629
88630 if ((ret < 0) || (info.si_signo == 0))
88631@@ -713,8 +714,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
88632 oldfs = get_fs();
88633 set_fs(KERNEL_DS);
88634 err = sys_timer_settime(timer_id, flags,
88635- (struct itimerspec __user *) &newts,
88636- (struct itimerspec __user *) &oldts);
88637+ (struct itimerspec __force_user *) &newts,
88638+ (struct itimerspec __force_user *) &oldts);
88639 set_fs(oldfs);
88640 if (!err && old && put_compat_itimerspec(old, &oldts))
88641 return -EFAULT;
88642@@ -731,7 +732,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
88643 oldfs = get_fs();
88644 set_fs(KERNEL_DS);
88645 err = sys_timer_gettime(timer_id,
88646- (struct itimerspec __user *) &ts);
88647+ (struct itimerspec __force_user *) &ts);
88648 set_fs(oldfs);
88649 if (!err && put_compat_itimerspec(setting, &ts))
88650 return -EFAULT;
88651@@ -750,7 +751,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
88652 oldfs = get_fs();
88653 set_fs(KERNEL_DS);
88654 err = sys_clock_settime(which_clock,
88655- (struct timespec __user *) &ts);
88656+ (struct timespec __force_user *) &ts);
88657 set_fs(oldfs);
88658 return err;
88659 }
88660@@ -765,7 +766,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
88661 oldfs = get_fs();
88662 set_fs(KERNEL_DS);
88663 err = sys_clock_gettime(which_clock,
88664- (struct timespec __user *) &ts);
88665+ (struct timespec __force_user *) &ts);
88666 set_fs(oldfs);
88667 if (!err && compat_put_timespec(&ts, tp))
88668 return -EFAULT;
88669@@ -785,7 +786,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
88670
88671 oldfs = get_fs();
88672 set_fs(KERNEL_DS);
88673- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
88674+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
88675 set_fs(oldfs);
88676
88677 err = compat_put_timex(utp, &txc);
88678@@ -805,7 +806,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
88679 oldfs = get_fs();
88680 set_fs(KERNEL_DS);
88681 err = sys_clock_getres(which_clock,
88682- (struct timespec __user *) &ts);
88683+ (struct timespec __force_user *) &ts);
88684 set_fs(oldfs);
88685 if (!err && tp && compat_put_timespec(&ts, tp))
88686 return -EFAULT;
88687@@ -819,7 +820,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
88688 struct timespec tu;
88689 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
88690
88691- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
88692+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
88693 oldfs = get_fs();
88694 set_fs(KERNEL_DS);
88695 err = clock_nanosleep_restart(restart);
88696@@ -851,8 +852,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
88697 oldfs = get_fs();
88698 set_fs(KERNEL_DS);
88699 err = sys_clock_nanosleep(which_clock, flags,
88700- (struct timespec __user *) &in,
88701- (struct timespec __user *) &out);
88702+ (struct timespec __force_user *) &in,
88703+ (struct timespec __force_user *) &out);
88704 set_fs(oldfs);
88705
88706 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
88707@@ -1146,7 +1147,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
88708 mm_segment_t old_fs = get_fs();
88709
88710 set_fs(KERNEL_DS);
88711- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
88712+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
88713 set_fs(old_fs);
88714 if (compat_put_timespec(&t, interval))
88715 return -EFAULT;
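All of the compat.c hunks are one mechanical change: these wrappers use the set_fs(KERNEL_DS) idiom to pass an on-stack kernel buffer to a native syscall whose parameter is __user-typed, and the patch's stricter user/kernel pointer separation (checked for PaX UDEREF) requires that deliberate cast to be spelled with the __force_user annotation this patch adds, so only intentional crossings compile cleanly. The canonical bracket, condensed from the hunks above:

clockid_t which_clock = CLOCK_MONOTONIC;
struct timespec ts;
mm_segment_t oldfs = get_fs();
long err;

set_fs(KERNEL_DS);	/* temporarily treat kernel space as "user" */
err = sys_clock_gettime(which_clock,
			(struct timespec __force_user *)&ts);
set_fs(oldfs);		/* always restore the previous limit */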
88716diff --git a/kernel/configs.c b/kernel/configs.c
88717index c18b1f1..b9a0132 100644
88718--- a/kernel/configs.c
88719+++ b/kernel/configs.c
88720@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
88721 struct proc_dir_entry *entry;
88722
88723 /* create the current config file */
88724+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
88725+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
88726+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
88727+ &ikconfig_file_ops);
88728+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
88729+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
88730+ &ikconfig_file_ops);
88731+#endif
88732+#else
88733 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
88734 &ikconfig_file_ops);
88735+#endif
88736+
88737 if (!entry)
88738 return -ENOMEM;
88739
88740diff --git a/kernel/cred.c b/kernel/cred.c
88741index e0573a4..26c0fd3 100644
88742--- a/kernel/cred.c
88743+++ b/kernel/cred.c
88744@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
88745 validate_creds(cred);
88746 alter_cred_subscribers(cred, -1);
88747 put_cred(cred);
88748+
88749+#ifdef CONFIG_GRKERNSEC_SETXID
88750+ cred = (struct cred *) tsk->delayed_cred;
88751+ if (cred != NULL) {
88752+ tsk->delayed_cred = NULL;
88753+ validate_creds(cred);
88754+ alter_cred_subscribers(cred, -1);
88755+ put_cred(cred);
88756+ }
88757+#endif
88758 }
88759
88760 /**
88761@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
88762 * Always returns 0 thus allowing this function to be tail-called at the end
88763 * of, say, sys_setgid().
88764 */
88765-int commit_creds(struct cred *new)
88766+static int __commit_creds(struct cred *new)
88767 {
88768 struct task_struct *task = current;
88769 const struct cred *old = task->real_cred;
88770@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
88771
88772 get_cred(new); /* we will require a ref for the subj creds too */
88773
88774+ gr_set_role_label(task, new->uid, new->gid);
88775+
88776 /* dumpability changes */
88777 if (!uid_eq(old->euid, new->euid) ||
88778 !gid_eq(old->egid, new->egid) ||
88779@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
88780 put_cred(old);
88781 return 0;
88782 }
88783+#ifdef CONFIG_GRKERNSEC_SETXID
88784+extern int set_user(struct cred *new);
88785+
88786+void gr_delayed_cred_worker(void)
88787+{
88788+ const struct cred *new = current->delayed_cred;
88789+ struct cred *ncred;
88790+
88791+ current->delayed_cred = NULL;
88792+
88793+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
88794+ // from doing get_cred on it when queueing this
88795+ put_cred(new);
88796+ return;
88797+ } else if (new == NULL)
88798+ return;
88799+
88800+ ncred = prepare_creds();
88801+ if (!ncred)
88802+ goto die;
88803+ // uids
88804+ ncred->uid = new->uid;
88805+ ncred->euid = new->euid;
88806+ ncred->suid = new->suid;
88807+ ncred->fsuid = new->fsuid;
88808+ // gids
88809+ ncred->gid = new->gid;
88810+ ncred->egid = new->egid;
88811+ ncred->sgid = new->sgid;
88812+ ncred->fsgid = new->fsgid;
88813+ // groups
88814+ set_groups(ncred, new->group_info);
88815+ // caps
88816+ ncred->securebits = new->securebits;
88817+ ncred->cap_inheritable = new->cap_inheritable;
88818+ ncred->cap_permitted = new->cap_permitted;
88819+ ncred->cap_effective = new->cap_effective;
88820+ ncred->cap_bset = new->cap_bset;
88821+
88822+ if (set_user(ncred)) {
88823+ abort_creds(ncred);
88824+ goto die;
88825+ }
88826+
88827+ // from doing get_cred on it when queueing this
88828+ put_cred(new);
88829+
88830+ __commit_creds(ncred);
88831+ return;
88832+die:
88833+ // from doing get_cred on it when queueing this
88834+ put_cred(new);
88835+ do_group_exit(SIGKILL);
88836+}
88837+#endif
88838+
88839+int commit_creds(struct cred *new)
88840+{
88841+#ifdef CONFIG_GRKERNSEC_SETXID
88842+ int ret;
88843+ int schedule_it = 0;
88844+ struct task_struct *t;
88845+ unsigned oldsecurebits = current_cred()->securebits;
88846+
88847+ /* we won't get called with tasklist_lock held for writing
88848+ and interrupts disabled as the cred struct in that case is
88849+ init_cred
88850+ */
88851+ if (grsec_enable_setxid && !current_is_single_threaded() &&
88852+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
88853+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
88854+ schedule_it = 1;
88855+ }
88856+ ret = __commit_creds(new);
88857+ if (schedule_it) {
88858+ rcu_read_lock();
88859+ read_lock(&tasklist_lock);
88860+ for (t = next_thread(current); t != current;
88861+ t = next_thread(t)) {
88862+ /* we'll check if the thread has uid 0 in
88863+ * the delayed worker routine
88864+ */
88865+ if (task_securebits(t) == oldsecurebits &&
88866+ t->delayed_cred == NULL) {
88867+ t->delayed_cred = get_cred(new);
88868+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
88869+ set_tsk_need_resched(t);
88870+ }
88871+ }
88872+ read_unlock(&tasklist_lock);
88873+ rcu_read_unlock();
88874+ }
88875+
88876+ return ret;
88877+#else
88878+ return __commit_creds(new);
88879+#endif
88880+}
88881+
88882 EXPORT_SYMBOL(commit_creds);
88883
88884 /**
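The commit_creds() wrapper above closes a multithreaded setuid gap: POSIX semantics say setuid() applies to the whole process, but the kernel applies credentials per task, so with GRKERNSEC_SETXID a root thread dropping to a non-root uid stamps every sibling with delayed_cred plus TIF_GRSEC_SETXID and forces a reschedule; each sibling then applies the queued credentials through gr_delayed_cred_worker(), and a failure there kills the thread group rather than leave a privileged straggler. The consumer side is arch code elsewhere in this patch; a sketch, assuming the flag is serviced alongside the other return-to-userspace work flags:

/* Assumed placement: with the other TIF work on the return path. */
if (test_thread_flag(TIF_GRSEC_SETXID)) {
	clear_thread_flag(TIF_GRSEC_SETXID);
	gr_delayed_cred_worker();	/* applies ->delayed_cred, or
					   exits the group via SIGKILL */
}

This enforces in the kernel what glibc only approximates in user space with its signal-based setxid broadcast, so raw clone() users are covered too.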
88885diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
88886index ac5c0f9..4b1c6c2 100644
88887--- a/kernel/debug/debug_core.c
88888+++ b/kernel/debug/debug_core.c
88889@@ -127,7 +127,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
88890 */
88891 static atomic_t masters_in_kgdb;
88892 static atomic_t slaves_in_kgdb;
88893-static atomic_t kgdb_break_tasklet_var;
88894+static atomic_unchecked_t kgdb_break_tasklet_var;
88895 atomic_t kgdb_setting_breakpoint;
88896
88897 struct task_struct *kgdb_usethread;
88898@@ -137,7 +137,7 @@ int kgdb_single_step;
88899 static pid_t kgdb_sstep_pid;
88900
88901 /* to keep track of the CPU which is doing the single stepping*/
88902-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
88903+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
88904
88905 /*
88906 * If you are debugging a problem where roundup (the collection of
88907@@ -552,7 +552,7 @@ return_normal:
88908 * kernel will only try for the value of sstep_tries before
88909 * giving up and continuing on.
88910 */
88911- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
88912+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
88913 (kgdb_info[cpu].task &&
88914 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
88915 atomic_set(&kgdb_active, -1);
88916@@ -654,8 +654,8 @@ cpu_master_loop:
88917 }
88918
88919 kgdb_restore:
88920- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
88921- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
88922+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
88923+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
88924 if (kgdb_info[sstep_cpu].task)
88925 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
88926 else
88927@@ -932,18 +932,18 @@ static void kgdb_unregister_callbacks(void)
88928 static void kgdb_tasklet_bpt(unsigned long ing)
88929 {
88930 kgdb_breakpoint();
88931- atomic_set(&kgdb_break_tasklet_var, 0);
88932+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
88933 }
88934
88935 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
88936
88937 void kgdb_schedule_breakpoint(void)
88938 {
88939- if (atomic_read(&kgdb_break_tasklet_var) ||
88940+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
88941 atomic_read(&kgdb_active) != -1 ||
88942 atomic_read(&kgdb_setting_breakpoint))
88943 return;
88944- atomic_inc(&kgdb_break_tasklet_var);
88945+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
88946 tasklet_schedule(&kgdb_tasklet_breakpoint);
88947 }
88948 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
88949diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
88950index 60f6bb8..104bb07 100644
88951--- a/kernel/debug/kdb/kdb_main.c
88952+++ b/kernel/debug/kdb/kdb_main.c
88953@@ -2021,7 +2021,7 @@ static int kdb_lsmod(int argc, const char **argv)
88954 continue;
88955
88956 kdb_printf("%-20s%8u 0x%p ", mod->name,
88957- mod->core_size, (void *)mod);
88958+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
88959 #ifdef CONFIG_MODULE_UNLOAD
88960 kdb_printf("%4d ", module_refcount(mod));
88961 #endif
88962@@ -2031,7 +2031,7 @@ static int kdb_lsmod(int argc, const char **argv)
88963 kdb_printf(" (Loading)");
88964 else
88965 kdb_printf(" (Live)");
88966- kdb_printf(" 0x%p", mod->module_core);
88967+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
88968
88969 #ifdef CONFIG_MODULE_UNLOAD
88970 {
88971diff --git a/kernel/events/core.c b/kernel/events/core.c
88972index 7959624..c01b886 100644
88973--- a/kernel/events/core.c
88974+++ b/kernel/events/core.c
88975@@ -170,8 +170,15 @@ static struct srcu_struct pmus_srcu;
88976 * 0 - disallow raw tracepoint access for unpriv
88977 * 1 - disallow cpu events for unpriv
88978 * 2 - disallow kernel profiling for unpriv
88979+ * 3 - disallow all unpriv perf event use
88980 */
88981-int sysctl_perf_event_paranoid __read_mostly = 1;
88982+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
88983+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
88984+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
88985+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
88986+#else
88987+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
88988+#endif
88989
88990 /* Minimum for 512 kiB + 1 user control page */
88991 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
88992@@ -197,7 +204,7 @@ void update_perf_cpu_limits(void)
88993
88994 tmp *= sysctl_perf_cpu_time_max_percent;
88995 do_div(tmp, 100);
88996- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
88997+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
88998 }
88999
89000 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
89001@@ -303,7 +310,7 @@ void perf_sample_event_took(u64 sample_len_ns)
89002 }
89003 }
89004
89005-static atomic64_t perf_event_id;
89006+static atomic64_unchecked_t perf_event_id;
89007
89008 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
89009 enum event_type_t event_type);
89010@@ -3102,7 +3109,7 @@ static void __perf_event_read(void *info)
89011
89012 static inline u64 perf_event_count(struct perf_event *event)
89013 {
89014- return local64_read(&event->count) + atomic64_read(&event->child_count);
89015+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
89016 }
89017
89018 static u64 perf_event_read(struct perf_event *event)
89019@@ -3528,9 +3535,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
89020 mutex_lock(&event->child_mutex);
89021 total += perf_event_read(event);
89022 *enabled += event->total_time_enabled +
89023- atomic64_read(&event->child_total_time_enabled);
89024+ atomic64_read_unchecked(&event->child_total_time_enabled);
89025 *running += event->total_time_running +
89026- atomic64_read(&event->child_total_time_running);
89027+ atomic64_read_unchecked(&event->child_total_time_running);
89028
89029 list_for_each_entry(child, &event->child_list, child_list) {
89030 total += perf_event_read(child);
89031@@ -3994,10 +4001,10 @@ void perf_event_update_userpage(struct perf_event *event)
89032 userpg->offset -= local64_read(&event->hw.prev_count);
89033
89034 userpg->time_enabled = enabled +
89035- atomic64_read(&event->child_total_time_enabled);
89036+ atomic64_read_unchecked(&event->child_total_time_enabled);
89037
89038 userpg->time_running = running +
89039- atomic64_read(&event->child_total_time_running);
89040+ atomic64_read_unchecked(&event->child_total_time_running);
89041
89042 arch_perf_update_userpage(userpg, now);
89043
89044@@ -4578,7 +4585,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
89045
89046 /* Data. */
89047 sp = perf_user_stack_pointer(regs);
89048- rem = __output_copy_user(handle, (void *) sp, dump_size);
89049+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
89050 dyn_size = dump_size - rem;
89051
89052 perf_output_skip(handle, rem);
89053@@ -4669,11 +4676,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
89054 values[n++] = perf_event_count(event);
89055 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
89056 values[n++] = enabled +
89057- atomic64_read(&event->child_total_time_enabled);
89058+ atomic64_read_unchecked(&event->child_total_time_enabled);
89059 }
89060 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
89061 values[n++] = running +
89062- atomic64_read(&event->child_total_time_running);
89063+ atomic64_read_unchecked(&event->child_total_time_running);
89064 }
89065 if (read_format & PERF_FORMAT_ID)
89066 values[n++] = primary_event_id(event);
89067@@ -7004,7 +7011,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
89068 event->parent = parent_event;
89069
89070 event->ns = get_pid_ns(task_active_pid_ns(current));
89071- event->id = atomic64_inc_return(&perf_event_id);
89072+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
89073
89074 event->state = PERF_EVENT_STATE_INACTIVE;
89075
89076@@ -7285,6 +7292,11 @@ SYSCALL_DEFINE5(perf_event_open,
89077 if (flags & ~PERF_FLAG_ALL)
89078 return -EINVAL;
89079
89080+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
89081+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
89082+ return -EACCES;
89083+#endif
89084+
89085 err = perf_copy_attr(attr_uptr, &attr);
89086 if (err)
89087 return err;
89088@@ -7652,10 +7664,10 @@ static void sync_child_event(struct perf_event *child_event,
89089 /*
89090 * Add back the child's count to the parent's count:
89091 */
89092- atomic64_add(child_val, &parent_event->child_count);
89093- atomic64_add(child_event->total_time_enabled,
89094+ atomic64_add_unchecked(child_val, &parent_event->child_count);
89095+ atomic64_add_unchecked(child_event->total_time_enabled,
89096 &parent_event->child_total_time_enabled);
89097- atomic64_add(child_event->total_time_running,
89098+ atomic64_add_unchecked(child_event->total_time_running,
89099 &parent_event->child_total_time_running);
89100
89101 /*
89102diff --git a/kernel/events/internal.h b/kernel/events/internal.h
89103index 569b2187..19940d9 100644
89104--- a/kernel/events/internal.h
89105+++ b/kernel/events/internal.h
89106@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
89107 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
89108 }
89109
89110-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
89111+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
89112 static inline unsigned long \
89113 func_name(struct perf_output_handle *handle, \
89114- const void *buf, unsigned long len) \
89115+ const void user *buf, unsigned long len) \
89116 { \
89117 unsigned long size, written; \
89118 \
89119@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
89120 return 0;
89121 }
89122
89123-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
89124+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
89125
89126 static inline unsigned long
89127 memcpy_skip(void *dst, const void *src, unsigned long n)
89128@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
89129 return 0;
89130 }
89131
89132-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
89133+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
89134
89135 #ifndef arch_perf_out_copy_user
89136 #define arch_perf_out_copy_user arch_perf_out_copy_user
89137@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
89138 }
89139 #endif
89140
89141-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
89142+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
89143
89144 /* Callchain handling */
89145 extern struct perf_callchain_entry *
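DEFINE_OUTPUT_COPY grows a third parameter that splices an address-space qualifier into the buf argument, so the user-copying instantiation carries a true __user pointer type while the kernel-side instantiations pass the annotation empty. What the two expansions look like after preprocessing, signatures only:

/* from DEFINE_OUTPUT_COPY(__output_copy_user, ..., __user): */
static inline unsigned long
__output_copy_user(struct perf_output_handle *handle,
		   const void __user *buf, unsigned long len);

/* from DEFINE_OUTPUT_COPY(__output_copy, ..., ) with the empty arg: */
static inline unsigned long
__output_copy(struct perf_output_handle *handle,
	      const void *buf, unsigned long len);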
89146diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
89147index cb346f2..e4dc317 100644
89148--- a/kernel/events/uprobes.c
89149+++ b/kernel/events/uprobes.c
89150@@ -1670,7 +1670,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
89151 {
89152 struct page *page;
89153 uprobe_opcode_t opcode;
89154- int result;
89155+ long result;
89156
89157 pagefault_disable();
89158 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
89159diff --git a/kernel/exit.c b/kernel/exit.c
89160index 6806c55..a5fb128 100644
89161--- a/kernel/exit.c
89162+++ b/kernel/exit.c
89163@@ -171,6 +171,10 @@ void release_task(struct task_struct *p)
89164 struct task_struct *leader;
89165 int zap_leader;
89166 repeat:
89167+#ifdef CONFIG_NET
89168+ gr_del_task_from_ip_table(p);
89169+#endif
89170+
89171 /* don't need to get the RCU readlock here - the process is dead and
89172 * can't be modifying its own credentials. But shut RCU-lockdep up */
89173 rcu_read_lock();
89174@@ -655,6 +659,8 @@ void do_exit(long code)
89175 int group_dead;
89176 TASKS_RCU(int tasks_rcu_i);
89177
89178+ set_fs(USER_DS);
89179+
89180 profile_task_exit(tsk);
89181
89182 WARN_ON(blk_needs_flush_plug(tsk));
89183@@ -671,7 +677,6 @@ void do_exit(long code)
89184 * mm_release()->clear_child_tid() from writing to a user-controlled
89185 * kernel address.
89186 */
89187- set_fs(USER_DS);
89188
89189 ptrace_event(PTRACE_EVENT_EXIT, code);
89190
89191@@ -729,6 +734,9 @@ void do_exit(long code)
89192 tsk->exit_code = code;
89193 taskstats_exit(tsk, group_dead);
89194
89195+ gr_acl_handle_psacct(tsk, code);
89196+ gr_acl_handle_exit();
89197+
89198 exit_mm(tsk);
89199
89200 if (group_dead)
89201@@ -848,7 +856,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
89202 * Take down every thread in the group. This is called by fatal signals
89203 * as well as by sys_exit_group (below).
89204 */
89205-void
89206+__noreturn void
89207 do_group_exit(int exit_code)
89208 {
89209 struct signal_struct *sig = current->signal;
89210diff --git a/kernel/fork.c b/kernel/fork.c
89211index 4dc2dda..651add0 100644
89212--- a/kernel/fork.c
89213+++ b/kernel/fork.c
89214@@ -177,12 +177,54 @@ static void free_thread_info(struct thread_info *ti)
89215 void thread_info_cache_init(void)
89216 {
89217 thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
89218- THREAD_SIZE, 0, NULL);
89219+ THREAD_SIZE, SLAB_USERCOPY, NULL);
89220 BUG_ON(thread_info_cache == NULL);
89221 }
89222 # endif
89223 #endif
89224
89225+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89226+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
89227+ int node, void **lowmem_stack)
89228+{
89229+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
89230+ void *ret = NULL;
89231+ unsigned int i;
89232+
89233+ *lowmem_stack = alloc_thread_info_node(tsk, node);
89234+ if (*lowmem_stack == NULL)
89235+ goto out;
89236+
89237+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
89238+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
89239+
89240+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
89241+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
89242+ if (ret == NULL) {
89243+ free_thread_info(*lowmem_stack);
89244+ *lowmem_stack = NULL;
89245+ }
89246+
89247+out:
89248+ return ret;
89249+}
89250+
89251+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
89252+{
89253+ unmap_process_stacks(tsk);
89254+}
89255+#else
89256+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
89257+ int node, void **lowmem_stack)
89258+{
89259+ return alloc_thread_info_node(tsk, node);
89260+}
89261+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
89262+{
89263+ free_thread_info(ti);
89264+}
89265+#endif
89266+
89267 /* SLAB cache for signal_struct structures (tsk->signal) */
89268 static struct kmem_cache *signal_cachep;
89269
89270@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
89271 /* SLAB cache for mm_struct structures (tsk->mm) */
89272 static struct kmem_cache *mm_cachep;
89273
89274-static void account_kernel_stack(struct thread_info *ti, int account)
89275+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
89276 {
89277+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89278+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
89279+#else
89280 struct zone *zone = page_zone(virt_to_page(ti));
89281+#endif
89282
89283 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
89284 }
89285
89286 void free_task(struct task_struct *tsk)
89287 {
89288- account_kernel_stack(tsk->stack, -1);
89289+ account_kernel_stack(tsk, tsk->stack, -1);
89290 arch_release_thread_info(tsk->stack);
89291- free_thread_info(tsk->stack);
89292+ gr_free_thread_info(tsk, tsk->stack);
89293 rt_mutex_debug_task_free(tsk);
89294 ftrace_graph_exit_task(tsk);
89295 put_seccomp_filter(tsk);
89296@@ -306,6 +352,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89297 {
89298 struct task_struct *tsk;
89299 struct thread_info *ti;
89300+ void *lowmem_stack;
89301 int node = tsk_fork_get_node(orig);
89302 int err;
89303
89304@@ -313,7 +360,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89305 if (!tsk)
89306 return NULL;
89307
89308- ti = alloc_thread_info_node(tsk, node);
89309+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
89310 if (!ti)
89311 goto free_tsk;
89312
89313@@ -322,6 +369,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89314 goto free_ti;
89315
89316 tsk->stack = ti;
89317+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89318+ tsk->lowmem_stack = lowmem_stack;
89319+#endif
89320 #ifdef CONFIG_SECCOMP
89321 /*
89322 * We must handle setting up seccomp filters once we're under
89323@@ -338,7 +388,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89324 set_task_stack_end_magic(tsk);
89325
89326 #ifdef CONFIG_CC_STACKPROTECTOR
89327- tsk->stack_canary = get_random_int();
89328+ tsk->stack_canary = pax_get_random_long();
89329 #endif
89330
89331 /*
89332@@ -352,24 +402,92 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89333 tsk->splice_pipe = NULL;
89334 tsk->task_frag.page = NULL;
89335
89336- account_kernel_stack(ti, 1);
89337+ account_kernel_stack(tsk, ti, 1);
89338
89339 return tsk;
89340
89341 free_ti:
89342- free_thread_info(ti);
89343+ gr_free_thread_info(tsk, ti);
89344 free_tsk:
89345 free_task_struct(tsk);
89346 return NULL;
89347 }
89348
89349 #ifdef CONFIG_MMU
89350-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89351+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
89352+{
89353+ struct vm_area_struct *tmp;
89354+ unsigned long charge;
89355+ struct file *file;
89356+ int retval;
89357+
89358+ charge = 0;
89359+ if (mpnt->vm_flags & VM_ACCOUNT) {
89360+ unsigned long len = vma_pages(mpnt);
89361+
89362+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89363+ goto fail_nomem;
89364+ charge = len;
89365+ }
89366+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89367+ if (!tmp)
89368+ goto fail_nomem;
89369+ *tmp = *mpnt;
89370+ tmp->vm_mm = mm;
89371+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
89372+ retval = vma_dup_policy(mpnt, tmp);
89373+ if (retval)
89374+ goto fail_nomem_policy;
89375+ if (anon_vma_fork(tmp, mpnt))
89376+ goto fail_nomem_anon_vma_fork;
89377+ tmp->vm_flags &= ~VM_LOCKED;
89378+ tmp->vm_next = tmp->vm_prev = NULL;
89379+ tmp->vm_mirror = NULL;
89380+ file = tmp->vm_file;
89381+ if (file) {
89382+ struct inode *inode = file_inode(file);
89383+ struct address_space *mapping = file->f_mapping;
89384+
89385+ get_file(file);
89386+ if (tmp->vm_flags & VM_DENYWRITE)
89387+ atomic_dec(&inode->i_writecount);
89388+ i_mmap_lock_write(mapping);
89389+ if (tmp->vm_flags & VM_SHARED)
89390+ atomic_inc(&mapping->i_mmap_writable);
89391+ flush_dcache_mmap_lock(mapping);
89392+ /* insert tmp into the share list, just after mpnt */
89393+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
89394+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
89395+ else
89396+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
89397+ flush_dcache_mmap_unlock(mapping);
89398+ i_mmap_unlock_write(mapping);
89399+ }
89400+
89401+ /*
89402+ * Clear hugetlb-related page reserves for children. This only
89403+ * affects MAP_PRIVATE mappings. Faults generated by the child
89404+ * are not guaranteed to succeed, even if read-only
89405+ */
89406+ if (is_vm_hugetlb_page(tmp))
89407+ reset_vma_resv_huge_pages(tmp);
89408+
89409+ return tmp;
89410+
89411+fail_nomem_anon_vma_fork:
89412+ mpol_put(vma_policy(tmp));
89413+fail_nomem_policy:
89414+ kmem_cache_free(vm_area_cachep, tmp);
89415+fail_nomem:
89416+ vm_unacct_memory(charge);
89417+ return NULL;
89418+}
89419+
89420+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89421 {
89422 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
89423 struct rb_node **rb_link, *rb_parent;
89424 int retval;
89425- unsigned long charge;
89426
89427 uprobe_start_dup_mmap();
89428 down_write(&oldmm->mmap_sem);
89429@@ -397,55 +515,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89430
89431 prev = NULL;
89432 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
89433- struct file *file;
89434-
89435 if (mpnt->vm_flags & VM_DONTCOPY) {
89436 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
89437 -vma_pages(mpnt));
89438 continue;
89439 }
89440- charge = 0;
89441- if (mpnt->vm_flags & VM_ACCOUNT) {
89442- unsigned long len = vma_pages(mpnt);
89443-
89444- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89445- goto fail_nomem;
89446- charge = len;
89447- }
89448- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89449- if (!tmp)
89450- goto fail_nomem;
89451- *tmp = *mpnt;
89452- INIT_LIST_HEAD(&tmp->anon_vma_chain);
89453- retval = vma_dup_policy(mpnt, tmp);
89454- if (retval)
89455- goto fail_nomem_policy;
89456- tmp->vm_mm = mm;
89457- if (anon_vma_fork(tmp, mpnt))
89458- goto fail_nomem_anon_vma_fork;
89459- tmp->vm_flags &= ~VM_LOCKED;
89460- tmp->vm_next = tmp->vm_prev = NULL;
89461- file = tmp->vm_file;
89462- if (file) {
89463- struct inode *inode = file_inode(file);
89464- struct address_space *mapping = file->f_mapping;
89465-
89466- get_file(file);
89467- if (tmp->vm_flags & VM_DENYWRITE)
89468- atomic_dec(&inode->i_writecount);
89469- i_mmap_lock_write(mapping);
89470- if (tmp->vm_flags & VM_SHARED)
89471- atomic_inc(&mapping->i_mmap_writable);
89472- flush_dcache_mmap_lock(mapping);
89473- /* insert tmp into the share list, just after mpnt */
89474- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
89475- vma_nonlinear_insert(tmp,
89476- &mapping->i_mmap_nonlinear);
89477- else
89478- vma_interval_tree_insert_after(tmp, mpnt,
89479- &mapping->i_mmap);
89480- flush_dcache_mmap_unlock(mapping);
89481- i_mmap_unlock_write(mapping);
89482+ tmp = dup_vma(mm, oldmm, mpnt);
89483+ if (!tmp) {
89484+ retval = -ENOMEM;
89485+ goto out;
89486 }
89487
89488 /*
89489@@ -477,6 +555,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89490 if (retval)
89491 goto out;
89492 }
89493+
89494+#ifdef CONFIG_PAX_SEGMEXEC
89495+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
89496+ struct vm_area_struct *mpnt_m;
89497+
89498+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
89499+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
89500+
89501+ if (!mpnt->vm_mirror)
89502+ continue;
89503+
89504+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
89505+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
89506+ mpnt->vm_mirror = mpnt_m;
89507+ } else {
89508+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
89509+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
89510+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
89511+ mpnt->vm_mirror->vm_mirror = mpnt;
89512+ }
89513+ }
89514+ BUG_ON(mpnt_m);
89515+ }
89516+#endif
89517+
89518 /* a new mm has just been created */
89519 arch_dup_mmap(oldmm, mm);
89520 retval = 0;
89521@@ -486,14 +589,6 @@ out:
89522 up_write(&oldmm->mmap_sem);
89523 uprobe_end_dup_mmap();
89524 return retval;
89525-fail_nomem_anon_vma_fork:
89526- mpol_put(vma_policy(tmp));
89527-fail_nomem_policy:
89528- kmem_cache_free(vm_area_cachep, tmp);
89529-fail_nomem:
89530- retval = -ENOMEM;
89531- vm_unacct_memory(charge);
89532- goto out;
89533 }
89534
89535 static inline int mm_alloc_pgd(struct mm_struct *mm)
89536@@ -734,8 +829,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
89537 return ERR_PTR(err);
89538
89539 mm = get_task_mm(task);
89540- if (mm && mm != current->mm &&
89541- !ptrace_may_access(task, mode)) {
89542+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
89543+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
89544 mmput(mm);
89545 mm = ERR_PTR(-EACCES);
89546 }
89547@@ -938,13 +1033,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
89548 spin_unlock(&fs->lock);
89549 return -EAGAIN;
89550 }
89551- fs->users++;
89552+ atomic_inc(&fs->users);
89553 spin_unlock(&fs->lock);
89554 return 0;
89555 }
89556 tsk->fs = copy_fs_struct(fs);
89557 if (!tsk->fs)
89558 return -ENOMEM;
89559+ /* Carry through gr_chroot_dentry and is_chrooted instead
89560+ of recomputing it here. Already copied when the task struct
89561+ is duplicated. This allows pivot_root to not be treated as
89562+ a chroot
89563+ */
89564+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
89565+
89566 return 0;
89567 }
89568
89569@@ -1182,7 +1284,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
89570 * parts of the process environment (as per the clone
89571 * flags). The actual kick-off is left to the caller.
89572 */
89573-static struct task_struct *copy_process(unsigned long clone_flags,
89574+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
89575 unsigned long stack_start,
89576 unsigned long stack_size,
89577 int __user *child_tidptr,
89578@@ -1253,6 +1355,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89579 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
89580 #endif
89581 retval = -EAGAIN;
89582+
89583+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
89584+
89585 if (atomic_read(&p->real_cred->user->processes) >=
89586 task_rlimit(p, RLIMIT_NPROC)) {
89587 if (p->real_cred->user != INIT_USER &&
89588@@ -1502,6 +1607,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89589 goto bad_fork_free_pid;
89590 }
89591
89592+ /* synchronizes with gr_set_acls()
89593+ we need to call this past the point of no return for fork()
89594+ */
89595+ gr_copy_label(p);
89596+
89597 if (likely(p->pid)) {
89598 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
89599
89600@@ -1592,6 +1702,8 @@ bad_fork_cleanup_count:
89601 bad_fork_free:
89602 free_task(p);
89603 fork_out:
89604+ gr_log_forkfail(retval);
89605+
89606 return ERR_PTR(retval);
89607 }
89608
89609@@ -1653,6 +1765,7 @@ long do_fork(unsigned long clone_flags,
89610
89611 p = copy_process(clone_flags, stack_start, stack_size,
89612 child_tidptr, NULL, trace);
89613+ add_latent_entropy();
89614 /*
89615 * Do this prior waking up the new thread - the thread pointer
89616 * might get invalid after that point, if the thread exits quickly.
89617@@ -1669,6 +1782,8 @@ long do_fork(unsigned long clone_flags,
89618 if (clone_flags & CLONE_PARENT_SETTID)
89619 put_user(nr, parent_tidptr);
89620
89621+ gr_handle_brute_check();
89622+
89623 if (clone_flags & CLONE_VFORK) {
89624 p->vfork_done = &vfork;
89625 init_completion(&vfork);
89626@@ -1787,7 +1902,7 @@ void __init proc_caches_init(void)
89627 mm_cachep = kmem_cache_create("mm_struct",
89628 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
89629 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
89630- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
89631+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
89632 mmap_init();
89633 nsproxy_cache_init();
89634 }
89635@@ -1827,7 +1942,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
89636 return 0;
89637
89638 /* don't need lock here; in the worst case we'll do useless copy */
89639- if (fs->users == 1)
89640+ if (atomic_read(&fs->users) == 1)
89641 return 0;
89642
89643 *new_fsp = copy_fs_struct(fs);
89644@@ -1939,7 +2054,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
89645 fs = current->fs;
89646 spin_lock(&fs->lock);
89647 current->fs = new_fs;
89648- if (--fs->users)
89649+ gr_set_chroot_entries(current, &current->fs->root);
89650+ if (atomic_dec_return(&fs->users))
89651 new_fs = NULL;
89652 else
89653 new_fs = fs;
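Besides the dup_mmap() refactor into dup_vma() and the SEGMEXEC mirror fix-up, the fork.c changes implement GRKERNSEC_KSTACKOVERFLOW: the kernel stack is remapped through vmap() (VM_IOREMAP is used only because it yields THREAD_SIZE-aligned placement), putting it in vmalloc space where the unmapped guard pages around every mapping turn a stack overrun into an immediate fault instead of silent corruption of an adjacent lowmem object; the original linear allocation is kept in tsk->lowmem_stack for code that needs physically contiguous stack addresses. This is essentially the idea that later reached mainline as CONFIG_VMAP_STACK. The core of the remap, condensed from the hunk above:

void *lowmem_stack = alloc_thread_info_node(tsk, node);	/* kept around */
struct thread_info *stack;
struct page *pages[THREAD_SIZE / PAGE_SIZE];
unsigned int i;

for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
	pages[i] = virt_to_page(lowmem_stack + i * PAGE_SIZE);

/* vmalloc-space mapping: neighbours are separated by guard pages */
stack = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);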
89654diff --git a/kernel/futex.c b/kernel/futex.c
89655index 63678b5..512f9af 100644
89656--- a/kernel/futex.c
89657+++ b/kernel/futex.c
89658@@ -201,7 +201,7 @@ struct futex_pi_state {
89659 atomic_t refcount;
89660
89661 union futex_key key;
89662-};
89663+} __randomize_layout;
89664
89665 /**
89666 * struct futex_q - The hashed futex queue entry, one per waiting task
89667@@ -235,7 +235,7 @@ struct futex_q {
89668 struct rt_mutex_waiter *rt_waiter;
89669 union futex_key *requeue_pi_key;
89670 u32 bitset;
89671-};
89672+} __randomize_layout;
89673
89674 static const struct futex_q futex_q_init = {
89675 /* list gets initialized in queue_me()*/
89676@@ -402,6 +402,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
89677 struct page *page, *page_head;
89678 int err, ro = 0;
89679
89680+#ifdef CONFIG_PAX_SEGMEXEC
89681+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
89682+ return -EFAULT;
89683+#endif
89684+
89685 /*
89686 * The futex address must be "naturally" aligned.
89687 */
89688@@ -601,7 +606,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
89689
89690 static int get_futex_value_locked(u32 *dest, u32 __user *from)
89691 {
89692- int ret;
89693+ unsigned long ret;
89694
89695 pagefault_disable();
89696 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
89697@@ -3006,6 +3011,7 @@ static void __init futex_detect_cmpxchg(void)
89698 {
89699 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
89700 u32 curval;
89701+ mm_segment_t oldfs;
89702
89703 /*
89704 * This will fail and we want it. Some arch implementations do
89705@@ -3017,8 +3023,11 @@ static void __init futex_detect_cmpxchg(void)
89706 * implementation, the non-functional ones will return
89707 * -ENOSYS.
89708 */
89709+ oldfs = get_fs();
89710+ set_fs(USER_DS);
89711 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
89712 futex_cmpxchg_enabled = 1;
89713+ set_fs(oldfs);
89714 #endif
89715 }
89716
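futex_detect_cmpxchg() deliberately probes the arch cmpxchg with a NULL user pointer and expects -EFAULT. Under PaX's user/kernel separation (UDEREF) that probe is only meaningful while the thread's address limit really is USER_DS, which is presumably why the patch brackets it; otherwise the intentional fault could be handled as a kernel-mode NULL dereference rather than a clean user-access failure. The shape of the fix, restated:

mm_segment_t oldfs = get_fs();

set_fs(USER_DS);	/* make the NULL probe a genuine user access */
if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
	futex_cmpxchg_enabled = 1;
set_fs(oldfs);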
89717diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
89718index 55c8c93..9ba7ad6 100644
89719--- a/kernel/futex_compat.c
89720+++ b/kernel/futex_compat.c
89721@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
89722 return 0;
89723 }
89724
89725-static void __user *futex_uaddr(struct robust_list __user *entry,
89726+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
89727 compat_long_t futex_offset)
89728 {
89729 compat_uptr_t base = ptr_to_compat(entry);
89730diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
89731index b358a80..fc25240 100644
89732--- a/kernel/gcov/base.c
89733+++ b/kernel/gcov/base.c
89734@@ -114,11 +114,6 @@ void gcov_enable_events(void)
89735 }
89736
89737 #ifdef CONFIG_MODULES
89738-static inline int within(void *addr, void *start, unsigned long size)
89739-{
89740- return ((addr >= start) && (addr < start + size));
89741-}
89742-
89743 /* Update list and generate events when modules are unloaded. */
89744 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
89745 void *data)
89746@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
89747
89748 /* Remove entries located in module from linked list. */
89749 while ((info = gcov_info_next(info))) {
89750- if (within(info, mod->module_core, mod->core_size)) {
89751+ if (within_module_core_rw((unsigned long)info, mod)) {
89752 gcov_info_unlink(prev, info);
89753 if (gcov_events_enabled)
89754 gcov_event(GCOV_REMOVE, info);
89755diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
89756index 8069237..fe712d0 100644
89757--- a/kernel/irq/manage.c
89758+++ b/kernel/irq/manage.c
89759@@ -871,7 +871,7 @@ static int irq_thread(void *data)
89760
89761 action_ret = handler_fn(desc, action);
89762 if (action_ret == IRQ_HANDLED)
89763- atomic_inc(&desc->threads_handled);
89764+ atomic_inc_unchecked(&desc->threads_handled);
89765
89766 wake_threads_waitq(desc);
89767 }
89768diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
89769index e2514b0..de3dfe0 100644
89770--- a/kernel/irq/spurious.c
89771+++ b/kernel/irq/spurious.c
89772@@ -337,7 +337,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
89773 * count. We just care about the count being
89774 * different than the one we saw before.
89775 */
89776- handled = atomic_read(&desc->threads_handled);
89777+ handled = atomic_read_unchecked(&desc->threads_handled);
89778 handled |= SPURIOUS_DEFERRED;
89779 if (handled != desc->threads_handled_last) {
89780 action_ret = IRQ_HANDLED;
89781diff --git a/kernel/jump_label.c b/kernel/jump_label.c
89782index 9019f15..9a3c42e 100644
89783--- a/kernel/jump_label.c
89784+++ b/kernel/jump_label.c
89785@@ -14,6 +14,7 @@
89786 #include <linux/err.h>
89787 #include <linux/static_key.h>
89788 #include <linux/jump_label_ratelimit.h>
89789+#include <linux/mm.h>
89790
89791 #ifdef HAVE_JUMP_LABEL
89792
89793@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
89794
89795 size = (((unsigned long)stop - (unsigned long)start)
89796 / sizeof(struct jump_entry));
89797+ pax_open_kernel();
89798 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
89799+ pax_close_kernel();
89800 }
89801
89802 static void jump_label_update(struct static_key *key, int enable);
89803@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
89804 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
89805 struct jump_entry *iter;
89806
89807+ pax_open_kernel();
89808 for (iter = iter_start; iter < iter_stop; iter++) {
89809 if (within_module_init(iter->code, mod))
89810 iter->code = 0;
89811 }
89812+ pax_close_kernel();
89813 }
89814
89815 static int
89816diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
89817index 5c5987f..bc502b0 100644
89818--- a/kernel/kallsyms.c
89819+++ b/kernel/kallsyms.c
89820@@ -11,6 +11,9 @@
89821 * Changed the compression method from stem compression to "table lookup"
89822 * compression (see scripts/kallsyms.c for a more complete description)
89823 */
89824+#ifdef CONFIG_GRKERNSEC_HIDESYM
89825+#define __INCLUDED_BY_HIDESYM 1
89826+#endif
89827 #include <linux/kallsyms.h>
89828 #include <linux/module.h>
89829 #include <linux/init.h>
89830@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
89831
89832 static inline int is_kernel_inittext(unsigned long addr)
89833 {
89834+ if (system_state != SYSTEM_BOOTING)
89835+ return 0;
89836+
89837 if (addr >= (unsigned long)_sinittext
89838 && addr <= (unsigned long)_einittext)
89839 return 1;
89840 return 0;
89841 }
89842
89843+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89844+#ifdef CONFIG_MODULES
89845+static inline int is_module_text(unsigned long addr)
89846+{
89847+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
89848+ return 1;
89849+
89850+ addr = ktla_ktva(addr);
89851+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
89852+}
89853+#else
89854+static inline int is_module_text(unsigned long addr)
89855+{
89856+ return 0;
89857+}
89858+#endif
89859+#endif
89860+
89861 static inline int is_kernel_text(unsigned long addr)
89862 {
89863 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
89864@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
89865
89866 static inline int is_kernel(unsigned long addr)
89867 {
89868+
89869+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89870+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
89871+ return 1;
89872+
89873+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
89874+#else
89875 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
89876+#endif
89877+
89878 return 1;
89879 return in_gate_area_no_mm(addr);
89880 }
89881
89882 static int is_ksym_addr(unsigned long addr)
89883 {
89884+
89885+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89886+ if (is_module_text(addr))
89887+ return 0;
89888+#endif
89889+
89890 if (all_var)
89891 return is_kernel(addr);
89892
89893@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
89894
89895 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
89896 {
89897- iter->name[0] = '\0';
89898 iter->nameoff = get_symbol_offset(new_pos);
89899 iter->pos = new_pos;
89900 }
89901@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
89902 {
89903 struct kallsym_iter *iter = m->private;
89904
89905+#ifdef CONFIG_GRKERNSEC_HIDESYM
89906+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
89907+ return 0;
89908+#endif
89909+
89910 /* Some debugging symbols have no name. Ignore them. */
89911 if (!iter->name[0])
89912 return 0;
89913@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
89914 */
89915 type = iter->exported ? toupper(iter->type) :
89916 tolower(iter->type);
89917+
89918 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
89919 type, iter->name, iter->module_name);
89920 } else
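
With GRKERNSEC_HIDESYM, s_show() now emits nothing at all for non-root readers, and the %pK specifier is mainline's weaker equivalent gated by kptr_restrict. A small probe of the observable effect, assuming a Linux host with /proc mounted:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/kallsyms", "r");
        char line[256];
        int i = 0;

        if (!f) {
            perror("fopen");
            return 1;
        }
        /* Under kptr_restrict (or HIDESYM) an unprivileged reader sees
         * zeroed addresses such as "0000000000000000 T do_sys_open",
         * or with HIDESYM no lines at all; run as root to compare. */
        while (i++ < 5 && fgets(line, sizeof(line), f))
            fputs(line, stdout);
        fclose(f);
        return 0;
    }
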
89921diff --git a/kernel/kcmp.c b/kernel/kcmp.c
89922index 0aa69ea..a7fcafb 100644
89923--- a/kernel/kcmp.c
89924+++ b/kernel/kcmp.c
89925@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
89926 struct task_struct *task1, *task2;
89927 int ret;
89928
89929+#ifdef CONFIG_GRKERNSEC
89930+ return -ENOSYS;
89931+#endif
89932+
89933 rcu_read_lock();
89934
89935 /*
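
Under CONFIG_GRKERNSEC the kcmp() syscall is compiled to fail immediately, since comparing kernel objects across processes leaks information. glibc ships no wrapper, so a probe has to go through syscall(2); a hedged sketch (KCMP_FILE comes from <linux/kcmp.h>, and SYS_kcmp needs reasonably recent headers):

    #define _GNU_SOURCE
    #include <errno.h>
    #include <linux/kcmp.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t self = getpid();
        /* compare fd 0 with itself in our own process */
        long r = syscall(SYS_kcmp, self, self, KCMP_FILE, 0, 0);

        if (r == -1 && errno == ENOSYS)
            puts("kcmp disabled (the grsecurity behaviour above)");
        else if (r == 0)
            puts("fd 0 and fd 0 share a struct file, as expected");
        else
            printf("kcmp returned %ld (errno %d)\n", r, errno);
        return 0;
    }
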
89936diff --git a/kernel/kexec.c b/kernel/kexec.c
89937index 9a8a01a..3c35dd6 100644
89938--- a/kernel/kexec.c
89939+++ b/kernel/kexec.c
89940@@ -1349,7 +1349,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
89941 compat_ulong_t, flags)
89942 {
89943 struct compat_kexec_segment in;
89944- struct kexec_segment out, __user *ksegments;
89945+ struct kexec_segment out;
89946+ struct kexec_segment __user *ksegments;
89947 unsigned long i, result;
89948
89949 /* Don't allow clients that don't understand the native
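
The hunk above splits one declarator list into two declarations because sparse's __user address-space annotation attaches to the individual pointer declarator, and mixing a plain object with an annotated pointer in one list invites misreading. The same pitfall in plain C:

    #include <stdio.h>

    int main(void)
    {
        int a, *p = &a;     /* easy to misread: only p is a pointer */

        int b;              /* the hunk's style: one declaration ... */
        int *q = &b;        /* ... per declarator, so any annotation
                               unambiguously belongs to q */

        a = 1;
        b = 2;
        printf("%d %d\n", *p, *q);
        return 0;
    }
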
89950diff --git a/kernel/kmod.c b/kernel/kmod.c
89951index 2777f40..a689506 100644
89952--- a/kernel/kmod.c
89953+++ b/kernel/kmod.c
89954@@ -68,7 +68,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
89955 kfree(info->argv);
89956 }
89957
89958-static int call_modprobe(char *module_name, int wait)
89959+static int call_modprobe(char *module_name, char *module_param, int wait)
89960 {
89961 struct subprocess_info *info;
89962 static char *envp[] = {
89963@@ -78,7 +78,7 @@ static int call_modprobe(char *module_name, int wait)
89964 NULL
89965 };
89966
89967- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
89968+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
89969 if (!argv)
89970 goto out;
89971
89972@@ -90,7 +90,8 @@ static int call_modprobe(char *module_name, int wait)
89973 argv[1] = "-q";
89974 argv[2] = "--";
89975 argv[3] = module_name; /* check free_modprobe_argv() */
89976- argv[4] = NULL;
89977+ argv[4] = module_param;
89978+ argv[5] = NULL;
89979
89980 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
89981 NULL, free_modprobe_argv, NULL);
89982@@ -122,9 +123,8 @@ out:
89983 * If module auto-loading support is disabled then this function
89984 * becomes a no-operation.
89985 */
89986-int __request_module(bool wait, const char *fmt, ...)
89987+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
89988 {
89989- va_list args;
89990 char module_name[MODULE_NAME_LEN];
89991 unsigned int max_modprobes;
89992 int ret;
89993@@ -143,9 +143,7 @@ int __request_module(bool wait, const char *fmt, ...)
89994 if (!modprobe_path[0])
89995 return 0;
89996
89997- va_start(args, fmt);
89998- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
89999- va_end(args);
90000+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
90001 if (ret >= MODULE_NAME_LEN)
90002 return -ENAMETOOLONG;
90003
90004@@ -153,6 +151,20 @@ int __request_module(bool wait, const char *fmt, ...)
90005 if (ret)
90006 return ret;
90007
90008+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90009+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
90010+	/* hack to work around consolekit/udisks stupidity */
90011+ read_lock(&tasklist_lock);
90012+ if (!strcmp(current->comm, "mount") &&
90013+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
90014+ read_unlock(&tasklist_lock);
90015+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
90016+ return -EPERM;
90017+ }
90018+ read_unlock(&tasklist_lock);
90019+ }
90020+#endif
90021+
90022 /* If modprobe needs a service that is in a module, we get a recursive
90023 * loop. Limit the number of running kmod threads to max_threads/2 or
90024 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
90025@@ -181,16 +193,61 @@ int __request_module(bool wait, const char *fmt, ...)
90026
90027 trace_module_request(module_name, wait, _RET_IP_);
90028
90029- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
90030+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
90031
90032 atomic_dec(&kmod_concurrent);
90033 return ret;
90034 }
90035+
90036+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
90037+{
90038+ va_list args;
90039+ int ret;
90040+
90041+ va_start(args, fmt);
90042+ ret = ____request_module(wait, module_param, fmt, args);
90043+ va_end(args);
90044+
90045+ return ret;
90046+}
90047+
90048+int __request_module(bool wait, const char *fmt, ...)
90049+{
90050+ va_list args;
90051+ int ret;
90052+
90053+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90054+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
90055+ char module_param[MODULE_NAME_LEN];
90056+
90057+ memset(module_param, 0, sizeof(module_param));
90058+
90059+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
90060+
90061+ va_start(args, fmt);
90062+ ret = ____request_module(wait, module_param, fmt, args);
90063+ va_end(args);
90064+
90065+ return ret;
90066+ }
90067+#endif
90068+
90069+ va_start(args, fmt);
90070+ ret = ____request_module(wait, NULL, fmt, args);
90071+ va_end(args);
90072+
90073+ return ret;
90074+}
90075+
90076 EXPORT_SYMBOL(__request_module);
90077 #endif /* CONFIG_MODULES */
90078
90079 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
90080 {
90081+#ifdef CONFIG_GRKERNSEC
90082+ kfree(info->path);
90083+ info->path = info->origpath;
90084+#endif
90085 if (info->cleanup)
90086 (*info->cleanup)(info);
90087 kfree(info);
90088@@ -232,6 +289,21 @@ static int ____call_usermodehelper(void *data)
90089 */
90090 set_user_nice(current, 0);
90091
90092+#ifdef CONFIG_GRKERNSEC
90093+	/* this is race-free as far as userland is concerned: we copied
90094+ out the path to be used prior to this point and are now operating
90095+ on that copy
90096+ */
90097+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
90098+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
90099+ strncmp(sub_info->path, "/usr/libexec/", 13) && strncmp(sub_info->path, "/usr/bin/", 9) &&
90100+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
90101+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of permitted system paths\n", sub_info->path);
90102+ retval = -EPERM;
90103+ goto out;
90104+ }
90105+#endif
90106+
90107 retval = -ENOMEM;
90108 new = prepare_kernel_cred(current);
90109 if (!new)
90110@@ -254,8 +326,8 @@ static int ____call_usermodehelper(void *data)
90111 commit_creds(new);
90112
90113 retval = do_execve(getname_kernel(sub_info->path),
90114- (const char __user *const __user *)sub_info->argv,
90115- (const char __user *const __user *)sub_info->envp);
90116+ (const char __user *const __force_user *)sub_info->argv,
90117+ (const char __user *const __force_user *)sub_info->envp);
90118 out:
90119 sub_info->retval = retval;
90120 	/* wait_for_helper() will call umh_complete if UMH_WAIT_PROC. */
90121@@ -288,7 +360,7 @@ static int wait_for_helper(void *data)
90122 *
90123 * Thus the __user pointer cast is valid here.
90124 */
90125- sys_wait4(pid, (int __user *)&ret, 0, NULL);
90126+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
90127
90128 /*
90129 * If ret is 0, either ____call_usermodehelper failed and the
90130@@ -510,7 +582,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
90131 goto out;
90132
90133 INIT_WORK(&sub_info->work, __call_usermodehelper);
90134+#ifdef CONFIG_GRKERNSEC
90135+ sub_info->origpath = path;
90136+ sub_info->path = kstrdup(path, gfp_mask);
90137+#else
90138 sub_info->path = path;
90139+#endif
90140 sub_info->argv = argv;
90141 sub_info->envp = envp;
90142
90143@@ -612,7 +689,7 @@ EXPORT_SYMBOL(call_usermodehelper);
90144 static int proc_cap_handler(struct ctl_table *table, int write,
90145 void __user *buffer, size_t *lenp, loff_t *ppos)
90146 {
90147- struct ctl_table t;
90148+ ctl_table_no_const t;
90149 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
90150 kernel_cap_t new_cap;
90151 int err, i;
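
The path check added to ____call_usermodehelper() confines usermode helpers to a fixed set of system directories, whitelists one known binary, and rejects any path containing "..". A userspace re-implementation of just that predicate (helper_path_allowed is an illustrative name):

    #include <stdio.h>
    #include <string.h>

    /* Sketch of the grsec helper policy above: a few trusted prefixes,
     * one exact whitelisted binary, and ".." rejected anywhere. */
    static int helper_path_allowed(const char *path)
    {
        static const char *prefixes[] = {
            "/sbin/", "/usr/lib/", "/lib/", "/lib64/",
            "/usr/libexec/", "/usr/bin/",
        };
        size_t i;

        if (strstr(path, ".."))
            return 0;
        if (!strcmp(path, "/usr/share/apport/apport"))
            return 1;
        for (i = 0; i < sizeof(prefixes) / sizeof(prefixes[0]); i++)
            if (!strncmp(path, prefixes[i], strlen(prefixes[i])))
                return 1;
        return 0;
    }

    int main(void)
    {
        const char *tests[] = {
            "/sbin/modprobe", "/tmp/evil", "/sbin/../tmp/evil",
        };
        for (int i = 0; i < 3; i++)
            printf("%-20s %s\n", tests[i],
                   helper_path_allowed(tests[i]) ? "allowed" : "denied");
        return 0;
    }
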
90152diff --git a/kernel/kprobes.c b/kernel/kprobes.c
90153index ee61992..62142b1 100644
90154--- a/kernel/kprobes.c
90155+++ b/kernel/kprobes.c
90156@@ -31,6 +31,9 @@
90157 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
90158 * <prasanna@in.ibm.com> added function-return probes.
90159 */
90160+#ifdef CONFIG_GRKERNSEC_HIDESYM
90161+#define __INCLUDED_BY_HIDESYM 1
90162+#endif
90163 #include <linux/kprobes.h>
90164 #include <linux/hash.h>
90165 #include <linux/init.h>
90166@@ -122,12 +125,12 @@ enum kprobe_slot_state {
90167
90168 static void *alloc_insn_page(void)
90169 {
90170- return module_alloc(PAGE_SIZE);
90171+ return module_alloc_exec(PAGE_SIZE);
90172 }
90173
90174 static void free_insn_page(void *page)
90175 {
90176- module_memfree(page);
90177+ module_memfree_exec(page);
90178 }
90179
90180 struct kprobe_insn_cache kprobe_insn_slots = {
90181@@ -2191,11 +2194,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
90182 kprobe_type = "k";
90183
90184 if (sym)
90185- seq_printf(pi, "%p %s %s+0x%x %s ",
90186+ seq_printf(pi, "%pK %s %s+0x%x %s ",
90187 p->addr, kprobe_type, sym, offset,
90188 (modname ? modname : " "));
90189 else
90190- seq_printf(pi, "%p %s %p ",
90191+ seq_printf(pi, "%pK %s %pK ",
90192 p->addr, kprobe_type, p->addr);
90193
90194 if (!pp)
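
alloc_insn_page()/free_insn_page() switch kprobe instruction slots to the RX allocator so a mapping is never writable and executable at the same time. A hedged userspace analogue of that W^X discipline, x86-64 only (the emitted bytes are mov eax, 42; ret):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        unsigned char *slot = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        static const unsigned char code[] = {
            0xb8, 0x2a, 0x00, 0x00, 0x00,   /* mov eax, 42 */
            0xc3                            /* ret */
        };

        if (slot == MAP_FAILED)
            return 1;
        memcpy(slot, code, sizeof(code));

        /* W^X: drop write before the page ever becomes executable */
        if (mprotect(slot, pagesz, PROT_READ | PROT_EXEC))
            return 1;

        /* casting data to function pointer is POSIX practice, not ISO C */
        int (*fn)(void) = (int (*)(void))slot;
        printf("%d\n", fn());   /* prints 42 */
        return 0;
    }
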
90195diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
90196index 6683cce..daf8999 100644
90197--- a/kernel/ksysfs.c
90198+++ b/kernel/ksysfs.c
90199@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
90200 {
90201 if (count+1 > UEVENT_HELPER_PATH_LEN)
90202 return -ENOENT;
90203+ if (!capable(CAP_SYS_ADMIN))
90204+ return -EPERM;
90205 memcpy(uevent_helper, buf, count);
90206 uevent_helper[count] = '\0';
90207 if (count && uevent_helper[count-1] == '\n')
90208@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
90209 return count;
90210 }
90211
90212-static struct bin_attribute notes_attr = {
90213+static bin_attribute_no_const notes_attr __read_only = {
90214 .attr = {
90215 .name = "notes",
90216 .mode = S_IRUGO,
90217diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
90218index 88d0d44..e9ce0ee 100644
90219--- a/kernel/locking/lockdep.c
90220+++ b/kernel/locking/lockdep.c
90221@@ -599,6 +599,10 @@ static int static_obj(void *obj)
90222 end = (unsigned long) &_end,
90223 addr = (unsigned long) obj;
90224
90225+#ifdef CONFIG_PAX_KERNEXEC
90226+ start = ktla_ktva(start);
90227+#endif
90228+
90229 /*
90230 * static variable?
90231 */
90232@@ -740,6 +744,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
90233 if (!static_obj(lock->key)) {
90234 debug_locks_off();
90235 printk("INFO: trying to register non-static key.\n");
90236+ printk("lock:%pS key:%pS.\n", lock, lock->key);
90237 printk("the code is fine but needs lockdep annotation.\n");
90238 printk("turning off the locking correctness validator.\n");
90239 dump_stack();
90240@@ -3081,7 +3086,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
90241 if (!class)
90242 return 0;
90243 }
90244- atomic_inc((atomic_t *)&class->ops);
90245+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
90246 if (very_verbose(class)) {
90247 printk("\nacquire class [%p] %s", class->key, class->name);
90248 if (class->name_version > 1)
90249diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
90250index ef43ac4..2720dfa 100644
90251--- a/kernel/locking/lockdep_proc.c
90252+++ b/kernel/locking/lockdep_proc.c
90253@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
90254 return 0;
90255 }
90256
90257- seq_printf(m, "%p", class->key);
90258+ seq_printf(m, "%pK", class->key);
90259 #ifdef CONFIG_DEBUG_LOCKDEP
90260 seq_printf(m, " OPS:%8ld", class->ops);
90261 #endif
90262@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
90263
90264 list_for_each_entry(entry, &class->locks_after, entry) {
90265 if (entry->distance == 1) {
90266- seq_printf(m, " -> [%p] ", entry->class->key);
90267+ seq_printf(m, " -> [%pK] ", entry->class->key);
90268 print_name(m, entry->class);
90269 seq_puts(m, "\n");
90270 }
90271@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
90272 if (!class->key)
90273 continue;
90274
90275- seq_printf(m, "[%p] ", class->key);
90276+ seq_printf(m, "[%pK] ", class->key);
90277 print_name(m, class);
90278 seq_puts(m, "\n");
90279 }
90280@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
90281 if (!i)
90282 seq_line(m, '-', 40-namelen, namelen);
90283
90284- snprintf(ip, sizeof(ip), "[<%p>]",
90285+ snprintf(ip, sizeof(ip), "[<%pK>]",
90286 (void *)class->contention_point[i]);
90287 seq_printf(m, "%40s %14lu %29s %pS\n",
90288 name, stats->contention_point[i],
90289@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
90290 if (!i)
90291 seq_line(m, '-', 40-namelen, namelen);
90292
90293- snprintf(ip, sizeof(ip), "[<%p>]",
90294+ snprintf(ip, sizeof(ip), "[<%pK>]",
90295 (void *)class->contending_point[i]);
90296 seq_printf(m, "%40s %14lu %29s %pS\n",
90297 name, stats->contending_point[i],
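
The %p to %pK conversions above redact lock-class keys from unprivileged readers of the lockdep /proc files. A toy sketch of the same gating, assuming redaction by euid rather than the kernel's CAP_SYSLOG/kptr_restrict logic:

    #include <stdio.h>
    #include <unistd.h>

    /* %pK-style redaction: privileged viewers get the real pointer,
     * everyone else gets a zeroed placeholder. */
    static void print_key(const void *key)
    {
        if (geteuid() == 0)
            printf("[%p]\n", key);
        else
            printf("[0x0000000000000000]\n");
    }

    int main(void)
    {
        int lock_class_key;

        print_key(&lock_class_key);
        return 0;
    }
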
90298diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
90299index 9887a90..0cd2b1d 100644
90300--- a/kernel/locking/mcs_spinlock.c
90301+++ b/kernel/locking/mcs_spinlock.c
90302@@ -100,7 +100,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
90303
90304 prev = decode_cpu(old);
90305 node->prev = prev;
90306- ACCESS_ONCE(prev->next) = node;
90307+ ACCESS_ONCE_RW(prev->next) = node;
90308
90309 /*
90310 * Normally @prev is untouchable after the above store; because at that
90311@@ -172,8 +172,8 @@ unqueue:
90312 * it will wait in Step-A.
90313 */
90314
90315- ACCESS_ONCE(next->prev) = prev;
90316- ACCESS_ONCE(prev->next) = next;
90317+ ACCESS_ONCE_RW(next->prev) = prev;
90318+ ACCESS_ONCE_RW(prev->next) = next;
90319
90320 return false;
90321 }
90322@@ -195,13 +195,13 @@ void osq_unlock(struct optimistic_spin_queue *lock)
90323 node = this_cpu_ptr(&osq_node);
90324 next = xchg(&node->next, NULL);
90325 if (next) {
90326- ACCESS_ONCE(next->locked) = 1;
90327+ ACCESS_ONCE_RW(next->locked) = 1;
90328 return;
90329 }
90330
90331 next = osq_wait_next(lock, node, NULL);
90332 if (next)
90333- ACCESS_ONCE(next->locked) = 1;
90334+ ACCESS_ONCE_RW(next->locked) = 1;
90335 }
90336
90337 #endif
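
ACCESS_ONCE_RW() exists because PaX constifies many of the structures these fields live in: the plain macro is redefined with a const-qualified volatile cast so stray writes fail to compile, and the _RW variant keeps the classic writable cast. A hedged reconstruction in GNU C (the exact PaX definitions may differ):

    #include <stdio.h>

    /* reconstruction, not copied from the patch */
    #define ACCESS_ONCE(x)    (*(volatile const __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

    int main(void)
    {
        int locked = 0;

        ACCESS_ONCE_RW(locked) = 1;          /* compiles: writable cast */
        printf("%d\n", ACCESS_ONCE(locked)); /* reads work through either */
        /* ACCESS_ONCE(locked) = 2;  would be a compile error: const */
        return 0;
    }
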
90338diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
90339index 4d60986..5d351c1 100644
90340--- a/kernel/locking/mcs_spinlock.h
90341+++ b/kernel/locking/mcs_spinlock.h
90342@@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
90343 */
90344 return;
90345 }
90346- ACCESS_ONCE(prev->next) = node;
90347+ ACCESS_ONCE_RW(prev->next) = node;
90348
90349 /* Wait until the lock holder passes the lock down. */
90350 arch_mcs_spin_lock_contended(&node->locked);
90351diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
90352index 3ef3736..9c951fa 100644
90353--- a/kernel/locking/mutex-debug.c
90354+++ b/kernel/locking/mutex-debug.c
90355@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
90356 }
90357
90358 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90359- struct thread_info *ti)
90360+ struct task_struct *task)
90361 {
90362 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
90363
90364 /* Mark the current thread as blocked on the lock: */
90365- ti->task->blocked_on = waiter;
90366+ task->blocked_on = waiter;
90367 }
90368
90369 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90370- struct thread_info *ti)
90371+ struct task_struct *task)
90372 {
90373 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
90374- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
90375- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
90376- ti->task->blocked_on = NULL;
90377+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
90378+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
90379+ task->blocked_on = NULL;
90380
90381 list_del_init(&waiter->list);
90382 waiter->task = NULL;
90383diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
90384index 0799fd3..d06ae3b 100644
90385--- a/kernel/locking/mutex-debug.h
90386+++ b/kernel/locking/mutex-debug.h
90387@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
90388 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
90389 extern void debug_mutex_add_waiter(struct mutex *lock,
90390 struct mutex_waiter *waiter,
90391- struct thread_info *ti);
90392+ struct task_struct *task);
90393 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90394- struct thread_info *ti);
90395+ struct task_struct *task);
90396 extern void debug_mutex_unlock(struct mutex *lock);
90397 extern void debug_mutex_init(struct mutex *lock, const char *name,
90398 struct lock_class_key *key);
90399diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
90400index 4541951..39fe90a 100644
90401--- a/kernel/locking/mutex.c
90402+++ b/kernel/locking/mutex.c
90403@@ -524,7 +524,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
90404 goto skip_wait;
90405
90406 debug_mutex_lock_common(lock, &waiter);
90407- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
90408+ debug_mutex_add_waiter(lock, &waiter, task);
90409
90410 /* add waiting tasks to the end of the waitqueue (FIFO): */
90411 list_add_tail(&waiter.list, &lock->wait_list);
90412@@ -569,7 +569,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
90413 schedule_preempt_disabled();
90414 spin_lock_mutex(&lock->wait_lock, flags);
90415 }
90416- mutex_remove_waiter(lock, &waiter, current_thread_info());
90417+ mutex_remove_waiter(lock, &waiter, task);
90418 /* set it to 0 if there are no waiters left: */
90419 if (likely(list_empty(&lock->wait_list)))
90420 atomic_set(&lock->count, 0);
90421@@ -606,7 +606,7 @@ skip_wait:
90422 return 0;
90423
90424 err:
90425- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
90426+ mutex_remove_waiter(lock, &waiter, task);
90427 spin_unlock_mutex(&lock->wait_lock, flags);
90428 debug_mutex_free_waiter(&waiter);
90429 mutex_release(&lock->dep_map, 1, ip);
90430diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
90431index 1d96dd0..994ff19 100644
90432--- a/kernel/locking/rtmutex-tester.c
90433+++ b/kernel/locking/rtmutex-tester.c
90434@@ -22,7 +22,7 @@
90435 #define MAX_RT_TEST_MUTEXES 8
90436
90437 static spinlock_t rttest_lock;
90438-static atomic_t rttest_event;
90439+static atomic_unchecked_t rttest_event;
90440
90441 struct test_thread_data {
90442 int opcode;
90443@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90444
90445 case RTTEST_LOCKCONT:
90446 td->mutexes[td->opdata] = 1;
90447- td->event = atomic_add_return(1, &rttest_event);
90448+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90449 return 0;
90450
90451 case RTTEST_RESET:
90452@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90453 return 0;
90454
90455 case RTTEST_RESETEVENT:
90456- atomic_set(&rttest_event, 0);
90457+ atomic_set_unchecked(&rttest_event, 0);
90458 return 0;
90459
90460 default:
90461@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90462 return ret;
90463
90464 td->mutexes[id] = 1;
90465- td->event = atomic_add_return(1, &rttest_event);
90466+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90467 rt_mutex_lock(&mutexes[id]);
90468- td->event = atomic_add_return(1, &rttest_event);
90469+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90470 td->mutexes[id] = 4;
90471 return 0;
90472
90473@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90474 return ret;
90475
90476 td->mutexes[id] = 1;
90477- td->event = atomic_add_return(1, &rttest_event);
90478+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90479 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
90480- td->event = atomic_add_return(1, &rttest_event);
90481+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90482 td->mutexes[id] = ret ? 0 : 4;
90483 return ret ? -EINTR : 0;
90484
90485@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90486 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
90487 return ret;
90488
90489- td->event = atomic_add_return(1, &rttest_event);
90490+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90491 rt_mutex_unlock(&mutexes[id]);
90492- td->event = atomic_add_return(1, &rttest_event);
90493+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90494 td->mutexes[id] = 0;
90495 return 0;
90496
90497@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90498 break;
90499
90500 td->mutexes[dat] = 2;
90501- td->event = atomic_add_return(1, &rttest_event);
90502+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90503 break;
90504
90505 default:
90506@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90507 return;
90508
90509 td->mutexes[dat] = 3;
90510- td->event = atomic_add_return(1, &rttest_event);
90511+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90512 break;
90513
90514 case RTTEST_LOCKNOWAIT:
90515@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90516 return;
90517
90518 td->mutexes[dat] = 1;
90519- td->event = atomic_add_return(1, &rttest_event);
90520+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90521 return;
90522
90523 default:
90524diff --git a/kernel/module.c b/kernel/module.c
90525index d856e96..b82225c 100644
90526--- a/kernel/module.c
90527+++ b/kernel/module.c
90528@@ -59,6 +59,7 @@
90529 #include <linux/jump_label.h>
90530 #include <linux/pfn.h>
90531 #include <linux/bsearch.h>
90532+#include <linux/grsecurity.h>
90533 #include <uapi/linux/module.h>
90534 #include "module-internal.h"
90535
90536@@ -155,7 +156,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
90537
90538 /* Bounds of module allocation, for speeding __module_address.
90539 * Protected by module_mutex. */
90540-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
90541+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
90542+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
90543
90544 int register_module_notifier(struct notifier_block *nb)
90545 {
90546@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90547 return true;
90548
90549 list_for_each_entry_rcu(mod, &modules, list) {
90550- struct symsearch arr[] = {
90551+ struct symsearch modarr[] = {
90552 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
90553 NOT_GPL_ONLY, false },
90554 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
90555@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90556 if (mod->state == MODULE_STATE_UNFORMED)
90557 continue;
90558
90559- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
90560+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
90561 return true;
90562 }
90563 return false;
90564@@ -487,7 +489,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
90565 if (!pcpusec->sh_size)
90566 return 0;
90567
90568- if (align > PAGE_SIZE) {
90569+ if (align-1 >= PAGE_SIZE) {
90570 pr_warn("%s: per-cpu alignment %li > %li\n",
90571 mod->name, align, PAGE_SIZE);
90572 align = PAGE_SIZE;
90573@@ -1053,7 +1055,7 @@ struct module_attribute module_uevent =
90574 static ssize_t show_coresize(struct module_attribute *mattr,
90575 struct module_kobject *mk, char *buffer)
90576 {
90577- return sprintf(buffer, "%u\n", mk->mod->core_size);
90578+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
90579 }
90580
90581 static struct module_attribute modinfo_coresize =
90582@@ -1062,7 +1064,7 @@ static struct module_attribute modinfo_coresize =
90583 static ssize_t show_initsize(struct module_attribute *mattr,
90584 struct module_kobject *mk, char *buffer)
90585 {
90586- return sprintf(buffer, "%u\n", mk->mod->init_size);
90587+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
90588 }
90589
90590 static struct module_attribute modinfo_initsize =
90591@@ -1154,12 +1156,29 @@ static int check_version(Elf_Shdr *sechdrs,
90592 goto bad_version;
90593 }
90594
90595+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90596+ /*
90597+	 * avoid potentially printing gibberish on attempted load
90598+ * of a module randomized with a different seed
90599+ */
90600+ pr_warn("no symbol version for %s\n", symname);
90601+#else
90602 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
90603+#endif
90604 return 0;
90605
90606 bad_version:
90607+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90608+ /*
90609+	 * avoid potentially printing gibberish on attempted load
90610+ * of a module randomized with a different seed
90611+ */
90612+ pr_warn("attempted module disagrees about version of symbol %s\n",
90613+ symname);
90614+#else
90615 pr_warn("%s: disagrees about version of symbol %s\n",
90616 mod->name, symname);
90617+#endif
90618 return 0;
90619 }
90620
90621@@ -1275,7 +1294,7 @@ resolve_symbol_wait(struct module *mod,
90622 */
90623 #ifdef CONFIG_SYSFS
90624
90625-#ifdef CONFIG_KALLSYMS
90626+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
90627 static inline bool sect_empty(const Elf_Shdr *sect)
90628 {
90629 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
90630@@ -1413,7 +1432,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
90631 {
90632 unsigned int notes, loaded, i;
90633 struct module_notes_attrs *notes_attrs;
90634- struct bin_attribute *nattr;
90635+ bin_attribute_no_const *nattr;
90636
90637 /* failed to create section attributes, so can't create notes */
90638 if (!mod->sect_attrs)
90639@@ -1525,7 +1544,7 @@ static void del_usage_links(struct module *mod)
90640 static int module_add_modinfo_attrs(struct module *mod)
90641 {
90642 struct module_attribute *attr;
90643- struct module_attribute *temp_attr;
90644+ module_attribute_no_const *temp_attr;
90645 int error = 0;
90646 int i;
90647
90648@@ -1735,21 +1754,21 @@ static void set_section_ro_nx(void *base,
90649
90650 static void unset_module_core_ro_nx(struct module *mod)
90651 {
90652- set_page_attributes(mod->module_core + mod->core_text_size,
90653- mod->module_core + mod->core_size,
90654+ set_page_attributes(mod->module_core_rw,
90655+ mod->module_core_rw + mod->core_size_rw,
90656 set_memory_x);
90657- set_page_attributes(mod->module_core,
90658- mod->module_core + mod->core_ro_size,
90659+ set_page_attributes(mod->module_core_rx,
90660+ mod->module_core_rx + mod->core_size_rx,
90661 set_memory_rw);
90662 }
90663
90664 static void unset_module_init_ro_nx(struct module *mod)
90665 {
90666- set_page_attributes(mod->module_init + mod->init_text_size,
90667- mod->module_init + mod->init_size,
90668+ set_page_attributes(mod->module_init_rw,
90669+ mod->module_init_rw + mod->init_size_rw,
90670 set_memory_x);
90671- set_page_attributes(mod->module_init,
90672- mod->module_init + mod->init_ro_size,
90673+ set_page_attributes(mod->module_init_rx,
90674+ mod->module_init_rx + mod->init_size_rx,
90675 set_memory_rw);
90676 }
90677
90678@@ -1762,14 +1781,14 @@ void set_all_modules_text_rw(void)
90679 list_for_each_entry_rcu(mod, &modules, list) {
90680 if (mod->state == MODULE_STATE_UNFORMED)
90681 continue;
90682- if ((mod->module_core) && (mod->core_text_size)) {
90683- set_page_attributes(mod->module_core,
90684- mod->module_core + mod->core_text_size,
90685+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
90686+ set_page_attributes(mod->module_core_rx,
90687+ mod->module_core_rx + mod->core_size_rx,
90688 set_memory_rw);
90689 }
90690- if ((mod->module_init) && (mod->init_text_size)) {
90691- set_page_attributes(mod->module_init,
90692- mod->module_init + mod->init_text_size,
90693+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
90694+ set_page_attributes(mod->module_init_rx,
90695+ mod->module_init_rx + mod->init_size_rx,
90696 set_memory_rw);
90697 }
90698 }
90699@@ -1785,14 +1804,14 @@ void set_all_modules_text_ro(void)
90700 list_for_each_entry_rcu(mod, &modules, list) {
90701 if (mod->state == MODULE_STATE_UNFORMED)
90702 continue;
90703- if ((mod->module_core) && (mod->core_text_size)) {
90704- set_page_attributes(mod->module_core,
90705- mod->module_core + mod->core_text_size,
90706+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
90707+ set_page_attributes(mod->module_core_rx,
90708+ mod->module_core_rx + mod->core_size_rx,
90709 set_memory_ro);
90710 }
90711- if ((mod->module_init) && (mod->init_text_size)) {
90712- set_page_attributes(mod->module_init,
90713- mod->module_init + mod->init_text_size,
90714+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
90715+ set_page_attributes(mod->module_init_rx,
90716+ mod->module_init_rx + mod->init_size_rx,
90717 set_memory_ro);
90718 }
90719 }
90720@@ -1801,7 +1820,15 @@ void set_all_modules_text_ro(void)
90721 #else
90722 static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
90723 static void unset_module_core_ro_nx(struct module *mod) { }
90724-static void unset_module_init_ro_nx(struct module *mod) { }
90725+static void unset_module_init_ro_nx(struct module *mod)
90726+{
90727+
90728+#ifdef CONFIG_PAX_KERNEXEC
90729+ set_memory_nx((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
90730+ set_memory_rw((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
90731+#endif
90732+
90733+}
90734 #endif
90735
90736 void __weak module_memfree(void *module_region)
90737@@ -1855,16 +1882,19 @@ static void free_module(struct module *mod)
90738 /* This may be NULL, but that's OK */
90739 unset_module_init_ro_nx(mod);
90740 module_arch_freeing_init(mod);
90741- module_memfree(mod->module_init);
90742+ module_memfree(mod->module_init_rw);
90743+ module_memfree_exec(mod->module_init_rx);
90744 kfree(mod->args);
90745 percpu_modfree(mod);
90746
90747 /* Free lock-classes: */
90748- lockdep_free_key_range(mod->module_core, mod->core_size);
90749+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
90750+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
90751
90752 /* Finally, free the core (containing the module structure) */
90753 unset_module_core_ro_nx(mod);
90754- module_memfree(mod->module_core);
90755+ module_memfree_exec(mod->module_core_rx);
90756+ module_memfree(mod->module_core_rw);
90757
90758 #ifdef CONFIG_MPU
90759 update_protections(current->mm);
90760@@ -1933,9 +1963,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90761 int ret = 0;
90762 const struct kernel_symbol *ksym;
90763
90764+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90765+ int is_fs_load = 0;
90766+ int register_filesystem_found = 0;
90767+ char *p;
90768+
90769+ p = strstr(mod->args, "grsec_modharden_fs");
90770+ if (p) {
90771+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
90772+ /* copy \0 as well */
90773+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
90774+ is_fs_load = 1;
90775+ }
90776+#endif
90777+
90778 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
90779 const char *name = info->strtab + sym[i].st_name;
90780
90781+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90782+ /* it's a real shame this will never get ripped and copied
90783+ upstream! ;(
90784+ */
90785+ if (is_fs_load && !strcmp(name, "register_filesystem"))
90786+ register_filesystem_found = 1;
90787+#endif
90788+
90789 switch (sym[i].st_shndx) {
90790 case SHN_COMMON:
90791 /* Ignore common symbols */
90792@@ -1960,7 +2012,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90793 ksym = resolve_symbol_wait(mod, info, name);
90794 /* Ok if resolved. */
90795 if (ksym && !IS_ERR(ksym)) {
90796+ pax_open_kernel();
90797 sym[i].st_value = ksym->value;
90798+ pax_close_kernel();
90799 break;
90800 }
90801
90802@@ -1979,11 +2033,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90803 secbase = (unsigned long)mod_percpu(mod);
90804 else
90805 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
90806+ pax_open_kernel();
90807 sym[i].st_value += secbase;
90808+ pax_close_kernel();
90809 break;
90810 }
90811 }
90812
90813+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90814+ if (is_fs_load && !register_filesystem_found) {
90815+	if (is_fs_load && !register_filesystem_found) {
90816+		printk(KERN_ALERT "grsec: denied attempt to load non-fs module %.64s through mount\n", mod->name);
90816+ ret = -EPERM;
90817+ }
90818+#endif
90819+
90820 return ret;
90821 }
90822
90823@@ -2067,22 +2130,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
90824 || s->sh_entsize != ~0UL
90825 || strstarts(sname, ".init"))
90826 continue;
90827- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
90828+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
90829+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
90830+ else
90831+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
90832 pr_debug("\t%s\n", sname);
90833 }
90834- switch (m) {
90835- case 0: /* executable */
90836- mod->core_size = debug_align(mod->core_size);
90837- mod->core_text_size = mod->core_size;
90838- break;
90839- case 1: /* RO: text and ro-data */
90840- mod->core_size = debug_align(mod->core_size);
90841- mod->core_ro_size = mod->core_size;
90842- break;
90843- case 3: /* whole core */
90844- mod->core_size = debug_align(mod->core_size);
90845- break;
90846- }
90847 }
90848
90849 pr_debug("Init section allocation order:\n");
90850@@ -2096,23 +2149,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
90851 || s->sh_entsize != ~0UL
90852 || !strstarts(sname, ".init"))
90853 continue;
90854- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
90855- | INIT_OFFSET_MASK);
90856+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
90857+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
90858+ else
90859+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
90860+ s->sh_entsize |= INIT_OFFSET_MASK;
90861 pr_debug("\t%s\n", sname);
90862 }
90863- switch (m) {
90864- case 0: /* executable */
90865- mod->init_size = debug_align(mod->init_size);
90866- mod->init_text_size = mod->init_size;
90867- break;
90868- case 1: /* RO: text and ro-data */
90869- mod->init_size = debug_align(mod->init_size);
90870- mod->init_ro_size = mod->init_size;
90871- break;
90872- case 3: /* whole init */
90873- mod->init_size = debug_align(mod->init_size);
90874- break;
90875- }
90876 }
90877 }
90878
90879@@ -2285,7 +2328,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
90880
90881 /* Put symbol section at end of init part of module. */
90882 symsect->sh_flags |= SHF_ALLOC;
90883- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
90884+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
90885 info->index.sym) | INIT_OFFSET_MASK;
90886 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
90887
90888@@ -2302,13 +2345,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
90889 }
90890
90891 /* Append room for core symbols at end of core part. */
90892- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
90893- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
90894- mod->core_size += strtab_size;
90895+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
90896+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
90897+ mod->core_size_rx += strtab_size;
90898
90899 /* Put string table section at end of init part of module. */
90900 strsect->sh_flags |= SHF_ALLOC;
90901- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
90902+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
90903 info->index.str) | INIT_OFFSET_MASK;
90904 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
90905 }
90906@@ -2326,12 +2369,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
90907 /* Make sure we get permanent strtab: don't use info->strtab. */
90908 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
90909
90910+ pax_open_kernel();
90911+
90912 /* Set types up while we still have access to sections. */
90913 for (i = 0; i < mod->num_symtab; i++)
90914 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
90915
90916- mod->core_symtab = dst = mod->module_core + info->symoffs;
90917- mod->core_strtab = s = mod->module_core + info->stroffs;
90918+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
90919+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
90920 src = mod->symtab;
90921 for (ndst = i = 0; i < mod->num_symtab; i++) {
90922 if (i == 0 ||
90923@@ -2343,6 +2388,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
90924 }
90925 }
90926 mod->core_num_syms = ndst;
90927+
90928+ pax_close_kernel();
90929 }
90930 #else
90931 static inline void layout_symtab(struct module *mod, struct load_info *info)
90932@@ -2376,17 +2423,33 @@ void * __weak module_alloc(unsigned long size)
90933 return vmalloc_exec(size);
90934 }
90935
90936-static void *module_alloc_update_bounds(unsigned long size)
90937+static void *module_alloc_update_bounds_rw(unsigned long size)
90938 {
90939 void *ret = module_alloc(size);
90940
90941 if (ret) {
90942 mutex_lock(&module_mutex);
90943 /* Update module bounds. */
90944- if ((unsigned long)ret < module_addr_min)
90945- module_addr_min = (unsigned long)ret;
90946- if ((unsigned long)ret + size > module_addr_max)
90947- module_addr_max = (unsigned long)ret + size;
90948+ if ((unsigned long)ret < module_addr_min_rw)
90949+ module_addr_min_rw = (unsigned long)ret;
90950+ if ((unsigned long)ret + size > module_addr_max_rw)
90951+ module_addr_max_rw = (unsigned long)ret + size;
90952+ mutex_unlock(&module_mutex);
90953+ }
90954+ return ret;
90955+}
90956+
90957+static void *module_alloc_update_bounds_rx(unsigned long size)
90958+{
90959+ void *ret = module_alloc_exec(size);
90960+
90961+ if (ret) {
90962+ mutex_lock(&module_mutex);
90963+ /* Update module bounds. */
90964+ if ((unsigned long)ret < module_addr_min_rx)
90965+ module_addr_min_rx = (unsigned long)ret;
90966+ if ((unsigned long)ret + size > module_addr_max_rx)
90967+ module_addr_max_rx = (unsigned long)ret + size;
90968 mutex_unlock(&module_mutex);
90969 }
90970 return ret;
90971@@ -2640,7 +2703,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
90972 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
90973
90974 if (info->index.sym == 0) {
90975+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90976+ /*
90977+	 * avoid potentially printing gibberish on attempted load
90978+ * of a module randomized with a different seed
90979+ */
90980+ pr_warn("module has no symbols (stripped?)\n");
90981+#else
90982 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
90983+#endif
90984 return ERR_PTR(-ENOEXEC);
90985 }
90986
90987@@ -2656,8 +2727,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
90988 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
90989 {
90990 const char *modmagic = get_modinfo(info, "vermagic");
90991+ const char *license = get_modinfo(info, "license");
90992 int err;
90993
90994+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
90995+ if (!license || !license_is_gpl_compatible(license))
90996+ return -ENOEXEC;
90997+#endif
90998+
90999 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
91000 modmagic = NULL;
91001
91002@@ -2682,7 +2759,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
91003 }
91004
91005 /* Set up license info based on the info section */
91006- set_license(mod, get_modinfo(info, "license"));
91007+ set_license(mod, license);
91008
91009 return 0;
91010 }
91011@@ -2776,7 +2853,7 @@ static int move_module(struct module *mod, struct load_info *info)
91012 void *ptr;
91013
91014 /* Do the allocs. */
91015- ptr = module_alloc_update_bounds(mod->core_size);
91016+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
91017 /*
91018 * The pointer to this block is stored in the module structure
91019 * which is inside the block. Just mark it as not being a
91020@@ -2786,11 +2863,11 @@ static int move_module(struct module *mod, struct load_info *info)
91021 if (!ptr)
91022 return -ENOMEM;
91023
91024- memset(ptr, 0, mod->core_size);
91025- mod->module_core = ptr;
91026+ memset(ptr, 0, mod->core_size_rw);
91027+ mod->module_core_rw = ptr;
91028
91029- if (mod->init_size) {
91030- ptr = module_alloc_update_bounds(mod->init_size);
91031+ if (mod->init_size_rw) {
91032+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
91033 /*
91034 * The pointer to this block is stored in the module structure
91035 * which is inside the block. This block doesn't need to be
91036@@ -2799,13 +2876,45 @@ static int move_module(struct module *mod, struct load_info *info)
91037 */
91038 kmemleak_ignore(ptr);
91039 if (!ptr) {
91040- module_memfree(mod->module_core);
91041+ module_memfree(mod->module_core_rw);
91042 return -ENOMEM;
91043 }
91044- memset(ptr, 0, mod->init_size);
91045- mod->module_init = ptr;
91046+ memset(ptr, 0, mod->init_size_rw);
91047+ mod->module_init_rw = ptr;
91048 } else
91049- mod->module_init = NULL;
91050+ mod->module_init_rw = NULL;
91051+
91052+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
91053+ kmemleak_not_leak(ptr);
91054+ if (!ptr) {
91055+ if (mod->module_init_rw)
91056+ module_memfree(mod->module_init_rw);
91057+ module_memfree(mod->module_core_rw);
91058+ return -ENOMEM;
91059+ }
91060+
91061+ pax_open_kernel();
91062+ memset(ptr, 0, mod->core_size_rx);
91063+ pax_close_kernel();
91064+ mod->module_core_rx = ptr;
91065+
91066+ if (mod->init_size_rx) {
91067+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
91068+ kmemleak_ignore(ptr);
91069+ if (!ptr && mod->init_size_rx) {
91070+ module_memfree_exec(mod->module_core_rx);
91071+ if (mod->module_init_rw)
91072+ module_memfree(mod->module_init_rw);
91073+ module_memfree(mod->module_core_rw);
91074+ return -ENOMEM;
91075+ }
91076+
91077+ pax_open_kernel();
91078+ memset(ptr, 0, mod->init_size_rx);
91079+ pax_close_kernel();
91080+ mod->module_init_rx = ptr;
91081+ } else
91082+ mod->module_init_rx = NULL;
91083
91084 /* Transfer each section which specifies SHF_ALLOC */
91085 pr_debug("final section addresses:\n");
91086@@ -2816,16 +2925,45 @@ static int move_module(struct module *mod, struct load_info *info)
91087 if (!(shdr->sh_flags & SHF_ALLOC))
91088 continue;
91089
91090- if (shdr->sh_entsize & INIT_OFFSET_MASK)
91091- dest = mod->module_init
91092- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
91093- else
91094- dest = mod->module_core + shdr->sh_entsize;
91095+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
91096+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
91097+ dest = mod->module_init_rw
91098+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
91099+ else
91100+ dest = mod->module_init_rx
91101+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
91102+ } else {
91103+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
91104+ dest = mod->module_core_rw + shdr->sh_entsize;
91105+ else
91106+ dest = mod->module_core_rx + shdr->sh_entsize;
91107+ }
91108+
91109+ if (shdr->sh_type != SHT_NOBITS) {
91110+
91111+#ifdef CONFIG_PAX_KERNEXEC
91112+#ifdef CONFIG_X86_64
91113+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
91114+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
91115+#endif
91116+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
91117+ pax_open_kernel();
91118+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
91119+ pax_close_kernel();
91120+ } else
91121+#endif
91122
91123- if (shdr->sh_type != SHT_NOBITS)
91124 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
91125+ }
91126 /* Update sh_addr to point to copy in image. */
91127- shdr->sh_addr = (unsigned long)dest;
91128+
91129+#ifdef CONFIG_PAX_KERNEXEC
91130+ if (shdr->sh_flags & SHF_EXECINSTR)
91131+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
91132+ else
91133+#endif
91134+
91135+ shdr->sh_addr = (unsigned long)dest;
91136 pr_debug("\t0x%lx %s\n",
91137 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
91138 }
91139@@ -2882,12 +3020,12 @@ static void flush_module_icache(const struct module *mod)
91140 * Do it before processing of module parameters, so the module
91141 * can provide parameter accessor functions of its own.
91142 */
91143- if (mod->module_init)
91144- flush_icache_range((unsigned long)mod->module_init,
91145- (unsigned long)mod->module_init
91146- + mod->init_size);
91147- flush_icache_range((unsigned long)mod->module_core,
91148- (unsigned long)mod->module_core + mod->core_size);
91149+ if (mod->module_init_rx)
91150+ flush_icache_range((unsigned long)mod->module_init_rx,
91151+ (unsigned long)mod->module_init_rx
91152+ + mod->init_size_rx);
91153+ flush_icache_range((unsigned long)mod->module_core_rx,
91154+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
91155
91156 set_fs(old_fs);
91157 }
91158@@ -2945,8 +3083,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
91159 {
91160 percpu_modfree(mod);
91161 module_arch_freeing_init(mod);
91162- module_memfree(mod->module_init);
91163- module_memfree(mod->module_core);
91164+ module_memfree_exec(mod->module_init_rx);
91165+ module_memfree_exec(mod->module_core_rx);
91166+ module_memfree(mod->module_init_rw);
91167+ module_memfree(mod->module_core_rw);
91168 }
91169
91170 int __weak module_finalize(const Elf_Ehdr *hdr,
91171@@ -2959,7 +3099,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
91172 static int post_relocation(struct module *mod, const struct load_info *info)
91173 {
91174 /* Sort exception table now relocations are done. */
91175+ pax_open_kernel();
91176 sort_extable(mod->extable, mod->extable + mod->num_exentries);
91177+ pax_close_kernel();
91178
91179 /* Copy relocated percpu area over. */
91180 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
91181@@ -3001,13 +3143,15 @@ static void do_mod_ctors(struct module *mod)
91182 /* For freeing module_init on success, in case kallsyms traversing */
91183 struct mod_initfree {
91184 struct rcu_head rcu;
91185- void *module_init;
91186+ void *module_init_rw;
91187+ void *module_init_rx;
91188 };
91189
91190 static void do_free_init(struct rcu_head *head)
91191 {
91192 struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
91193- module_memfree(m->module_init);
91194+ module_memfree(m->module_init_rw);
91195+ module_memfree_exec(m->module_init_rx);
91196 kfree(m);
91197 }
91198
91199@@ -3022,7 +3166,8 @@ static int do_init_module(struct module *mod)
91200 ret = -ENOMEM;
91201 goto fail;
91202 }
91203- freeinit->module_init = mod->module_init;
91204+ freeinit->module_init_rw = mod->module_init_rw;
91205+ freeinit->module_init_rx = mod->module_init_rx;
91206
91207 /*
91208 * We want to find out whether @mod uses async during init. Clear
91209@@ -3081,10 +3226,10 @@ static int do_init_module(struct module *mod)
91210 #endif
91211 unset_module_init_ro_nx(mod);
91212 module_arch_freeing_init(mod);
91213- mod->module_init = NULL;
91214- mod->init_size = 0;
91215- mod->init_ro_size = 0;
91216- mod->init_text_size = 0;
91217+ mod->module_init_rw = NULL;
91218+ mod->module_init_rx = NULL;
91219+ mod->init_size_rw = 0;
91220+ mod->init_size_rx = 0;
91221 /*
91222 * We want to free module_init, but be aware that kallsyms may be
91223 * walking this with preempt disabled. In all the failure paths,
91224@@ -3198,16 +3343,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
91225 module_bug_finalize(info->hdr, info->sechdrs, mod);
91226
91227 /* Set RO and NX regions for core */
91228- set_section_ro_nx(mod->module_core,
91229- mod->core_text_size,
91230- mod->core_ro_size,
91231- mod->core_size);
91232+ set_section_ro_nx(mod->module_core_rx,
91233+ mod->core_size_rx,
91234+ mod->core_size_rx,
91235+ mod->core_size_rx);
91236
91237 /* Set RO and NX regions for init */
91238- set_section_ro_nx(mod->module_init,
91239- mod->init_text_size,
91240- mod->init_ro_size,
91241- mod->init_size);
91242+ set_section_ro_nx(mod->module_init_rx,
91243+ mod->init_size_rx,
91244+ mod->init_size_rx,
91245+ mod->init_size_rx);
91246
91247 /* Mark state as coming so strong_try_module_get() ignores us,
91248 * but kallsyms etc. can see us. */
91249@@ -3291,9 +3436,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
91250 if (err)
91251 goto free_unload;
91252
91253+ /* Now copy in args */
91254+ mod->args = strndup_user(uargs, ~0UL >> 1);
91255+ if (IS_ERR(mod->args)) {
91256+ err = PTR_ERR(mod->args);
91257+ goto free_unload;
91258+ }
91259+
91260 /* Set up MODINFO_ATTR fields */
91261 setup_modinfo(mod, info);
91262
91263+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91264+ {
91265+ char *p, *p2;
91266+
91267+ if (strstr(mod->args, "grsec_modharden_netdev")) {
90268+		if (strstr(mod->args, "grsec_modharden_netdev")) {
90269+			printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
91269+ err = -EPERM;
91270+ goto free_modinfo;
91271+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
91272+ p += sizeof("grsec_modharden_normal") - 1;
91273+ p2 = strstr(p, "_");
91274+ if (p2) {
91275+ *p2 = '\0';
91276+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
91277+ *p2 = '_';
91278+ }
91279+ err = -EPERM;
91280+ goto free_modinfo;
91281+ }
91282+ }
91283+#endif
91284+
91285 /* Fix up syms, so that st_value is a pointer to location. */
91286 err = simplify_symbols(mod, info);
91287 if (err < 0)
91288@@ -3309,13 +3483,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
91289
91290 flush_module_icache(mod);
91291
91292- /* Now copy in args */
91293- mod->args = strndup_user(uargs, ~0UL >> 1);
91294- if (IS_ERR(mod->args)) {
91295- err = PTR_ERR(mod->args);
91296- goto free_arch_cleanup;
91297- }
91298-
91299 dynamic_debug_setup(info->debug, info->num_debug);
91300
91301 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
91302@@ -3363,11 +3530,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
91303 ddebug_cleanup:
91304 dynamic_debug_remove(info->debug);
91305 synchronize_sched();
91306- kfree(mod->args);
91307- free_arch_cleanup:
91308 module_arch_cleanup(mod);
91309 free_modinfo:
91310 free_modinfo(mod);
91311+ kfree(mod->args);
91312 free_unload:
91313 module_unload_free(mod);
91314 unlink_mod:
91315@@ -3454,10 +3620,16 @@ static const char *get_ksymbol(struct module *mod,
91316 unsigned long nextval;
91317
91318 	/* At worst, next value is at end of module */
91319- if (within_module_init(addr, mod))
91320- nextval = (unsigned long)mod->module_init+mod->init_text_size;
91321+ if (within_module_init_rx(addr, mod))
91322+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
91323+ else if (within_module_init_rw(addr, mod))
91324+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
91325+ else if (within_module_core_rx(addr, mod))
91326+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
91327+ else if (within_module_core_rw(addr, mod))
91328+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
91329 else
91330- nextval = (unsigned long)mod->module_core+mod->core_text_size;
91331+ return NULL;
91332
91333 /* Scan for closest preceding symbol, and next symbol. (ELF
91334 starts real symbols at 1). */
91335@@ -3705,7 +3877,7 @@ static int m_show(struct seq_file *m, void *p)
91336 return 0;
91337
91338 seq_printf(m, "%s %u",
91339- mod->name, mod->init_size + mod->core_size);
91340+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
91341 print_unload_info(m, mod);
91342
91343 /* Informative for users. */
91344@@ -3714,7 +3886,7 @@ static int m_show(struct seq_file *m, void *p)
91345 mod->state == MODULE_STATE_COMING ? "Loading" :
91346 "Live");
91347 /* Used by oprofile and other similar tools. */
91348- seq_printf(m, " 0x%pK", mod->module_core);
91349+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
91350
91351 /* Taints info */
91352 if (mod->taints)
91353@@ -3750,7 +3922,17 @@ static const struct file_operations proc_modules_operations = {
91354
91355 static int __init proc_modules_init(void)
91356 {
91357+#ifndef CONFIG_GRKERNSEC_HIDESYM
91358+#ifdef CONFIG_GRKERNSEC_PROC_USER
91359+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91360+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
91361+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
91362+#else
91363 proc_create("modules", 0, NULL, &proc_modules_operations);
91364+#endif
91365+#else
91366+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91367+#endif
91368 return 0;
91369 }
91370 module_init(proc_modules_init);
91371@@ -3811,7 +3993,8 @@ struct module *__module_address(unsigned long addr)
91372 {
91373 struct module *mod;
91374
91375- if (addr < module_addr_min || addr > module_addr_max)
91376+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
91377+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
91378 return NULL;
91379
91380 list_for_each_entry_rcu(mod, &modules, list) {
91381@@ -3852,11 +4035,20 @@ bool is_module_text_address(unsigned long addr)
91382 */
91383 struct module *__module_text_address(unsigned long addr)
91384 {
91385- struct module *mod = __module_address(addr);
91386+ struct module *mod;
91387+
91388+#ifdef CONFIG_X86_32
91389+ addr = ktla_ktva(addr);
91390+#endif
91391+
91392+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
91393+ return NULL;
91394+
91395+ mod = __module_address(addr);
91396+
91397 if (mod) {
91398 /* Make sure it's within the text section. */
91399- if (!within(addr, mod->module_init, mod->init_text_size)
91400- && !within(addr, mod->module_core, mod->core_text_size))
91401+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
91402 mod = NULL;
91403 }
91404 return mod;
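
The module.c hunks above split each module into separate RX (code) and RW (data) mappings, so every address check must now test up to four ranges instead of two. Below is a minimal userspace sketch of that bounds logic; the field names mirror the patch (module_core_rx, core_size_rx, and so on), while the struct, the helpers, and the demo addresses are hypothetical scaffolding rather than kernel code.

    #include <stdio.h>
    #include <stdbool.h>

    struct module_layout {
    	unsigned long module_core_rx;	/* base of executable (RX) region */
    	unsigned long core_size_rx;
    	unsigned long module_core_rw;	/* base of writable (RW) region */
    	unsigned long core_size_rw;
    };

    static bool within(unsigned long addr, unsigned long base, unsigned long size)
    {
    	return addr >= base && addr < base + size;
    }

    static bool within_module_core_rx(unsigned long addr, const struct module_layout *m)
    {
    	return within(addr, m->module_core_rx, m->core_size_rx);
    }

    static bool within_module_core_rw(unsigned long addr, const struct module_layout *m)
    {
    	return within(addr, m->module_core_rw, m->core_size_rw);
    }

    int main(void)
    {
    	/* Two disjoint regions: code at 0x1000 (RX), data at 0x9000 (RW). */
    	struct module_layout m = {
    		.module_core_rx = 0x1000, .core_size_rx = 0x2000,
    		.module_core_rw = 0x9000, .core_size_rw = 0x1000,
    	};

    	/* A text address must fall in the RX region only, as in
    	 * __module_text_address() above. */
    	printf("0x1800 is text: %d\n", within_module_core_rx(0x1800, &m)); /* 1 */
    	printf("0x9800 is text: %d\n", within_module_core_rx(0x9800, &m)); /* 0 */
    	printf("0x9800 is data: %d\n", within_module_core_rw(0x9800, &m)); /* 1 */
    	return 0;
    }

The same four-way test is what get_ksymbol() performs above, with the text lookup in __module_text_address() accepting only the RX ranges.
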
91405diff --git a/kernel/notifier.c b/kernel/notifier.c
91406index 4803da6..1c5eea6 100644
91407--- a/kernel/notifier.c
91408+++ b/kernel/notifier.c
91409@@ -5,6 +5,7 @@
91410 #include <linux/rcupdate.h>
91411 #include <linux/vmalloc.h>
91412 #include <linux/reboot.h>
91413+#include <linux/mm.h>
91414
91415 /*
91416 * Notifier list for kernel code which wants to be called
91417@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
91418 while ((*nl) != NULL) {
91419 if (n->priority > (*nl)->priority)
91420 break;
91421- nl = &((*nl)->next);
91422+ nl = (struct notifier_block **)&((*nl)->next);
91423 }
91424- n->next = *nl;
91425+ pax_open_kernel();
91426+ *(const void **)&n->next = *nl;
91427 rcu_assign_pointer(*nl, n);
91428+ pax_close_kernel();
91429 return 0;
91430 }
91431
91432@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
91433 return 0;
91434 if (n->priority > (*nl)->priority)
91435 break;
91436- nl = &((*nl)->next);
91437+ nl = (struct notifier_block **)&((*nl)->next);
91438 }
91439- n->next = *nl;
91440+ pax_open_kernel();
91441+ *(const void **)&n->next = *nl;
91442 rcu_assign_pointer(*nl, n);
91443+ pax_close_kernel();
91444 return 0;
91445 }
91446
91447@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
91448 {
91449 while ((*nl) != NULL) {
91450 if ((*nl) == n) {
91451+ pax_open_kernel();
91452 rcu_assign_pointer(*nl, n->next);
91453+ pax_close_kernel();
91454 return 0;
91455 }
91456- nl = &((*nl)->next);
91457+ nl = (struct notifier_block **)&((*nl)->next);
91458 }
91459 return -ENOENT;
91460 }
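
The notifier.c hunks wrap every write to the (now read-only) notifier chain in pax_open_kernel()/pax_close_kernel(), which briefly lift write protection on otherwise read-only kernel data. A rough userspace analogy, assuming nothing about the real implementation beyond "open a short write window, then close it", can be built with mprotect():

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void *page;
    static size_t pagesz;

    /* Userspace stand-ins for pax_open_kernel()/pax_close_kernel(): lift and
     * restore write protection around an intentional write. */
    static void open_window(void)  { mprotect(page, pagesz, PROT_READ | PROT_WRITE); }
    static void close_window(void) { mprotect(page, pagesz, PROT_READ); }

    int main(void)
    {
    	pagesz = (size_t)sysconf(_SC_PAGESIZE);
    	page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
    		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    	if (page == MAP_FAILED)
    		return 1;
    	strcpy(page, "initial");
    	close_window();			/* data is now read-only */

    	open_window();			/* analogous to pax_open_kernel() */
    	strcpy(page, "updated");	/* the one sanctioned write */
    	close_window();			/* analogous to pax_close_kernel() */

    	printf("%s\n", (char *)page);	/* prints "updated" */
    	return 0;
    }

Writing to the page while the window is closed would fault, which is the point: only the audited call sites between open and close may modify the chain.
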
91461diff --git a/kernel/padata.c b/kernel/padata.c
91462index 161402f..598814c 100644
91463--- a/kernel/padata.c
91464+++ b/kernel/padata.c
91465@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
91466 * seq_nr mod. number of cpus in use.
91467 */
91468
91469- seq_nr = atomic_inc_return(&pd->seq_nr);
91470+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
91471 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
91472
91473 return padata_index_to_cpu(pd, cpu_index);
91474@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
91475 padata_init_pqueues(pd);
91476 padata_init_squeues(pd);
91477 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
91478- atomic_set(&pd->seq_nr, -1);
91479+ atomic_set_unchecked(&pd->seq_nr, -1);
91480 atomic_set(&pd->reorder_objects, 0);
91481 atomic_set(&pd->refcnt, 0);
91482 pd->pinst = pinst;
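
padata's seq_nr is one of many counters this patch converts from atomic_t to atomic_unchecked_t: under PAX_REFCOUNT, plain atomic_t increments trap on overflow to stop reference-count exploits, so counters whose wraparound is expected and harmless (sequence numbers, statistics) must opt out. A toy model of the two flavours, with entirely hypothetical single-threaded stand-ins for the real atomic ops:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { int v; } atomic_t;		/* overflow is a bug: trap  */
    typedef struct { int v; } atomic_unchecked_t;	/* wraparound is expected   */

    static int atomic_inc_return(atomic_t *a)
    {
    	if (a->v == INT_MAX) {			/* refcount about to wrap */
    		fprintf(stderr, "refcount overflow detected\n");
    		abort();			/* the kernel would kill the task */
    	}
    	return ++a->v;
    }

    static int atomic_inc_return_unchecked(atomic_unchecked_t *a)
    {
    	unsigned int u = (unsigned int)a->v + 1u;	/* defined wraparound */
    	a->v = (int)u;		/* two's-complement wrap on common ABIs */
    	return a->v;
    }

    int main(void)
    {
    	atomic_t ref = { 1 };
    	atomic_unchecked_t seq = { INT_MAX };	/* e.g. pd->seq_nr above */

    	printf("checked ref -> %d\n", atomic_inc_return(&ref));
    	printf("unchecked seq wraps to %d\n", atomic_inc_return_unchecked(&seq));
    	return 0;
    }
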
91483diff --git a/kernel/panic.c b/kernel/panic.c
91484index 4d8d6f9..97b9b9c 100644
91485--- a/kernel/panic.c
91486+++ b/kernel/panic.c
91487@@ -54,7 +54,7 @@ EXPORT_SYMBOL(panic_blink);
91488 /*
91489 * Stop ourself in panic -- architecture code may override this
91490  * Stop ourselves in panic -- architecture code may override this
91491-void __weak panic_smp_self_stop(void)
91492+void __weak __noreturn panic_smp_self_stop(void)
91493 {
91494 while (1)
91495 cpu_relax();
91496@@ -423,7 +423,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
91497 disable_trace_on_warning();
91498
91499 pr_warn("------------[ cut here ]------------\n");
91500- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
91501+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
91502 raw_smp_processor_id(), current->pid, file, line, caller);
91503
91504 if (args)
91505@@ -488,7 +488,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
91506 */
91507 __visible void __stack_chk_fail(void)
91508 {
91509- panic("stack-protector: Kernel stack is corrupted in: %p\n",
91510+ dump_stack();
91511+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
91512 __builtin_return_address(0));
91513 }
91514 EXPORT_SYMBOL(__stack_chk_fail);
91515diff --git a/kernel/pid.c b/kernel/pid.c
91516index cd36a5e..11f185d 100644
91517--- a/kernel/pid.c
91518+++ b/kernel/pid.c
91519@@ -33,6 +33,7 @@
91520 #include <linux/rculist.h>
91521 #include <linux/bootmem.h>
91522 #include <linux/hash.h>
91523+#include <linux/security.h>
91524 #include <linux/pid_namespace.h>
91525 #include <linux/init_task.h>
91526 #include <linux/syscalls.h>
91527@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
91528
91529 int pid_max = PID_MAX_DEFAULT;
91530
91531-#define RESERVED_PIDS 300
91532+#define RESERVED_PIDS 500
91533
91534 int pid_max_min = RESERVED_PIDS + 1;
91535 int pid_max_max = PID_MAX_LIMIT;
91536@@ -450,10 +451,18 @@ EXPORT_SYMBOL(pid_task);
91537 */
91538 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
91539 {
91540+ struct task_struct *task;
91541+
91542 rcu_lockdep_assert(rcu_read_lock_held(),
91543 "find_task_by_pid_ns() needs rcu_read_lock()"
91544 " protection");
91545- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91546+
91547+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91548+
91549+ if (gr_pid_is_chrooted(task))
91550+ return NULL;
91551+
91552+ return task;
91553 }
91554
91555 struct task_struct *find_task_by_vpid(pid_t vnr)
91556@@ -461,6 +470,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
91557 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
91558 }
91559
91560+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
91561+{
91562+ rcu_lockdep_assert(rcu_read_lock_held(),
91563+ "find_task_by_pid_ns() needs rcu_read_lock()"
91564+ " protection");
91565+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
91566+}
91567+
91568 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
91569 {
91570 struct pid *pid;
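
The pid.c hunk filters find_task_by_pid_ns() through gr_pid_is_chrooted(), hiding tasks outside the caller's chroot, and adds an *_unrestricted twin for internal callers that must still see every task. A small sketch of that lookup-plus-filter pattern, with a demo task table and a chrooted_away flag standing in for the real policy check:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct task { int pid; bool chrooted_away; };

    static struct task tasks[] = { { 1, false }, { 100, true } };

    static struct task *find_task_unrestricted(int pid)
    {
    	for (size_t i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
    		if (tasks[i].pid == pid)
    			return &tasks[i];
    	return NULL;
    }

    static struct task *find_task(int pid)
    {
    	struct task *t = find_task_unrestricted(pid);

    	if (t && t->chrooted_away)	/* gr_pid_is_chrooted() analogue */
    		return NULL;		/* hidden from this caller */
    	return t;
    }

    int main(void)
    {
    	printf("pid 100 visible:  %d\n", find_task(100) != NULL);		/* 0 */
    	printf("pid 100 internal: %d\n", find_task_unrestricted(100) != NULL);	/* 1 */
    	return 0;
    }
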
91571diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
91572index a65ba13..f600dbb 100644
91573--- a/kernel/pid_namespace.c
91574+++ b/kernel/pid_namespace.c
91575@@ -274,7 +274,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
91576 void __user *buffer, size_t *lenp, loff_t *ppos)
91577 {
91578 struct pid_namespace *pid_ns = task_active_pid_ns(current);
91579- struct ctl_table tmp = *table;
91580+ ctl_table_no_const tmp = *table;
91581
91582 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
91583 return -EPERM;
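
ctl_table_no_const, used here and again in kernel/sched/core.c below, exists because constification makes struct ctl_table read-only almost everywhere; a handler that needs a tweaked local copy declares the copy with a writable twin type. In the sketch below both struct definitions are same-layout stand-ins, since the real twin is produced by compiler-plugin machinery:

    #include <stdio.h>

    struct ctl_table { const char *procname; int maxlen; };
    typedef struct { const char *procname; int maxlen; } ctl_table_no_const;

    static const struct ctl_table pid_max_table = { "pid_max", 8 };

    int main(void)
    {
    	/* Copy into the writable twin, then make the local adjustment the
    	 * const original would forbid. */
    	ctl_table_no_const tmp = { pid_max_table.procname, pid_max_table.maxlen };
    	tmp.maxlen = 16;
    	printf("%s maxlen=%d\n", tmp.procname, tmp.maxlen);
    	return 0;
    }
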
91584diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
91585index 48b28d3..c63ccaf 100644
91586--- a/kernel/power/Kconfig
91587+++ b/kernel/power/Kconfig
91588@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
91589 config HIBERNATION
91590 bool "Hibernation (aka 'suspend to disk')"
91591 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
91592+ depends on !GRKERNSEC_KMEM
91593+ depends on !PAX_MEMORY_SANITIZE
91594 select HIBERNATE_CALLBACKS
91595 select LZO_COMPRESS
91596 select LZO_DECOMPRESS
91597diff --git a/kernel/power/process.c b/kernel/power/process.c
91598index 5a6ec86..3a8c884 100644
91599--- a/kernel/power/process.c
91600+++ b/kernel/power/process.c
91601@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
91602 unsigned int elapsed_msecs;
91603 bool wakeup = false;
91604 int sleep_usecs = USEC_PER_MSEC;
91605+ bool timedout = false;
91606
91607 do_gettimeofday(&start);
91608
91609@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
91610
91611 while (true) {
91612 todo = 0;
91613+ if (time_after(jiffies, end_time))
91614+ timedout = true;
91615 read_lock(&tasklist_lock);
91616 for_each_process_thread(g, p) {
91617 if (p == current || !freeze_task(p))
91618 continue;
91619
91620- if (!freezer_should_skip(p))
91621+ if (!freezer_should_skip(p)) {
91622 todo++;
91623+ if (timedout) {
91624+ printk(KERN_ERR "Task refusing to freeze:\n");
91625+ sched_show_task(p);
91626+ }
91627+ }
91628 }
91629 read_unlock(&tasklist_lock);
91630
91631@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
91632 todo += wq_busy;
91633 }
91634
91635- if (!todo || time_after(jiffies, end_time))
91636+ if (!todo || timedout)
91637 break;
91638
91639 if (pm_wakeup_pending()) {
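
The process.c refactor samples the freezer deadline once per pass and latches it into a timedout flag, so the final pass can log every task that refuses to freeze and the loop exits on exactly the condition it reported under. The skeleton below reproduces that latch in a trivial, self-contained form:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
    	time_t end_time = time(NULL);	/* already expired, for the demo */
    	bool timedout = false;
    	int todo;

    	do {
    		todo = 0;
    		if (time(NULL) >= end_time)	/* sample once per pass */
    			timedout = true;
    		/* scan: pretend one task still refuses to freeze */
    		todo++;
    		if (timedout)
    			printf("task refusing to freeze: demo-task\n");
    	} while (todo && !timedout);	/* the same latch decides the exit */

    	return 0;
    }
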
91640diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
91641index 2cdd353..7df1786 100644
91642--- a/kernel/printk/printk.c
91643+++ b/kernel/printk/printk.c
91644@@ -486,6 +486,11 @@ int check_syslog_permissions(int type, bool from_file)
91645 if (from_file && type != SYSLOG_ACTION_OPEN)
91646 return 0;
91647
91648+#ifdef CONFIG_GRKERNSEC_DMESG
91649+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
91650+ return -EPERM;
91651+#endif
91652+
91653 if (syslog_action_restricted(type)) {
91654 if (capable(CAP_SYSLOG))
91655 return 0;
91656diff --git a/kernel/profile.c b/kernel/profile.c
91657index 54bf5ba..df6e0a2 100644
91658--- a/kernel/profile.c
91659+++ b/kernel/profile.c
91660@@ -37,7 +37,7 @@ struct profile_hit {
91661 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
91662 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
91663
91664-static atomic_t *prof_buffer;
91665+static atomic_unchecked_t *prof_buffer;
91666 static unsigned long prof_len, prof_shift;
91667
91668 int prof_on __read_mostly;
91669@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
91670 hits[i].pc = 0;
91671 continue;
91672 }
91673- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
91674+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
91675 hits[i].hits = hits[i].pc = 0;
91676 }
91677 }
91678@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
91679 * Add the current hit(s) and flush the write-queue out
91680 * to the global buffer:
91681 */
91682- atomic_add(nr_hits, &prof_buffer[pc]);
91683+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
91684 for (i = 0; i < NR_PROFILE_HIT; ++i) {
91685- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
91686+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
91687 hits[i].pc = hits[i].hits = 0;
91688 }
91689 out:
91690@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
91691 {
91692 unsigned long pc;
91693 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
91694- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
91695+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
91696 }
91697 #endif /* !CONFIG_SMP */
91698
91699@@ -490,7 +490,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
91700 return -EFAULT;
91701 buf++; p++; count--; read++;
91702 }
91703- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
91704+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
91705 if (copy_to_user(buf, (void *)pnt, count))
91706 return -EFAULT;
91707 read += count;
91708@@ -521,7 +521,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
91709 }
91710 #endif
91711 profile_discard_flip_buffers();
91712- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
91713+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
91714 return count;
91715 }
91716
91717diff --git a/kernel/ptrace.c b/kernel/ptrace.c
91718index 1eb9d90..d40d21e 100644
91719--- a/kernel/ptrace.c
91720+++ b/kernel/ptrace.c
91721@@ -321,7 +321,7 @@ static int ptrace_attach(struct task_struct *task, long request,
91722 if (seize)
91723 flags |= PT_SEIZED;
91724 rcu_read_lock();
91725- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
91726+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
91727 flags |= PT_PTRACE_CAP;
91728 rcu_read_unlock();
91729 task->ptrace = flags;
91730@@ -515,7 +515,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
91731 break;
91732 return -EIO;
91733 }
91734- if (copy_to_user(dst, buf, retval))
91735+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
91736 return -EFAULT;
91737 copied += retval;
91738 src += retval;
91739@@ -783,7 +783,7 @@ int ptrace_request(struct task_struct *child, long request,
91740 bool seized = child->ptrace & PT_SEIZED;
91741 int ret = -EIO;
91742 siginfo_t siginfo, *si;
91743- void __user *datavp = (void __user *) data;
91744+ void __user *datavp = (__force void __user *) data;
91745 unsigned long __user *datalp = datavp;
91746 unsigned long flags;
91747
91748@@ -1029,14 +1029,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
91749 goto out;
91750 }
91751
91752+ if (gr_handle_ptrace(child, request)) {
91753+ ret = -EPERM;
91754+ goto out_put_task_struct;
91755+ }
91756+
91757 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
91758 ret = ptrace_attach(child, request, addr, data);
91759 /*
91760 * Some architectures need to do book-keeping after
91761 * a ptrace attach.
91762 */
91763- if (!ret)
91764+ if (!ret) {
91765 arch_ptrace_attach(child);
91766+ gr_audit_ptrace(child);
91767+ }
91768 goto out_put_task_struct;
91769 }
91770
91771@@ -1064,7 +1071,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
91772 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
91773 if (copied != sizeof(tmp))
91774 return -EIO;
91775- return put_user(tmp, (unsigned long __user *)data);
91776+ return put_user(tmp, (__force unsigned long __user *)data);
91777 }
91778
91779 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
91780@@ -1158,7 +1165,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
91781 }
91782
91783 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
91784- compat_long_t, addr, compat_long_t, data)
91785+ compat_ulong_t, addr, compat_ulong_t, data)
91786 {
91787 struct task_struct *child;
91788 long ret;
91789@@ -1174,14 +1181,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
91790 goto out;
91791 }
91792
91793+ if (gr_handle_ptrace(child, request)) {
91794+ ret = -EPERM;
91795+ goto out_put_task_struct;
91796+ }
91797+
91798 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
91799 ret = ptrace_attach(child, request, addr, data);
91800 /*
91801 * Some architectures need to do book-keeping after
91802 * a ptrace attach.
91803 */
91804- if (!ret)
91805+ if (!ret) {
91806 arch_ptrace_attach(child);
91807+ gr_audit_ptrace(child);
91808+ }
91809 goto out_put_task_struct;
91810 }
91811
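
Besides the gr_handle_ptrace() hooks, the ptrace_readdata() hunk adds a retval > sizeof(buf) guard before copy_to_user(), refusing any length a lower layer reports that exceeds the stack buffer. The pattern in isolation, with memcpy standing in for copy_to_user:

    #include <stdio.h>
    #include <string.h>

    /* Never trust a length returned by a lower layer when copying out of a
     * fixed-size buffer. */
    static int copy_out(char *dst, const char *buf, size_t buf_size, long retval)
    {
    	if (retval < 0 || (size_t)retval > buf_size)
    		return -1;		/* would have been a stack overread */
    	memcpy(dst, buf, (size_t)retval);
    	return 0;
    }

    int main(void)
    {
    	char buf[16] = "ptrace data";
    	char dst[16];

    	printf("ok copy: %d\n", copy_out(dst, buf, sizeof(buf), 11)); /*  0 */
    	printf("too big: %d\n", copy_out(dst, buf, sizeof(buf), 64)); /* -1 */
    	return 0;
    }
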
91812diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
91813index 4d559ba..053da37 100644
91814--- a/kernel/rcu/rcutorture.c
91815+++ b/kernel/rcu/rcutorture.c
91816@@ -134,12 +134,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
91817 rcu_torture_count) = { 0 };
91818 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
91819 rcu_torture_batch) = { 0 };
91820-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
91821-static atomic_t n_rcu_torture_alloc;
91822-static atomic_t n_rcu_torture_alloc_fail;
91823-static atomic_t n_rcu_torture_free;
91824-static atomic_t n_rcu_torture_mberror;
91825-static atomic_t n_rcu_torture_error;
91826+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
91827+static atomic_unchecked_t n_rcu_torture_alloc;
91828+static atomic_unchecked_t n_rcu_torture_alloc_fail;
91829+static atomic_unchecked_t n_rcu_torture_free;
91830+static atomic_unchecked_t n_rcu_torture_mberror;
91831+static atomic_unchecked_t n_rcu_torture_error;
91832 static long n_rcu_torture_barrier_error;
91833 static long n_rcu_torture_boost_ktrerror;
91834 static long n_rcu_torture_boost_rterror;
91835@@ -148,7 +148,7 @@ static long n_rcu_torture_boosts;
91836 static long n_rcu_torture_timers;
91837 static long n_barrier_attempts;
91838 static long n_barrier_successes;
91839-static atomic_long_t n_cbfloods;
91840+static atomic_long_unchecked_t n_cbfloods;
91841 static struct list_head rcu_torture_removed;
91842
91843 static int rcu_torture_writer_state;
91844@@ -211,11 +211,11 @@ rcu_torture_alloc(void)
91845
91846 spin_lock_bh(&rcu_torture_lock);
91847 if (list_empty(&rcu_torture_freelist)) {
91848- atomic_inc(&n_rcu_torture_alloc_fail);
91849+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
91850 spin_unlock_bh(&rcu_torture_lock);
91851 return NULL;
91852 }
91853- atomic_inc(&n_rcu_torture_alloc);
91854+ atomic_inc_unchecked(&n_rcu_torture_alloc);
91855 p = rcu_torture_freelist.next;
91856 list_del_init(p);
91857 spin_unlock_bh(&rcu_torture_lock);
91858@@ -228,7 +228,7 @@ rcu_torture_alloc(void)
91859 static void
91860 rcu_torture_free(struct rcu_torture *p)
91861 {
91862- atomic_inc(&n_rcu_torture_free);
91863+ atomic_inc_unchecked(&n_rcu_torture_free);
91864 spin_lock_bh(&rcu_torture_lock);
91865 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
91866 spin_unlock_bh(&rcu_torture_lock);
91867@@ -312,7 +312,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
91868 i = rp->rtort_pipe_count;
91869 if (i > RCU_TORTURE_PIPE_LEN)
91870 i = RCU_TORTURE_PIPE_LEN;
91871- atomic_inc(&rcu_torture_wcount[i]);
91872+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
91873 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
91874 rp->rtort_mbtest = 0;
91875 return true;
91876@@ -799,7 +799,7 @@ rcu_torture_cbflood(void *arg)
91877 VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
91878 do {
91879 schedule_timeout_interruptible(cbflood_inter_holdoff);
91880- atomic_long_inc(&n_cbfloods);
91881+ atomic_long_inc_unchecked(&n_cbfloods);
91882 WARN_ON(signal_pending(current));
91883 for (i = 0; i < cbflood_n_burst; i++) {
91884 for (j = 0; j < cbflood_n_per_burst; j++) {
91885@@ -918,7 +918,7 @@ rcu_torture_writer(void *arg)
91886 i = old_rp->rtort_pipe_count;
91887 if (i > RCU_TORTURE_PIPE_LEN)
91888 i = RCU_TORTURE_PIPE_LEN;
91889- atomic_inc(&rcu_torture_wcount[i]);
91890+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
91891 old_rp->rtort_pipe_count++;
91892 switch (synctype[torture_random(&rand) % nsynctypes]) {
91893 case RTWS_DEF_FREE:
91894@@ -1036,7 +1036,7 @@ static void rcu_torture_timer(unsigned long unused)
91895 return;
91896 }
91897 if (p->rtort_mbtest == 0)
91898- atomic_inc(&n_rcu_torture_mberror);
91899+ atomic_inc_unchecked(&n_rcu_torture_mberror);
91900 spin_lock(&rand_lock);
91901 cur_ops->read_delay(&rand);
91902 n_rcu_torture_timers++;
91903@@ -1106,7 +1106,7 @@ rcu_torture_reader(void *arg)
91904 continue;
91905 }
91906 if (p->rtort_mbtest == 0)
91907- atomic_inc(&n_rcu_torture_mberror);
91908+ atomic_inc_unchecked(&n_rcu_torture_mberror);
91909 cur_ops->read_delay(&rand);
91910 preempt_disable();
91911 pipe_count = p->rtort_pipe_count;
91912@@ -1173,11 +1173,11 @@ rcu_torture_stats_print(void)
91913 rcu_torture_current,
91914 rcu_torture_current_version,
91915 list_empty(&rcu_torture_freelist),
91916- atomic_read(&n_rcu_torture_alloc),
91917- atomic_read(&n_rcu_torture_alloc_fail),
91918- atomic_read(&n_rcu_torture_free));
91919+ atomic_read_unchecked(&n_rcu_torture_alloc),
91920+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
91921+ atomic_read_unchecked(&n_rcu_torture_free));
91922 pr_cont("rtmbe: %d rtbke: %ld rtbre: %ld ",
91923- atomic_read(&n_rcu_torture_mberror),
91924+ atomic_read_unchecked(&n_rcu_torture_mberror),
91925 n_rcu_torture_boost_ktrerror,
91926 n_rcu_torture_boost_rterror);
91927 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
91928@@ -1189,17 +1189,17 @@ rcu_torture_stats_print(void)
91929 n_barrier_successes,
91930 n_barrier_attempts,
91931 n_rcu_torture_barrier_error);
91932- pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
91933+ pr_cont("cbflood: %ld\n", atomic_long_read_unchecked(&n_cbfloods));
91934
91935 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
91936- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
91937+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
91938 n_rcu_torture_barrier_error != 0 ||
91939 n_rcu_torture_boost_ktrerror != 0 ||
91940 n_rcu_torture_boost_rterror != 0 ||
91941 n_rcu_torture_boost_failure != 0 ||
91942 i > 1) {
91943 pr_cont("%s", "!!! ");
91944- atomic_inc(&n_rcu_torture_error);
91945+ atomic_inc_unchecked(&n_rcu_torture_error);
91946 WARN_ON_ONCE(1);
91947 }
91948 pr_cont("Reader Pipe: ");
91949@@ -1216,7 +1216,7 @@ rcu_torture_stats_print(void)
91950 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
91951 pr_cont("Free-Block Circulation: ");
91952 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
91953- pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
91954+ pr_cont(" %d", atomic_read_unchecked(&rcu_torture_wcount[i]));
91955 }
91956 pr_cont("\n");
91957
91958@@ -1560,7 +1560,7 @@ rcu_torture_cleanup(void)
91959
91960 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
91961
91962- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
91963+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
91964 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
91965 else if (torture_onoff_failures())
91966 rcu_torture_print_module_parms(cur_ops,
91967@@ -1685,18 +1685,18 @@ rcu_torture_init(void)
91968
91969 rcu_torture_current = NULL;
91970 rcu_torture_current_version = 0;
91971- atomic_set(&n_rcu_torture_alloc, 0);
91972- atomic_set(&n_rcu_torture_alloc_fail, 0);
91973- atomic_set(&n_rcu_torture_free, 0);
91974- atomic_set(&n_rcu_torture_mberror, 0);
91975- atomic_set(&n_rcu_torture_error, 0);
91976+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
91977+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
91978+ atomic_set_unchecked(&n_rcu_torture_free, 0);
91979+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
91980+ atomic_set_unchecked(&n_rcu_torture_error, 0);
91981 n_rcu_torture_barrier_error = 0;
91982 n_rcu_torture_boost_ktrerror = 0;
91983 n_rcu_torture_boost_rterror = 0;
91984 n_rcu_torture_boost_failure = 0;
91985 n_rcu_torture_boosts = 0;
91986 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
91987- atomic_set(&rcu_torture_wcount[i], 0);
91988+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
91989 for_each_possible_cpu(cpu) {
91990 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
91991 per_cpu(rcu_torture_count, cpu)[i] = 0;
91992diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
91993index 0db5649..e6ec167 100644
91994--- a/kernel/rcu/tiny.c
91995+++ b/kernel/rcu/tiny.c
91996@@ -42,7 +42,7 @@
91997 /* Forward declarations for tiny_plugin.h. */
91998 struct rcu_ctrlblk;
91999 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
92000-static void rcu_process_callbacks(struct softirq_action *unused);
92001+static void rcu_process_callbacks(void);
92002 static void __call_rcu(struct rcu_head *head,
92003 void (*func)(struct rcu_head *rcu),
92004 struct rcu_ctrlblk *rcp);
92005@@ -310,7 +310,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
92006 false));
92007 }
92008
92009-static void rcu_process_callbacks(struct softirq_action *unused)
92010+static __latent_entropy void rcu_process_callbacks(void)
92011 {
92012 __rcu_process_callbacks(&rcu_sched_ctrlblk);
92013 __rcu_process_callbacks(&rcu_bh_ctrlblk);
92014diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
92015index 858c565..7efd915 100644
92016--- a/kernel/rcu/tiny_plugin.h
92017+++ b/kernel/rcu/tiny_plugin.h
92018@@ -152,17 +152,17 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
92019 dump_stack();
92020 }
92021 if (*rcp->curtail && ULONG_CMP_GE(j, js))
92022- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
92023+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
92024 3 * rcu_jiffies_till_stall_check() + 3;
92025 else if (ULONG_CMP_GE(j, js))
92026- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
92027+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
92028 }
92029
92030 static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
92031 {
92032 rcp->ticks_this_gp = 0;
92033 rcp->gp_start = jiffies;
92034- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
92035+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
92036 }
92037
92038 static void check_cpu_stalls(void)
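
From here on the patch replaces many ACCESS_ONCE() stores with ACCESS_ONCE_RW(). Under constification, ACCESS_ONCE() can yield a const-qualified view, so a stray store through it fails to compile, and ACCESS_ONCE_RW() marks the deliberate writes. The macro bodies below are simplified models (the kernel's definitions differ), but they show the compile-time effect:

    #include <stdio.h>

    /* Simplified models: ACCESS_ONCE() reads through a pointer-to-const, so
     * using it as a store target is rejected by the compiler;
     * ACCESS_ONCE_RW() opts back in to writing. */
    #define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

    static unsigned long jiffies_stall;

    int main(void)
    {
    	ACCESS_ONCE_RW(jiffies_stall) = 42;	/* intentional write: allowed */
    	printf("%lu\n", ACCESS_ONCE(jiffies_stall));	/* reads are unchanged */
    	/* ACCESS_ONCE(jiffies_stall) = 0;  -- would not compile: const lvalue */
    	return 0;
    }
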
92039diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
92040index 7680fc2..b8e9161 100644
92041--- a/kernel/rcu/tree.c
92042+++ b/kernel/rcu/tree.c
92043@@ -261,7 +261,7 @@ static void rcu_momentary_dyntick_idle(void)
92044 */
92045 rdtp = this_cpu_ptr(&rcu_dynticks);
92046 smp_mb__before_atomic(); /* Earlier stuff before QS. */
92047- atomic_add(2, &rdtp->dynticks); /* QS. */
92048+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
92049 smp_mb__after_atomic(); /* Later stuff after QS. */
92050 break;
92051 }
92052@@ -521,9 +521,9 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
92053 rcu_prepare_for_idle();
92054 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
92055 smp_mb__before_atomic(); /* See above. */
92056- atomic_inc(&rdtp->dynticks);
92057+ atomic_inc_unchecked(&rdtp->dynticks);
92058 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
92059- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
92060+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
92061 rcu_dynticks_task_enter();
92062
92063 /*
92064@@ -644,10 +644,10 @@ static void rcu_eqs_exit_common(long long oldval, int user)
92065
92066 rcu_dynticks_task_exit();
92067 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
92068- atomic_inc(&rdtp->dynticks);
92069+ atomic_inc_unchecked(&rdtp->dynticks);
92070 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
92071 smp_mb__after_atomic(); /* See above. */
92072- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
92073+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
92074 rcu_cleanup_after_idle();
92075 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
92076 if (!user && !is_idle_task(current)) {
92077@@ -768,14 +768,14 @@ void rcu_nmi_enter(void)
92078 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
92079
92080 if (rdtp->dynticks_nmi_nesting == 0 &&
92081- (atomic_read(&rdtp->dynticks) & 0x1))
92082+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
92083 return;
92084 rdtp->dynticks_nmi_nesting++;
92085 smp_mb__before_atomic(); /* Force delay from prior write. */
92086- atomic_inc(&rdtp->dynticks);
92087+ atomic_inc_unchecked(&rdtp->dynticks);
92088 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
92089 smp_mb__after_atomic(); /* See above. */
92090- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
92091+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
92092 }
92093
92094 /**
92095@@ -794,9 +794,9 @@ void rcu_nmi_exit(void)
92096 return;
92097 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
92098 smp_mb__before_atomic(); /* See above. */
92099- atomic_inc(&rdtp->dynticks);
92100+ atomic_inc_unchecked(&rdtp->dynticks);
92101 smp_mb__after_atomic(); /* Force delay to next write. */
92102- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
92103+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
92104 }
92105
92106 /**
92107@@ -809,7 +809,7 @@ void rcu_nmi_exit(void)
92108 */
92109 bool notrace __rcu_is_watching(void)
92110 {
92111- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
92112+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
92113 }
92114
92115 /**
92116@@ -892,7 +892,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
92117 static int dyntick_save_progress_counter(struct rcu_data *rdp,
92118 bool *isidle, unsigned long *maxj)
92119 {
92120- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
92121+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
92122 rcu_sysidle_check_cpu(rdp, isidle, maxj);
92123 if ((rdp->dynticks_snap & 0x1) == 0) {
92124 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
92125@@ -921,7 +921,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
92126 int *rcrmp;
92127 unsigned int snap;
92128
92129- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
92130+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
92131 snap = (unsigned int)rdp->dynticks_snap;
92132
92133 /*
92134@@ -984,10 +984,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
92135 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
92136 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
92137 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
92138- ACCESS_ONCE(rdp->cond_resched_completed) =
92139+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
92140 ACCESS_ONCE(rdp->mynode->completed);
92141 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
92142- ACCESS_ONCE(*rcrmp) =
92143+ ACCESS_ONCE_RW(*rcrmp) =
92144 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
92145 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
92146 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
92147@@ -1009,7 +1009,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
92148 rsp->gp_start = j;
92149 smp_wmb(); /* Record start time before stall time. */
92150 j1 = rcu_jiffies_till_stall_check();
92151- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
92152+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
92153 rsp->jiffies_resched = j + j1 / 2;
92154 }
92155
92156@@ -1050,7 +1050,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
92157 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92158 return;
92159 }
92160- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
92161+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
92162 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92163
92164 /*
92165@@ -1127,7 +1127,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
92166
92167 raw_spin_lock_irqsave(&rnp->lock, flags);
92168 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
92169- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
92170+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
92171 3 * rcu_jiffies_till_stall_check() + 3;
92172 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92173
92174@@ -1211,7 +1211,7 @@ void rcu_cpu_stall_reset(void)
92175 struct rcu_state *rsp;
92176
92177 for_each_rcu_flavor(rsp)
92178- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
92179+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
92180 }
92181
92182 /*
92183@@ -1597,7 +1597,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
92184 raw_spin_unlock_irq(&rnp->lock);
92185 return 0;
92186 }
92187- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
92188+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
92189
92190 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
92191 /*
92192@@ -1638,9 +1638,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
92193 rdp = this_cpu_ptr(rsp->rda);
92194 rcu_preempt_check_blocked_tasks(rnp);
92195 rnp->qsmask = rnp->qsmaskinit;
92196- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
92197+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
92198 WARN_ON_ONCE(rnp->completed != rsp->completed);
92199- ACCESS_ONCE(rnp->completed) = rsp->completed;
92200+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
92201 if (rnp == rdp->mynode)
92202 (void)__note_gp_changes(rsp, rnp, rdp);
92203 rcu_preempt_boost_start_gp(rnp);
92204@@ -1685,7 +1685,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
92205 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
92206 raw_spin_lock_irq(&rnp->lock);
92207 smp_mb__after_unlock_lock();
92208- ACCESS_ONCE(rsp->gp_flags) =
92209+ ACCESS_ONCE_RW(rsp->gp_flags) =
92210 ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
92211 raw_spin_unlock_irq(&rnp->lock);
92212 }
92213@@ -1731,7 +1731,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
92214 rcu_for_each_node_breadth_first(rsp, rnp) {
92215 raw_spin_lock_irq(&rnp->lock);
92216 smp_mb__after_unlock_lock();
92217- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
92218+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
92219 rdp = this_cpu_ptr(rsp->rda);
92220 if (rnp == rdp->mynode)
92221 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
92222@@ -1746,14 +1746,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
92223 rcu_nocb_gp_set(rnp, nocb);
92224
92225 /* Declare grace period done. */
92226- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
92227+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
92228 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
92229 rsp->fqs_state = RCU_GP_IDLE;
92230 rdp = this_cpu_ptr(rsp->rda);
92231 /* Advance CBs to reduce false positives below. */
92232 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
92233 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
92234- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92235+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92236 trace_rcu_grace_period(rsp->name,
92237 ACCESS_ONCE(rsp->gpnum),
92238 TPS("newreq"));
92239@@ -1878,7 +1878,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
92240 */
92241 return false;
92242 }
92243- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92244+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92245 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
92246 TPS("newreq"));
92247
92248@@ -2099,7 +2099,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
92249 rsp->qlen += rdp->qlen;
92250 rdp->n_cbs_orphaned += rdp->qlen;
92251 rdp->qlen_lazy = 0;
92252- ACCESS_ONCE(rdp->qlen) = 0;
92253+ ACCESS_ONCE_RW(rdp->qlen) = 0;
92254 }
92255
92256 /*
92257@@ -2344,7 +2344,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
92258 }
92259 smp_mb(); /* List handling before counting for rcu_barrier(). */
92260 rdp->qlen_lazy -= count_lazy;
92261- ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
92262+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen - count;
92263 rdp->n_cbs_invoked += count;
92264
92265 /* Reinstate batch limit if we have worked down the excess. */
92266@@ -2507,7 +2507,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
92267 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
92268 return; /* Someone beat us to it. */
92269 }
92270- ACCESS_ONCE(rsp->gp_flags) =
92271+ ACCESS_ONCE_RW(rsp->gp_flags) =
92272 ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
92273 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
92274 rcu_gp_kthread_wake(rsp);
92275@@ -2553,7 +2553,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
92276 /*
92277 * Do RCU core processing for the current CPU.
92278 */
92279-static void rcu_process_callbacks(struct softirq_action *unused)
92280+static void rcu_process_callbacks(void)
92281 {
92282 struct rcu_state *rsp;
92283
92284@@ -2665,7 +2665,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
92285 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
92286 if (debug_rcu_head_queue(head)) {
92287 /* Probable double call_rcu(), so leak the callback. */
92288- ACCESS_ONCE(head->func) = rcu_leak_callback;
92289+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
92290 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
92291 return;
92292 }
92293@@ -2693,7 +2693,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
92294 local_irq_restore(flags);
92295 return;
92296 }
92297- ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
92298+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen + 1;
92299 if (lazy)
92300 rdp->qlen_lazy++;
92301 else
92302@@ -2966,11 +2966,11 @@ void synchronize_sched_expedited(void)
92303 * counter wrap on a 32-bit system. Quite a few more CPUs would of
92304 * course be required on a 64-bit system.
92305 */
92306- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
92307+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
92308 (ulong)atomic_long_read(&rsp->expedited_done) +
92309 ULONG_MAX / 8)) {
92310 synchronize_sched();
92311- atomic_long_inc(&rsp->expedited_wrap);
92312+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
92313 return;
92314 }
92315
92316@@ -2978,12 +2978,12 @@ void synchronize_sched_expedited(void)
92317 * Take a ticket. Note that atomic_inc_return() implies a
92318 * full memory barrier.
92319 */
92320- snap = atomic_long_inc_return(&rsp->expedited_start);
92321+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
92322 firstsnap = snap;
92323 if (!try_get_online_cpus()) {
92324 /* CPU hotplug operation in flight, fall back to normal GP. */
92325 wait_rcu_gp(call_rcu_sched);
92326- atomic_long_inc(&rsp->expedited_normal);
92327+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92328 return;
92329 }
92330 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
92331@@ -2996,7 +2996,7 @@ void synchronize_sched_expedited(void)
92332 for_each_cpu(cpu, cm) {
92333 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
92334
92335- if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
92336+ if (!(atomic_add_return_unchecked(0, &rdtp->dynticks) & 0x1))
92337 cpumask_clear_cpu(cpu, cm);
92338 }
92339 if (cpumask_weight(cm) == 0)
92340@@ -3011,14 +3011,14 @@ void synchronize_sched_expedited(void)
92341 synchronize_sched_expedited_cpu_stop,
92342 NULL) == -EAGAIN) {
92343 put_online_cpus();
92344- atomic_long_inc(&rsp->expedited_tryfail);
92345+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
92346
92347 /* Check to see if someone else did our work for us. */
92348 s = atomic_long_read(&rsp->expedited_done);
92349 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92350 /* ensure test happens before caller kfree */
92351 smp_mb__before_atomic(); /* ^^^ */
92352- atomic_long_inc(&rsp->expedited_workdone1);
92353+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
92354 free_cpumask_var(cm);
92355 return;
92356 }
92357@@ -3028,7 +3028,7 @@ void synchronize_sched_expedited(void)
92358 udelay(trycount * num_online_cpus());
92359 } else {
92360 wait_rcu_gp(call_rcu_sched);
92361- atomic_long_inc(&rsp->expedited_normal);
92362+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92363 free_cpumask_var(cm);
92364 return;
92365 }
92366@@ -3038,7 +3038,7 @@ void synchronize_sched_expedited(void)
92367 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92368 /* ensure test happens before caller kfree */
92369 smp_mb__before_atomic(); /* ^^^ */
92370- atomic_long_inc(&rsp->expedited_workdone2);
92371+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
92372 free_cpumask_var(cm);
92373 return;
92374 }
92375@@ -3053,14 +3053,14 @@ void synchronize_sched_expedited(void)
92376 if (!try_get_online_cpus()) {
92377 /* CPU hotplug operation in flight, use normal GP. */
92378 wait_rcu_gp(call_rcu_sched);
92379- atomic_long_inc(&rsp->expedited_normal);
92380+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92381 free_cpumask_var(cm);
92382 return;
92383 }
92384- snap = atomic_long_read(&rsp->expedited_start);
92385+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
92386 smp_mb(); /* ensure read is before try_stop_cpus(). */
92387 }
92388- atomic_long_inc(&rsp->expedited_stoppedcpus);
92389+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
92390
92391 all_cpus_idle:
92392 free_cpumask_var(cm);
92393@@ -3072,16 +3072,16 @@ all_cpus_idle:
92394 * than we did already did their update.
92395 */
92396 do {
92397- atomic_long_inc(&rsp->expedited_done_tries);
92398+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
92399 s = atomic_long_read(&rsp->expedited_done);
92400 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
92401 /* ensure test happens before caller kfree */
92402 smp_mb__before_atomic(); /* ^^^ */
92403- atomic_long_inc(&rsp->expedited_done_lost);
92404+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
92405 break;
92406 }
92407 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
92408- atomic_long_inc(&rsp->expedited_done_exit);
92409+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
92410
92411 put_online_cpus();
92412 }
92413@@ -3287,7 +3287,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92414 * ACCESS_ONCE() to prevent the compiler from speculating
92415 * the increment to precede the early-exit check.
92416 */
92417- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92418+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92419 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
92420 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
92421 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
92422@@ -3342,7 +3342,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92423
92424 /* Increment ->n_barrier_done to prevent duplicate work. */
92425 smp_mb(); /* Keep increment after above mechanism. */
92426- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92427+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92428 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
92429 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
92430 smp_mb(); /* Keep increment before caller's subsequent code. */
92431@@ -3387,10 +3387,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
92432 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
92433 init_callback_list(rdp);
92434 rdp->qlen_lazy = 0;
92435- ACCESS_ONCE(rdp->qlen) = 0;
92436+ ACCESS_ONCE_RW(rdp->qlen) = 0;
92437 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
92438 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
92439- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
92440+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
92441 rdp->cpu = cpu;
92442 rdp->rsp = rsp;
92443 rcu_boot_init_nocb_percpu_data(rdp);
92444@@ -3423,8 +3423,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
92445 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
92446 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
92447 rcu_sysidle_init_percpu_data(rdp->dynticks);
92448- atomic_set(&rdp->dynticks->dynticks,
92449- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
92450+ atomic_set_unchecked(&rdp->dynticks->dynticks,
92451+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
92452 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
92453
92454 /* Add CPU to rcu_node bitmasks. */
92455diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
92456index 8e7b184..9c55768 100644
92457--- a/kernel/rcu/tree.h
92458+++ b/kernel/rcu/tree.h
92459@@ -87,11 +87,11 @@ struct rcu_dynticks {
92460 long long dynticks_nesting; /* Track irq/process nesting level. */
92461 /* Process level is worth LLONG_MAX/2. */
92462 int dynticks_nmi_nesting; /* Track NMI nesting level. */
92463- atomic_t dynticks; /* Even value for idle, else odd. */
92464+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
92465 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
92466 long long dynticks_idle_nesting;
92467 /* irq/process nesting level from idle. */
92468- atomic_t dynticks_idle; /* Even value for idle, else odd. */
92469+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
92470 /* "Idle" excludes userspace execution. */
92471 unsigned long dynticks_idle_jiffies;
92472 /* End of last non-NMI non-idle period. */
92473@@ -466,17 +466,17 @@ struct rcu_state {
92474 /* _rcu_barrier(). */
92475 /* End of fields guarded by barrier_mutex. */
92476
92477- atomic_long_t expedited_start; /* Starting ticket. */
92478- atomic_long_t expedited_done; /* Done ticket. */
92479- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
92480- atomic_long_t expedited_tryfail; /* # acquisition failures. */
92481- atomic_long_t expedited_workdone1; /* # done by others #1. */
92482- atomic_long_t expedited_workdone2; /* # done by others #2. */
92483- atomic_long_t expedited_normal; /* # fallbacks to normal. */
92484- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
92485- atomic_long_t expedited_done_tries; /* # tries to update _done. */
92486- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
92487- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
92488+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
92489+ atomic_long_t expedited_done; /* Done ticket. */
92490+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
92491+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
92492+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
92493+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
92494+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
92495+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
92496+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
92497+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
92498+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
92499
92500 unsigned long jiffies_force_qs; /* Time at which to invoke */
92501 /* force_quiescent_state(). */
92502diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
92503index 3ec85cb..3687925 100644
92504--- a/kernel/rcu/tree_plugin.h
92505+++ b/kernel/rcu/tree_plugin.h
92506@@ -709,7 +709,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
92507 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
92508 {
92509 return !rcu_preempted_readers_exp(rnp) &&
92510- ACCESS_ONCE(rnp->expmask) == 0;
92511+ ACCESS_ONCE_RW(rnp->expmask) == 0;
92512 }
92513
92514 /*
92515@@ -870,7 +870,7 @@ void synchronize_rcu_expedited(void)
92516
92517 /* Clean up and exit. */
92518 smp_mb(); /* ensure expedited GP seen before counter increment. */
92519- ACCESS_ONCE(sync_rcu_preempt_exp_count) =
92520+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count) =
92521 sync_rcu_preempt_exp_count + 1;
92522 unlock_mb_ret:
92523 mutex_unlock(&sync_rcu_preempt_exp_mutex);
92524@@ -1426,7 +1426,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
92525 free_cpumask_var(cm);
92526 }
92527
92528-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
92529+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
92530 .store = &rcu_cpu_kthread_task,
92531 .thread_should_run = rcu_cpu_kthread_should_run,
92532 .thread_fn = rcu_cpu_kthread,
92533@@ -1900,7 +1900,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
92534 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
92535 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
92536 cpu, ticks_value, ticks_title,
92537- atomic_read(&rdtp->dynticks) & 0xfff,
92538+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
92539 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
92540 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
92541 fast_no_hz);
92542@@ -2044,7 +2044,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
92543 return;
92544 if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
92545 /* Prior smp_mb__after_atomic() orders against prior enqueue. */
92546- ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
92547+ ACCESS_ONCE_RW(rdp_leader->nocb_leader_sleep) = false;
92548 wake_up(&rdp_leader->nocb_wq);
92549 }
92550 }
92551@@ -2096,7 +2096,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
92552
92553 /* Enqueue the callback on the nocb list and update counts. */
92554 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
92555- ACCESS_ONCE(*old_rhpp) = rhp;
92556+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
92557 atomic_long_add(rhcount, &rdp->nocb_q_count);
92558 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
92559 smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
92560@@ -2286,7 +2286,7 @@ wait_again:
92561 continue; /* No CBs here, try next follower. */
92562
92563 /* Move callbacks to wait-for-GP list, which is empty. */
92564- ACCESS_ONCE(rdp->nocb_head) = NULL;
92565+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
92566 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
92567 rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
92568 rdp->nocb_gp_count_lazy =
92569@@ -2413,7 +2413,7 @@ static int rcu_nocb_kthread(void *arg)
92570 list = ACCESS_ONCE(rdp->nocb_follower_head);
92571 BUG_ON(!list);
92572 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
92573- ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
92574+ ACCESS_ONCE_RW(rdp->nocb_follower_head) = NULL;
92575 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
92576 c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
92577 cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
92578@@ -2443,8 +2443,8 @@ static int rcu_nocb_kthread(void *arg)
92579 list = next;
92580 }
92581 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
92582- ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c;
92583- ACCESS_ONCE(rdp->nocb_p_count_lazy) =
92584+ ACCESS_ONCE_RW(rdp->nocb_p_count) = rdp->nocb_p_count - c;
92585+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) =
92586 rdp->nocb_p_count_lazy - cl;
92587 rdp->n_nocbs_invoked += c;
92588 }
92589@@ -2465,7 +2465,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
92590 if (!rcu_nocb_need_deferred_wakeup(rdp))
92591 return;
92592 ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
92593- ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
92594+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
92595 wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
92596 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
92597 }
92598@@ -2588,7 +2588,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
92599 t = kthread_run(rcu_nocb_kthread, rdp_spawn,
92600 "rcuo%c/%d", rsp->abbr, cpu);
92601 BUG_ON(IS_ERR(t));
92602- ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
92603+ ACCESS_ONCE_RW(rdp_spawn->nocb_kthread) = t;
92604 }
92605
92606 /*
92607@@ -2793,11 +2793,11 @@ static void rcu_sysidle_enter(int irq)
92608
92609 /* Record start of fully idle period. */
92610 j = jiffies;
92611- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
92612+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
92613 smp_mb__before_atomic();
92614- atomic_inc(&rdtp->dynticks_idle);
92615+ atomic_inc_unchecked(&rdtp->dynticks_idle);
92616 smp_mb__after_atomic();
92617- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
92618+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
92619 }
92620
92621 /*
92622@@ -2868,9 +2868,9 @@ static void rcu_sysidle_exit(int irq)
92623
92624 /* Record end of idle period. */
92625 smp_mb__before_atomic();
92626- atomic_inc(&rdtp->dynticks_idle);
92627+ atomic_inc_unchecked(&rdtp->dynticks_idle);
92628 smp_mb__after_atomic();
92629- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
92630+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
92631
92632 /*
92633 * If we are the timekeeping CPU, we are permitted to be non-idle
92634@@ -2915,7 +2915,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
92635 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
92636
92637 /* Pick up current idle and NMI-nesting counter and check. */
92638- cur = atomic_read(&rdtp->dynticks_idle);
92639+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
92640 if (cur & 0x1) {
92641 *isidle = false; /* We are not idle! */
92642 return;
92643@@ -2964,7 +2964,7 @@ static void rcu_sysidle(unsigned long j)
92644 case RCU_SYSIDLE_NOT:
92645
92646 /* First time all are idle, so note a short idle period. */
92647- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
92648+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
92649 break;
92650
92651 case RCU_SYSIDLE_SHORT:
92652@@ -3002,7 +3002,7 @@ static void rcu_sysidle_cancel(void)
92653 {
92654 smp_mb();
92655 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
92656- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
92657+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
92658 }
92659
92660 /*
92661@@ -3054,7 +3054,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
92662 smp_mb(); /* grace period precedes setting inuse. */
92663
92664 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
92665- ACCESS_ONCE(rshp->inuse) = 0;
92666+ ACCESS_ONCE_RW(rshp->inuse) = 0;
92667 }
92668
92669 /*
92670@@ -3207,7 +3207,7 @@ static void rcu_bind_gp_kthread(void)
92671 static void rcu_dynticks_task_enter(void)
92672 {
92673 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
92674- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
92675+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = smp_processor_id();
92676 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
92677 }
92678
92679@@ -3215,6 +3215,6 @@ static void rcu_dynticks_task_enter(void)
92680 static void rcu_dynticks_task_exit(void)
92681 {
92682 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
92683- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
92684+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = -1;
92685 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
92686 }
92687diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
92688index 5cdc62e..cc52e88 100644
92689--- a/kernel/rcu/tree_trace.c
92690+++ b/kernel/rcu/tree_trace.c
92691@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
92692 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
92693 rdp->passed_quiesce, rdp->qs_pending);
92694 seq_printf(m, " dt=%d/%llx/%d df=%lu",
92695- atomic_read(&rdp->dynticks->dynticks),
92696+ atomic_read_unchecked(&rdp->dynticks->dynticks),
92697 rdp->dynticks->dynticks_nesting,
92698 rdp->dynticks->dynticks_nmi_nesting,
92699 rdp->dynticks_fqs);
92700@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
92701 struct rcu_state *rsp = (struct rcu_state *)m->private;
92702
92703 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
92704- atomic_long_read(&rsp->expedited_start),
92705+ atomic_long_read_unchecked(&rsp->expedited_start),
92706 atomic_long_read(&rsp->expedited_done),
92707- atomic_long_read(&rsp->expedited_wrap),
92708- atomic_long_read(&rsp->expedited_tryfail),
92709- atomic_long_read(&rsp->expedited_workdone1),
92710- atomic_long_read(&rsp->expedited_workdone2),
92711- atomic_long_read(&rsp->expedited_normal),
92712- atomic_long_read(&rsp->expedited_stoppedcpus),
92713- atomic_long_read(&rsp->expedited_done_tries),
92714- atomic_long_read(&rsp->expedited_done_lost),
92715- atomic_long_read(&rsp->expedited_done_exit));
92716+ atomic_long_read_unchecked(&rsp->expedited_wrap),
92717+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
92718+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
92719+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
92720+ atomic_long_read_unchecked(&rsp->expedited_normal),
92721+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
92722+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
92723+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
92724+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
92725 return 0;
92726 }
92727
92728diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
92729index e0d31a3..f4dafe3 100644
92730--- a/kernel/rcu/update.c
92731+++ b/kernel/rcu/update.c
92732@@ -342,10 +342,10 @@ int rcu_jiffies_till_stall_check(void)
92733 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
92734 */
92735 if (till_stall_check < 3) {
92736- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
92737+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
92738 till_stall_check = 3;
92739 } else if (till_stall_check > 300) {
92740- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
92741+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
92742 till_stall_check = 300;
92743 }
92744 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
92745@@ -501,7 +501,7 @@ static void check_holdout_task(struct task_struct *t,
92746 !ACCESS_ONCE(t->on_rq) ||
92747 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
92748 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
92749- ACCESS_ONCE(t->rcu_tasks_holdout) = false;
92750+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = false;
92751 list_del_init(&t->rcu_tasks_holdout_list);
92752 put_task_struct(t);
92753 return;
92754@@ -589,7 +589,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
92755 !is_idle_task(t)) {
92756 get_task_struct(t);
92757 t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
92758- ACCESS_ONCE(t->rcu_tasks_holdout) = true;
92759+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = true;
92760 list_add(&t->rcu_tasks_holdout_list,
92761 &rcu_tasks_holdouts);
92762 }
92763@@ -686,7 +686,7 @@ static void rcu_spawn_tasks_kthread(void)
92764 t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
92765 BUG_ON(IS_ERR(t));
92766 smp_mb(); /* Ensure others see full kthread. */
92767- ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
92768+ ACCESS_ONCE_RW(rcu_tasks_kthread_ptr) = t;
92769 mutex_unlock(&rcu_tasks_kthread_mutex);
92770 }
92771
92772diff --git a/kernel/resource.c b/kernel/resource.c
92773index 0bcebff..e7cd5b2 100644
92774--- a/kernel/resource.c
92775+++ b/kernel/resource.c
92776@@ -161,8 +161,18 @@ static const struct file_operations proc_iomem_operations = {
92777
92778 static int __init ioresources_init(void)
92779 {
92780+#ifdef CONFIG_GRKERNSEC_PROC_ADD
92781+#ifdef CONFIG_GRKERNSEC_PROC_USER
92782+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
92783+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
92784+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92785+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
92786+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
92787+#endif
92788+#else
92789 proc_create("ioports", 0, NULL, &proc_ioports_operations);
92790 proc_create("iomem", 0, NULL, &proc_iomem_operations);
92791+#endif
92792 return 0;
92793 }
92794 __initcall(ioresources_init);
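This is the standard GRKERNSEC_PROC_ADD ladder: PROC_USER restricts /proc/ioports and /proc/iomem to root (0400), PROC_USERGROUP also admits the group configured in Kconfig (0440), and with neither sub-option selected the entries are not created at all. A hypothetical helper showing the same decision as one expression (gr_proc_mode() is illustrative, not part of the patch):

	static umode_t gr_proc_mode(void)
	{
	#if defined(CONFIG_GRKERNSEC_PROC_USER)
		return S_IRUSR;			/* 0400: root only */
	#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
		return S_IRUSR | S_IRGRP;	/* 0440: root plus the configured gid */
	#else
		return 0;			/* 0 lets proc_create() apply its default */
	#endif
	}
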
92795diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
92796index eae160d..c9aa22e 100644
92797--- a/kernel/sched/auto_group.c
92798+++ b/kernel/sched/auto_group.c
92799@@ -11,7 +11,7 @@
92800
92801 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
92802 static struct autogroup autogroup_default;
92803-static atomic_t autogroup_seq_nr;
92804+static atomic_unchecked_t autogroup_seq_nr;
92805
92806 void __init autogroup_init(struct task_struct *init_task)
92807 {
92808@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
92809
92810 kref_init(&ag->kref);
92811 init_rwsem(&ag->lock);
92812- ag->id = atomic_inc_return(&autogroup_seq_nr);
92813+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
92814 ag->tg = tg;
92815 #ifdef CONFIG_RT_GROUP_SCHED
92816 /*
92817diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
92818index 607f852..486bc87 100644
92819--- a/kernel/sched/completion.c
92820+++ b/kernel/sched/completion.c
92821@@ -205,7 +205,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
92822 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
92823 * or number of jiffies left till timeout) if completed.
92824 */
92825-long __sched
92826+long __sched __intentional_overflow(-1)
92827 wait_for_completion_interruptible_timeout(struct completion *x,
92828 unsigned long timeout)
92829 {
92830@@ -222,7 +222,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
92831 *
92832 * Return: -ERESTARTSYS if interrupted, 0 if completed.
92833 */
92834-int __sched wait_for_completion_killable(struct completion *x)
92835+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
92836 {
92837 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
92838 if (t == -ERESTARTSYS)
92839@@ -243,7 +243,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
92840 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
92841 * or number of jiffies left till timeout) if completed.
92842 */
92843-long __sched
92844+long __sched __intentional_overflow(-1)
92845 wait_for_completion_killable_timeout(struct completion *x,
92846 unsigned long timeout)
92847 {
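The __intentional_overflow(-1) annotations feed the size_overflow GCC plugin: these wait helpers funnel both -ERESTARTSYS and remaining-jiffies counts through the same signed return, so arithmetic on the result may legitimately go negative and must not be instrumented. A sketch of how such a marker is typically defined, assuming the plugin consumes a function attribute (-1 names the return value; non-negative arguments would name parameters):

	#ifdef SIZE_OVERFLOW_PLUGIN
	# define __intentional_overflow(...) \
		__attribute__((intentional_overflow(__VA_ARGS__)))
	#else
	# define __intentional_overflow(...)
	#endif

	long __intentional_overflow(-1)
	wait_for_something_timeout(unsigned long timeout);
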
92848diff --git a/kernel/sched/core.c b/kernel/sched/core.c
92849index d400c82..50fca96 100644
92850--- a/kernel/sched/core.c
92851+++ b/kernel/sched/core.c
92852@@ -1902,7 +1902,7 @@ void set_numabalancing_state(bool enabled)
92853 int sysctl_numa_balancing(struct ctl_table *table, int write,
92854 void __user *buffer, size_t *lenp, loff_t *ppos)
92855 {
92856- struct ctl_table t;
92857+ ctl_table_no_const t;
92858 int err;
92859 int state = numabalancing_enabled;
92860
92861@@ -2352,8 +2352,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
92862 next->active_mm = oldmm;
92863 atomic_inc(&oldmm->mm_count);
92864 enter_lazy_tlb(oldmm, next);
92865- } else
92866+ } else {
92867 switch_mm(oldmm, mm, next);
92868+ populate_stack();
92869+ }
92870
92871 if (!prev->mm) {
92872 prev->active_mm = NULL;
92873@@ -3154,6 +3156,8 @@ int can_nice(const struct task_struct *p, const int nice)
92874 /* convert nice value [19,-20] to rlimit style value [1,40] */
92875 int nice_rlim = nice_to_rlimit(nice);
92876
92877+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
92878+
92879 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
92880 capable(CAP_SYS_NICE));
92881 }
92882@@ -3180,7 +3184,8 @@ SYSCALL_DEFINE1(nice, int, increment)
92883 nice = task_nice(current) + increment;
92884
92885 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
92886- if (increment < 0 && !can_nice(current, nice))
92887+ if (increment < 0 && (!can_nice(current, nice) ||
92888+ gr_handle_chroot_nice()))
92889 return -EPERM;
92890
92891 retval = security_task_setnice(current, nice);
92892@@ -3475,6 +3480,7 @@ recheck:
92893 if (policy != p->policy && !rlim_rtprio)
92894 return -EPERM;
92895
92896+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
92897 /* can't increase priority */
92898 if (attr->sched_priority > p->rt_priority &&
92899 attr->sched_priority > rlim_rtprio)
92900@@ -4975,6 +4981,7 @@ void idle_task_exit(void)
92901
92902 if (mm != &init_mm) {
92903 switch_mm(mm, &init_mm, current);
92904+ populate_stack();
92905 finish_arch_post_lock_switch();
92906 }
92907 mmdrop(mm);
92908@@ -5070,7 +5077,7 @@ static void migrate_tasks(unsigned int dead_cpu)
92909
92910 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
92911
92912-static struct ctl_table sd_ctl_dir[] = {
92913+static ctl_table_no_const sd_ctl_dir[] __read_only = {
92914 {
92915 .procname = "sched_domain",
92916 .mode = 0555,
92917@@ -5087,17 +5094,17 @@ static struct ctl_table sd_ctl_root[] = {
92918 {}
92919 };
92920
92921-static struct ctl_table *sd_alloc_ctl_entry(int n)
92922+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
92923 {
92924- struct ctl_table *entry =
92925+ ctl_table_no_const *entry =
92926 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
92927
92928 return entry;
92929 }
92930
92931-static void sd_free_ctl_entry(struct ctl_table **tablep)
92932+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
92933 {
92934- struct ctl_table *entry;
92935+ ctl_table_no_const *entry;
92936
92937 /*
92938 * In the intermediate directories, both the child directory and
92939@@ -5105,22 +5112,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
92940 * will always be set. In the lowest directory the names are
92941 * static strings and all have proc handlers.
92942 */
92943- for (entry = *tablep; entry->mode; entry++) {
92944- if (entry->child)
92945- sd_free_ctl_entry(&entry->child);
92946+ for (entry = tablep; entry->mode; entry++) {
92947+ if (entry->child) {
92948+ sd_free_ctl_entry(entry->child);
92949+ pax_open_kernel();
92950+ entry->child = NULL;
92951+ pax_close_kernel();
92952+ }
92953 if (entry->proc_handler == NULL)
92954 kfree(entry->procname);
92955 }
92956
92957- kfree(*tablep);
92958- *tablep = NULL;
92959+ kfree(tablep);
92960 }
92961
92962 static int min_load_idx = 0;
92963 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
92964
92965 static void
92966-set_table_entry(struct ctl_table *entry,
92967+set_table_entry(ctl_table_no_const *entry,
92968 const char *procname, void *data, int maxlen,
92969 umode_t mode, proc_handler *proc_handler,
92970 bool load_idx)
92971@@ -5140,7 +5150,7 @@ set_table_entry(struct ctl_table *entry,
92972 static struct ctl_table *
92973 sd_alloc_ctl_domain_table(struct sched_domain *sd)
92974 {
92975- struct ctl_table *table = sd_alloc_ctl_entry(14);
92976+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
92977
92978 if (table == NULL)
92979 return NULL;
92980@@ -5178,9 +5188,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
92981 return table;
92982 }
92983
92984-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
92985+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
92986 {
92987- struct ctl_table *entry, *table;
92988+ ctl_table_no_const *entry, *table;
92989 struct sched_domain *sd;
92990 int domain_num = 0, i;
92991 char buf[32];
92992@@ -5207,11 +5217,13 @@ static struct ctl_table_header *sd_sysctl_header;
92993 static void register_sched_domain_sysctl(void)
92994 {
92995 int i, cpu_num = num_possible_cpus();
92996- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
92997+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
92998 char buf[32];
92999
93000 WARN_ON(sd_ctl_dir[0].child);
93001+ pax_open_kernel();
93002 sd_ctl_dir[0].child = entry;
93003+ pax_close_kernel();
93004
93005 if (entry == NULL)
93006 return;
93007@@ -5234,8 +5246,12 @@ static void unregister_sched_domain_sysctl(void)
93008 if (sd_sysctl_header)
93009 unregister_sysctl_table(sd_sysctl_header);
93010 sd_sysctl_header = NULL;
93011- if (sd_ctl_dir[0].child)
93012- sd_free_ctl_entry(&sd_ctl_dir[0].child);
93013+ if (sd_ctl_dir[0].child) {
93014+ sd_free_ctl_entry(sd_ctl_dir[0].child);
93015+ pax_open_kernel();
93016+ sd_ctl_dir[0].child = NULL;
93017+ pax_close_kernel();
93018+ }
93019 }
93020 #else
93021 static void register_sched_domain_sysctl(void)
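sd_ctl_dir now lives in __read_only memory, so each late write to it (installing and clearing the child pointer) is bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection on the current CPU. A rough x86-flavoured sketch, assuming a CR0.WP-based implementation; the real helpers are per-arch and more careful:

	static inline unsigned long pax_open_kernel(void)
	{
		unsigned long cr0;

		preempt_disable();
		cr0 = read_cr0();
		write_cr0(cr0 & ~X86_CR0_WP);	/* ring 0 may now write RO pages */
		return cr0;
	}

	static inline unsigned long pax_close_kernel(void)
	{
		write_cr0(read_cr0() | X86_CR0_WP);
		preempt_enable();
		return 0;
	}
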
93022diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
93023index fe331fc..29d620e 100644
93024--- a/kernel/sched/fair.c
93025+++ b/kernel/sched/fair.c
93026@@ -2089,7 +2089,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
93027
93028 static void reset_ptenuma_scan(struct task_struct *p)
93029 {
93030- ACCESS_ONCE(p->mm->numa_scan_seq)++;
93031+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
93032 p->mm->numa_scan_offset = 0;
93033 }
93034
93035@@ -7651,7 +7651,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
93036 * run_rebalance_domains is triggered when needed from the scheduler tick.
93037 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
93038 */
93039-static void run_rebalance_domains(struct softirq_action *h)
93040+static __latent_entropy void run_rebalance_domains(void)
93041 {
93042 struct rq *this_rq = this_rq();
93043 enum cpu_idle_type idle = this_rq->idle_balance ?
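run_rebalance_domains() is one of several softirq handlers in this patch gaining __latent_entropy: the latent_entropy GCC plugin instruments marked functions so each invocation accumulates a value derived from the control-flow path taken and mixes it into the kernel's entropy pool, cheaply harvesting randomness from hot paths. Conceptually, with the attribute name the plugin uses:

	/* expands to nothing when the plugin is disabled */
	#define __latent_entropy __attribute__((latent_entropy))

	static __latent_entropy void run_rebalance_domains(void)
	{
		/* compiler-injected, roughly:
		 *	local_entropy ^= <per-basic-block constant>;
		 * ... normal handler body ...
		 * followed by a store mixing local_entropy back into
		 * the global latent_entropy pool variable
		 */
	}
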
93044diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
93045index 9a2a45c..bb91ace 100644
93046--- a/kernel/sched/sched.h
93047+++ b/kernel/sched/sched.h
93048@@ -1182,7 +1182,7 @@ struct sched_class {
93049 #ifdef CONFIG_FAIR_GROUP_SCHED
93050 void (*task_move_group) (struct task_struct *p, int on_rq);
93051 #endif
93052-};
93053+} __do_const;
93054
93055 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
93056 {
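Tagging struct sched_class with __do_const drives the constify GCC plugin: ops-style structures of function pointers are made implicitly const so their instances land in read-only memory, and __do_const forces that treatment for types the plugin would not constify on its own. Sketch, assuming the plugin's attribute name:

	#define __do_const __attribute__((do_const))

	struct sched_class {
		/* ... function pointers ... */
	} __do_const;

	/* definitions of this type are now effectively const and end up in
	 * a read-only section, so the pointers cannot be overwritten */
	static struct sched_class fair_sched_class = { /* ... */ };
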
93057diff --git a/kernel/seccomp.c b/kernel/seccomp.c
93058index 4ef9687..4f44028 100644
93059--- a/kernel/seccomp.c
93060+++ b/kernel/seccomp.c
93061@@ -629,7 +629,9 @@ static u32 __seccomp_phase1_filter(int this_syscall, struct seccomp_data *sd)
93062
93063 switch (action) {
93064 case SECCOMP_RET_ERRNO:
93065- /* Set the low-order 16-bits as a errno. */
93066+ /* Set low-order bits as an errno, capped at MAX_ERRNO. */
93067+ if (data > MAX_ERRNO)
93068+ data = MAX_ERRNO;
93069 syscall_set_return_value(current, task_pt_regs(current),
93070 -data, 0);
93071 goto skip;
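The clamp matters because only return values in [-MAX_ERRNO, -1] (-4095..-1) are recognised as errors by user space and by IS_ERR_VALUE(); a filter-supplied errno above 4095 would negate into something indistinguishable from a successful return. A small user-space demonstration of the encoding:

	#include <stdio.h>

	#define MAX_ERRNO	4095
	#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

	int main(void)
	{
		unsigned long ok  = (unsigned long)-4095L;	/* largest valid errno */
		unsigned long bad = (unsigned long)-70000L;	/* out-of-range "errno" */

		printf("ok  treated as error: %d\n", !!IS_ERR_VALUE(ok));	/* 1 */
		printf("bad treated as error: %d\n", !!IS_ERR_VALUE(bad));	/* 0 */
		return 0;
	}
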
93072diff --git a/kernel/signal.c b/kernel/signal.c
93073index 16a30529..25ad033 100644
93074--- a/kernel/signal.c
93075+++ b/kernel/signal.c
93076@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
93077
93078 int print_fatal_signals __read_mostly;
93079
93080-static void __user *sig_handler(struct task_struct *t, int sig)
93081+static __sighandler_t sig_handler(struct task_struct *t, int sig)
93082 {
93083 return t->sighand->action[sig - 1].sa.sa_handler;
93084 }
93085
93086-static int sig_handler_ignored(void __user *handler, int sig)
93087+static int sig_handler_ignored(__sighandler_t handler, int sig)
93088 {
93089 /* Is it explicitly or implicitly ignored? */
93090 return handler == SIG_IGN ||
93091@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
93092
93093 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
93094 {
93095- void __user *handler;
93096+ __sighandler_t handler;
93097
93098 handler = sig_handler(t, sig);
93099
93100@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
93101 atomic_inc(&user->sigpending);
93102 rcu_read_unlock();
93103
93104+ if (!override_rlimit)
93105+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
93106+
93107 if (override_rlimit ||
93108 atomic_read(&user->sigpending) <=
93109 task_rlimit(t, RLIMIT_SIGPENDING)) {
93110@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
93111
93112 int unhandled_signal(struct task_struct *tsk, int sig)
93113 {
93114- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
93115+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
93116 if (is_global_init(tsk))
93117 return 1;
93118 if (handler != SIG_IGN && handler != SIG_DFL)
93119@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
93120 }
93121 }
93122
93123+ /* allow glibc communication via tgkill to other threads in our
93124+ thread group */
93125+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
93126+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
93127+ && gr_handle_signal(t, sig))
93128+ return -EPERM;
93129+
93130 return security_task_kill(t, info, sig, 0);
93131 }
93132
93133@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
93134 return send_signal(sig, info, p, 1);
93135 }
93136
93137-static int
93138+int
93139 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
93140 {
93141 return send_signal(sig, info, t, 0);
93142@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
93143 unsigned long int flags;
93144 int ret, blocked, ignored;
93145 struct k_sigaction *action;
93146+ int is_unhandled = 0;
93147
93148 spin_lock_irqsave(&t->sighand->siglock, flags);
93149 action = &t->sighand->action[sig-1];
93150@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
93151 }
93152 if (action->sa.sa_handler == SIG_DFL)
93153 t->signal->flags &= ~SIGNAL_UNKILLABLE;
93154+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
93155+ is_unhandled = 1;
93156 ret = specific_send_sig_info(sig, info, t);
93157 spin_unlock_irqrestore(&t->sighand->siglock, flags);
93158
93159+ /* only deal with unhandled signals; Java etc. trigger SIGSEGV during
93160+ normal operation */
93161+ if (is_unhandled) {
93162+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
93163+ gr_handle_crash(t, sig);
93164+ }
93165+
93166 return ret;
93167 }
93168
93169@@ -1310,8 +1330,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
93170 ret = check_kill_permission(sig, info, p);
93171 rcu_read_unlock();
93172
93173- if (!ret && sig)
93174+ if (!ret && sig) {
93175 ret = do_send_sig_info(sig, info, p, true);
93176+ if (!ret)
93177+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
93178+ }
93179
93180 return ret;
93181 }
93182@@ -2915,7 +2938,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
93183 int error = -ESRCH;
93184
93185 rcu_read_lock();
93186- p = find_task_by_vpid(pid);
93187+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
93188+ /* allow glibc communication via tgkill to other threads in our
93189+ thread group */
93190+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
93191+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
93192+ p = find_task_by_vpid_unrestricted(pid);
93193+ else
93194+#endif
93195+ p = find_task_by_vpid(pid);
93196 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
93197 error = check_kill_permission(sig, info, p);
93198 /*
93199@@ -3248,8 +3279,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
93200 }
93201 seg = get_fs();
93202 set_fs(KERNEL_DS);
93203- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
93204- (stack_t __force __user *) &uoss,
93205+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
93206+ (stack_t __force_user *) &uoss,
93207 compat_user_stack_pointer());
93208 set_fs(seg);
93209 if (ret >= 0 && uoss_ptr) {
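__force_user, used in the sigaltstack compat path above, fuses sparse's __force (suppress the address-space warning for a deliberate cast) with __user, so intentional kernel-to-user pointer casts stay distinguishable from accidental ones. The definitions under sparse, following the usual __CHECKER__ pattern:

	#ifdef __CHECKER__
	# define __user		__attribute__((noderef, address_space(1)))
	# define __force	__attribute__((force))
	# define __force_user	__force __user
	#else
	# define __user
	# define __force
	# define __force_user
	#endif

The cast is deliberate here: the code runs under set_fs(KERNEL_DS), so a kernel stack_t may be passed where a user pointer is expected.
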
93210diff --git a/kernel/smpboot.c b/kernel/smpboot.c
93211index 40190f2..8861d40 100644
93212--- a/kernel/smpboot.c
93213+++ b/kernel/smpboot.c
93214@@ -290,7 +290,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
93215 }
93216 smpboot_unpark_thread(plug_thread, cpu);
93217 }
93218- list_add(&plug_thread->list, &hotplug_threads);
93219+ pax_list_add(&plug_thread->list, &hotplug_threads);
93220 out:
93221 mutex_unlock(&smpboot_threads_lock);
93222 put_online_cpus();
93223@@ -308,7 +308,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
93224 {
93225 get_online_cpus();
93226 mutex_lock(&smpboot_threads_lock);
93227- list_del(&plug_thread->list);
93228+ pax_list_del(&plug_thread->list);
93229 smpboot_destroy_threads(plug_thread);
93230 mutex_unlock(&smpboot_threads_lock);
93231 put_online_cpus();
93232diff --git a/kernel/softirq.c b/kernel/softirq.c
93233index c497fcd..e8f90a9 100644
93234--- a/kernel/softirq.c
93235+++ b/kernel/softirq.c
93236@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
93237 EXPORT_SYMBOL(irq_stat);
93238 #endif
93239
93240-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
93241+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
93242
93243 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
93244
93245@@ -266,7 +266,7 @@ restart:
93246 kstat_incr_softirqs_this_cpu(vec_nr);
93247
93248 trace_softirq_entry(vec_nr);
93249- h->action(h);
93250+ h->action();
93251 trace_softirq_exit(vec_nr);
93252 if (unlikely(prev_count != preempt_count())) {
93253 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
93254@@ -426,7 +426,7 @@ void __raise_softirq_irqoff(unsigned int nr)
93255 or_softirq_pending(1UL << nr);
93256 }
93257
93258-void open_softirq(int nr, void (*action)(struct softirq_action *))
93259+void __init open_softirq(int nr, void (*action)(void))
93260 {
93261 softirq_vec[nr].action = action;
93262 }
93263@@ -478,7 +478,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
93264 }
93265 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
93266
93267-static void tasklet_action(struct softirq_action *a)
93268+static void tasklet_action(void)
93269 {
93270 struct tasklet_struct *list;
93271
93272@@ -514,7 +514,7 @@ static void tasklet_action(struct softirq_action *a)
93273 }
93274 }
93275
93276-static void tasklet_hi_action(struct softirq_action *a)
93277+static __latent_entropy void tasklet_hi_action(void)
93278 {
93279 struct tasklet_struct *list;
93280
93281@@ -745,7 +745,7 @@ static struct notifier_block cpu_nfb = {
93282 .notifier_call = cpu_callback
93283 };
93284
93285-static struct smp_hotplug_thread softirq_threads = {
93286+static struct smp_hotplug_thread softirq_threads __read_only = {
93287 .store = &ksoftirqd,
93288 .thread_should_run = ksoftirqd_should_run,
93289 .thread_fn = run_ksoftirqd,
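Dropping the unused struct softirq_action * argument is what allows softirq_vec to become __read_only and page-aligned: handlers are plain void (*)(void) values installed once at __init, while the section is still writable, and never modified again, removing a classic function-pointer-overwrite target. The registration pattern after the change, as a minimal sketch:

	struct softirq_action {
		void (*action)(void);
	};

	static struct softirq_action softirq_vec[NR_SOFTIRQS]
		__read_only __aligned(PAGE_SIZE);

	/* __init code runs before the read-only sections are sealed */
	void __init open_softirq(int nr, void (*action)(void))
	{
		softirq_vec[nr].action = action;
	}
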
93290diff --git a/kernel/sys.c b/kernel/sys.c
93291index ea9c881..2194af5 100644
93292--- a/kernel/sys.c
93293+++ b/kernel/sys.c
93294@@ -154,6 +154,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
93295 error = -EACCES;
93296 goto out;
93297 }
93298+
93299+ if (gr_handle_chroot_setpriority(p, niceval)) {
93300+ error = -EACCES;
93301+ goto out;
93302+ }
93303+
93304 no_nice = security_task_setnice(p, niceval);
93305 if (no_nice) {
93306 error = no_nice;
93307@@ -359,6 +365,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
93308 goto error;
93309 }
93310
93311+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
93312+ goto error;
93313+
93314+ if (!gid_eq(new->gid, old->gid)) {
93315+ /* make sure we generate a learn log for what will
93316+ end up being a role transition after a full-learning
93317+ policy is generated.
93318+ CAP_SETGID is required to perform a transition;
93319+ we may not log a CAP_SETGID check above, e.g.
93320+ in the case where new rgid = old egid
93321+ */
93322+ gr_learn_cap(current, new, CAP_SETGID);
93323+ }
93324+
93325 if (rgid != (gid_t) -1 ||
93326 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
93327 new->sgid = new->egid;
93328@@ -394,6 +414,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
93329 old = current_cred();
93330
93331 retval = -EPERM;
93332+
93333+ if (gr_check_group_change(kgid, kgid, kgid))
93334+ goto error;
93335+
93336 if (ns_capable(old->user_ns, CAP_SETGID))
93337 new->gid = new->egid = new->sgid = new->fsgid = kgid;
93338 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
93339@@ -411,7 +435,7 @@ error:
93340 /*
93341 * change the user struct in a credentials set to match the new UID
93342 */
93343-static int set_user(struct cred *new)
93344+int set_user(struct cred *new)
93345 {
93346 struct user_struct *new_user;
93347
93348@@ -491,7 +515,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
93349 goto error;
93350 }
93351
93352+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
93353+ goto error;
93354+
93355 if (!uid_eq(new->uid, old->uid)) {
93356+ /* make sure we generate a learn log for what will
93357+ end up being a role transition after a full-learning
93358+ policy is generated.
93359+ CAP_SETUID is required to perform a transition;
93360+ we may not log a CAP_SETUID check above, e.g.
93361+ in the case where new ruid = old euid
93362+ */
93363+ gr_learn_cap(current, new, CAP_SETUID);
93364 retval = set_user(new);
93365 if (retval < 0)
93366 goto error;
93367@@ -541,6 +576,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
93368 old = current_cred();
93369
93370 retval = -EPERM;
93371+
93372+ if (gr_check_crash_uid(kuid))
93373+ goto error;
93374+ if (gr_check_user_change(kuid, kuid, kuid))
93375+ goto error;
93376+
93377 if (ns_capable(old->user_ns, CAP_SETUID)) {
93378 new->suid = new->uid = kuid;
93379 if (!uid_eq(kuid, old->uid)) {
93380@@ -610,6 +651,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
93381 goto error;
93382 }
93383
93384+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
93385+ goto error;
93386+
93387 if (ruid != (uid_t) -1) {
93388 new->uid = kruid;
93389 if (!uid_eq(kruid, old->uid)) {
93390@@ -694,6 +738,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
93391 goto error;
93392 }
93393
93394+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
93395+ goto error;
93396+
93397 if (rgid != (gid_t) -1)
93398 new->gid = krgid;
93399 if (egid != (gid_t) -1)
93400@@ -758,12 +805,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
93401 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
93402 ns_capable(old->user_ns, CAP_SETUID)) {
93403 if (!uid_eq(kuid, old->fsuid)) {
93404+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
93405+ goto error;
93406+
93407 new->fsuid = kuid;
93408 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
93409 goto change_okay;
93410 }
93411 }
93412
93413+error:
93414 abort_creds(new);
93415 return old_fsuid;
93416
93417@@ -796,12 +847,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
93418 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
93419 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
93420 ns_capable(old->user_ns, CAP_SETGID)) {
93421+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
93422+ goto error;
93423+
93424 if (!gid_eq(kgid, old->fsgid)) {
93425 new->fsgid = kgid;
93426 goto change_okay;
93427 }
93428 }
93429
93430+error:
93431 abort_creds(new);
93432 return old_fsgid;
93433
93434@@ -1178,19 +1233,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
93435 return -EFAULT;
93436
93437 down_read(&uts_sem);
93438- error = __copy_to_user(&name->sysname, &utsname()->sysname,
93439+ error = __copy_to_user(name->sysname, &utsname()->sysname,
93440 __OLD_UTS_LEN);
93441 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
93442- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
93443+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
93444 __OLD_UTS_LEN);
93445 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
93446- error |= __copy_to_user(&name->release, &utsname()->release,
93447+ error |= __copy_to_user(name->release, &utsname()->release,
93448 __OLD_UTS_LEN);
93449 error |= __put_user(0, name->release + __OLD_UTS_LEN);
93450- error |= __copy_to_user(&name->version, &utsname()->version,
93451+ error |= __copy_to_user(name->version, &utsname()->version,
93452 __OLD_UTS_LEN);
93453 error |= __put_user(0, name->version + __OLD_UTS_LEN);
93454- error |= __copy_to_user(&name->machine, &utsname()->machine,
93455+ error |= __copy_to_user(name->machine, &utsname()->machine,
93456 __OLD_UTS_LEN);
93457 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
93458 up_read(&uts_sem);
93459@@ -1391,6 +1446,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
93460 */
93461 new_rlim->rlim_cur = 1;
93462 }
93463+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
93464+ is changed to a lower value. Since tasks can be created by the same
93465+ user in between this limit change and an execve by this task, force
93466+ a recheck only for this task by setting PF_NPROC_EXCEEDED.
93467+ */
93468+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
93469+ tsk->flags |= PF_NPROC_EXCEEDED;
93470 }
93471 if (!retval) {
93472 if (old_rlim)
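PF_NPROC_EXCEEDED defers enforcement: lowering RLIMIT_NPROC cannot fail processes that already exist, so the flag marks this task for a recheck at its next execve(). The consuming side, modelled on the check mainline performs in fs/exec.c (simplified):

	/* in do_execve_common(), before committing to the new image: */
	if ((current->flags & PF_NPROC_EXCEEDED) &&
	    atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
		retval = -EAGAIN;
		goto out_ret;
	}

	/* the limit now fits (or was never exceeded): clear the one-shot flag */
	current->flags &= ~PF_NPROC_EXCEEDED;
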
93473diff --git a/kernel/sysctl.c b/kernel/sysctl.c
93474index 88ea2d6..88acc77 100644
93475--- a/kernel/sysctl.c
93476+++ b/kernel/sysctl.c
93477@@ -94,7 +94,6 @@
93478
93479
93480 #if defined(CONFIG_SYSCTL)
93481-
93482 /* External variables not in a header file. */
93483 extern int max_threads;
93484 extern int suid_dumpable;
93485@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
93486
93487 /* Constants used for minimum and maximum */
93488 #ifdef CONFIG_LOCKUP_DETECTOR
93489-static int sixty = 60;
93490+static int sixty __read_only = 60;
93491 #endif
93492
93493-static int __maybe_unused neg_one = -1;
93494+static int __maybe_unused neg_one __read_only = -1;
93495
93496-static int zero;
93497-static int __maybe_unused one = 1;
93498-static int __maybe_unused two = 2;
93499-static int __maybe_unused four = 4;
93500-static unsigned long one_ul = 1;
93501-static int one_hundred = 100;
93502+static int zero __read_only = 0;
93503+static int __maybe_unused one __read_only = 1;
93504+static int __maybe_unused two __read_only = 2;
93505+static int __maybe_unused three __read_only = 3;
93506+static int __maybe_unused four __read_only = 4;
93507+static unsigned long one_ul __read_only = 1;
93508+static int one_hundred __read_only = 100;
93509 #ifdef CONFIG_PRINTK
93510-static int ten_thousand = 10000;
93511+static int ten_thousand __read_only = 10000;
93512 #endif
93513
93514 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
93515@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
93516 void __user *buffer, size_t *lenp, loff_t *ppos);
93517 #endif
93518
93519-#ifdef CONFIG_PRINTK
93520 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93521 void __user *buffer, size_t *lenp, loff_t *ppos);
93522-#endif
93523
93524 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
93525 void __user *buffer, size_t *lenp, loff_t *ppos);
93526@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
93527
93528 #endif
93529
93530+extern struct ctl_table grsecurity_table[];
93531+
93532 static struct ctl_table kern_table[];
93533 static struct ctl_table vm_table[];
93534 static struct ctl_table fs_table[];
93535@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
93536 int sysctl_legacy_va_layout;
93537 #endif
93538
93539+#ifdef CONFIG_PAX_SOFTMODE
93540+static struct ctl_table pax_table[] = {
93541+ {
93542+ .procname = "softmode",
93543+ .data = &pax_softmode,
93544+ .maxlen = sizeof(unsigned int),
93545+ .mode = 0600,
93546+ .proc_handler = &proc_dointvec,
93547+ },
93548+
93549+ { }
93550+};
93551+#endif
93552+
93553 /* The default sysctl tables: */
93554
93555 static struct ctl_table sysctl_base_table[] = {
93556@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
93557 #endif
93558
93559 static struct ctl_table kern_table[] = {
93560+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
93561+ {
93562+ .procname = "grsecurity",
93563+ .mode = 0500,
93564+ .child = grsecurity_table,
93565+ },
93566+#endif
93567+
93568+#ifdef CONFIG_PAX_SOFTMODE
93569+ {
93570+ .procname = "pax",
93571+ .mode = 0500,
93572+ .child = pax_table,
93573+ },
93574+#endif
93575+
93576 {
93577 .procname = "sched_child_runs_first",
93578 .data = &sysctl_sched_child_runs_first,
93579@@ -649,7 +679,7 @@ static struct ctl_table kern_table[] = {
93580 .data = &modprobe_path,
93581 .maxlen = KMOD_PATH_LEN,
93582 .mode = 0644,
93583- .proc_handler = proc_dostring,
93584+ .proc_handler = proc_dostring_modpriv,
93585 },
93586 {
93587 .procname = "modules_disabled",
93588@@ -816,16 +846,20 @@ static struct ctl_table kern_table[] = {
93589 .extra1 = &zero,
93590 .extra2 = &one,
93591 },
93592+#endif
93593 {
93594 .procname = "kptr_restrict",
93595 .data = &kptr_restrict,
93596 .maxlen = sizeof(int),
93597 .mode = 0644,
93598 .proc_handler = proc_dointvec_minmax_sysadmin,
93599+#ifdef CONFIG_GRKERNSEC_HIDESYM
93600+ .extra1 = &two,
93601+#else
93602 .extra1 = &zero,
93603+#endif
93604 .extra2 = &two,
93605 },
93606-#endif
93607 {
93608 .procname = "ngroups_max",
93609 .data = &ngroups_max,
93610@@ -1072,10 +1106,17 @@ static struct ctl_table kern_table[] = {
93611 */
93612 {
93613 .procname = "perf_event_paranoid",
93614- .data = &sysctl_perf_event_paranoid,
93615- .maxlen = sizeof(sysctl_perf_event_paranoid),
93616+ .data = &sysctl_perf_event_legitimately_concerned,
93617+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
93618 .mode = 0644,
93619- .proc_handler = proc_dointvec,
93620+ /* go ahead, be a hero */
93621+ .proc_handler = proc_dointvec_minmax_sysadmin,
93622+ .extra1 = &neg_one,
93623+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
93624+ .extra2 = &three,
93625+#else
93626+ .extra2 = &two,
93627+#endif
93628 },
93629 {
93630 .procname = "perf_event_mlock_kb",
93631@@ -1340,6 +1381,13 @@ static struct ctl_table vm_table[] = {
93632 .proc_handler = proc_dointvec_minmax,
93633 .extra1 = &zero,
93634 },
93635+ {
93636+ .procname = "heap_stack_gap",
93637+ .data = &sysctl_heap_stack_gap,
93638+ .maxlen = sizeof(sysctl_heap_stack_gap),
93639+ .mode = 0644,
93640+ .proc_handler = proc_doulongvec_minmax,
93641+ },
93642 #else
93643 {
93644 .procname = "nr_trim_pages",
93645@@ -1822,6 +1870,16 @@ int proc_dostring(struct ctl_table *table, int write,
93646 (char __user *)buffer, lenp, ppos);
93647 }
93648
93649+int proc_dostring_modpriv(struct ctl_table *table, int write,
93650+ void __user *buffer, size_t *lenp, loff_t *ppos)
93651+{
93652+ if (write && !capable(CAP_SYS_MODULE))
93653+ return -EPERM;
93654+
93655+ return _proc_do_string(table->data, table->maxlen, write,
93656+ buffer, lenp, ppos);
93657+}
93658+
93659 static size_t proc_skip_spaces(char **buf)
93660 {
93661 size_t ret;
93662@@ -1927,6 +1985,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
93663 len = strlen(tmp);
93664 if (len > *size)
93665 len = *size;
93666+ if (len > sizeof(tmp))
93667+ len = sizeof(tmp);
93668 if (copy_to_user(*buf, tmp, len))
93669 return -EFAULT;
93670 *size -= len;
93671@@ -2104,7 +2164,7 @@ int proc_dointvec(struct ctl_table *table, int write,
93672 static int proc_taint(struct ctl_table *table, int write,
93673 void __user *buffer, size_t *lenp, loff_t *ppos)
93674 {
93675- struct ctl_table t;
93676+ ctl_table_no_const t;
93677 unsigned long tmptaint = get_taint();
93678 int err;
93679
93680@@ -2132,7 +2192,6 @@ static int proc_taint(struct ctl_table *table, int write,
93681 return err;
93682 }
93683
93684-#ifdef CONFIG_PRINTK
93685 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93686 void __user *buffer, size_t *lenp, loff_t *ppos)
93687 {
93688@@ -2141,7 +2200,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93689
93690 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
93691 }
93692-#endif
93693
93694 struct do_proc_dointvec_minmax_conv_param {
93695 int *min;
93696@@ -2701,6 +2759,12 @@ int proc_dostring(struct ctl_table *table, int write,
93697 return -ENOSYS;
93698 }
93699
93700+int proc_dostring_modpriv(struct ctl_table *table, int write,
93701+ void __user *buffer, size_t *lenp, loff_t *ppos)
93702+{
93703+ return -ENOSYS;
93704+}
93705+
93706 int proc_dointvec(struct ctl_table *table, int write,
93707 void __user *buffer, size_t *lenp, loff_t *ppos)
93708 {
93709@@ -2757,5 +2821,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
93710 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
93711 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
93712 EXPORT_SYMBOL(proc_dostring);
93713+EXPORT_SYMBOL(proc_dostring_modpriv);
93714 EXPORT_SYMBOL(proc_doulongvec_minmax);
93715 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
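proc_dostring_modpriv is a drop-in replacement for proc_dostring that rejects writes from any context lacking CAP_SYS_MODULE; above it guards /proc/sys/kernel/modprobe, which stays world-readable but can only be retargeted by module-loading-privileged root. Wiring it up looks like any other handler:

	static struct ctl_table example_table[] = {
		{
			.procname	= "modprobe",
			.data		= &modprobe_path,
			.maxlen		= KMOD_PATH_LEN,
			.mode		= 0644,			 /* readable as before */
			.proc_handler	= proc_dostring_modpriv, /* CAP_SYS_MODULE to write */
		},
		{ }
	};
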
93716diff --git a/kernel/taskstats.c b/kernel/taskstats.c
93717index 670fff8..a247812 100644
93718--- a/kernel/taskstats.c
93719+++ b/kernel/taskstats.c
93720@@ -28,9 +28,12 @@
93721 #include <linux/fs.h>
93722 #include <linux/file.h>
93723 #include <linux/pid_namespace.h>
93724+#include <linux/grsecurity.h>
93725 #include <net/genetlink.h>
93726 #include <linux/atomic.h>
93727
93728+extern int gr_is_taskstats_denied(int pid);
93729+
93730 /*
93731 * Maximum length of a cpumask that can be specified in
93732 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
93733@@ -576,6 +579,9 @@ err:
93734
93735 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
93736 {
93737+ if (gr_is_taskstats_denied(current->pid))
93738+ return -EACCES;
93739+
93740 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
93741 return cmd_attr_register_cpumask(info);
93742 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
93743diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
93744index a7077d3..dd48a49 100644
93745--- a/kernel/time/alarmtimer.c
93746+++ b/kernel/time/alarmtimer.c
93747@@ -823,7 +823,7 @@ static int __init alarmtimer_init(void)
93748 struct platform_device *pdev;
93749 int error = 0;
93750 int i;
93751- struct k_clock alarm_clock = {
93752+ static struct k_clock alarm_clock = {
93753 .clock_getres = alarm_clock_getres,
93754 .clock_get = alarm_clock_get,
93755 .timer_create = alarm_timer_create,
93756diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
93757index d8c724c..6b331a4 100644
93758--- a/kernel/time/hrtimer.c
93759+++ b/kernel/time/hrtimer.c
93760@@ -1399,7 +1399,7 @@ void hrtimer_peek_ahead_timers(void)
93761 local_irq_restore(flags);
93762 }
93763
93764-static void run_hrtimer_softirq(struct softirq_action *h)
93765+static __latent_entropy void run_hrtimer_softirq(void)
93766 {
93767 hrtimer_peek_ahead_timers();
93768 }
93769diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
93770index a16b678..8c5bd9d 100644
93771--- a/kernel/time/posix-cpu-timers.c
93772+++ b/kernel/time/posix-cpu-timers.c
93773@@ -1450,14 +1450,14 @@ struct k_clock clock_posix_cpu = {
93774
93775 static __init int init_posix_cpu_timers(void)
93776 {
93777- struct k_clock process = {
93778+ static struct k_clock process = {
93779 .clock_getres = process_cpu_clock_getres,
93780 .clock_get = process_cpu_clock_get,
93781 .timer_create = process_cpu_timer_create,
93782 .nsleep = process_cpu_nsleep,
93783 .nsleep_restart = process_cpu_nsleep_restart,
93784 };
93785- struct k_clock thread = {
93786+ static struct k_clock thread = {
93787 .clock_getres = thread_cpu_clock_getres,
93788 .clock_get = thread_cpu_clock_get,
93789 .timer_create = thread_cpu_timer_create,
93790diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
93791index 31ea01f..7fc61ef 100644
93792--- a/kernel/time/posix-timers.c
93793+++ b/kernel/time/posix-timers.c
93794@@ -43,6 +43,7 @@
93795 #include <linux/hash.h>
93796 #include <linux/posix-clock.h>
93797 #include <linux/posix-timers.h>
93798+#include <linux/grsecurity.h>
93799 #include <linux/syscalls.h>
93800 #include <linux/wait.h>
93801 #include <linux/workqueue.h>
93802@@ -124,7 +125,7 @@ static DEFINE_SPINLOCK(hash_lock);
93803 * which we beg off on and pass to do_sys_settimeofday().
93804 */
93805
93806-static struct k_clock posix_clocks[MAX_CLOCKS];
93807+static struct k_clock *posix_clocks[MAX_CLOCKS];
93808
93809 /*
93810 * These ones are defined below.
93811@@ -277,7 +278,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
93812 */
93813 static __init int init_posix_timers(void)
93814 {
93815- struct k_clock clock_realtime = {
93816+ static struct k_clock clock_realtime = {
93817 .clock_getres = hrtimer_get_res,
93818 .clock_get = posix_clock_realtime_get,
93819 .clock_set = posix_clock_realtime_set,
93820@@ -289,7 +290,7 @@ static __init int init_posix_timers(void)
93821 .timer_get = common_timer_get,
93822 .timer_del = common_timer_del,
93823 };
93824- struct k_clock clock_monotonic = {
93825+ static struct k_clock clock_monotonic = {
93826 .clock_getres = hrtimer_get_res,
93827 .clock_get = posix_ktime_get_ts,
93828 .nsleep = common_nsleep,
93829@@ -299,19 +300,19 @@ static __init int init_posix_timers(void)
93830 .timer_get = common_timer_get,
93831 .timer_del = common_timer_del,
93832 };
93833- struct k_clock clock_monotonic_raw = {
93834+ static struct k_clock clock_monotonic_raw = {
93835 .clock_getres = hrtimer_get_res,
93836 .clock_get = posix_get_monotonic_raw,
93837 };
93838- struct k_clock clock_realtime_coarse = {
93839+ static struct k_clock clock_realtime_coarse = {
93840 .clock_getres = posix_get_coarse_res,
93841 .clock_get = posix_get_realtime_coarse,
93842 };
93843- struct k_clock clock_monotonic_coarse = {
93844+ static struct k_clock clock_monotonic_coarse = {
93845 .clock_getres = posix_get_coarse_res,
93846 .clock_get = posix_get_monotonic_coarse,
93847 };
93848- struct k_clock clock_tai = {
93849+ static struct k_clock clock_tai = {
93850 .clock_getres = hrtimer_get_res,
93851 .clock_get = posix_get_tai,
93852 .nsleep = common_nsleep,
93853@@ -321,7 +322,7 @@ static __init int init_posix_timers(void)
93854 .timer_get = common_timer_get,
93855 .timer_del = common_timer_del,
93856 };
93857- struct k_clock clock_boottime = {
93858+ static struct k_clock clock_boottime = {
93859 .clock_getres = hrtimer_get_res,
93860 .clock_get = posix_get_boottime,
93861 .nsleep = common_nsleep,
93862@@ -533,7 +534,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
93863 return;
93864 }
93865
93866- posix_clocks[clock_id] = *new_clock;
93867+ posix_clocks[clock_id] = new_clock;
93868 }
93869 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
93870
93871@@ -579,9 +580,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
93872 return (id & CLOCKFD_MASK) == CLOCKFD ?
93873 &clock_posix_dynamic : &clock_posix_cpu;
93874
93875- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
93876+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
93877 return NULL;
93878- return &posix_clocks[id];
93879+ return posix_clocks[id];
93880 }
93881
93882 static int common_timer_create(struct k_itimer *new_timer)
93883@@ -599,7 +600,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
93884 struct k_clock *kc = clockid_to_kclock(which_clock);
93885 struct k_itimer *new_timer;
93886 int error, new_timer_id;
93887- sigevent_t event;
93888+ sigevent_t event = { };
93889 int it_id_set = IT_ID_NOT_SET;
93890
93891 if (!kc)
93892@@ -1014,6 +1015,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
93893 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
93894 return -EFAULT;
93895
93896+ /* only the CLOCK_REALTIME clock can be set; all other clocks
93897+ have their clock_set fptr set to a nosettime dummy function.
93898+ CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
93899+ call common_clock_set, which calls do_sys_settimeofday, which
93900+ we hook
93901+ */
93902+
93903 return kc->clock_set(which_clock, &new_tp);
93904 }
93905
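Because posix_clocks[] now holds pointers rather than copies, every k_clock passed to posix_timers_register_clock() must outlive its init function, which is why each initializer above gained static; an automatic variable would leave a dangling pointer in the table. A minimal sketch of the hazard the keyword prevents:

	static __init int register_example_clock(void)
	{
		/* without 'static' this would live on the init stack and
		 * dangle as soon as this function returns */
		static struct k_clock clock_example = {
			.clock_getres	= hrtimer_get_res,
			.clock_get	= posix_get_monotonic_raw,
		};

		posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_example);
		return 0;
	}
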
93906diff --git a/kernel/time/time.c b/kernel/time/time.c
93907index 2c85b77..6530536 100644
93908--- a/kernel/time/time.c
93909+++ b/kernel/time/time.c
93910@@ -173,6 +173,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
93911 return error;
93912
93913 if (tz) {
93914+ /* do_settimeofday(), called below when tv is set, already logs;
93915+ only log here for a timezone-only change */
93916+ if (!tv)
93917+ gr_log_timechange();
93918+
93919 sys_tz = *tz;
93920 update_vsyscall_tz();
93921 if (firsttime) {
93922diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
93923index 6a93185..288c331 100644
93924--- a/kernel/time/timekeeping.c
93925+++ b/kernel/time/timekeeping.c
93926@@ -15,6 +15,7 @@
93927 #include <linux/init.h>
93928 #include <linux/mm.h>
93929 #include <linux/sched.h>
93930+#include <linux/grsecurity.h>
93931 #include <linux/syscore_ops.h>
93932 #include <linux/clocksource.h>
93933 #include <linux/jiffies.h>
93934@@ -775,6 +776,8 @@ int do_settimeofday64(const struct timespec64 *ts)
93935 if (!timespec64_valid_strict(ts))
93936 return -EINVAL;
93937
93938+ gr_log_timechange();
93939+
93940 raw_spin_lock_irqsave(&timekeeper_lock, flags);
93941 write_seqcount_begin(&tk_core.seq);
93942
93943diff --git a/kernel/time/timer.c b/kernel/time/timer.c
93944index 2d3f5c5..7ed7dc5 100644
93945--- a/kernel/time/timer.c
93946+++ b/kernel/time/timer.c
93947@@ -1393,7 +1393,7 @@ void update_process_times(int user_tick)
93948 /*
93949 * This function runs timers and the timer-tq in bottom half context.
93950 */
93951-static void run_timer_softirq(struct softirq_action *h)
93952+static __latent_entropy void run_timer_softirq(void)
93953 {
93954 struct tvec_base *base = __this_cpu_read(tvec_bases);
93955
93956@@ -1456,7 +1456,7 @@ static void process_timeout(unsigned long __data)
93957 *
93958 * In all cases the return value is guaranteed to be non-negative.
93959 */
93960-signed long __sched schedule_timeout(signed long timeout)
93961+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
93962 {
93963 struct timer_list timer;
93964 unsigned long expire;
93965diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
93966index 61ed862..3b52c65 100644
93967--- a/kernel/time/timer_list.c
93968+++ b/kernel/time/timer_list.c
93969@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
93970
93971 static void print_name_offset(struct seq_file *m, void *sym)
93972 {
93973+#ifdef CONFIG_GRKERNSEC_HIDESYM
93974+ SEQ_printf(m, "<%p>", NULL);
93975+#else
93976 char symname[KSYM_NAME_LEN];
93977
93978 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
93979 SEQ_printf(m, "<%pK>", sym);
93980 else
93981 SEQ_printf(m, "%s", symname);
93982+#endif
93983 }
93984
93985 static void
93986@@ -119,7 +123,11 @@ next_one:
93987 static void
93988 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
93989 {
93990+#ifdef CONFIG_GRKERNSEC_HIDESYM
93991+ SEQ_printf(m, " .base: %p\n", NULL);
93992+#else
93993 SEQ_printf(m, " .base: %pK\n", base);
93994+#endif
93995 SEQ_printf(m, " .index: %d\n",
93996 base->index);
93997 SEQ_printf(m, " .resolution: %Lu nsecs\n",
93998@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
93999 {
94000 struct proc_dir_entry *pe;
94001
94002+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94003+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
94004+#else
94005 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
94006+#endif
94007 if (!pe)
94008 return -ENOMEM;
94009 return 0;
94010diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
94011index 1fb08f2..ca4bb1e 100644
94012--- a/kernel/time/timer_stats.c
94013+++ b/kernel/time/timer_stats.c
94014@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
94015 static unsigned long nr_entries;
94016 static struct entry entries[MAX_ENTRIES];
94017
94018-static atomic_t overflow_count;
94019+static atomic_unchecked_t overflow_count;
94020
94021 /*
94022 * The entries are in a hash-table, for fast lookup:
94023@@ -140,7 +140,7 @@ static void reset_entries(void)
94024 nr_entries = 0;
94025 memset(entries, 0, sizeof(entries));
94026 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
94027- atomic_set(&overflow_count, 0);
94028+ atomic_set_unchecked(&overflow_count, 0);
94029 }
94030
94031 static struct entry *alloc_entry(void)
94032@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
94033 if (likely(entry))
94034 entry->count++;
94035 else
94036- atomic_inc(&overflow_count);
94037+ atomic_inc_unchecked(&overflow_count);
94038
94039 out_unlock:
94040 raw_spin_unlock_irqrestore(lock, flags);
94041@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
94042
94043 static void print_name_offset(struct seq_file *m, unsigned long addr)
94044 {
94045+#ifdef CONFIG_GRKERNSEC_HIDESYM
94046+ seq_printf(m, "<%p>", NULL);
94047+#else
94048 char symname[KSYM_NAME_LEN];
94049
94050 if (lookup_symbol_name(addr, symname) < 0)
94051- seq_printf(m, "<%p>", (void *)addr);
94052+ seq_printf(m, "<%pK>", (void *)addr);
94053 else
94054 seq_printf(m, "%s", symname);
94055+#endif
94056 }
94057
94058 static int tstats_show(struct seq_file *m, void *v)
94059@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
94060
94061 seq_puts(m, "Timer Stats Version: v0.3\n");
94062 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
94063- if (atomic_read(&overflow_count))
94064- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
94065+ if (atomic_read_unchecked(&overflow_count))
94066+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
94067 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
94068
94069 for (i = 0; i < nr_entries; i++) {
94070@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
94071 {
94072 struct proc_dir_entry *pe;
94073
94074+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94075+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
94076+#else
94077 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
94078+#endif
94079 if (!pe)
94080 return -ENOMEM;
94081 return 0;
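timer_list.c and timer_stats.c get the same two-layer treatment: %p becomes %pK, whose output is censored according to the kptr_restrict sysctl, and under GRKERNSEC_HIDESYM the symbol lookup is skipped and a NULL pointer is printed unconditionally. Summarised for this kernel generation (hashing of plain %p output arrived much later):

	static void print_name_offset(struct seq_file *m, unsigned long addr)
	{
	#ifdef CONFIG_GRKERNSEC_HIDESYM
		seq_printf(m, "<%p>", NULL);	/* never leak, regardless of sysctls */
	#else
		/* %pK: real address at kptr_restrict == 0; zeroed at 1 for
		 * readers without CAP_SYSLOG; always zeroed at 2 */
		seq_printf(m, "<%pK>", (void *)addr);
	#endif
	}
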
94082diff --git a/kernel/torture.c b/kernel/torture.c
94083index dd70993..0bf694b 100644
94084--- a/kernel/torture.c
94085+++ b/kernel/torture.c
94086@@ -482,7 +482,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
94087 mutex_lock(&fullstop_mutex);
94088 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
94089 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
94090- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
94091+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
94092 } else {
94093 pr_warn("Concurrent rmmod and shutdown illegal!\n");
94094 }
94095@@ -549,14 +549,14 @@ static int torture_stutter(void *arg)
94096 if (!torture_must_stop()) {
94097 if (stutter > 1) {
94098 schedule_timeout_interruptible(stutter - 1);
94099- ACCESS_ONCE(stutter_pause_test) = 2;
94100+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
94101 }
94102 schedule_timeout_interruptible(1);
94103- ACCESS_ONCE(stutter_pause_test) = 1;
94104+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
94105 }
94106 if (!torture_must_stop())
94107 schedule_timeout_interruptible(stutter);
94108- ACCESS_ONCE(stutter_pause_test) = 0;
94109+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
94110 torture_shutdown_absorb("torture_stutter");
94111 } while (!torture_must_stop());
94112 torture_kthread_stopping("torture_stutter");
94113@@ -648,7 +648,7 @@ bool torture_cleanup_begin(void)
94114 schedule_timeout_uninterruptible(10);
94115 return true;
94116 }
94117- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
94118+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
94119 mutex_unlock(&fullstop_mutex);
94120 torture_shutdown_cleanup();
94121 torture_shuffle_cleanup();
94122diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
94123index 483cecf..ac46091 100644
94124--- a/kernel/trace/blktrace.c
94125+++ b/kernel/trace/blktrace.c
94126@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
94127 struct blk_trace *bt = filp->private_data;
94128 char buf[16];
94129
94130- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
94131+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
94132
94133 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
94134 }
94135@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
94136 return 1;
94137
94138 bt = buf->chan->private_data;
94139- atomic_inc(&bt->dropped);
94140+ atomic_inc_unchecked(&bt->dropped);
94141 return 0;
94142 }
94143
94144@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
94145
94146 bt->dir = dir;
94147 bt->dev = dev;
94148- atomic_set(&bt->dropped, 0);
94149+ atomic_set_unchecked(&bt->dropped, 0);
94150 INIT_LIST_HEAD(&bt->running_list);
94151
94152 ret = -EIO;
94153diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
94154index af5bffd..57664b8 100644
94155--- a/kernel/trace/ftrace.c
94156+++ b/kernel/trace/ftrace.c
94157@@ -2382,12 +2382,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
94158 if (unlikely(ftrace_disabled))
94159 return 0;
94160
94161+ ret = ftrace_arch_code_modify_prepare();
94162+ FTRACE_WARN_ON(ret);
94163+ if (ret)
94164+ return 0;
94165+
94166 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
94167+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
94168 if (ret) {
94169 ftrace_bug(ret, rec);
94170- return 0;
94171 }
94172- return 1;
94173+ return ret ? 0 : 1;
94174 }
94175
94176 /*
94177@@ -4776,8 +4781,10 @@ static int ftrace_process_locs(struct module *mod,
94178 if (!count)
94179 return 0;
94180
94181+ pax_open_kernel();
94182 sort(start, count, sizeof(*start),
94183 ftrace_cmp_ips, ftrace_swap_ips);
94184+ pax_close_kernel();
94185
94186 start_pg = ftrace_allocate_pages(count);
94187 if (!start_pg)
94188@@ -5653,7 +5660,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
94189
94190 if (t->ret_stack == NULL) {
94191 atomic_set(&t->tracing_graph_pause, 0);
94192- atomic_set(&t->trace_overrun, 0);
94193+ atomic_set_unchecked(&t->trace_overrun, 0);
94194 t->curr_ret_stack = -1;
94195 /* Make sure the tasks see the -1 first: */
94196 smp_wmb();
94197@@ -5876,7 +5883,7 @@ static void
94198 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
94199 {
94200 atomic_set(&t->tracing_graph_pause, 0);
94201- atomic_set(&t->trace_overrun, 0);
94202+ atomic_set_unchecked(&t->trace_overrun, 0);
94203 t->ftrace_timestamp = 0;
94204 /* make curr_ret_stack visible before we add the ret_stack */
94205 smp_wmb();
94206diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
94207index d2e151c..1498973 100644
94208--- a/kernel/trace/ring_buffer.c
94209+++ b/kernel/trace/ring_buffer.c
94210@@ -350,9 +350,9 @@ struct buffer_data_page {
94211 */
94212 struct buffer_page {
94213 struct list_head list; /* list of buffer pages */
94214- local_t write; /* index for next write */
94215+ local_unchecked_t write; /* index for next write */
94216 unsigned read; /* index for next read */
94217- local_t entries; /* entries on this page */
94218+ local_unchecked_t entries; /* entries on this page */
94219 unsigned long real_end; /* real end of data */
94220 struct buffer_data_page *page; /* Actual data page */
94221 };
94222@@ -473,11 +473,11 @@ struct ring_buffer_per_cpu {
94223 unsigned long last_overrun;
94224 local_t entries_bytes;
94225 local_t entries;
94226- local_t overrun;
94227- local_t commit_overrun;
94228- local_t dropped_events;
94229+ local_unchecked_t overrun;
94230+ local_unchecked_t commit_overrun;
94231+ local_unchecked_t dropped_events;
94232 local_t committing;
94233- local_t commits;
94234+ local_unchecked_t commits;
94235 unsigned long read;
94236 unsigned long read_bytes;
94237 u64 write_stamp;
94238@@ -1047,8 +1047,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
94239 *
94240 * We add a counter to the write field to denote this.
94241 */
94242- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
94243- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
94244+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
94245+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
94246
94247 /*
94248 * Just make sure we have seen our old_write and synchronize
94249@@ -1076,8 +1076,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
94250 * cmpxchg to only update if an interrupt did not already
94251 * do it for us. If the cmpxchg fails, we don't care.
94252 */
94253- (void)local_cmpxchg(&next_page->write, old_write, val);
94254- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
94255+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
94256+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
94257
94258 /*
94259 * No need to worry about races with clearing out the commit.
94260@@ -1445,12 +1445,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
94261
94262 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
94263 {
94264- return local_read(&bpage->entries) & RB_WRITE_MASK;
94265+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
94266 }
94267
94268 static inline unsigned long rb_page_write(struct buffer_page *bpage)
94269 {
94270- return local_read(&bpage->write) & RB_WRITE_MASK;
94271+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
94272 }
94273
94274 static int
94275@@ -1545,7 +1545,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
94276 * bytes consumed in ring buffer from here.
94277 * Increment overrun to account for the lost events.
94278 */
94279- local_add(page_entries, &cpu_buffer->overrun);
94280+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
94281 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
94282 }
94283
94284@@ -2107,7 +2107,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
94285 * it is our responsibility to update
94286 * the counters.
94287 */
94288- local_add(entries, &cpu_buffer->overrun);
94289+ local_add_unchecked(entries, &cpu_buffer->overrun);
94290 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
94291
94292 /*
94293@@ -2257,7 +2257,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94294 if (tail == BUF_PAGE_SIZE)
94295 tail_page->real_end = 0;
94296
94297- local_sub(length, &tail_page->write);
94298+ local_sub_unchecked(length, &tail_page->write);
94299 return;
94300 }
94301
94302@@ -2292,7 +2292,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94303 rb_event_set_padding(event);
94304
94305 /* Set the write back to the previous setting */
94306- local_sub(length, &tail_page->write);
94307+ local_sub_unchecked(length, &tail_page->write);
94308 return;
94309 }
94310
94311@@ -2304,7 +2304,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94312
94313 /* Set write to end of buffer */
94314 length = (tail + length) - BUF_PAGE_SIZE;
94315- local_sub(length, &tail_page->write);
94316+ local_sub_unchecked(length, &tail_page->write);
94317 }
94318
94319 /*
94320@@ -2330,7 +2330,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94321 * about it.
94322 */
94323 if (unlikely(next_page == commit_page)) {
94324- local_inc(&cpu_buffer->commit_overrun);
94325+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94326 goto out_reset;
94327 }
94328
94329@@ -2360,7 +2360,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94330 * this is easy, just stop here.
94331 */
94332 if (!(buffer->flags & RB_FL_OVERWRITE)) {
94333- local_inc(&cpu_buffer->dropped_events);
94334+ local_inc_unchecked(&cpu_buffer->dropped_events);
94335 goto out_reset;
94336 }
94337
94338@@ -2386,7 +2386,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94339 cpu_buffer->tail_page) &&
94340 (cpu_buffer->commit_page ==
94341 cpu_buffer->reader_page))) {
94342- local_inc(&cpu_buffer->commit_overrun);
94343+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94344 goto out_reset;
94345 }
94346 }
94347@@ -2434,7 +2434,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94348 length += RB_LEN_TIME_EXTEND;
94349
94350 tail_page = cpu_buffer->tail_page;
94351- write = local_add_return(length, &tail_page->write);
94352+ write = local_add_return_unchecked(length, &tail_page->write);
94353
94354 /* set write to only the index of the write */
94355 write &= RB_WRITE_MASK;
94356@@ -2458,7 +2458,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94357 kmemcheck_annotate_bitfield(event, bitfield);
94358 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
94359
94360- local_inc(&tail_page->entries);
94361+ local_inc_unchecked(&tail_page->entries);
94362
94363 /*
94364 * If this is the first commit on the page, then update
94365@@ -2491,7 +2491,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94366
94367 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
94368 unsigned long write_mask =
94369- local_read(&bpage->write) & ~RB_WRITE_MASK;
94370+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
94371 unsigned long event_length = rb_event_length(event);
94372 /*
94373 * This is on the tail page. It is possible that
94374@@ -2501,7 +2501,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94375 */
94376 old_index += write_mask;
94377 new_index += write_mask;
94378- index = local_cmpxchg(&bpage->write, old_index, new_index);
94379+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
94380 if (index == old_index) {
94381 /* update counters */
94382 local_sub(event_length, &cpu_buffer->entries_bytes);
94383@@ -2516,7 +2516,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94384 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
94385 {
94386 local_inc(&cpu_buffer->committing);
94387- local_inc(&cpu_buffer->commits);
94388+ local_inc_unchecked(&cpu_buffer->commits);
94389 }
94390
94391 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
94392@@ -2528,7 +2528,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
94393 return;
94394
94395 again:
94396- commits = local_read(&cpu_buffer->commits);
94397+ commits = local_read_unchecked(&cpu_buffer->commits);
94398 /* synchronize with interrupts */
94399 barrier();
94400 if (local_read(&cpu_buffer->committing) == 1)
94401@@ -2544,7 +2544,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
94402 * updating of the commit page and the clearing of the
94403 * committing counter.
94404 */
94405- if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
94406+ if (unlikely(local_read_unchecked(&cpu_buffer->commits) != commits) &&
94407 !local_read(&cpu_buffer->committing)) {
94408 local_inc(&cpu_buffer->committing);
94409 goto again;
94410@@ -2574,7 +2574,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
94411 barrier();
94412 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
94413 local_dec(&cpu_buffer->committing);
94414- local_dec(&cpu_buffer->commits);
94415+ local_dec_unchecked(&cpu_buffer->commits);
94416 return NULL;
94417 }
94418 #endif
94419@@ -2904,7 +2904,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94420
94421 /* Do the likely case first */
94422 if (likely(bpage->page == (void *)addr)) {
94423- local_dec(&bpage->entries);
94424+ local_dec_unchecked(&bpage->entries);
94425 return;
94426 }
94427
94428@@ -2916,7 +2916,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94429 start = bpage;
94430 do {
94431 if (bpage->page == (void *)addr) {
94432- local_dec(&bpage->entries);
94433+ local_dec_unchecked(&bpage->entries);
94434 return;
94435 }
94436 rb_inc_page(cpu_buffer, &bpage);
94437@@ -3200,7 +3200,7 @@ static inline unsigned long
94438 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
94439 {
94440 return local_read(&cpu_buffer->entries) -
94441- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
94442+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
94443 }
94444
94445 /**
94446@@ -3289,7 +3289,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
94447 return 0;
94448
94449 cpu_buffer = buffer->buffers[cpu];
94450- ret = local_read(&cpu_buffer->overrun);
94451+ ret = local_read_unchecked(&cpu_buffer->overrun);
94452
94453 return ret;
94454 }
94455@@ -3312,7 +3312,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
94456 return 0;
94457
94458 cpu_buffer = buffer->buffers[cpu];
94459- ret = local_read(&cpu_buffer->commit_overrun);
94460+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
94461
94462 return ret;
94463 }
94464@@ -3334,7 +3334,7 @@ ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
94465 return 0;
94466
94467 cpu_buffer = buffer->buffers[cpu];
94468- ret = local_read(&cpu_buffer->dropped_events);
94469+ ret = local_read_unchecked(&cpu_buffer->dropped_events);
94470
94471 return ret;
94472 }
94473@@ -3397,7 +3397,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
94474 /* if you care about this being correct, lock the buffer */
94475 for_each_buffer_cpu(buffer, cpu) {
94476 cpu_buffer = buffer->buffers[cpu];
94477- overruns += local_read(&cpu_buffer->overrun);
94478+ overruns += local_read_unchecked(&cpu_buffer->overrun);
94479 }
94480
94481 return overruns;
94482@@ -3568,8 +3568,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94483 /*
94484 * Reset the reader page to size zero.
94485 */
94486- local_set(&cpu_buffer->reader_page->write, 0);
94487- local_set(&cpu_buffer->reader_page->entries, 0);
94488+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94489+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94490 local_set(&cpu_buffer->reader_page->page->commit, 0);
94491 cpu_buffer->reader_page->real_end = 0;
94492
94493@@ -3603,7 +3603,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94494 * want to compare with the last_overrun.
94495 */
94496 smp_mb();
94497- overwrite = local_read(&(cpu_buffer->overrun));
94498+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
94499
94500 /*
94501 * Here's the tricky part.
94502@@ -4175,8 +4175,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94503
94504 cpu_buffer->head_page
94505 = list_entry(cpu_buffer->pages, struct buffer_page, list);
94506- local_set(&cpu_buffer->head_page->write, 0);
94507- local_set(&cpu_buffer->head_page->entries, 0);
94508+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
94509+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
94510 local_set(&cpu_buffer->head_page->page->commit, 0);
94511
94512 cpu_buffer->head_page->read = 0;
94513@@ -4186,18 +4186,18 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94514
94515 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
94516 INIT_LIST_HEAD(&cpu_buffer->new_pages);
94517- local_set(&cpu_buffer->reader_page->write, 0);
94518- local_set(&cpu_buffer->reader_page->entries, 0);
94519+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94520+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94521 local_set(&cpu_buffer->reader_page->page->commit, 0);
94522 cpu_buffer->reader_page->read = 0;
94523
94524 local_set(&cpu_buffer->entries_bytes, 0);
94525- local_set(&cpu_buffer->overrun, 0);
94526- local_set(&cpu_buffer->commit_overrun, 0);
94527- local_set(&cpu_buffer->dropped_events, 0);
94528+ local_set_unchecked(&cpu_buffer->overrun, 0);
94529+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
94530+ local_set_unchecked(&cpu_buffer->dropped_events, 0);
94531 local_set(&cpu_buffer->entries, 0);
94532 local_set(&cpu_buffer->committing, 0);
94533- local_set(&cpu_buffer->commits, 0);
94534+ local_set_unchecked(&cpu_buffer->commits, 0);
94535 cpu_buffer->read = 0;
94536 cpu_buffer->read_bytes = 0;
94537
94538@@ -4598,8 +4598,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
94539 rb_init_page(bpage);
94540 bpage = reader->page;
94541 reader->page = *data_page;
94542- local_set(&reader->write, 0);
94543- local_set(&reader->entries, 0);
94544+ local_set_unchecked(&reader->write, 0);
94545+ local_set_unchecked(&reader->entries, 0);
94546 reader->read = 0;
94547 *data_page = bpage;
94548
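/*
 * A minimal userspace sketch of the local_*_unchecked pattern the
 * ring-buffer hunks above rely on: under the PaX REFCOUNT hardening,
 * the plain local_t/atomic_t operations trap on overflow, so pure
 * statistics counters that may legitimately wrap (overrun, commits,
 * entries, ...) are switched to *_unchecked variants that share the
 * layout but skip the overflow check. The type and helpers below are
 * assumptions for illustration, not the kernel definitions.
 */
#include <stdio.h>

typedef struct { volatile long counter; } local_unchecked_t;

static long local_read_unchecked(const local_unchecked_t *l)
{
	return l->counter;
}

static void local_inc_unchecked(local_unchecked_t *l)
{
	/* GCC/Clang builtin; the kernel uses per-arch local ops instead */
	__atomic_fetch_add(&l->counter, 1, __ATOMIC_RELAXED);
}

int main(void)
{
	local_unchecked_t overrun = { 0 };

	local_inc_unchecked(&overrun);           /* no overflow trap here */
	printf("overrun=%ld\n", local_read_unchecked(&overrun));
	return 0;
}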
94549diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
94550index 361a827..6a319a3 100644
94551--- a/kernel/trace/trace.c
94552+++ b/kernel/trace/trace.c
94553@@ -3499,7 +3499,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
94554 return 0;
94555 }
94556
94557-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
94558+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
94559 {
94560 /* do nothing if flag is already set */
94561 if (!!(trace_flags & mask) == !!enabled)
94562diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
94563index 8de48ba..3e5b4fa 100644
94564--- a/kernel/trace/trace.h
94565+++ b/kernel/trace/trace.h
94566@@ -1271,7 +1271,7 @@ extern const char *__stop___tracepoint_str[];
94567 void trace_printk_init_buffers(void);
94568 void trace_printk_start_comm(void);
94569 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
94570-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
94571+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
94572
94573 /*
94574 * Normal trace_printk() and friends allocates special buffers
94575diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
94576index 57b67b1..66082a9 100644
94577--- a/kernel/trace/trace_clock.c
94578+++ b/kernel/trace/trace_clock.c
94579@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
94580 return now;
94581 }
94582
94583-static atomic64_t trace_counter;
94584+static atomic64_unchecked_t trace_counter;
94585
94586 /*
94587 * trace_clock_counter(): simply an atomic counter.
94588@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
94589 */
94590 u64 notrace trace_clock_counter(void)
94591 {
94592- return atomic64_add_return(1, &trace_counter);
94593+ return atomic64_inc_return_unchecked(&trace_counter);
94594 }
94595diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
94596index b03a0ea..2df3168 100644
94597--- a/kernel/trace/trace_events.c
94598+++ b/kernel/trace/trace_events.c
94599@@ -1755,7 +1755,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
94600 return 0;
94601 }
94602
94603-struct ftrace_module_file_ops;
94604 static void __add_event_to_tracers(struct ftrace_event_call *call);
94605
94606 /* Add an additional event_call dynamically */
94607diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
94608index ba47600..d0e47fa 100644
94609--- a/kernel/trace/trace_functions_graph.c
94610+++ b/kernel/trace/trace_functions_graph.c
94611@@ -133,7 +133,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
94612
94613 /* The return trace stack is full */
94614 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
94615- atomic_inc(&current->trace_overrun);
94616+ atomic_inc_unchecked(&current->trace_overrun);
94617 return -EBUSY;
94618 }
94619
94620@@ -230,7 +230,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
94621 *ret = current->ret_stack[index].ret;
94622 trace->func = current->ret_stack[index].func;
94623 trace->calltime = current->ret_stack[index].calltime;
94624- trace->overrun = atomic_read(&current->trace_overrun);
94625+ trace->overrun = atomic_read_unchecked(&current->trace_overrun);
94626 trace->depth = index;
94627 }
94628
94629diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
94630index 7a9ba62..2e0e4a1 100644
94631--- a/kernel/trace/trace_mmiotrace.c
94632+++ b/kernel/trace/trace_mmiotrace.c
94633@@ -24,7 +24,7 @@ struct header_iter {
94634 static struct trace_array *mmio_trace_array;
94635 static bool overrun_detected;
94636 static unsigned long prev_overruns;
94637-static atomic_t dropped_count;
94638+static atomic_unchecked_t dropped_count;
94639
94640 static void mmio_reset_data(struct trace_array *tr)
94641 {
94642@@ -124,7 +124,7 @@ static void mmio_close(struct trace_iterator *iter)
94643
94644 static unsigned long count_overruns(struct trace_iterator *iter)
94645 {
94646- unsigned long cnt = atomic_xchg(&dropped_count, 0);
94647+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
94648 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
94649
94650 if (over > prev_overruns)
94651@@ -307,7 +307,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
94652 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
94653 sizeof(*entry), 0, pc);
94654 if (!event) {
94655- atomic_inc(&dropped_count);
94656+ atomic_inc_unchecked(&dropped_count);
94657 return;
94658 }
94659 entry = ring_buffer_event_data(event);
94660@@ -337,7 +337,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
94661 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
94662 sizeof(*entry), 0, pc);
94663 if (!event) {
94664- atomic_inc(&dropped_count);
94665+ atomic_inc_unchecked(&dropped_count);
94666 return;
94667 }
94668 entry = ring_buffer_event_data(event);
94669diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
94670index b77b9a6..82f19bd 100644
94671--- a/kernel/trace/trace_output.c
94672+++ b/kernel/trace/trace_output.c
94673@@ -707,14 +707,16 @@ int register_ftrace_event(struct trace_event *event)
94674 goto out;
94675 }
94676
94677+ pax_open_kernel();
94678 if (event->funcs->trace == NULL)
94679- event->funcs->trace = trace_nop_print;
94680+ *(void **)&event->funcs->trace = trace_nop_print;
94681 if (event->funcs->raw == NULL)
94682- event->funcs->raw = trace_nop_print;
94683+ *(void **)&event->funcs->raw = trace_nop_print;
94684 if (event->funcs->hex == NULL)
94685- event->funcs->hex = trace_nop_print;
94686+ *(void **)&event->funcs->hex = trace_nop_print;
94687 if (event->funcs->binary == NULL)
94688- event->funcs->binary = trace_nop_print;
94689+ *(void **)&event->funcs->binary = trace_nop_print;
94690+ pax_close_kernel();
94691
94692 key = event->type & (EVENT_HASHSIZE - 1);
94693
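/*
 * A userspace sketch of the pax_open_kernel()/pax_close_kernel() idiom
 * used in the trace_output.c hunk above: with PaX KERNEXEC/constify the
 * ops table is read-only, so the NULL-slot fixups are done through a
 * bracketed window in which write protection is lifted. mprotect()
 * stands in here for the kernel mechanism (e.g. toggling CR0.WP); the
 * one-slot table is an assumption for illustration.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void nop_handler(void) { }   /* plays the role of trace_nop_print */

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	void *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	void (**slot)(void) = p;

	if (p == MAP_FAILED)
		return 1;

	*slot = NULL;                                 /* table built ... */
	mprotect(p, page, PROT_READ);                 /* ... then locked down */

	mprotect(p, page, PROT_READ | PROT_WRITE);    /* pax_open_kernel() */
	if (*slot == NULL)                            /* fill missing handler */
		*slot = nop_handler;
	mprotect(p, page, PROT_READ);                 /* pax_close_kernel() */

	printf("slot filled: %d\n", *slot == nop_handler);
	return munmap(p, page);
}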
94694diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
94695index f8b45d8..70ff6c8 100644
94696--- a/kernel/trace/trace_seq.c
94697+++ b/kernel/trace/trace_seq.c
94698@@ -337,7 +337,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
94699 return 0;
94700 }
94701
94702- seq_buf_path(&s->seq, path, "\n");
94703+ seq_buf_path(&s->seq, path, "\n\\");
94704
94705 if (unlikely(seq_buf_has_overflowed(&s->seq))) {
94706 s->seq.len = save_len;
94707diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
94708index 16eddb3..758b308 100644
94709--- a/kernel/trace/trace_stack.c
94710+++ b/kernel/trace/trace_stack.c
94711@@ -90,7 +90,7 @@ check_stack(unsigned long ip, unsigned long *stack)
94712 return;
94713
94714 /* we do not handle interrupt stacks yet */
94715- if (!object_is_on_stack(stack))
94716+ if (!object_starts_on_stack(stack))
94717 return;
94718
94719 local_irq_save(flags);
94720diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
94721index c6ee36f..78513f3 100644
94722--- a/kernel/trace/trace_syscalls.c
94723+++ b/kernel/trace/trace_syscalls.c
94724@@ -590,6 +590,8 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
94725 int num;
94726
94727 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94728+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94729+ return -EINVAL;
94730
94731 mutex_lock(&syscall_trace_lock);
94732 if (!sys_perf_refcount_enter)
94733@@ -610,6 +612,8 @@ static void perf_sysenter_disable(struct ftrace_event_call *call)
94734 int num;
94735
94736 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94737+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94738+ return;
94739
94740 mutex_lock(&syscall_trace_lock);
94741 sys_perf_refcount_enter--;
94742@@ -662,6 +666,8 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
94743 int num;
94744
94745 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94746+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94747+ return -EINVAL;
94748
94749 mutex_lock(&syscall_trace_lock);
94750 if (!sys_perf_refcount_exit)
94751@@ -682,6 +688,8 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
94752 int num;
94753
94754 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94755+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94756+ return;
94757
94758 mutex_lock(&syscall_trace_lock);
94759 sys_perf_refcount_exit--;
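/*
 * A sketch of the guard the trace_syscalls.c hunks above add:
 * syscall_nr originates in event metadata, so it is range-checked
 * before being used as an index into the per-syscall refcount arrays.
 * NR_syscalls and the array here are stand-ins for the arch
 * definitions.
 */
#include <stdio.h>

#define NR_syscalls 64
static int sys_refcount_enter[NR_syscalls];

static int enable_counter(int num)
{
	if (num < 0 || num >= NR_syscalls)   /* the added WARN_ON_ONCE check */
		return -1;                   /* -EINVAL in the kernel */
	sys_refcount_enter[num]++;
	return 0;
}

int main(void)
{
	/* in-range index succeeds, out-of-range index is rejected */
	printf("%d %d\n", enable_counter(3), enable_counter(NR_syscalls));
	return 0;
}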
94760diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
94761index 4109f83..fe1f830 100644
94762--- a/kernel/user_namespace.c
94763+++ b/kernel/user_namespace.c
94764@@ -83,6 +83,21 @@ int create_user_ns(struct cred *new)
94765 !kgid_has_mapping(parent_ns, group))
94766 return -EPERM;
94767
94768+#ifdef CONFIG_GRKERNSEC
94769+ /*
94770+ * This doesn't really inspire confidence:
94771+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
94772+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
94773+ * Increases kernel attack surface in areas developers
94774+ * previously cared little about ("low importance due
94775+ * to requiring "root" capability")
94776+ * To be removed when this code receives *proper* review
94777+ */
94778+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
94779+ !capable(CAP_SETGID))
94780+ return -EPERM;
94781+#endif
94782+
94783 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
94784 if (!ns)
94785 return -ENOMEM;
94786@@ -980,7 +995,7 @@ static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
94787 if (atomic_read(&current->mm->mm_users) > 1)
94788 return -EINVAL;
94789
94790- if (current->fs->users != 1)
94791+ if (atomic_read(&current->fs->users) != 1)
94792 return -EINVAL;
94793
94794 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
94795diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
94796index c8eac43..4b5f08f 100644
94797--- a/kernel/utsname_sysctl.c
94798+++ b/kernel/utsname_sysctl.c
94799@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
94800 static int proc_do_uts_string(struct ctl_table *table, int write,
94801 void __user *buffer, size_t *lenp, loff_t *ppos)
94802 {
94803- struct ctl_table uts_table;
94804+ ctl_table_no_const uts_table;
94805 int r;
94806 memcpy(&uts_table, table, sizeof(uts_table));
94807 uts_table.data = get_uts(table, write);
94808diff --git a/kernel/watchdog.c b/kernel/watchdog.c
94809index 70bf118..4be3c37 100644
94810--- a/kernel/watchdog.c
94811+++ b/kernel/watchdog.c
94812@@ -572,7 +572,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
94813 static void watchdog_nmi_disable(unsigned int cpu) { return; }
94814 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
94815
94816-static struct smp_hotplug_thread watchdog_threads = {
94817+static struct smp_hotplug_thread watchdog_threads __read_only = {
94818 .store = &softlockup_watchdog,
94819 .thread_should_run = watchdog_should_run,
94820 .thread_fn = watchdog,
94821diff --git a/kernel/workqueue.c b/kernel/workqueue.c
94822index 82d0c8d..37f4222 100644
94823--- a/kernel/workqueue.c
94824+++ b/kernel/workqueue.c
94825@@ -4565,7 +4565,7 @@ static void rebind_workers(struct worker_pool *pool)
94826 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
94827 worker_flags |= WORKER_REBOUND;
94828 worker_flags &= ~WORKER_UNBOUND;
94829- ACCESS_ONCE(worker->flags) = worker_flags;
94830+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
94831 }
94832
94833 spin_unlock_irq(&pool->lock);
94834diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
94835index 5f2ce61..85a0b1b 100644
94836--- a/lib/Kconfig.debug
94837+++ b/lib/Kconfig.debug
94838@@ -910,7 +910,7 @@ config DEBUG_MUTEXES
94839
94840 config DEBUG_WW_MUTEX_SLOWPATH
94841 bool "Wait/wound mutex debugging: Slowpath testing"
94842- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94843+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94844 select DEBUG_LOCK_ALLOC
94845 select DEBUG_SPINLOCK
94846 select DEBUG_MUTEXES
94847@@ -927,7 +927,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
94848
94849 config DEBUG_LOCK_ALLOC
94850 bool "Lock debugging: detect incorrect freeing of live locks"
94851- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94852+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94853 select DEBUG_SPINLOCK
94854 select DEBUG_MUTEXES
94855 select LOCKDEP
94856@@ -941,7 +941,7 @@ config DEBUG_LOCK_ALLOC
94857
94858 config PROVE_LOCKING
94859 bool "Lock debugging: prove locking correctness"
94860- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94861+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94862 select LOCKDEP
94863 select DEBUG_SPINLOCK
94864 select DEBUG_MUTEXES
94865@@ -992,7 +992,7 @@ config LOCKDEP
94866
94867 config LOCK_STAT
94868 bool "Lock usage statistics"
94869- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94870+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94871 select LOCKDEP
94872 select DEBUG_SPINLOCK
94873 select DEBUG_MUTEXES
94874@@ -1453,6 +1453,7 @@ config LATENCYTOP
94875 depends on DEBUG_KERNEL
94876 depends on STACKTRACE_SUPPORT
94877 depends on PROC_FS
94878+ depends on !GRKERNSEC_HIDESYM
94879 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
94880 select KALLSYMS
94881 select KALLSYMS_ALL
94882@@ -1469,7 +1470,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
94883 config DEBUG_STRICT_USER_COPY_CHECKS
94884 bool "Strict user copy size checks"
94885 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
94886- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
94887+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
94888 help
94889 Enabling this option turns a certain set of sanity checks for user
94890 copy operations into compile time failures.
94891@@ -1597,7 +1598,7 @@ endmenu # runtime tests
94892
94893 config PROVIDE_OHCI1394_DMA_INIT
94894 bool "Remote debugging over FireWire early on boot"
94895- depends on PCI && X86
94896+ depends on PCI && X86 && !GRKERNSEC
94897 help
94898 If you want to debug problems which hang or crash the kernel early
94899 on boot and the crashing machine has a FireWire port, you can use
94900diff --git a/lib/Makefile b/lib/Makefile
94901index 3c3b30b..ca29102 100644
94902--- a/lib/Makefile
94903+++ b/lib/Makefile
94904@@ -55,7 +55,7 @@ obj-$(CONFIG_BTREE) += btree.o
94905 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
94906 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
94907 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
94908-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
94909+obj-y += list_debug.o
94910 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
94911
94912 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
94913diff --git a/lib/average.c b/lib/average.c
94914index 114d1be..ab0350c 100644
94915--- a/lib/average.c
94916+++ b/lib/average.c
94917@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
94918 {
94919 unsigned long internal = ACCESS_ONCE(avg->internal);
94920
94921- ACCESS_ONCE(avg->internal) = internal ?
94922+ ACCESS_ONCE_RW(avg->internal) = internal ?
94923 (((internal << avg->weight) - internal) +
94924 (val << avg->factor)) >> avg->weight :
94925 (val << avg->factor);
94926diff --git a/lib/bitmap.c b/lib/bitmap.c
94927index 324ea9e..46b1ae2 100644
94928--- a/lib/bitmap.c
94929+++ b/lib/bitmap.c
94930@@ -271,7 +271,7 @@ int __bitmap_subset(const unsigned long *bitmap1,
94931 }
94932 EXPORT_SYMBOL(__bitmap_subset);
94933
94934-int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
94935+int __intentional_overflow(-1) __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
94936 {
94937 unsigned int k, lim = bits/BITS_PER_LONG;
94938 int w = 0;
94939@@ -437,7 +437,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
94940 {
94941 int c, old_c, totaldigits, ndigits, nchunks, nbits;
94942 u32 chunk;
94943- const char __user __force *ubuf = (const char __user __force *)buf;
94944+ const char __user *ubuf = (const char __force_user *)buf;
94945
94946 bitmap_zero(maskp, nmaskbits);
94947
94948@@ -522,7 +522,7 @@ int bitmap_parse_user(const char __user *ubuf,
94949 {
94950 if (!access_ok(VERIFY_READ, ubuf, ulen))
94951 return -EFAULT;
94952- return __bitmap_parse((const char __force *)ubuf,
94953+ return __bitmap_parse((const char __force_kernel *)ubuf,
94954 ulen, 1, maskp, nmaskbits);
94955
94956 }
94957@@ -640,7 +640,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
94958 {
94959 unsigned a, b;
94960 int c, old_c, totaldigits;
94961- const char __user __force *ubuf = (const char __user __force *)buf;
94962+ const char __user *ubuf = (const char __force_user *)buf;
94963 int exp_digit, in_range;
94964
94965 totaldigits = c = 0;
94966@@ -735,7 +735,7 @@ int bitmap_parselist_user(const char __user *ubuf,
94967 {
94968 if (!access_ok(VERIFY_READ, ubuf, ulen))
94969 return -EFAULT;
94970- return __bitmap_parselist((const char __force *)ubuf,
94971+ return __bitmap_parselist((const char __force_kernel *)ubuf,
94972 ulen, 1, maskp, nmaskbits);
94973 }
94974 EXPORT_SYMBOL(bitmap_parselist_user);
94975diff --git a/lib/bug.c b/lib/bug.c
94976index 0c3bd95..5a615a1 100644
94977--- a/lib/bug.c
94978+++ b/lib/bug.c
94979@@ -145,6 +145,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
94980 return BUG_TRAP_TYPE_NONE;
94981
94982 bug = find_bug(bugaddr);
94983+ if (!bug)
94984+ return BUG_TRAP_TYPE_NONE;
94985
94986 file = NULL;
94987 line = 0;
94988diff --git a/lib/debugobjects.c b/lib/debugobjects.c
94989index 547f7f9..a6d4ba0 100644
94990--- a/lib/debugobjects.c
94991+++ b/lib/debugobjects.c
94992@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
94993 if (limit > 4)
94994 return;
94995
94996- is_on_stack = object_is_on_stack(addr);
94997+ is_on_stack = object_starts_on_stack(addr);
94998 if (is_on_stack == onstack)
94999 return;
95000
95001diff --git a/lib/div64.c b/lib/div64.c
95002index 4382ad7..08aa558 100644
95003--- a/lib/div64.c
95004+++ b/lib/div64.c
95005@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
95006 EXPORT_SYMBOL(__div64_32);
95007
95008 #ifndef div_s64_rem
95009-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
95010+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
95011 {
95012 u64 quotient;
95013
95014@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
95015 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
95016 */
95017 #ifndef div64_u64
95018-u64 div64_u64(u64 dividend, u64 divisor)
95019+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
95020 {
95021 u32 high = divisor >> 32;
95022 u64 quot;
95023diff --git a/lib/dma-debug.c b/lib/dma-debug.c
95024index 9722bd2..0d826f4 100644
95025--- a/lib/dma-debug.c
95026+++ b/lib/dma-debug.c
95027@@ -979,7 +979,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
95028
95029 void dma_debug_add_bus(struct bus_type *bus)
95030 {
95031- struct notifier_block *nb;
95032+ notifier_block_no_const *nb;
95033
95034 if (dma_debug_disabled())
95035 return;
95036@@ -1161,7 +1161,7 @@ static void check_unmap(struct dma_debug_entry *ref)
95037
95038 static void check_for_stack(struct device *dev, void *addr)
95039 {
95040- if (object_is_on_stack(addr))
95041+ if (object_starts_on_stack(addr))
95042 err_printk(dev, NULL, "DMA-API: device driver maps memory from "
95043 "stack [addr=%p]\n", addr);
95044 }
95045diff --git a/lib/inflate.c b/lib/inflate.c
95046index 013a761..c28f3fc 100644
95047--- a/lib/inflate.c
95048+++ b/lib/inflate.c
95049@@ -269,7 +269,7 @@ static void free(void *where)
95050 malloc_ptr = free_mem_ptr;
95051 }
95052 #else
95053-#define malloc(a) kmalloc(a, GFP_KERNEL)
95054+#define malloc(a) kmalloc((a), GFP_KERNEL)
95055 #define free(a) kfree(a)
95056 #endif
95057
95058diff --git a/lib/ioremap.c b/lib/ioremap.c
95059index 0c9216c..863bd89 100644
95060--- a/lib/ioremap.c
95061+++ b/lib/ioremap.c
95062@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
95063 unsigned long next;
95064
95065 phys_addr -= addr;
95066- pmd = pmd_alloc(&init_mm, pud, addr);
95067+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
95068 if (!pmd)
95069 return -ENOMEM;
95070 do {
95071@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
95072 unsigned long next;
95073
95074 phys_addr -= addr;
95075- pud = pud_alloc(&init_mm, pgd, addr);
95076+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
95077 if (!pud)
95078 return -ENOMEM;
95079 do {
95080diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
95081index bd2bea9..6b3c95e 100644
95082--- a/lib/is_single_threaded.c
95083+++ b/lib/is_single_threaded.c
95084@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
95085 struct task_struct *p, *t;
95086 bool ret;
95087
95088+ if (!mm)
95089+ return true;
95090+
95091 if (atomic_read(&task->signal->live) != 1)
95092 return false;
95093
95094diff --git a/lib/kobject.c b/lib/kobject.c
95095index 03d4ab3..46f6374 100644
95096--- a/lib/kobject.c
95097+++ b/lib/kobject.c
95098@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
95099
95100
95101 static DEFINE_SPINLOCK(kobj_ns_type_lock);
95102-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
95103+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
95104
95105-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
95106+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
95107 {
95108 enum kobj_ns_type type = ops->type;
95109 int error;
95110diff --git a/lib/list_debug.c b/lib/list_debug.c
95111index c24c2f7..f0296f4 100644
95112--- a/lib/list_debug.c
95113+++ b/lib/list_debug.c
95114@@ -11,7 +11,9 @@
95115 #include <linux/bug.h>
95116 #include <linux/kernel.h>
95117 #include <linux/rculist.h>
95118+#include <linux/mm.h>
95119
95120+#ifdef CONFIG_DEBUG_LIST
95121 /*
95122 * Insert a new entry between two known consecutive entries.
95123 *
95124@@ -19,21 +21,40 @@
95125 * the prev/next entries already!
95126 */
95127
95128+static bool __list_add_debug(struct list_head *new,
95129+ struct list_head *prev,
95130+ struct list_head *next)
95131+{
95132+ if (unlikely(next->prev != prev)) {
95133+ printk(KERN_ERR "list_add corruption. next->prev should be "
95134+ "prev (%p), but was %p. (next=%p).\n",
95135+ prev, next->prev, next);
95136+ BUG();
95137+ return false;
95138+ }
95139+ if (unlikely(prev->next != next)) {
95140+ printk(KERN_ERR "list_add corruption. prev->next should be "
95141+ "next (%p), but was %p. (prev=%p).\n",
95142+ next, prev->next, prev);
95143+ BUG();
95144+ return false;
95145+ }
95146+ if (unlikely(new == prev || new == next)) {
95147+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
95148+ new, prev, next);
95149+ BUG();
95150+ return false;
95151+ }
95152+ return true;
95153+}
95154+
95155 void __list_add(struct list_head *new,
95156- struct list_head *prev,
95157- struct list_head *next)
95158+ struct list_head *prev,
95159+ struct list_head *next)
95160 {
95161- WARN(next->prev != prev,
95162- "list_add corruption. next->prev should be "
95163- "prev (%p), but was %p. (next=%p).\n",
95164- prev, next->prev, next);
95165- WARN(prev->next != next,
95166- "list_add corruption. prev->next should be "
95167- "next (%p), but was %p. (prev=%p).\n",
95168- next, prev->next, prev);
95169- WARN(new == prev || new == next,
95170- "list_add double add: new=%p, prev=%p, next=%p.\n",
95171- new, prev, next);
95172+ if (!__list_add_debug(new, prev, next))
95173+ return;
95174+
95175 next->prev = new;
95176 new->next = next;
95177 new->prev = prev;
95178@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
95179 }
95180 EXPORT_SYMBOL(__list_add);
95181
95182-void __list_del_entry(struct list_head *entry)
95183+static bool __list_del_entry_debug(struct list_head *entry)
95184 {
95185 struct list_head *prev, *next;
95186
95187 prev = entry->prev;
95188 next = entry->next;
95189
95190- if (WARN(next == LIST_POISON1,
95191- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
95192- entry, LIST_POISON1) ||
95193- WARN(prev == LIST_POISON2,
95194- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
95195- entry, LIST_POISON2) ||
95196- WARN(prev->next != entry,
95197- "list_del corruption. prev->next should be %p, "
95198- "but was %p\n", entry, prev->next) ||
95199- WARN(next->prev != entry,
95200- "list_del corruption. next->prev should be %p, "
95201- "but was %p\n", entry, next->prev))
95202+ if (unlikely(next == LIST_POISON1)) {
95203+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
95204+ entry, LIST_POISON1);
95205+ BUG();
95206+ return false;
95207+ }
95208+ if (unlikely(prev == LIST_POISON2)) {
95209+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
95210+ entry, LIST_POISON2);
95211+ BUG();
95212+ return false;
95213+ }
95214+ if (unlikely(entry->prev->next != entry)) {
95215+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
95216+ "but was %p\n", entry, prev->next);
95217+ BUG();
95218+ return false;
95219+ }
95220+ if (unlikely(entry->next->prev != entry)) {
95221+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
95222+ "but was %p\n", entry, next->prev);
95223+ BUG();
95224+ return false;
95225+ }
95226+ return true;
95227+}
95228+
95229+void __list_del_entry(struct list_head *entry)
95230+{
95231+ if (!__list_del_entry_debug(entry))
95232 return;
95233
95234- __list_del(prev, next);
95235+ __list_del(entry->prev, entry->next);
95236 }
95237 EXPORT_SYMBOL(__list_del_entry);
95238
95239@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
95240 void __list_add_rcu(struct list_head *new,
95241 struct list_head *prev, struct list_head *next)
95242 {
95243- WARN(next->prev != prev,
95244- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
95245- prev, next->prev, next);
95246- WARN(prev->next != next,
95247- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
95248- next, prev->next, prev);
95249+ if (!__list_add_debug(new, prev, next))
95250+ return;
95251+
95252 new->next = next;
95253 new->prev = prev;
95254 rcu_assign_pointer(list_next_rcu(prev), new);
95255 next->prev = new;
95256 }
95257 EXPORT_SYMBOL(__list_add_rcu);
95258+#endif
95259+
95260+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
95261+{
95262+#ifdef CONFIG_DEBUG_LIST
95263+ if (!__list_add_debug(new, prev, next))
95264+ return;
95265+#endif
95266+
95267+ pax_open_kernel();
95268+ next->prev = new;
95269+ new->next = next;
95270+ new->prev = prev;
95271+ prev->next = new;
95272+ pax_close_kernel();
95273+}
95274+EXPORT_SYMBOL(__pax_list_add);
95275+
95276+void pax_list_del(struct list_head *entry)
95277+{
95278+#ifdef CONFIG_DEBUG_LIST
95279+ if (!__list_del_entry_debug(entry))
95280+ return;
95281+#endif
95282+
95283+ pax_open_kernel();
95284+ __list_del(entry->prev, entry->next);
95285+ entry->next = LIST_POISON1;
95286+ entry->prev = LIST_POISON2;
95287+ pax_close_kernel();
95288+}
95289+EXPORT_SYMBOL(pax_list_del);
95290+
95291+void pax_list_del_init(struct list_head *entry)
95292+{
95293+ pax_open_kernel();
95294+ __list_del(entry->prev, entry->next);
95295+ INIT_LIST_HEAD(entry);
95296+ pax_close_kernel();
95297+}
95298+EXPORT_SYMBOL(pax_list_del_init);
95299+
95300+void __pax_list_add_rcu(struct list_head *new,
95301+ struct list_head *prev, struct list_head *next)
95302+{
95303+#ifdef CONFIG_DEBUG_LIST
95304+ if (!__list_add_debug(new, prev, next))
95305+ return;
95306+#endif
95307+
95308+ pax_open_kernel();
95309+ new->next = next;
95310+ new->prev = prev;
95311+ rcu_assign_pointer(list_next_rcu(prev), new);
95312+ next->prev = new;
95313+ pax_close_kernel();
95314+}
95315+EXPORT_SYMBOL(__pax_list_add_rcu);
95316+
95317+void pax_list_del_rcu(struct list_head *entry)
95318+{
95319+#ifdef CONFIG_DEBUG_LIST
95320+ if (!__list_del_entry_debug(entry))
95321+ return;
95322+#endif
95323+
95324+ pax_open_kernel();
95325+ __list_del(entry->prev, entry->next);
95326+ entry->next = LIST_POISON1;
95327+ entry->prev = LIST_POISON2;
95328+ pax_close_kernel();
95329+}
95330+EXPORT_SYMBOL(pax_list_del_rcu);
95331diff --git a/lib/lockref.c b/lib/lockref.c
95332index d2233de..fa1a2f6 100644
95333--- a/lib/lockref.c
95334+++ b/lib/lockref.c
95335@@ -48,13 +48,13 @@
95336 void lockref_get(struct lockref *lockref)
95337 {
95338 CMPXCHG_LOOP(
95339- new.count++;
95340+ __lockref_inc(&new);
95341 ,
95342 return;
95343 );
95344
95345 spin_lock(&lockref->lock);
95346- lockref->count++;
95347+ __lockref_inc(lockref);
95348 spin_unlock(&lockref->lock);
95349 }
95350 EXPORT_SYMBOL(lockref_get);
95351@@ -69,7 +69,7 @@ int lockref_get_not_zero(struct lockref *lockref)
95352 int retval;
95353
95354 CMPXCHG_LOOP(
95355- new.count++;
95356+ __lockref_inc(&new);
95357 if (!old.count)
95358 return 0;
95359 ,
95360@@ -79,7 +79,7 @@ int lockref_get_not_zero(struct lockref *lockref)
95361 spin_lock(&lockref->lock);
95362 retval = 0;
95363 if (lockref->count) {
95364- lockref->count++;
95365+ __lockref_inc(lockref);
95366 retval = 1;
95367 }
95368 spin_unlock(&lockref->lock);
95369@@ -96,7 +96,7 @@ EXPORT_SYMBOL(lockref_get_not_zero);
95370 int lockref_get_or_lock(struct lockref *lockref)
95371 {
95372 CMPXCHG_LOOP(
95373- new.count++;
95374+ __lockref_inc(&new);
95375 if (!old.count)
95376 break;
95377 ,
95378@@ -106,7 +106,7 @@ int lockref_get_or_lock(struct lockref *lockref)
95379 spin_lock(&lockref->lock);
95380 if (!lockref->count)
95381 return 0;
95382- lockref->count++;
95383+ __lockref_inc(lockref);
95384 spin_unlock(&lockref->lock);
95385 return 1;
95386 }
95387@@ -120,7 +120,7 @@ EXPORT_SYMBOL(lockref_get_or_lock);
95388 int lockref_put_or_lock(struct lockref *lockref)
95389 {
95390 CMPXCHG_LOOP(
95391- new.count--;
95392+ __lockref_dec(&new);
95393 if (old.count <= 1)
95394 break;
95395 ,
95396@@ -130,7 +130,7 @@ int lockref_put_or_lock(struct lockref *lockref)
95397 spin_lock(&lockref->lock);
95398 if (lockref->count <= 1)
95399 return 0;
95400- lockref->count--;
95401+ __lockref_dec(lockref);
95402 spin_unlock(&lockref->lock);
95403 return 1;
95404 }
95405@@ -157,7 +157,7 @@ int lockref_get_not_dead(struct lockref *lockref)
95406 int retval;
95407
95408 CMPXCHG_LOOP(
95409- new.count++;
95410+ __lockref_inc(&new);
95411 if ((int)old.count < 0)
95412 return 0;
95413 ,
95414@@ -167,7 +167,7 @@ int lockref_get_not_dead(struct lockref *lockref)
95415 spin_lock(&lockref->lock);
95416 retval = 0;
95417 if ((int) lockref->count >= 0) {
95418- lockref->count++;
95419+ __lockref_inc(lockref);
95420 retval = 1;
95421 }
95422 spin_unlock(&lockref->lock);
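/*
 * A userspace sketch of why the lockref hunks above funnel the
 * open-coded new.count++/new.count-- through __lockref_inc() and
 * __lockref_dec(): a single helper gives the REFCOUNT instrumentation
 * one place to hook the count update. The saturating behaviour below
 * is an assumed illustration of that instrumentation, not the actual
 * grsecurity implementation.
 */
#include <limits.h>
#include <stdio.h>

struct lockref_sketch { int count; };

static void __lockref_inc(struct lockref_sketch *l)
{
	if (l->count == INT_MAX)     /* saturate instead of wrapping */
		return;
	l->count++;
}

int main(void)
{
	struct lockref_sketch l = { .count = INT_MAX - 1 };

	__lockref_inc(&l);
	__lockref_inc(&l);           /* second increment saturates */
	printf("saturated: %d\n", l.count == INT_MAX);
	return 0;
}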
95423diff --git a/lib/nlattr.c b/lib/nlattr.c
95424index 9c3e85f..0affd1b 100644
95425--- a/lib/nlattr.c
95426+++ b/lib/nlattr.c
95427@@ -279,7 +279,11 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
95428 {
95429 int minlen = min_t(int, count, nla_len(src));
95430
95431+ BUG_ON(minlen < 0);
95432+
95433 memcpy(dest, nla_data(src), minlen);
95434+ if (count > minlen)
95435+ memset(dest + minlen, 0, count - minlen);
95436
95437 return minlen;
95438 }
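/*
 * A self-contained sketch of the hardened nla_memcpy() semantics from
 * the hunk above: copy at most the attribute length and zero the rest
 * of the destination, so a short attribute can never leave stale bytes
 * behind for a later copy-out. copy_and_pad() is a stand-in name.
 */
#include <stdio.h>
#include <string.h>

static int copy_and_pad(void *dest, const void *src, int src_len, int count)
{
	int minlen = src_len < count ? src_len : count;

	memcpy(dest, src, minlen);
	if (count > minlen)                       /* the added memset() */
		memset((char *)dest + minlen, 0, count - minlen);
	return minlen;
}

int main(void)
{
	char buf[8];

	memset(buf, 0xff, sizeof(buf));           /* simulate stale data */
	copy_and_pad(buf, "abc", 3, sizeof(buf));
	printf("tail byte: %d\n", buf[7]);        /* prints 0, not -1 */
	return 0;
}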
95439diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
95440index 6111bcb..02e816b 100644
95441--- a/lib/percpu-refcount.c
95442+++ b/lib/percpu-refcount.c
95443@@ -31,7 +31,7 @@
95444 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
95445 */
95446
95447-#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
95448+#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 2))
95449
95450 static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
95451
95452diff --git a/lib/radix-tree.c b/lib/radix-tree.c
95453index 3291a8e..346a91e 100644
95454--- a/lib/radix-tree.c
95455+++ b/lib/radix-tree.c
95456@@ -67,7 +67,7 @@ struct radix_tree_preload {
95457 int nr;
95458 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
95459 };
95460-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
95461+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
95462
95463 static inline void *ptr_to_indirect(void *ptr)
95464 {
95465diff --git a/lib/random32.c b/lib/random32.c
95466index 0bee183..526f12f 100644
95467--- a/lib/random32.c
95468+++ b/lib/random32.c
95469@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
95470 }
95471 #endif
95472
95473-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
95474+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
95475
95476 /**
95477 * prandom_u32_state - seeded pseudo-random number generator.
95478diff --git a/lib/rbtree.c b/lib/rbtree.c
95479index c16c81a..4dcbda1 100644
95480--- a/lib/rbtree.c
95481+++ b/lib/rbtree.c
95482@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
95483 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
95484
95485 static const struct rb_augment_callbacks dummy_callbacks = {
95486- dummy_propagate, dummy_copy, dummy_rotate
95487+ .propagate = dummy_propagate,
95488+ .copy = dummy_copy,
95489+ .rotate = dummy_rotate
95490 };
95491
95492 void rb_insert_color(struct rb_node *node, struct rb_root *root)
95493diff --git a/lib/show_mem.c b/lib/show_mem.c
95494index 7de89f4..00d70b7 100644
95495--- a/lib/show_mem.c
95496+++ b/lib/show_mem.c
95497@@ -50,6 +50,6 @@ void show_mem(unsigned int filter)
95498 quicklist_total_size());
95499 #endif
95500 #ifdef CONFIG_MEMORY_FAILURE
95501- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
95502+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
95503 #endif
95504 }
95505diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
95506index bb2b201..46abaf9 100644
95507--- a/lib/strncpy_from_user.c
95508+++ b/lib/strncpy_from_user.c
95509@@ -21,7 +21,7 @@
95510 */
95511 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
95512 {
95513- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95514+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95515 long res = 0;
95516
95517 /*
95518diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
95519index a28df52..3d55877 100644
95520--- a/lib/strnlen_user.c
95521+++ b/lib/strnlen_user.c
95522@@ -26,7 +26,7 @@
95523 */
95524 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
95525 {
95526- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95527+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95528 long align, res = 0;
95529 unsigned long c;
95530
95531diff --git a/lib/swiotlb.c b/lib/swiotlb.c
95532index 4abda07..b9d3765 100644
95533--- a/lib/swiotlb.c
95534+++ b/lib/swiotlb.c
95535@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
95536
95537 void
95538 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
95539- dma_addr_t dev_addr)
95540+ dma_addr_t dev_addr, struct dma_attrs *attrs)
95541 {
95542 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
95543
95544diff --git a/lib/usercopy.c b/lib/usercopy.c
95545index 4f5b1dd..7cab418 100644
95546--- a/lib/usercopy.c
95547+++ b/lib/usercopy.c
95548@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
95549 WARN(1, "Buffer overflow detected!\n");
95550 }
95551 EXPORT_SYMBOL(copy_from_user_overflow);
95552+
95553+void copy_to_user_overflow(void)
95554+{
95555+ WARN(1, "Buffer overflow detected!\n");
95556+}
95557+EXPORT_SYMBOL(copy_to_user_overflow);
95558diff --git a/lib/vsprintf.c b/lib/vsprintf.c
95559index ec337f6..8484eb2 100644
95560--- a/lib/vsprintf.c
95561+++ b/lib/vsprintf.c
95562@@ -16,6 +16,9 @@
95563 * - scnprintf and vscnprintf
95564 */
95565
95566+#ifdef CONFIG_GRKERNSEC_HIDESYM
95567+#define __INCLUDED_BY_HIDESYM 1
95568+#endif
95569 #include <stdarg.h>
95570 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
95571 #include <linux/types.h>
95572@@ -625,7 +628,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
95573 #ifdef CONFIG_KALLSYMS
95574 if (*fmt == 'B')
95575 sprint_backtrace(sym, value);
95576- else if (*fmt != 'f' && *fmt != 's')
95577+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
95578 sprint_symbol(sym, value);
95579 else
95580 sprint_symbol_no_offset(sym, value);
95581@@ -1240,7 +1243,11 @@ char *address_val(char *buf, char *end, const void *addr,
95582 return number(buf, end, num, spec);
95583 }
95584
95585+#ifdef CONFIG_GRKERNSEC_HIDESYM
95586+int kptr_restrict __read_mostly = 2;
95587+#else
95588 int kptr_restrict __read_mostly;
95589+#endif
95590
95591 /*
95592 * Show a '%p' thing. A kernel extension is that the '%p' is followed
95593@@ -1251,8 +1258,10 @@ int kptr_restrict __read_mostly;
95594 *
95595 * - 'F' For symbolic function descriptor pointers with offset
95596 * - 'f' For simple symbolic function names without offset
95597+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
95598 * - 'S' For symbolic direct pointers with offset
95599 * - 's' For symbolic direct pointers without offset
95600+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
95601 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
95602 * - 'B' For backtraced symbolic direct pointers with offset
95603 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
95604@@ -1331,12 +1340,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95605
95606 if (!ptr && *fmt != 'K') {
95607 /*
95608- * Print (null) with the same width as a pointer so it makes
95609+ * Print (nil) with the same width as a pointer so it makes
95610 * tabular output look nice.
95611 */
95612 if (spec.field_width == -1)
95613 spec.field_width = default_width;
95614- return string(buf, end, "(null)", spec);
95615+ return string(buf, end, "(nil)", spec);
95616 }
95617
95618 switch (*fmt) {
95619@@ -1346,6 +1355,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95620 /* Fallthrough */
95621 case 'S':
95622 case 's':
95623+#ifdef CONFIG_GRKERNSEC_HIDESYM
95624+ break;
95625+#else
95626+ return symbol_string(buf, end, ptr, spec, fmt);
95627+#endif
95628+ case 'X':
95629+ ptr = dereference_function_descriptor(ptr);
95630+ case 'A':
95631 case 'B':
95632 return symbol_string(buf, end, ptr, spec, fmt);
95633 case 'R':
95634@@ -1403,6 +1420,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95635 va_end(va);
95636 return buf;
95637 }
95638+ case 'P':
95639+ break;
95640 case 'K':
95641 /*
95642 * %pK cannot be used in IRQ context because its test
95643@@ -1460,6 +1479,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95644 ((const struct file *)ptr)->f_path.dentry,
95645 spec, fmt);
95646 }
95647+
95648+#ifdef CONFIG_GRKERNSEC_HIDESYM
95649+ /* 'P' = approved pointers to copy to userland,
95650+ as in the /proc/kallsyms case, as we make it display nothing
95651+ for non-root users, and the real contents for root users
95652+ 'X' = approved simple symbols
95653+ Also ignore 'K' pointers, since we force their NULLing for non-root users
95654+ above
95655+ */
95656+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
95657+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
95658+ dump_stack();
95659+ ptr = NULL;
95660+ }
95661+#endif
95662+
95663 spec.flags |= SMALL;
95664 if (spec.field_width == -1) {
95665 spec.field_width = default_width;
95666@@ -2160,11 +2195,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95667 typeof(type) value; \
95668 if (sizeof(type) == 8) { \
95669 args = PTR_ALIGN(args, sizeof(u32)); \
95670- *(u32 *)&value = *(u32 *)args; \
95671- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
95672+ *(u32 *)&value = *(const u32 *)args; \
95673+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
95674 } else { \
95675 args = PTR_ALIGN(args, sizeof(type)); \
95676- value = *(typeof(type) *)args; \
95677+ value = *(const typeof(type) *)args; \
95678 } \
95679 args += sizeof(type); \
95680 value; \
95681@@ -2227,7 +2262,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95682 case FORMAT_TYPE_STR: {
95683 const char *str_arg = args;
95684 args += strlen(str_arg) + 1;
95685- str = string(str, end, (char *)str_arg, spec);
95686+ str = string(str, end, str_arg, spec);
95687 break;
95688 }
95689
95690diff --git a/localversion-grsec b/localversion-grsec
95691new file mode 100644
95692index 0000000..7cd6065
95693--- /dev/null
95694+++ b/localversion-grsec
95695@@ -0,0 +1 @@
95696+-grsec
95697diff --git a/mm/Kconfig b/mm/Kconfig
95698index 1d1ae6b..0f05885 100644
95699--- a/mm/Kconfig
95700+++ b/mm/Kconfig
95701@@ -341,10 +341,11 @@ config KSM
95702 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
95703
95704 config DEFAULT_MMAP_MIN_ADDR
95705- int "Low address space to protect from user allocation"
95706+ int "Low address space to protect from user allocation"
95707 depends on MMU
95708- default 4096
95709- help
95710+ default 32768 if ALPHA || ARM || PARISC || SPARC32
95711+ default 65536
95712+ help
95713 This is the portion of low virtual memory which should be protected
95714 from userspace allocation. Keeping a user from writing to low pages
95715 can help reduce the impact of kernel NULL pointer bugs.
95716@@ -375,7 +376,7 @@ config MEMORY_FAILURE
95717
95718 config HWPOISON_INJECT
95719 tristate "HWPoison pages injector"
95720- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
95721+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
95722 select PROC_PAGE_MONITOR
95723
95724 config NOMMU_INITIAL_TRIM_EXCESS
95725diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
95726index 957d3da..1d34e20 100644
95727--- a/mm/Kconfig.debug
95728+++ b/mm/Kconfig.debug
95729@@ -10,6 +10,7 @@ config PAGE_EXTENSION
95730 config DEBUG_PAGEALLOC
95731 bool "Debug page memory allocations"
95732 depends on DEBUG_KERNEL
95733+ depends on !PAX_MEMORY_SANITIZE
95734 depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
95735 depends on !KMEMCHECK
95736 select PAGE_EXTENSION
95737diff --git a/mm/backing-dev.c b/mm/backing-dev.c
95738index 0ae0df5..82ac56b 100644
95739--- a/mm/backing-dev.c
95740+++ b/mm/backing-dev.c
95741@@ -12,7 +12,7 @@
95742 #include <linux/device.h>
95743 #include <trace/events/writeback.h>
95744
95745-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
95746+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
95747
95748 struct backing_dev_info default_backing_dev_info = {
95749 .name = "default",
95750@@ -525,7 +525,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
95751 return err;
95752
95753 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
95754- atomic_long_inc_return(&bdi_seq));
95755+ atomic_long_inc_return_unchecked(&bdi_seq));
95756 if (err) {
95757 bdi_destroy(bdi);
95758 return err;
95759diff --git a/mm/filemap.c b/mm/filemap.c
95760index 673e458..7192013 100644
95761--- a/mm/filemap.c
95762+++ b/mm/filemap.c
95763@@ -2097,7 +2097,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
95764 struct address_space *mapping = file->f_mapping;
95765
95766 if (!mapping->a_ops->readpage)
95767- return -ENOEXEC;
95768+ return -ENODEV;
95769 file_accessed(file);
95770 vma->vm_ops = &generic_file_vm_ops;
95771 return 0;
95772@@ -2275,6 +2275,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
95773 *pos = i_size_read(inode);
95774
95775 if (limit != RLIM_INFINITY) {
95776+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
95777 if (*pos >= limit) {
95778 send_sig(SIGXFSZ, current, 0);
95779 return -EFBIG;
95780diff --git a/mm/fremap.c b/mm/fremap.c
95781index 2805d71..8b56e7d 100644
95782--- a/mm/fremap.c
95783+++ b/mm/fremap.c
95784@@ -180,6 +180,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
95785 retry:
95786 vma = find_vma(mm, start);
95787
95788+#ifdef CONFIG_PAX_SEGMEXEC
95789+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
95790+ goto out;
95791+#endif
95792+
95793 /*
95794 * Make sure the vma is shared, that it supports prefaulting,
95795 * and that the remapped range is valid and fully within
95796diff --git a/mm/gup.c b/mm/gup.c
95797index 9b2afbf..647297c 100644
95798--- a/mm/gup.c
95799+++ b/mm/gup.c
95800@@ -274,11 +274,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
95801 unsigned int fault_flags = 0;
95802 int ret;
95803
95804- /* For mlock, just skip the stack guard page. */
95805- if ((*flags & FOLL_MLOCK) &&
95806- (stack_guard_page_start(vma, address) ||
95807- stack_guard_page_end(vma, address + PAGE_SIZE)))
95808- return -ENOENT;
95809 if (*flags & FOLL_WRITE)
95810 fault_flags |= FAULT_FLAG_WRITE;
95811 if (nonblocking)
95812@@ -444,14 +439,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95813 if (!(gup_flags & FOLL_FORCE))
95814 gup_flags |= FOLL_NUMA;
95815
95816- do {
95817+ while (nr_pages) {
95818 struct page *page;
95819 unsigned int foll_flags = gup_flags;
95820 unsigned int page_increm;
95821
95822 /* first iteration or cross vma bound */
95823 if (!vma || start >= vma->vm_end) {
95824- vma = find_extend_vma(mm, start);
95825+ vma = find_vma(mm, start);
95826 if (!vma && in_gate_area(mm, start)) {
95827 int ret;
95828 ret = get_gate_page(mm, start & PAGE_MASK,
95829@@ -463,7 +458,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95830 goto next_page;
95831 }
95832
95833- if (!vma || check_vma_flags(vma, gup_flags))
95834+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
95835 return i ? : -EFAULT;
95836 if (is_vm_hugetlb_page(vma)) {
95837 i = follow_hugetlb_page(mm, vma, pages, vmas,
95838@@ -518,7 +513,7 @@ next_page:
95839 i += page_increm;
95840 start += page_increm * PAGE_SIZE;
95841 nr_pages -= page_increm;
95842- } while (nr_pages);
95843+ }
95844 return i;
95845 }
95846 EXPORT_SYMBOL(__get_user_pages);
95847diff --git a/mm/highmem.c b/mm/highmem.c
95848index 123bcd3..0de52ba 100644
95849--- a/mm/highmem.c
95850+++ b/mm/highmem.c
95851@@ -195,8 +195,9 @@ static void flush_all_zero_pkmaps(void)
95852 * So no dangers, even with speculative execution.
95853 */
95854 page = pte_page(pkmap_page_table[i]);
95855+ pax_open_kernel();
95856 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
95857-
95858+ pax_close_kernel();
95859 set_page_address(page, NULL);
95860 need_flush = 1;
95861 }
95862@@ -259,9 +260,11 @@ start:
95863 }
95864 }
95865 vaddr = PKMAP_ADDR(last_pkmap_nr);
95866+
95867+ pax_open_kernel();
95868 set_pte_at(&init_mm, vaddr,
95869 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
95870-
95871+ pax_close_kernel();
95872 pkmap_count[last_pkmap_nr] = 1;
95873 set_page_address(page, (void *)vaddr);
95874
95875diff --git a/mm/hugetlb.c b/mm/hugetlb.c
95876index 267e419..394bed9 100644
95877--- a/mm/hugetlb.c
95878+++ b/mm/hugetlb.c
95879@@ -2258,6 +2258,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
95880 struct ctl_table *table, int write,
95881 void __user *buffer, size_t *length, loff_t *ppos)
95882 {
95883+ ctl_table_no_const t;
95884 struct hstate *h = &default_hstate;
95885 unsigned long tmp = h->max_huge_pages;
95886 int ret;
95887@@ -2265,9 +2266,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
95888 if (!hugepages_supported())
95889 return -ENOTSUPP;
95890
95891- table->data = &tmp;
95892- table->maxlen = sizeof(unsigned long);
95893- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
95894+ t = *table;
95895+ t.data = &tmp;
95896+ t.maxlen = sizeof(unsigned long);
95897+ ret = proc_doulongvec_minmax(&t, write, buffer, length, ppos);
95898 if (ret)
95899 goto out;
95900
95901@@ -2302,6 +2304,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
95902 struct hstate *h = &default_hstate;
95903 unsigned long tmp;
95904 int ret;
95905+ ctl_table_no_const hugetlb_table;
95906
95907 if (!hugepages_supported())
95908 return -ENOTSUPP;
95909@@ -2311,9 +2314,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
95910 if (write && hstate_is_gigantic(h))
95911 return -EINVAL;
95912
95913- table->data = &tmp;
95914- table->maxlen = sizeof(unsigned long);
95915- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
95916+ hugetlb_table = *table;
95917+ hugetlb_table.data = &tmp;
95918+ hugetlb_table.maxlen = sizeof(unsigned long);
95919+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
95920 if (ret)
95921 goto out;
95922
95923@@ -2798,6 +2802,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
95924 i_mmap_unlock_write(mapping);
95925 }
95926
95927+#ifdef CONFIG_PAX_SEGMEXEC
95928+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
95929+{
95930+ struct mm_struct *mm = vma->vm_mm;
95931+ struct vm_area_struct *vma_m;
95932+ unsigned long address_m;
95933+ pte_t *ptep_m;
95934+
95935+ vma_m = pax_find_mirror_vma(vma);
95936+ if (!vma_m)
95937+ return;
95938+
95939+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
95940+ address_m = address + SEGMEXEC_TASK_SIZE;
95941+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
95942+ get_page(page_m);
95943+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
95944+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
95945+}
95946+#endif
95947+
95948 /*
95949 * Hugetlb_cow() should be called with page lock of the original hugepage held.
95950 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
95951@@ -2910,6 +2935,11 @@ retry_avoidcopy:
95952 make_huge_pte(vma, new_page, 1));
95953 page_remove_rmap(old_page);
95954 hugepage_add_new_anon_rmap(new_page, vma, address);
95955+
95956+#ifdef CONFIG_PAX_SEGMEXEC
95957+ pax_mirror_huge_pte(vma, address, new_page);
95958+#endif
95959+
95960 /* Make the old page be freed below */
95961 new_page = old_page;
95962 }
95963@@ -3070,6 +3100,10 @@ retry:
95964 && (vma->vm_flags & VM_SHARED)));
95965 set_huge_pte_at(mm, address, ptep, new_pte);
95966
95967+#ifdef CONFIG_PAX_SEGMEXEC
95968+ pax_mirror_huge_pte(vma, address, page);
95969+#endif
95970+
95971 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
95972 /* Optimization, do the COW without a second fault */
95973 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
95974@@ -3137,6 +3171,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95975 struct address_space *mapping;
95976 int need_wait_lock = 0;
95977
95978+#ifdef CONFIG_PAX_SEGMEXEC
95979+ struct vm_area_struct *vma_m;
95980+#endif
95981+
95982 address &= huge_page_mask(h);
95983
95984 ptep = huge_pte_offset(mm, address);
95985@@ -3150,6 +3188,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95986 VM_FAULT_SET_HINDEX(hstate_index(h));
95987 }
95988
95989+#ifdef CONFIG_PAX_SEGMEXEC
95990+ vma_m = pax_find_mirror_vma(vma);
95991+ if (vma_m) {
95992+ unsigned long address_m;
95993+
95994+ if (vma->vm_start > vma_m->vm_start) {
95995+ address_m = address;
95996+ address -= SEGMEXEC_TASK_SIZE;
95997+ vma = vma_m;
95998+ h = hstate_vma(vma);
95999+ } else
96000+ address_m = address + SEGMEXEC_TASK_SIZE;
96001+
96002+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
96003+ return VM_FAULT_OOM;
96004+ address_m &= HPAGE_MASK;
96005+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
96006+ }
96007+#endif
96008+
96009 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
96010 if (!ptep)
96011 return VM_FAULT_OOM;
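
The first two mm/hugetlb.c hunks replace in-place writes to the registered sysctl table with a ctl_table_no_const copy on the stack: under grsecurity's constification the registered ctl_table may live in read-only memory, and pointing a private copy's ->data at a local also keeps concurrent users of the shared table consistent. A self-contained sketch of the copy-then-point-at-locals pattern (the struct and all names are invented for the demo):

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-in for struct ctl_table: the registered instance
 * is const (possibly in read-only memory), so the handler mutates a copy. */
struct demo_table {
    const char *procname;
    void *data;
    size_t maxlen;
};

static unsigned long max_huge_pages = 64;

static int demo_handler(const struct demo_table *table)
{
    unsigned long tmp = max_huge_pages;
    struct demo_table t = *table;   /* copy, as the patch does with *table */

    t.data = &tmp;                  /* mutate only the local copy */
    t.maxlen = sizeof(unsigned long);
    printf("%s: via copy, maxlen=%zu value=%lu\n",
           t.procname, t.maxlen, *(unsigned long *)t.data);
    return 0;
}

int main(void)
{
    static const struct demo_table table = { "nr_hugepages", NULL, 0 };
    return demo_handler(&table);
}
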
96012diff --git a/mm/internal.h b/mm/internal.h
96013index efad241..57ae4ca 100644
96014--- a/mm/internal.h
96015+++ b/mm/internal.h
96016@@ -134,6 +134,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
96017
96018 extern int __isolate_free_page(struct page *page, unsigned int order);
96019 extern void __free_pages_bootmem(struct page *page, unsigned int order);
96020+extern void free_compound_page(struct page *page);
96021 extern void prep_compound_page(struct page *page, unsigned long order);
96022 #ifdef CONFIG_MEMORY_FAILURE
96023 extern bool is_free_buddy_page(struct page *page);
96024@@ -387,7 +388,7 @@ extern u32 hwpoison_filter_enable;
96025
96026 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
96027 unsigned long, unsigned long,
96028- unsigned long, unsigned long);
96029+ unsigned long, unsigned long) __intentional_overflow(-1);
96030
96031 extern void set_pageblock_order(void);
96032 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
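
The mm/internal.h hunk exports free_compound_page() and tags vm_mmap_pgoff() with __intentional_overflow(-1), an annotation consumed by grsecurity's size_overflow GCC plugin; by the plugin's convention the -1 argument appears to designate the return value, exempting it from overflow instrumentation. When building without the plugin such markers must expand to nothing; a minimal self-contained illustration of that pattern (the fallback definition is assumed, not quoted from the patch):

#include <stdio.h>

/* Plugin annotations are consumed by a GCC plugin pass; without the
 * plugin they must be no-ops. This fallback is illustrative only. */
#ifndef __intentional_overflow
# define __intentional_overflow(...)
#endif

/* Tagged declaration: overflow in the return value is deliberately
 * left uninstrumented, as with vm_mmap_pgoff() in the hunk above. */
static unsigned long demo_mmap_pgoff(unsigned long len) __intentional_overflow(-1);

static unsigned long demo_mmap_pgoff(unsigned long len)
{
    return len * 2;
}

int main(void)
{
    printf("%lu\n", demo_mmap_pgoff(21));
    return 0;
}
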
96033diff --git a/mm/kmemleak.c b/mm/kmemleak.c
96034index 3cda50c..032ba634 100644
96035--- a/mm/kmemleak.c
96036+++ b/mm/kmemleak.c
96037@@ -364,7 +364,7 @@ static void print_unreferenced(struct seq_file *seq,
96038
96039 for (i = 0; i < object->trace_len; i++) {
96040 void *ptr = (void *)object->trace[i];
96041- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
96042+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
96043 }
96044 }
96045
96046@@ -1905,7 +1905,7 @@ static int __init kmemleak_late_init(void)
96047 return -ENOMEM;
96048 }
96049
96050- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
96051+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
96052 &kmemleak_fops);
96053 if (!dentry)
96054 pr_warning("Failed to create the debugfs kmemleak file\n");
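
The mm/kmemleak.c hunks hide raw pointers behind the %pP/%pA specifiers and narrow the debugfs file mode from S_IRUGO (0444) to S_IRUSR (0400), so only root can read the leak report, which exposes kernel addresses and backtraces. The mode difference is just the second argument to debugfs_create_file(); the values can be checked with the same S_I* macros userspace has:

#include <stdio.h>
#include <sys/stat.h>

/* S_IRUGO is a kernel-only shorthand; it expands to the three
 * per-class read bits composed below. */
int main(void)
{
    printf("S_IRUGO = %04o (readable by user, group, other)\n",
           (unsigned int)(S_IRUSR | S_IRGRP | S_IROTH));
    printf("S_IRUSR = %04o (readable by owner only)\n",
           (unsigned int)S_IRUSR);
    return 0;
}
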
96055diff --git a/mm/maccess.c b/mm/maccess.c
96056index d53adf9..03a24bf 100644
96057--- a/mm/maccess.c
96058+++ b/mm/maccess.c
96059@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
96060 set_fs(KERNEL_DS);
96061 pagefault_disable();
96062 ret = __copy_from_user_inatomic(dst,
96063- (__force const void __user *)src, size);
96064+ (const void __force_user *)src, size);
96065 pagefault_enable();
96066 set_fs(old_fs);
96067
96068@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
96069
96070 set_fs(KERNEL_DS);
96071 pagefault_disable();
96072- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
96073+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
96074 pagefault_enable();
96075 set_fs(old_fs);
96076
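
The mm/maccess.c hunks only reshuffle the sparse address-space cast: (__force const void __user *) becomes (const void __force_user *), a grsecurity shorthand (conventionally __force __user) that keeps the qualifiers adjacent to the pointer they annotate. Behaviour is unchanged; the cast tells sparse that treating a kernel pointer as a user pointer here is deliberate. A self-contained illustration of how such annotations compile away outside a sparse run (the macros are defined locally for the demo, mirroring the kernel's __CHECKER__ definitions):

#include <stdio.h>

#ifdef __CHECKER__
# define __user   __attribute__((noderef, address_space(1)))
# define __force  __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user __force __user

static long fake_copy_from_user(void *dst, const void __user *src, long n)
{
    /* stand-in for __copy_from_user_inatomic() */
    const char *s = (const char __force *)src;
    char *d = dst;
    while (n--)
        *d++ = *s++;
    return 0;
}

int main(void)
{
    char src[] = "kernel data";
    char dst[sizeof(src)];

    /* The deliberate kernel-to-user cast the patch reshuffles: */
    fake_copy_from_user(dst, (const void __force_user *)src, sizeof(src));
    printf("%s\n", dst);
    return 0;
}
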
96077diff --git a/mm/madvise.c b/mm/madvise.c
96078index a271adc..5e1a2b4 100644
96079--- a/mm/madvise.c
96080+++ b/mm/madvise.c
96081@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
96082 pgoff_t pgoff;
96083 unsigned long new_flags = vma->vm_flags;
96084
96085+#ifdef CONFIG_PAX_SEGMEXEC
96086+ struct vm_area_struct *vma_m;
96087+#endif
96088+
96089 switch (behavior) {
96090 case MADV_NORMAL:
96091 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
96092@@ -126,6 +130,13 @@ success:
96093 /*
96094 * vm_flags is protected by the mmap_sem held in write mode.
96095 */
96096+
96097+#ifdef CONFIG_PAX_SEGMEXEC
96098+ vma_m = pax_find_mirror_vma(vma);
96099+ if (vma_m)
96100+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
96101+#endif
96102+
96103 vma->vm_flags = new_flags;
96104
96105 out:
96106@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
96107 struct vm_area_struct **prev,
96108 unsigned long start, unsigned long end)
96109 {
96110+
96111+#ifdef CONFIG_PAX_SEGMEXEC
96112+ struct vm_area_struct *vma_m;
96113+#endif
96114+
96115 *prev = vma;
96116 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
96117 return -EINVAL;
96118@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
96119 zap_page_range(vma, start, end - start, &details);
96120 } else
96121 zap_page_range(vma, start, end - start, NULL);
96122+
96123+#ifdef CONFIG_PAX_SEGMEXEC
96124+ vma_m = pax_find_mirror_vma(vma);
96125+ if (vma_m) {
96126+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
96127+ struct zap_details details = {
96128+ .nonlinear_vma = vma_m,
96129+ .last_index = ULONG_MAX,
96130+ };
96131+ zap_page_range(vma_m, start + SEGMEXEC_TASK_SIZE, end - start, &details);
96132+ } else
96133+ zap_page_range(vma_m, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
96134+ }
96135+#endif
96136+
96137 return 0;
96138 }
96139
96140@@ -488,6 +519,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
96141 if (end < start)
96142 return error;
96143
96144+#ifdef CONFIG_PAX_SEGMEXEC
96145+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
96146+ if (end > SEGMEXEC_TASK_SIZE)
96147+ return error;
96148+ } else
96149+#endif
96150+
96151+ if (end > TASK_SIZE)
96152+ return error;
96153+
96154 error = 0;
96155 if (end == start)
96156 return error;
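
The final mm/madvise.c hunk rejects ranges that extend past the usable address space: under SEGMEXEC the task's view ends at SEGMEXEC_TASK_SIZE (half the normal i386 task size), otherwise at TASK_SIZE; the cascaded "} else" before the #endif keeps a single if-chain whether or not the config is enabled. A standalone sketch of the range check (the constants are illustrative i386 values, not taken from the patch):

#include <stdio.h>
#include <errno.h>

/* Illustrative i386-style limits: with PaX SEGMEXEC the user address
 * space is split in half, so the data view ends at 1.5GB. */
#define DEMO_TASK_SIZE          0xC0000000UL          /* 3GB   */
#define DEMO_SEGMEXEC_TASK_SIZE (DEMO_TASK_SIZE / 2)  /* 1.5GB */

static int check_range(unsigned long start, unsigned long len, int segmexec)
{
    unsigned long end = start + len;
    unsigned long limit = segmexec ? DEMO_SEGMEXEC_TASK_SIZE : DEMO_TASK_SIZE;

    if (end < start)    /* wrap-around */
        return -EINVAL;
    if (end > limit)    /* past the usable address space */
        return -EINVAL;
    return 0;
}

int main(void)
{
    printf("normal task,   end=2GB: %d\n", check_range(0x40000000UL, 0x40000000UL, 0));
    printf("segmexec task, end=2GB: %d\n", check_range(0x40000000UL, 0x40000000UL, 1));
    return 0;
}
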
96157diff --git a/mm/memory-failure.c b/mm/memory-failure.c
96158index 20c29dd..22bd8e2 100644
96159--- a/mm/memory-failure.c
96160+++ b/mm/memory-failure.c
96161@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
96162
96163 int sysctl_memory_failure_recovery __read_mostly = 1;
96164
96165-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
96166+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
96167
96168 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
96169
96170@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
96171 pfn, t->comm, t->pid);
96172 si.si_signo = SIGBUS;
96173 si.si_errno = 0;
96174- si.si_addr = (void *)addr;
96175+ si.si_addr = (void __user *)addr;
96176 #ifdef __ARCH_SI_TRAPNO
96177 si.si_trapno = trapno;
96178 #endif
96179@@ -786,7 +786,7 @@ static struct page_state {
96180 unsigned long res;
96181 char *msg;
96182 int (*action)(struct page *p, unsigned long pfn);
96183-} error_states[] = {
96184+} __do_const error_states[] = {
96185 { reserved, reserved, "reserved kernel", me_kernel },
96186 /*
96187 * free pages are specially detected outside this table:
96188@@ -1094,7 +1094,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
96189 nr_pages = 1 << compound_order(hpage);
96190 else /* normal page or thp */
96191 nr_pages = 1;
96192- atomic_long_add(nr_pages, &num_poisoned_pages);
96193+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
96194
96195 /*
96196 * We need/can do nothing about count=0 pages.
96197@@ -1123,7 +1123,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
96198 if (PageHWPoison(hpage)) {
96199 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
96200 || (p != hpage && TestSetPageHWPoison(hpage))) {
96201- atomic_long_sub(nr_pages, &num_poisoned_pages);
96202+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96203 unlock_page(hpage);
96204 return 0;
96205 }
96206@@ -1191,14 +1191,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
96207 */
96208 if (!PageHWPoison(p)) {
96209 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
96210- atomic_long_sub(nr_pages, &num_poisoned_pages);
96211+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96212 put_page(hpage);
96213 res = 0;
96214 goto out;
96215 }
96216 if (hwpoison_filter(p)) {
96217 if (TestClearPageHWPoison(p))
96218- atomic_long_sub(nr_pages, &num_poisoned_pages);
96219+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96220 unlock_page(hpage);
96221 put_page(hpage);
96222 return 0;
96223@@ -1428,7 +1428,7 @@ int unpoison_memory(unsigned long pfn)
96224 return 0;
96225 }
96226 if (TestClearPageHWPoison(p))
96227- atomic_long_dec(&num_poisoned_pages);
96228+ atomic_long_dec_unchecked(&num_poisoned_pages);
96229 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
96230 return 0;
96231 }
96232@@ -1442,7 +1442,7 @@ int unpoison_memory(unsigned long pfn)
96233 */
96234 if (TestClearPageHWPoison(page)) {
96235 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
96236- atomic_long_sub(nr_pages, &num_poisoned_pages);
96237+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96238 freeit = 1;
96239 if (PageHuge(page))
96240 clear_page_hwpoison_huge_page(page);
96241@@ -1567,11 +1567,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
96242 if (PageHuge(page)) {
96243 set_page_hwpoison_huge_page(hpage);
96244 dequeue_hwpoisoned_huge_page(hpage);
96245- atomic_long_add(1 << compound_order(hpage),
96246+ atomic_long_add_unchecked(1 << compound_order(hpage),
96247 &num_poisoned_pages);
96248 } else {
96249 SetPageHWPoison(page);
96250- atomic_long_inc(&num_poisoned_pages);
96251+ atomic_long_inc_unchecked(&num_poisoned_pages);
96252 }
96253 }
96254 return ret;
96255@@ -1610,7 +1610,7 @@ static int __soft_offline_page(struct page *page, int flags)
96256 put_page(page);
96257 pr_info("soft_offline: %#lx: invalidated\n", pfn);
96258 SetPageHWPoison(page);
96259- atomic_long_inc(&num_poisoned_pages);
96260+ atomic_long_inc_unchecked(&num_poisoned_pages);
96261 return 0;
96262 }
96263
96264@@ -1659,7 +1659,7 @@ static int __soft_offline_page(struct page *page, int flags)
96265 if (!is_free_buddy_page(page))
96266 pr_info("soft offline: %#lx: page leaked\n",
96267 pfn);
96268- atomic_long_inc(&num_poisoned_pages);
96269+ atomic_long_inc_unchecked(&num_poisoned_pages);
96270 }
96271 } else {
96272 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
96273@@ -1729,11 +1729,11 @@ int soft_offline_page(struct page *page, int flags)
96274 if (PageHuge(page)) {
96275 set_page_hwpoison_huge_page(hpage);
96276 dequeue_hwpoisoned_huge_page(hpage);
96277- atomic_long_add(1 << compound_order(hpage),
96278+ atomic_long_add_unchecked(1 << compound_order(hpage),
96279 &num_poisoned_pages);
96280 } else {
96281 SetPageHWPoison(page);
96282- atomic_long_inc(&num_poisoned_pages);
96283+ atomic_long_inc_unchecked(&num_poisoned_pages);
96284 }
96285 }
96286 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
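
The mm/memory-failure.c hunks switch num_poisoned_pages to atomic_long_unchecked_t with the matching *_unchecked operations. Under PaX REFCOUNT, plain atomic ops trap on overflow to catch reference-count wraps; pure statistics counters like this one are deliberately opted out via the unchecked variants so a harmless wrap cannot trigger the protection. A rough userspace model of the checked/unchecked split (the real protection traps at overflow; refusing to wrap here is a simplification for the demo):

#include <stdio.h>
#include <limits.h>

static unsigned long checked_inc(unsigned long v)
{
    if (v == ULONG_MAX) {
        fprintf(stderr, "refcount would overflow; refusing to wrap\n");
        return v;       /* simplification: REFCOUNT really traps here */
    }
    return v + 1;
}

static unsigned long unchecked_inc(unsigned long v)
{
    return v + 1;       /* statistics counter: wrap is considered harmless */
}

int main(void)
{
    printf("unchecked at max wraps to: %lu\n", unchecked_inc(ULONG_MAX));
    printf("checked   at max stays:    %lu\n", checked_inc(ULONG_MAX));
    return 0;
}
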
96287diff --git a/mm/memory.c b/mm/memory.c
96288index 6aa7822..3c76005 100644
96289--- a/mm/memory.c
96290+++ b/mm/memory.c
96291@@ -414,6 +414,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
96292 free_pte_range(tlb, pmd, addr);
96293 } while (pmd++, addr = next, addr != end);
96294
96295+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
96296 start &= PUD_MASK;
96297 if (start < floor)
96298 return;
96299@@ -428,6 +429,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
96300 pmd = pmd_offset(pud, start);
96301 pud_clear(pud);
96302 pmd_free_tlb(tlb, pmd, start);
96303+#endif
96304+
96305 }
96306
96307 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96308@@ -447,6 +450,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96309 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
96310 } while (pud++, addr = next, addr != end);
96311
96312+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
96313 start &= PGDIR_MASK;
96314 if (start < floor)
96315 return;
96316@@ -461,6 +465,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96317 pud = pud_offset(pgd, start);
96318 pgd_clear(pgd);
96319 pud_free_tlb(tlb, pud, start);
96320+#endif
96321+
96322 }
96323
96324 /*
96325@@ -690,10 +696,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
96326 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
96327 */
96328 if (vma->vm_ops)
96329- printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
96330+ printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n",
96331 vma->vm_ops->fault);
96332 if (vma->vm_file)
96333- printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
96334+ printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n",
96335 vma->vm_file->f_op->mmap);
96336 dump_stack();
96337 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
96338@@ -1488,6 +1494,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
96339 page_add_file_rmap(page);
96340 set_pte_at(mm, addr, pte, mk_pte(page, prot));
96341
96342+#ifdef CONFIG_PAX_SEGMEXEC
96343+ pax_mirror_file_pte(vma, addr, page, ptl);
96344+#endif
96345+
96346 retval = 0;
96347 pte_unmap_unlock(pte, ptl);
96348 return retval;
96349@@ -1532,9 +1542,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
96350 if (!page_count(page))
96351 return -EINVAL;
96352 if (!(vma->vm_flags & VM_MIXEDMAP)) {
96353+
96354+#ifdef CONFIG_PAX_SEGMEXEC
96355+ struct vm_area_struct *vma_m;
96356+#endif
96357+
96358 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
96359 BUG_ON(vma->vm_flags & VM_PFNMAP);
96360 vma->vm_flags |= VM_MIXEDMAP;
96361+
96362+#ifdef CONFIG_PAX_SEGMEXEC
96363+ vma_m = pax_find_mirror_vma(vma);
96364+ if (vma_m)
96365+ vma_m->vm_flags |= VM_MIXEDMAP;
96366+#endif
96367+
96368 }
96369 return insert_page(vma, addr, page, vma->vm_page_prot);
96370 }
96371@@ -1617,6 +1639,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
96372 unsigned long pfn)
96373 {
96374 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
96375+ BUG_ON(vma->vm_mirror);
96376
96377 if (addr < vma->vm_start || addr >= vma->vm_end)
96378 return -EFAULT;
96379@@ -1864,7 +1887,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
96380
96381 BUG_ON(pud_huge(*pud));
96382
96383- pmd = pmd_alloc(mm, pud, addr);
96384+ pmd = (mm == &init_mm) ?
96385+ pmd_alloc_kernel(mm, pud, addr) :
96386+ pmd_alloc(mm, pud, addr);
96387 if (!pmd)
96388 return -ENOMEM;
96389 do {
96390@@ -1884,7 +1909,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
96391 unsigned long next;
96392 int err;
96393
96394- pud = pud_alloc(mm, pgd, addr);
96395+ pud = (mm == &init_mm) ?
96396+ pud_alloc_kernel(mm, pgd, addr) :
96397+ pud_alloc(mm, pgd, addr);
96398 if (!pud)
96399 return -ENOMEM;
96400 do {
96401@@ -2006,6 +2033,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
96402 return ret;
96403 }
96404
96405+#ifdef CONFIG_PAX_SEGMEXEC
96406+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
96407+{
96408+ struct mm_struct *mm = vma->vm_mm;
96409+ spinlock_t *ptl;
96410+ pte_t *pte, entry;
96411+
96412+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
96413+ entry = *pte;
96414+ if (!pte_present(entry)) {
96415+ if (!pte_none(entry)) {
96416+ BUG_ON(pte_file(entry));
96417+ free_swap_and_cache(pte_to_swp_entry(entry));
96418+ pte_clear_not_present_full(mm, address, pte, 0);
96419+ }
96420+ } else {
96421+ struct page *page;
96422+
96423+ flush_cache_page(vma, address, pte_pfn(entry));
96424+ entry = ptep_clear_flush(vma, address, pte);
96425+ BUG_ON(pte_dirty(entry));
96426+ page = vm_normal_page(vma, address, entry);
96427+ if (page) {
96428+ update_hiwater_rss(mm);
96429+ if (PageAnon(page))
96430+ dec_mm_counter_fast(mm, MM_ANONPAGES);
96431+ else
96432+ dec_mm_counter_fast(mm, MM_FILEPAGES);
96433+ page_remove_rmap(page);
96434+ page_cache_release(page);
96435+ }
96436+ }
96437+ pte_unmap_unlock(pte, ptl);
96438+}
96439+
96440+/* PaX: if vma is mirrored, synchronize the mirror's PTE
96441+ *
96442+ * the ptl of the lower mapped page is held on entry and is not released on exit
96443+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
96444+ */
96445+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96446+{
96447+ struct mm_struct *mm = vma->vm_mm;
96448+ unsigned long address_m;
96449+ spinlock_t *ptl_m;
96450+ struct vm_area_struct *vma_m;
96451+ pmd_t *pmd_m;
96452+ pte_t *pte_m, entry_m;
96453+
96454+ BUG_ON(!page_m || !PageAnon(page_m));
96455+
96456+ vma_m = pax_find_mirror_vma(vma);
96457+ if (!vma_m)
96458+ return;
96459+
96460+ BUG_ON(!PageLocked(page_m));
96461+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96462+ address_m = address + SEGMEXEC_TASK_SIZE;
96463+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96464+ pte_m = pte_offset_map(pmd_m, address_m);
96465+ ptl_m = pte_lockptr(mm, pmd_m);
96466+ if (ptl != ptl_m) {
96467+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96468+ if (!pte_none(*pte_m))
96469+ goto out;
96470+ }
96471+
96472+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96473+ page_cache_get(page_m);
96474+ page_add_anon_rmap(page_m, vma_m, address_m);
96475+ inc_mm_counter_fast(mm, MM_ANONPAGES);
96476+ set_pte_at(mm, address_m, pte_m, entry_m);
96477+ update_mmu_cache(vma_m, address_m, pte_m);
96478+out:
96479+ if (ptl != ptl_m)
96480+ spin_unlock(ptl_m);
96481+ pte_unmap(pte_m);
96482+ unlock_page(page_m);
96483+}
96484+
96485+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96486+{
96487+ struct mm_struct *mm = vma->vm_mm;
96488+ unsigned long address_m;
96489+ spinlock_t *ptl_m;
96490+ struct vm_area_struct *vma_m;
96491+ pmd_t *pmd_m;
96492+ pte_t *pte_m, entry_m;
96493+
96494+ BUG_ON(!page_m || PageAnon(page_m));
96495+
96496+ vma_m = pax_find_mirror_vma(vma);
96497+ if (!vma_m)
96498+ return;
96499+
96500+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96501+ address_m = address + SEGMEXEC_TASK_SIZE;
96502+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96503+ pte_m = pte_offset_map(pmd_m, address_m);
96504+ ptl_m = pte_lockptr(mm, pmd_m);
96505+ if (ptl != ptl_m) {
96506+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96507+ if (!pte_none(*pte_m))
96508+ goto out;
96509+ }
96510+
96511+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96512+ page_cache_get(page_m);
96513+ page_add_file_rmap(page_m);
96514+ inc_mm_counter_fast(mm, MM_FILEPAGES);
96515+ set_pte_at(mm, address_m, pte_m, entry_m);
96516+ update_mmu_cache(vma_m, address_m, pte_m);
96517+out:
96518+ if (ptl != ptl_m)
96519+ spin_unlock(ptl_m);
96520+ pte_unmap(pte_m);
96521+}
96522+
96523+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
96524+{
96525+ struct mm_struct *mm = vma->vm_mm;
96526+ unsigned long address_m;
96527+ spinlock_t *ptl_m;
96528+ struct vm_area_struct *vma_m;
96529+ pmd_t *pmd_m;
96530+ pte_t *pte_m, entry_m;
96531+
96532+ vma_m = pax_find_mirror_vma(vma);
96533+ if (!vma_m)
96534+ return;
96535+
96536+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96537+ address_m = address + SEGMEXEC_TASK_SIZE;
96538+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96539+ pte_m = pte_offset_map(pmd_m, address_m);
96540+ ptl_m = pte_lockptr(mm, pmd_m);
96541+ if (ptl != ptl_m) {
96542+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96543+ if (!pte_none(*pte_m))
96544+ goto out;
96545+ }
96546+
96547+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
96548+ set_pte_at(mm, address_m, pte_m, entry_m);
96549+out:
96550+ if (ptl != ptl_m)
96551+ spin_unlock(ptl_m);
96552+ pte_unmap(pte_m);
96553+}
96554+
96555+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
96556+{
96557+ struct page *page_m;
96558+ pte_t entry;
96559+
96560+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
96561+ goto out;
96562+
96563+ entry = *pte;
96564+ page_m = vm_normal_page(vma, address, entry);
96565+ if (!page_m)
96566+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
96567+ else if (PageAnon(page_m)) {
96568+ if (pax_find_mirror_vma(vma)) {
96569+ pte_unmap_unlock(pte, ptl);
96570+ lock_page(page_m);
96571+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
96572+ if (pte_same(entry, *pte))
96573+ pax_mirror_anon_pte(vma, address, page_m, ptl);
96574+ else
96575+ unlock_page(page_m);
96576+ }
96577+ } else
96578+ pax_mirror_file_pte(vma, address, page_m, ptl);
96579+
96580+out:
96581+ pte_unmap_unlock(pte, ptl);
96582+}
96583+#endif
96584+
96585 /*
96586 * This routine handles present pages, when users try to write
96587 * to a shared page. It is done by copying the page to a new address
96588@@ -2212,6 +2419,12 @@ gotten:
96589 */
96590 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96591 if (likely(pte_same(*page_table, orig_pte))) {
96592+
96593+#ifdef CONFIG_PAX_SEGMEXEC
96594+ if (pax_find_mirror_vma(vma))
96595+ BUG_ON(!trylock_page(new_page));
96596+#endif
96597+
96598 if (old_page) {
96599 if (!PageAnon(old_page)) {
96600 dec_mm_counter_fast(mm, MM_FILEPAGES);
96601@@ -2265,6 +2478,10 @@ gotten:
96602 page_remove_rmap(old_page);
96603 }
96604
96605+#ifdef CONFIG_PAX_SEGMEXEC
96606+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96607+#endif
96608+
96609 /* Free the old page.. */
96610 new_page = old_page;
96611 ret |= VM_FAULT_WRITE;
96612@@ -2539,6 +2756,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96613 swap_free(entry);
96614 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
96615 try_to_free_swap(page);
96616+
96617+#ifdef CONFIG_PAX_SEGMEXEC
96618+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
96619+#endif
96620+
96621 unlock_page(page);
96622 if (page != swapcache) {
96623 /*
96624@@ -2562,6 +2784,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96625
96626 /* No need to invalidate - it was non-present before */
96627 update_mmu_cache(vma, address, page_table);
96628+
96629+#ifdef CONFIG_PAX_SEGMEXEC
96630+ pax_mirror_anon_pte(vma, address, page, ptl);
96631+#endif
96632+
96633 unlock:
96634 pte_unmap_unlock(page_table, ptl);
96635 out:
96636@@ -2581,40 +2808,6 @@ out_release:
96637 }
96638
96639 /*
96640- * This is like a special single-page "expand_{down|up}wards()",
96641- * except we must first make sure that 'address{-|+}PAGE_SIZE'
96642- * doesn't hit another vma.
96643- */
96644-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
96645-{
96646- address &= PAGE_MASK;
96647- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
96648- struct vm_area_struct *prev = vma->vm_prev;
96649-
96650- /*
96651- * Is there a mapping abutting this one below?
96652- *
96653- * That's only ok if it's the same stack mapping
96654- * that has gotten split..
96655- */
96656- if (prev && prev->vm_end == address)
96657- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
96658-
96659- return expand_downwards(vma, address - PAGE_SIZE);
96660- }
96661- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
96662- struct vm_area_struct *next = vma->vm_next;
96663-
96664- /* As VM_GROWSDOWN but s/below/above/ */
96665- if (next && next->vm_start == address + PAGE_SIZE)
96666- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
96667-
96668- return expand_upwards(vma, address + PAGE_SIZE);
96669- }
96670- return 0;
96671-}
96672-
96673-/*
96674 * We enter with non-exclusive mmap_sem (to exclude vma changes,
96675 * but allow concurrent faults), and pte mapped but not yet locked.
96676 * We return with mmap_sem still held, but pte unmapped and unlocked.
96677@@ -2624,27 +2817,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96678 unsigned int flags)
96679 {
96680 struct mem_cgroup *memcg;
96681- struct page *page;
96682+ struct page *page = NULL;
96683 spinlock_t *ptl;
96684 pte_t entry;
96685
96686- pte_unmap(page_table);
96687-
96688- /* Check if we need to add a guard page to the stack */
96689- if (check_stack_guard_page(vma, address) < 0)
96690- return VM_FAULT_SIGSEGV;
96691-
96692- /* Use the zero-page for reads */
96693 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
96694 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
96695 vma->vm_page_prot));
96696- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96697+ ptl = pte_lockptr(mm, pmd);
96698+ spin_lock(ptl);
96699 if (!pte_none(*page_table))
96700 goto unlock;
96701 goto setpte;
96702 }
96703
96704 /* Allocate our own private page. */
96705+ pte_unmap(page_table);
96706+
96707 if (unlikely(anon_vma_prepare(vma)))
96708 goto oom;
96709 page = alloc_zeroed_user_highpage_movable(vma, address);
96710@@ -2668,6 +2857,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96711 if (!pte_none(*page_table))
96712 goto release;
96713
96714+#ifdef CONFIG_PAX_SEGMEXEC
96715+ if (pax_find_mirror_vma(vma))
96716+ BUG_ON(!trylock_page(page));
96717+#endif
96718+
96719 inc_mm_counter_fast(mm, MM_ANONPAGES);
96720 page_add_new_anon_rmap(page, vma, address);
96721 mem_cgroup_commit_charge(page, memcg, false);
96722@@ -2677,6 +2871,12 @@ setpte:
96723
96724 /* No need to invalidate - it was non-present before */
96725 update_mmu_cache(vma, address, page_table);
96726+
96727+#ifdef CONFIG_PAX_SEGMEXEC
96728+ if (page)
96729+ pax_mirror_anon_pte(vma, address, page, ptl);
96730+#endif
96731+
96732 unlock:
96733 pte_unmap_unlock(page_table, ptl);
96734 return 0;
96735@@ -2907,6 +3107,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96736 return ret;
96737 }
96738 do_set_pte(vma, address, fault_page, pte, false, false);
96739+
96740+#ifdef CONFIG_PAX_SEGMEXEC
96741+ pax_mirror_file_pte(vma, address, fault_page, ptl);
96742+#endif
96743+
96744 unlock_page(fault_page);
96745 unlock_out:
96746 pte_unmap_unlock(pte, ptl);
96747@@ -2949,7 +3154,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96748 page_cache_release(fault_page);
96749 goto uncharge_out;
96750 }
96751+
96752+#ifdef CONFIG_PAX_SEGMEXEC
96753+ if (pax_find_mirror_vma(vma))
96754+ BUG_ON(!trylock_page(new_page));
96755+#endif
96756+
96757 do_set_pte(vma, address, new_page, pte, true, true);
96758+
96759+#ifdef CONFIG_PAX_SEGMEXEC
96760+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96761+#endif
96762+
96763 mem_cgroup_commit_charge(new_page, memcg, false);
96764 lru_cache_add_active_or_unevictable(new_page, vma);
96765 pte_unmap_unlock(pte, ptl);
96766@@ -2999,6 +3215,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96767 return ret;
96768 }
96769 do_set_pte(vma, address, fault_page, pte, true, false);
96770+
96771+#ifdef CONFIG_PAX_SEGMEXEC
96772+ pax_mirror_file_pte(vma, address, fault_page, ptl);
96773+#endif
96774+
96775 pte_unmap_unlock(pte, ptl);
96776
96777 if (set_page_dirty(fault_page))
96778@@ -3255,6 +3476,12 @@ static int handle_pte_fault(struct mm_struct *mm,
96779 if (flags & FAULT_FLAG_WRITE)
96780 flush_tlb_fix_spurious_fault(vma, address);
96781 }
96782+
96783+#ifdef CONFIG_PAX_SEGMEXEC
96784+ pax_mirror_pte(vma, address, pte, pmd, ptl);
96785+ return 0;
96786+#endif
96787+
96788 unlock:
96789 pte_unmap_unlock(pte, ptl);
96790 return 0;
96791@@ -3274,9 +3501,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96792 pmd_t *pmd;
96793 pte_t *pte;
96794
96795+#ifdef CONFIG_PAX_SEGMEXEC
96796+ struct vm_area_struct *vma_m;
96797+#endif
96798+
96799 if (unlikely(is_vm_hugetlb_page(vma)))
96800 return hugetlb_fault(mm, vma, address, flags);
96801
96802+#ifdef CONFIG_PAX_SEGMEXEC
96803+ vma_m = pax_find_mirror_vma(vma);
96804+ if (vma_m) {
96805+ unsigned long address_m;
96806+ pgd_t *pgd_m;
96807+ pud_t *pud_m;
96808+ pmd_t *pmd_m;
96809+
96810+ if (vma->vm_start > vma_m->vm_start) {
96811+ address_m = address;
96812+ address -= SEGMEXEC_TASK_SIZE;
96813+ vma = vma_m;
96814+ } else
96815+ address_m = address + SEGMEXEC_TASK_SIZE;
96816+
96817+ pgd_m = pgd_offset(mm, address_m);
96818+ pud_m = pud_alloc(mm, pgd_m, address_m);
96819+ if (!pud_m)
96820+ return VM_FAULT_OOM;
96821+ pmd_m = pmd_alloc(mm, pud_m, address_m);
96822+ if (!pmd_m)
96823+ return VM_FAULT_OOM;
96824+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
96825+ return VM_FAULT_OOM;
96826+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
96827+ }
96828+#endif
96829+
96830 pgd = pgd_offset(mm, address);
96831 pud = pud_alloc(mm, pgd, address);
96832 if (!pud)
96833@@ -3411,6 +3670,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
96834 spin_unlock(&mm->page_table_lock);
96835 return 0;
96836 }
96837+
96838+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
96839+{
96840+ pud_t *new = pud_alloc_one(mm, address);
96841+ if (!new)
96842+ return -ENOMEM;
96843+
96844+ smp_wmb(); /* See comment in __pte_alloc */
96845+
96846+ spin_lock(&mm->page_table_lock);
96847+ if (pgd_present(*pgd)) /* Another has populated it */
96848+ pud_free(mm, new);
96849+ else
96850+ pgd_populate_kernel(mm, pgd, new);
96851+ spin_unlock(&mm->page_table_lock);
96852+ return 0;
96853+}
96854 #endif /* __PAGETABLE_PUD_FOLDED */
96855
96856 #ifndef __PAGETABLE_PMD_FOLDED
96857@@ -3441,6 +3717,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
96858 spin_unlock(&mm->page_table_lock);
96859 return 0;
96860 }
96861+
96862+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
96863+{
96864+ pmd_t *new = pmd_alloc_one(mm, address);
96865+ if (!new)
96866+ return -ENOMEM;
96867+
96868+ smp_wmb(); /* See comment in __pte_alloc */
96869+
96870+ spin_lock(&mm->page_table_lock);
96871+#ifndef __ARCH_HAS_4LEVEL_HACK
96872+ if (pud_present(*pud)) /* Another has populated it */
96873+ pmd_free(mm, new);
96874+ else
96875+ pud_populate_kernel(mm, pud, new);
96876+#else
96877+ if (pgd_present(*pud)) /* Another has populated it */
96878+ pmd_free(mm, new);
96879+ else
96880+ pgd_populate_kernel(mm, pud, new);
96881+#endif /* __ARCH_HAS_4LEVEL_HACK */
96882+ spin_unlock(&mm->page_table_lock);
96883+ return 0;
96884+}
96885 #endif /* __PAGETABLE_PMD_FOLDED */
96886
96887 static int __follow_pte(struct mm_struct *mm, unsigned long address,
96888@@ -3550,8 +3850,8 @@ out:
96889 return ret;
96890 }
96891
96892-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
96893- void *buf, int len, int write)
96894+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
96895+ void *buf, size_t len, int write)
96896 {
96897 resource_size_t phys_addr;
96898 unsigned long prot = 0;
96899@@ -3577,8 +3877,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
96900 * Access another process' address space as given in mm. If non-NULL, use the
96901 * given task for page fault accounting.
96902 */
96903-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96904- unsigned long addr, void *buf, int len, int write)
96905+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96906+ unsigned long addr, void *buf, size_t len, int write)
96907 {
96908 struct vm_area_struct *vma;
96909 void *old_buf = buf;
96910@@ -3586,7 +3886,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96911 down_read(&mm->mmap_sem);
96912 /* ignore errors, just check how much was successfully transferred */
96913 while (len) {
96914- int bytes, ret, offset;
96915+ ssize_t bytes, ret, offset;
96916 void *maddr;
96917 struct page *page = NULL;
96918
96919@@ -3647,8 +3947,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96920 *
96921 * The caller must hold a reference on @mm.
96922 */
96923-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
96924- void *buf, int len, int write)
96925+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
96926+ void *buf, size_t len, int write)
96927 {
96928 return __access_remote_vm(NULL, mm, addr, buf, len, write);
96929 }
96930@@ -3658,11 +3958,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
96931 * Source/target buffer must be kernel space,
96932 * Do not walk the page table directly, use get_user_pages
96933 */
96934-int access_process_vm(struct task_struct *tsk, unsigned long addr,
96935- void *buf, int len, int write)
96936+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
96937+ void *buf, size_t len, int write)
96938 {
96939 struct mm_struct *mm;
96940- int ret;
96941+ ssize_t ret;
96942
96943 mm = get_task_mm(tsk);
96944 if (!mm)
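
The large mm/memory.c additions implement the core of PaX SEGMEXEC mirroring: every executable mapping has a twin SEGMEXEC_TASK_SIZE higher, the pax_mirror_{anon,file,pfn}_pte() helpers propagate a freshly established PTE into the twin under the nested page-table lock, and __handle_mm_fault() first normalizes the fault onto the lower copy while recording the mirrored address. A standalone sketch of that normalization arithmetic (the kernel decides by comparing vma start addresses, so the demo's plain address test and split constant are simplifications):

#include <stdio.h>

#define DEMO_SEGMEXEC_TASK_SIZE 0x60000000UL  /* illustrative 1.5GB split */

struct demo_fault {
    unsigned long address;    /* where the fault is handled (lower half) */
    unsigned long address_m;  /* mirrored address (upper half)           */
};

/* Models the normalization in the patched __handle_mm_fault(): faults on
 * either half are folded onto the lower mapping, and the twin address is
 * derived by adding the split size. */
static struct demo_fault normalize(unsigned long fault_addr)
{
    struct demo_fault f;

    if (fault_addr >= DEMO_SEGMEXEC_TASK_SIZE) {   /* hit the mirror */
        f.address_m = fault_addr;
        f.address = fault_addr - DEMO_SEGMEXEC_TASK_SIZE;
    } else {                                       /* hit the primary */
        f.address = fault_addr;
        f.address_m = fault_addr + DEMO_SEGMEXEC_TASK_SIZE;
    }
    return f;
}

int main(void)
{
    struct demo_fault a = normalize(0x08048000UL);
    struct demo_fault b = normalize(0x08048000UL + DEMO_SEGMEXEC_TASK_SIZE);

    printf("primary fault: handle %#lx, mirror %#lx\n", a.address, a.address_m);
    printf("mirror  fault: handle %#lx, mirror %#lx\n", b.address, b.address_m);
    return 0;
}
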
96945diff --git a/mm/mempolicy.c b/mm/mempolicy.c
96946index 0e0961b..c9143b9 100644
96947--- a/mm/mempolicy.c
96948+++ b/mm/mempolicy.c
96949@@ -744,6 +744,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
96950 unsigned long vmstart;
96951 unsigned long vmend;
96952
96953+#ifdef CONFIG_PAX_SEGMEXEC
96954+ struct vm_area_struct *vma_m;
96955+#endif
96956+
96957 vma = find_vma(mm, start);
96958 if (!vma || vma->vm_start > start)
96959 return -EFAULT;
96960@@ -787,6 +791,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
96961 err = vma_replace_policy(vma, new_pol);
96962 if (err)
96963 goto out;
96964+
96965+#ifdef CONFIG_PAX_SEGMEXEC
96966+ vma_m = pax_find_mirror_vma(vma);
96967+ if (vma_m) {
96968+ err = vma_replace_policy(vma_m, new_pol);
96969+ if (err)
96970+ goto out;
96971+ }
96972+#endif
96973+
96974 }
96975
96976 out:
96977@@ -1201,6 +1215,17 @@ static long do_mbind(unsigned long start, unsigned long len,
96978
96979 if (end < start)
96980 return -EINVAL;
96981+
96982+#ifdef CONFIG_PAX_SEGMEXEC
96983+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
96984+ if (end > SEGMEXEC_TASK_SIZE)
96985+ return -EINVAL;
96986+ } else
96987+#endif
96988+
96989+ if (end > TASK_SIZE)
96990+ return -EINVAL;
96991+
96992 if (end == start)
96993 return 0;
96994
96995@@ -1426,8 +1451,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96996 */
96997 tcred = __task_cred(task);
96998 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
96999- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
97000- !capable(CAP_SYS_NICE)) {
97001+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
97002 rcu_read_unlock();
97003 err = -EPERM;
97004 goto out_put;
97005@@ -1458,6 +1482,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
97006 goto out;
97007 }
97008
97009+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
97010+ if (mm != current->mm &&
97011+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
97012+ mmput(mm);
97013+ err = -EPERM;
97014+ goto out;
97015+ }
97016+#endif
97017+
97018 err = do_migrate_pages(mm, old, new,
97019 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
97020
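
Here, and again in the mm/migrate.c hunk below, the patch drops the !uid_eq(cred->uid, tcred->uid) clause from the permission check, so a merely matching real uid no longer authorizes acting on another task's pages; the caller now needs an euid match, a match against the target's saved uid, or CAP_SYS_NICE. A standalone truth-table sketch of the two predicates, derived directly from the visible condition (struct shapes and the sample uids are invented for the demo):

#include <stdio.h>

struct cred_demo  { unsigned euid, uid; };
struct tcred_demo { unsigned suid, uid; };

/* Upstream allowed the call if ANY of the four uid pairs matched (or the
 * caller had CAP_SYS_NICE); the patched form removes the uid==uid escape. */
static int allowed_upstream(struct cred_demo c, struct tcred_demo t, int cap)
{
    return c.euid == t.suid || c.euid == t.uid ||
           c.uid  == t.suid || c.uid  == t.uid || cap;
}

static int allowed_patched(struct cred_demo c, struct tcred_demo t, int cap)
{
    return c.euid == t.suid || c.euid == t.uid ||
           c.uid  == t.suid || cap;
}

int main(void)
{
    /* caller with euid 1001 but real uid 1000; target runs as uid 1000
     * with saved uid 0: only the real uids match */
    struct cred_demo  caller = { .euid = 1001, .uid = 1000 };
    struct tcred_demo target = { .suid = 0,    .uid = 1000 };

    printf("upstream: %s\n", allowed_upstream(caller, target, 0) ? "allowed" : "EPERM");
    printf("patched:  %s\n", allowed_patched(caller, target, 0) ? "allowed" : "EPERM");
    return 0;
}
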
97021diff --git a/mm/migrate.c b/mm/migrate.c
97022index 344cdf6..07399500 100644
97023--- a/mm/migrate.c
97024+++ b/mm/migrate.c
97025@@ -1503,8 +1503,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
97026 */
97027 tcred = __task_cred(task);
97028 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
97029- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
97030- !capable(CAP_SYS_NICE)) {
97031+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
97032 rcu_read_unlock();
97033 err = -EPERM;
97034 goto out;
97035diff --git a/mm/mlock.c b/mm/mlock.c
97036index 73cf098..ab547c7 100644
97037--- a/mm/mlock.c
97038+++ b/mm/mlock.c
97039@@ -14,6 +14,7 @@
97040 #include <linux/pagevec.h>
97041 #include <linux/mempolicy.h>
97042 #include <linux/syscalls.h>
97043+#include <linux/security.h>
97044 #include <linux/sched.h>
97045 #include <linux/export.h>
97046 #include <linux/rmap.h>
97047@@ -613,7 +614,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
97048 {
97049 unsigned long nstart, end, tmp;
97050 struct vm_area_struct * vma, * prev;
97051- int error;
97052+ int error = 0;
97053
97054 VM_BUG_ON(start & ~PAGE_MASK);
97055 VM_BUG_ON(len != PAGE_ALIGN(len));
97056@@ -622,6 +623,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
97057 return -EINVAL;
97058 if (end == start)
97059 return 0;
97060+ if (end > TASK_SIZE)
97061+ return -EINVAL;
97062+
97063 vma = find_vma(current->mm, start);
97064 if (!vma || vma->vm_start > start)
97065 return -ENOMEM;
97066@@ -633,6 +637,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
97067 for (nstart = start ; ; ) {
97068 vm_flags_t newflags;
97069
97070+#ifdef CONFIG_PAX_SEGMEXEC
97071+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
97072+ break;
97073+#endif
97074+
97075 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
97076
97077 newflags = vma->vm_flags & ~VM_LOCKED;
97078@@ -746,6 +755,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
97079 locked += current->mm->locked_vm;
97080
97081 /* check against resource limits */
97082+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
97083 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
97084 error = do_mlock(start, len, 1);
97085
97086@@ -783,6 +793,11 @@ static int do_mlockall(int flags)
97087 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
97088 vm_flags_t newflags;
97089
97090+#ifdef CONFIG_PAX_SEGMEXEC
97091+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
97092+ break;
97093+#endif
97094+
97095 newflags = vma->vm_flags & ~VM_LOCKED;
97096 if (flags & MCL_CURRENT)
97097 newflags |= VM_LOCKED;
97098@@ -814,8 +829,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
97099 lock_limit >>= PAGE_SHIFT;
97100
97101 ret = -ENOMEM;
97102+
97103+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
97104+
97105 down_write(&current->mm->mmap_sem);
97106-
97107 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
97108 capable(CAP_IPC_LOCK))
97109 ret = do_mlockall(flags);
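
The mm/mlock.c hunks add a TASK_SIZE bound in do_mlock(), SEGMEXEC-aware loop exits, and gr_learn_resource() calls that feed grsecurity's resource learning just before the RLIMIT_MEMLOCK comparison. The limit check itself follows the usual kernel pattern: the rlimit arrives in bytes, everything is compared in pages. A standalone sketch of that accounting (4KB pages and all names assumed for the demo):

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12  /* assume 4KB pages */

/* Mirrors the mlock-style limit check: lock_limit comes from
 * RLIMIT_MEMLOCK in bytes, the comparison happens in pages. */
static int mlock_would_exceed(unsigned long locked_pages,
                              unsigned long req_bytes,
                              unsigned long rlim_bytes,
                              int cap_ipc_lock)
{
    unsigned long req_pages = (req_bytes + (1UL << DEMO_PAGE_SHIFT) - 1)
                                  >> DEMO_PAGE_SHIFT;
    unsigned long lock_limit = rlim_bytes >> DEMO_PAGE_SHIFT;

    if (cap_ipc_lock)
        return 0;           /* CAP_IPC_LOCK bypasses the limit */
    return locked_pages + req_pages > lock_limit;
}

int main(void)
{
    /* 60 pages locked, asking for 8KB more, 64-page (256KB) limit */
    printf("within limit: %d\n",
           mlock_would_exceed(60, 8192, 64UL << DEMO_PAGE_SHIFT, 0));
    /* 63 pages locked, asking for 8KB more */
    printf("exceeds:      %d\n",
           mlock_would_exceed(63, 8192, 64UL << DEMO_PAGE_SHIFT, 0));
    return 0;
}
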
97110diff --git a/mm/mmap.c b/mm/mmap.c
97111index 0bc66f1..2bfa432 100644
97112--- a/mm/mmap.c
97113+++ b/mm/mmap.c
97114@@ -41,6 +41,7 @@
97115 #include <linux/notifier.h>
97116 #include <linux/memory.h>
97117 #include <linux/printk.h>
97118+#include <linux/random.h>
97119
97120 #include <asm/uaccess.h>
97121 #include <asm/cacheflush.h>
97122@@ -57,6 +58,16 @@
97123 #define arch_rebalance_pgtables(addr, len) (addr)
97124 #endif
97125
97126+static inline void verify_mm_writelocked(struct mm_struct *mm)
97127+{
97128+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
97129+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
97130+ up_read(&mm->mmap_sem);
97131+ BUG();
97132+ }
97133+#endif
97134+}
97135+
97136 static void unmap_region(struct mm_struct *mm,
97137 struct vm_area_struct *vma, struct vm_area_struct *prev,
97138 unsigned long start, unsigned long end);
97139@@ -76,16 +87,25 @@ static void unmap_region(struct mm_struct *mm,
97140 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
97141 *
97142 */
97143-pgprot_t protection_map[16] = {
97144+pgprot_t protection_map[16] __read_only = {
97145 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
97146 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
97147 };
97148
97149-pgprot_t vm_get_page_prot(unsigned long vm_flags)
97150+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
97151 {
97152- return __pgprot(pgprot_val(protection_map[vm_flags &
97153+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
97154 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
97155 pgprot_val(arch_vm_get_page_prot(vm_flags)));
97156+
97157+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97158+ if (!(__supported_pte_mask & _PAGE_NX) &&
97159+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
97160+ (vm_flags & (VM_READ | VM_WRITE)))
97161+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
97162+#endif
97163+
97164+ return prot;
97165 }
97166 EXPORT_SYMBOL(vm_get_page_prot);
97167
97168@@ -114,6 +134,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
97169 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
97170 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
97171 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
97172+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
97173 /*
97174 * Make sure vm_committed_as in one cacheline and not cacheline shared with
97175 * other variables. It can be updated by several CPUs frequently.
97176@@ -274,6 +295,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
97177 struct vm_area_struct *next = vma->vm_next;
97178
97179 might_sleep();
97180+ BUG_ON(vma->vm_mirror);
97181 if (vma->vm_ops && vma->vm_ops->close)
97182 vma->vm_ops->close(vma);
97183 if (vma->vm_file)
97184@@ -287,6 +309,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len);
97185
97186 SYSCALL_DEFINE1(brk, unsigned long, brk)
97187 {
97188+ unsigned long rlim;
97189 unsigned long retval;
97190 unsigned long newbrk, oldbrk;
97191 struct mm_struct *mm = current->mm;
97192@@ -317,7 +340,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
97193 * segment grow beyond its set limit the in case where the limit is
97194 * not page aligned -Ram Gupta
97195 */
97196- if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
97197+ rlim = rlimit(RLIMIT_DATA);
97198+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
97199+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
97200+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
97201+ rlim = 4096 * PAGE_SIZE;
97202+#endif
97203+ if (check_data_rlimit(rlim, brk, mm->start_brk,
97204 mm->end_data, mm->start_data))
97205 goto out;
97206
97207@@ -976,6 +1005,12 @@ static int
97208 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
97209 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
97210 {
97211+
97212+#ifdef CONFIG_PAX_SEGMEXEC
97213+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
97214+ return 0;
97215+#endif
97216+
97217 if (is_mergeable_vma(vma, file, vm_flags) &&
97218 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
97219 if (vma->vm_pgoff == vm_pgoff)
97220@@ -995,6 +1030,12 @@ static int
97221 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
97222 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
97223 {
97224+
97225+#ifdef CONFIG_PAX_SEGMEXEC
97226+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
97227+ return 0;
97228+#endif
97229+
97230 if (is_mergeable_vma(vma, file, vm_flags) &&
97231 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
97232 pgoff_t vm_pglen;
97233@@ -1044,6 +1085,13 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97234 struct vm_area_struct *area, *next;
97235 int err;
97236
97237+#ifdef CONFIG_PAX_SEGMEXEC
97238+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
97239+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
97240+
97241+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
97242+#endif
97243+
97244 /*
97245 * We later require that vma->vm_flags == vm_flags,
97246 * so this tests vma->vm_flags & VM_SPECIAL, too.
97247@@ -1059,6 +1107,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97248 if (next && next->vm_end == end) /* cases 6, 7, 8 */
97249 next = next->vm_next;
97250
97251+#ifdef CONFIG_PAX_SEGMEXEC
97252+ if (prev)
97253+ prev_m = pax_find_mirror_vma(prev);
97254+ if (area)
97255+ area_m = pax_find_mirror_vma(area);
97256+ if (next)
97257+ next_m = pax_find_mirror_vma(next);
97258+#endif
97259+
97260 /*
97261 * Can it merge with the predecessor?
97262 */
97263@@ -1078,9 +1135,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97264 /* cases 1, 6 */
97265 err = vma_adjust(prev, prev->vm_start,
97266 next->vm_end, prev->vm_pgoff, NULL);
97267- } else /* cases 2, 5, 7 */
97268+
97269+#ifdef CONFIG_PAX_SEGMEXEC
97270+ if (!err && prev_m)
97271+ err = vma_adjust(prev_m, prev_m->vm_start,
97272+ next_m->vm_end, prev_m->vm_pgoff, NULL);
97273+#endif
97274+
97275+ } else { /* cases 2, 5, 7 */
97276 err = vma_adjust(prev, prev->vm_start,
97277 end, prev->vm_pgoff, NULL);
97278+
97279+#ifdef CONFIG_PAX_SEGMEXEC
97280+ if (!err && prev_m)
97281+ err = vma_adjust(prev_m, prev_m->vm_start,
97282+ end_m, prev_m->vm_pgoff, NULL);
97283+#endif
97284+
97285+ }
97286 if (err)
97287 return NULL;
97288 khugepaged_enter_vma_merge(prev, vm_flags);
97289@@ -1094,12 +1166,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97290 mpol_equal(policy, vma_policy(next)) &&
97291 can_vma_merge_before(next, vm_flags,
97292 anon_vma, file, pgoff+pglen)) {
97293- if (prev && addr < prev->vm_end) /* case 4 */
97294+ if (prev && addr < prev->vm_end) { /* case 4 */
97295 err = vma_adjust(prev, prev->vm_start,
97296 addr, prev->vm_pgoff, NULL);
97297- else /* cases 3, 8 */
97298+
97299+#ifdef CONFIG_PAX_SEGMEXEC
97300+ if (!err && prev_m)
97301+ err = vma_adjust(prev_m, prev_m->vm_start,
97302+ addr_m, prev_m->vm_pgoff, NULL);
97303+#endif
97304+
97305+ } else { /* cases 3, 8 */
97306 err = vma_adjust(area, addr, next->vm_end,
97307 next->vm_pgoff - pglen, NULL);
97308+
97309+#ifdef CONFIG_PAX_SEGMEXEC
97310+ if (!err && area_m)
97311+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
97312+ next_m->vm_pgoff - pglen, NULL);
97313+#endif
97314+
97315+ }
97316 if (err)
97317 return NULL;
97318 khugepaged_enter_vma_merge(area, vm_flags);
97319@@ -1208,8 +1295,10 @@ none:
97320 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97321 struct file *file, long pages)
97322 {
97323- const unsigned long stack_flags
97324- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
97325+
97326+#ifdef CONFIG_PAX_RANDMMAP
97327+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
97328+#endif
97329
97330 mm->total_vm += pages;
97331
97332@@ -1217,7 +1306,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97333 mm->shared_vm += pages;
97334 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
97335 mm->exec_vm += pages;
97336- } else if (flags & stack_flags)
97337+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
97338 mm->stack_vm += pages;
97339 }
97340 #endif /* CONFIG_PROC_FS */
97341@@ -1247,6 +1336,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
97342 locked += mm->locked_vm;
97343 lock_limit = rlimit(RLIMIT_MEMLOCK);
97344 lock_limit >>= PAGE_SHIFT;
97345+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97346 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
97347 return -EAGAIN;
97348 }
97349@@ -1273,7 +1363,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97350 * (the exception is when the underlying filesystem is noexec
97351 * mounted, in which case we dont add PROT_EXEC.)
97352 */
97353- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
97354+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
97355 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
97356 prot |= PROT_EXEC;
97357
97358@@ -1299,7 +1389,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97359 /* Obtain the address to map to. we verify (or select) it and ensure
97360 * that it represents a valid section of the address space.
97361 */
97362- addr = get_unmapped_area(file, addr, len, pgoff, flags);
97363+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
97364 if (addr & ~PAGE_MASK)
97365 return addr;
97366
97367@@ -1310,6 +1400,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97368 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
97369 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
97370
97371+#ifdef CONFIG_PAX_MPROTECT
97372+ if (mm->pax_flags & MF_PAX_MPROTECT) {
97373+
97374+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
97375+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
97376+ mm->binfmt->handle_mmap)
97377+ mm->binfmt->handle_mmap(file);
97378+#endif
97379+
97380+#ifndef CONFIG_PAX_MPROTECT_COMPAT
97381+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
97382+ gr_log_rwxmmap(file);
97383+
97384+#ifdef CONFIG_PAX_EMUPLT
97385+ vm_flags &= ~VM_EXEC;
97386+#else
97387+ return -EPERM;
97388+#endif
97389+
97390+ }
97391+
97392+ if (!(vm_flags & VM_EXEC))
97393+ vm_flags &= ~VM_MAYEXEC;
97394+#else
97395+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
97396+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
97397+#endif
97398+ else
97399+ vm_flags &= ~VM_MAYWRITE;
97400+ }
97401+#endif
97402+
97403+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97404+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
97405+ vm_flags &= ~VM_PAGEEXEC;
97406+#endif
97407+
97408 if (flags & MAP_LOCKED)
97409 if (!can_do_mlock())
97410 return -EPERM;
97411@@ -1397,6 +1524,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97412 vm_flags |= VM_NORESERVE;
97413 }
97414
97415+ if (!gr_acl_handle_mmap(file, prot))
97416+ return -EACCES;
97417+
97418 addr = mmap_region(file, addr, len, vm_flags, pgoff);
97419 if (!IS_ERR_VALUE(addr) &&
97420 ((vm_flags & VM_LOCKED) ||
97421@@ -1490,7 +1620,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
97422 vm_flags_t vm_flags = vma->vm_flags;
97423
97424 /* If it was private or non-writable, the write bit is already clear */
97425- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
97426+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
97427 return 0;
97428
97429 /* The backer wishes to know when pages are first written to? */
97430@@ -1541,7 +1671,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97431 struct rb_node **rb_link, *rb_parent;
97432 unsigned long charged = 0;
97433
97434+#ifdef CONFIG_PAX_SEGMEXEC
97435+ struct vm_area_struct *vma_m = NULL;
97436+#endif
97437+
97438+ /*
97439+ * mm->mmap_sem is required to protect against another thread
97440+ * changing the mappings in case we sleep.
97441+ */
97442+ verify_mm_writelocked(mm);
97443+
97444 /* Check against address space limit. */
97445+
97446+#ifdef CONFIG_PAX_RANDMMAP
97447+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
97448+#endif
97449+
97450 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
97451 unsigned long nr_pages;
97452
97453@@ -1560,11 +1705,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97454
97455 /* Clear old maps */
97456 error = -ENOMEM;
97457-munmap_back:
97458 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
97459 if (do_munmap(mm, addr, len))
97460 return -ENOMEM;
97461- goto munmap_back;
97462+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
97463 }
97464
97465 /*
97466@@ -1595,6 +1739,16 @@ munmap_back:
97467 goto unacct_error;
97468 }
97469
97470+#ifdef CONFIG_PAX_SEGMEXEC
97471+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
97472+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97473+ if (!vma_m) {
97474+ error = -ENOMEM;
97475+ goto free_vma;
97476+ }
97477+ }
97478+#endif
97479+
97480 vma->vm_mm = mm;
97481 vma->vm_start = addr;
97482 vma->vm_end = addr + len;
97483@@ -1625,6 +1779,13 @@ munmap_back:
97484 if (error)
97485 goto unmap_and_free_vma;
97486
97487+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97488+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
97489+ vma->vm_flags |= VM_PAGEEXEC;
97490+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
97491+ }
97492+#endif
97493+
97494 /* Can addr have changed??
97495 *
97496 * Answer: Yes, several device drivers can do it in their
97497@@ -1643,6 +1804,12 @@ munmap_back:
97498 }
97499
97500 vma_link(mm, vma, prev, rb_link, rb_parent);
97501+
97502+#ifdef CONFIG_PAX_SEGMEXEC
97503+ if (vma_m)
97504+ BUG_ON(pax_mirror_vma(vma_m, vma));
97505+#endif
97506+
97507 /* Once vma denies write, undo our temporary denial count */
97508 if (file) {
97509 if (vm_flags & VM_SHARED)
97510@@ -1655,6 +1822,7 @@ out:
97511 perf_event_mmap(vma);
97512
97513 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
97514+ track_exec_limit(mm, addr, addr + len, vm_flags);
97515 if (vm_flags & VM_LOCKED) {
97516 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
97517 vma == get_gate_vma(current->mm)))
97518@@ -1692,6 +1860,12 @@ allow_write_and_free_vma:
97519 if (vm_flags & VM_DENYWRITE)
97520 allow_write_access(file);
97521 free_vma:
97522+
97523+#ifdef CONFIG_PAX_SEGMEXEC
97524+ if (vma_m)
97525+ kmem_cache_free(vm_area_cachep, vma_m);
97526+#endif
97527+
97528 kmem_cache_free(vm_area_cachep, vma);
97529 unacct_error:
97530 if (charged)
97531@@ -1699,7 +1873,63 @@ unacct_error:
97532 return error;
97533 }
97534
97535-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
97536+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
97537+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
97538+{
97539+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
97540+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
97541+
97542+ return 0;
97543+}
97544+#endif
97545+
97546+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
97547+{
97548+ if (!vma) {
97549+#ifdef CONFIG_STACK_GROWSUP
97550+ if (addr > sysctl_heap_stack_gap)
97551+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
97552+ else
97553+ vma = find_vma(current->mm, 0);
97554+ if (vma && (vma->vm_flags & VM_GROWSUP))
97555+ return false;
97556+#endif
97557+ return true;
97558+ }
97559+
97560+ if (addr + len > vma->vm_start)
97561+ return false;
97562+
97563+ if (vma->vm_flags & VM_GROWSDOWN)
97564+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
97565+#ifdef CONFIG_STACK_GROWSUP
97566+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
97567+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
97568+#endif
97569+ else if (offset)
97570+ return offset <= vma->vm_start - addr - len;
97571+
97572+ return true;
97573+}
97574+
97575+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
97576+{
97577+ if (vma->vm_start < len)
97578+ return -ENOMEM;
97579+
97580+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
97581+ if (offset <= vma->vm_start - len)
97582+ return vma->vm_start - len - offset;
97583+ else
97584+ return -ENOMEM;
97585+ }
97586+
97587+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
97588+ return vma->vm_start - len - sysctl_heap_stack_gap;
97589+ return -ENOMEM;
97590+}
97591+
97592+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
97593 {
97594 /*
97595 * We implement the search by looking for an rbtree node that
97596@@ -1747,11 +1977,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
97597 }
97598 }
97599
97600- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
97601+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
97602 check_current:
97603 /* Check if current node has a suitable gap */
97604 if (gap_start > high_limit)
97605 return -ENOMEM;
97606+
97607+ if (gap_end - gap_start > info->threadstack_offset)
97608+ gap_start += info->threadstack_offset;
97609+ else
97610+ gap_start = gap_end;
97611+
97612+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
97613+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97614+ gap_start += sysctl_heap_stack_gap;
97615+ else
97616+ gap_start = gap_end;
97617+ }
97618+ if (vma->vm_flags & VM_GROWSDOWN) {
97619+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97620+ gap_end -= sysctl_heap_stack_gap;
97621+ else
97622+ gap_end = gap_start;
97623+ }
97624 if (gap_end >= low_limit && gap_end - gap_start >= length)
97625 goto found;
97626
97627@@ -1801,7 +2049,7 @@ found:
97628 return gap_start;
97629 }
97630
97631-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
97632+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
97633 {
97634 struct mm_struct *mm = current->mm;
97635 struct vm_area_struct *vma;
97636@@ -1855,6 +2103,24 @@ check_current:
97637 gap_end = vma->vm_start;
97638 if (gap_end < low_limit)
97639 return -ENOMEM;
97640+
97641+ if (gap_end - gap_start > info->threadstack_offset)
97642+ gap_end -= info->threadstack_offset;
97643+ else
97644+ gap_end = gap_start;
97645+
97646+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
97647+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97648+ gap_start += sysctl_heap_stack_gap;
97649+ else
97650+ gap_start = gap_end;
97651+ }
97652+ if (vma->vm_flags & VM_GROWSDOWN) {
97653+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97654+ gap_end -= sysctl_heap_stack_gap;
97655+ else
97656+ gap_end = gap_start;
97657+ }
97658 if (gap_start <= high_limit && gap_end - gap_start >= length)
97659 goto found;
97660
97661@@ -1918,6 +2184,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97662 struct mm_struct *mm = current->mm;
97663 struct vm_area_struct *vma;
97664 struct vm_unmapped_area_info info;
97665+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
97666
97667 if (len > TASK_SIZE - mmap_min_addr)
97668 return -ENOMEM;
97669@@ -1925,11 +2192,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97670 if (flags & MAP_FIXED)
97671 return addr;
97672
97673+#ifdef CONFIG_PAX_RANDMMAP
97674+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97675+#endif
97676+
97677 if (addr) {
97678 addr = PAGE_ALIGN(addr);
97679 vma = find_vma(mm, addr);
97680 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
97681- (!vma || addr + len <= vma->vm_start))
97682+ check_heap_stack_gap(vma, addr, len, offset))
97683 return addr;
97684 }
97685
97686@@ -1938,6 +2209,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97687 info.low_limit = mm->mmap_base;
97688 info.high_limit = TASK_SIZE;
97689 info.align_mask = 0;
97690+ info.threadstack_offset = offset;
97691 return vm_unmapped_area(&info);
97692 }
97693 #endif
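The gr_rand_threadstack_offset() helper used above adds a random pad of 1 to 256 pages below MAP_STACK mappings when RANDMMAP is active. A minimal user-space model of the offset computation (names are illustrative; the kernel draws the random word from prandom_u32()):

#include <stdint.h>

#define PAGE_SHIFT 12	/* assuming 4 KiB pages */

/* Maps a PRNG word to a pad of 1..256 pages (4 KiB .. 1 MiB). */
static unsigned long threadstack_offset(uint32_t rnd)
{
	return ((rnd & 0xFF) + 1UL) << PAGE_SHIFT;
}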
97694@@ -1956,6 +2228,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97695 struct mm_struct *mm = current->mm;
97696 unsigned long addr = addr0;
97697 struct vm_unmapped_area_info info;
97698+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
97699
97700 /* requested length too big for entire address space */
97701 if (len > TASK_SIZE - mmap_min_addr)
97702@@ -1964,12 +2237,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97703 if (flags & MAP_FIXED)
97704 return addr;
97705
97706+#ifdef CONFIG_PAX_RANDMMAP
97707+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97708+#endif
97709+
97710 /* requesting a specific address */
97711 if (addr) {
97712 addr = PAGE_ALIGN(addr);
97713 vma = find_vma(mm, addr);
97714 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
97715- (!vma || addr + len <= vma->vm_start))
97716+ check_heap_stack_gap(vma, addr, len, offset))
97717 return addr;
97718 }
97719
97720@@ -1978,6 +2255,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97721 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
97722 info.high_limit = mm->mmap_base;
97723 info.align_mask = 0;
97724+ info.threadstack_offset = offset;
97725 addr = vm_unmapped_area(&info);
97726
97727 /*
97728@@ -1990,6 +2268,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97729 VM_BUG_ON(addr != -ENOMEM);
97730 info.flags = 0;
97731 info.low_limit = TASK_UNMAPPED_BASE;
97732+
97733+#ifdef CONFIG_PAX_RANDMMAP
97734+ if (mm->pax_flags & MF_PAX_RANDMMAP)
97735+ info.low_limit += mm->delta_mmap;
97736+#endif
97737+
97738 info.high_limit = TASK_SIZE;
97739 addr = vm_unmapped_area(&info);
97740 }
97741@@ -2090,6 +2374,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
97742 return vma;
97743 }
97744
97745+#ifdef CONFIG_PAX_SEGMEXEC
97746+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
97747+{
97748+ struct vm_area_struct *vma_m;
97749+
97750+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
97751+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
97752+ BUG_ON(vma->vm_mirror);
97753+ return NULL;
97754+ }
97755+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
97756+ vma_m = vma->vm_mirror;
97757+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
97758+ BUG_ON(vma->vm_file != vma_m->vm_file);
97759+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
97760+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
97761+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
97762+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
97763+ return vma_m;
97764+}
97765+#endif
97766+
97767 /*
97768 * Verify that the stack growth is acceptable and
97769 * update accounting. This is shared with both the
97770@@ -2107,8 +2413,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97771
97772 /* Stack limit test */
97773 actual_size = size;
97774- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
97775- actual_size -= PAGE_SIZE;
97776+ gr_learn_resource(current, RLIMIT_STACK, actual_size, 1);
97777 if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
97778 return -ENOMEM;
97779
97780@@ -2119,6 +2424,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97781 locked = mm->locked_vm + grow;
97782 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
97783 limit >>= PAGE_SHIFT;
97784+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97785 if (locked > limit && !capable(CAP_IPC_LOCK))
97786 return -ENOMEM;
97787 }
97788@@ -2148,37 +2454,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97789 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
97790 * vma is the last one with address > vma->vm_end. Have to extend vma.
97791 */
97792+#ifndef CONFIG_IA64
97793+static
97794+#endif
97795 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97796 {
97797 int error;
97798+ bool locknext;
97799
97800 if (!(vma->vm_flags & VM_GROWSUP))
97801 return -EFAULT;
97802
97803+ /* Also guard against wrapping around to address 0. */
97804+ if (address < PAGE_ALIGN(address+1))
97805+ address = PAGE_ALIGN(address+1);
97806+ else
97807+ return -ENOMEM;
97808+
97809 /*
97810 * We must make sure the anon_vma is allocated
97811 * so that the anon_vma locking is not a noop.
97812 */
97813 if (unlikely(anon_vma_prepare(vma)))
97814 return -ENOMEM;
97815+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
97816+ if (locknext && anon_vma_prepare(vma->vm_next))
97817+ return -ENOMEM;
97818 vma_lock_anon_vma(vma);
97819+ if (locknext)
97820+ vma_lock_anon_vma(vma->vm_next);
97821
97822 /*
97823 * vma->vm_start/vm_end cannot change under us because the caller
97824 * is required to hold the mmap_sem in read mode. We need the
97825- * anon_vma lock to serialize against concurrent expand_stacks.
97826- * Also guard against wrapping around to address 0.
97827+ * anon_vma locks to serialize against concurrent expand_stacks
97828+ * and expand_upwards.
97829 */
97830- if (address < PAGE_ALIGN(address+4))
97831- address = PAGE_ALIGN(address+4);
97832- else {
97833- vma_unlock_anon_vma(vma);
97834- return -ENOMEM;
97835- }
97836 error = 0;
97837
97838 /* Somebody else might have raced and expanded it already */
97839- if (address > vma->vm_end) {
97840+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
97841+ error = -ENOMEM;
97842+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
97843 unsigned long size, grow;
97844
97845 size = address - vma->vm_start;
97846@@ -2213,6 +2530,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97847 }
97848 }
97849 }
97850+ if (locknext)
97851+ vma_unlock_anon_vma(vma->vm_next);
97852 vma_unlock_anon_vma(vma);
97853 khugepaged_enter_vma_merge(vma, vma->vm_flags);
97854 validate_mm(vma->vm_mm);
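The PAGE_ALIGN() guard moved to the top of expand_upwards() above catches address wrap-around before any locking: if rounding address+1 up to a page boundary yields a smaller value, the arithmetic overflowed the address space. A standalone sketch, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long near_top = ~0UL - 100;	/* inside the last page */

	/* PAGE_ALIGN(near_top + 1) wraps to 0: not safe, prints 0 */
	printf("%d\n", near_top < PAGE_ALIGN(near_top + 1));
	/* an ordinary address rounds up normally: safe, prints 1 */
	printf("%d\n", 0x1000UL < PAGE_ALIGN(0x1000UL + 1));
	return 0;
}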
97855@@ -2227,6 +2546,8 @@ int expand_downwards(struct vm_area_struct *vma,
97856 unsigned long address)
97857 {
97858 int error;
97859+ bool lockprev = false;
97860+ struct vm_area_struct *prev;
97861
97862 /*
97863 * We must make sure the anon_vma is allocated
97864@@ -2240,6 +2561,15 @@ int expand_downwards(struct vm_area_struct *vma,
97865 if (error)
97866 return error;
97867
97868+ prev = vma->vm_prev;
97869+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
97870+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
97871+#endif
97872+ if (lockprev && anon_vma_prepare(prev))
97873+ return -ENOMEM;
97874+ if (lockprev)
97875+ vma_lock_anon_vma(prev);
97876+
97877 vma_lock_anon_vma(vma);
97878
97879 /*
97880@@ -2249,9 +2579,17 @@ int expand_downwards(struct vm_area_struct *vma,
97881 */
97882
97883 /* Somebody else might have raced and expanded it already */
97884- if (address < vma->vm_start) {
97885+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
97886+ error = -ENOMEM;
97887+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
97888 unsigned long size, grow;
97889
97890+#ifdef CONFIG_PAX_SEGMEXEC
97891+ struct vm_area_struct *vma_m;
97892+
97893+ vma_m = pax_find_mirror_vma(vma);
97894+#endif
97895+
97896 size = vma->vm_end - address;
97897 grow = (vma->vm_start - address) >> PAGE_SHIFT;
97898
97899@@ -2276,13 +2614,27 @@ int expand_downwards(struct vm_area_struct *vma,
97900 vma->vm_pgoff -= grow;
97901 anon_vma_interval_tree_post_update_vma(vma);
97902 vma_gap_update(vma);
97903+
97904+#ifdef CONFIG_PAX_SEGMEXEC
97905+ if (vma_m) {
97906+ anon_vma_interval_tree_pre_update_vma(vma_m);
97907+ vma_m->vm_start -= grow << PAGE_SHIFT;
97908+ vma_m->vm_pgoff -= grow;
97909+ anon_vma_interval_tree_post_update_vma(vma_m);
97910+ vma_gap_update(vma_m);
97911+ }
97912+#endif
97913+
97914 spin_unlock(&vma->vm_mm->page_table_lock);
97915
97916+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
97917 perf_event_mmap(vma);
97918 }
97919 }
97920 }
97921 vma_unlock_anon_vma(vma);
97922+ if (lockprev)
97923+ vma_unlock_anon_vma(prev);
97924 khugepaged_enter_vma_merge(vma, vma->vm_flags);
97925 validate_mm(vma->vm_mm);
97926 return error;
97927@@ -2382,6 +2734,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
97928 do {
97929 long nrpages = vma_pages(vma);
97930
97931+#ifdef CONFIG_PAX_SEGMEXEC
97932+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
97933+ vma = remove_vma(vma);
97934+ continue;
97935+ }
97936+#endif
97937+
97938 if (vma->vm_flags & VM_ACCOUNT)
97939 nr_accounted += nrpages;
97940 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
97941@@ -2426,6 +2785,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
97942 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
97943 vma->vm_prev = NULL;
97944 do {
97945+
97946+#ifdef CONFIG_PAX_SEGMEXEC
97947+ if (vma->vm_mirror) {
97948+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
97949+ vma->vm_mirror->vm_mirror = NULL;
97950+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
97951+ vma->vm_mirror = NULL;
97952+ }
97953+#endif
97954+
97955 vma_rb_erase(vma, &mm->mm_rb);
97956 mm->map_count--;
97957 tail_vma = vma;
97958@@ -2453,14 +2822,33 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97959 struct vm_area_struct *new;
97960 int err = -ENOMEM;
97961
97962+#ifdef CONFIG_PAX_SEGMEXEC
97963+ struct vm_area_struct *vma_m, *new_m = NULL;
97964+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
97965+#endif
97966+
97967 if (is_vm_hugetlb_page(vma) && (addr &
97968 ~(huge_page_mask(hstate_vma(vma)))))
97969 return -EINVAL;
97970
97971+#ifdef CONFIG_PAX_SEGMEXEC
97972+ vma_m = pax_find_mirror_vma(vma);
97973+#endif
97974+
97975 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97976 if (!new)
97977 goto out_err;
97978
97979+#ifdef CONFIG_PAX_SEGMEXEC
97980+ if (vma_m) {
97981+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97982+ if (!new_m) {
97983+ kmem_cache_free(vm_area_cachep, new);
97984+ goto out_err;
97985+ }
97986+ }
97987+#endif
97988+
97989 /* most fields are the same, copy all, and then fixup */
97990 *new = *vma;
97991
97992@@ -2473,6 +2861,22 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97993 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
97994 }
97995
97996+#ifdef CONFIG_PAX_SEGMEXEC
97997+ if (vma_m) {
97998+ *new_m = *vma_m;
97999+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
98000+ new_m->vm_mirror = new;
98001+ new->vm_mirror = new_m;
98002+
98003+ if (new_below)
98004+ new_m->vm_end = addr_m;
98005+ else {
98006+ new_m->vm_start = addr_m;
98007+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
98008+ }
98009+ }
98010+#endif
98011+
98012 err = vma_dup_policy(vma, new);
98013 if (err)
98014 goto out_free_vma;
98015@@ -2493,6 +2897,38 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98016 else
98017 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
98018
98019+#ifdef CONFIG_PAX_SEGMEXEC
98020+ if (!err && vma_m) {
98021+ struct mempolicy *pol = vma_policy(new);
98022+
98023+ if (anon_vma_clone(new_m, vma_m))
98024+ goto out_free_mpol;
98025+
98026+ mpol_get(pol);
98027+ set_vma_policy(new_m, pol);
98028+
98029+ if (new_m->vm_file)
98030+ get_file(new_m->vm_file);
98031+
98032+ if (new_m->vm_ops && new_m->vm_ops->open)
98033+ new_m->vm_ops->open(new_m);
98034+
98035+ if (new_below)
98036+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
98037+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
98038+ else
98039+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
98040+
98041+ if (err) {
98042+ if (new_m->vm_ops && new_m->vm_ops->close)
98043+ new_m->vm_ops->close(new_m);
98044+ if (new_m->vm_file)
98045+ fput(new_m->vm_file);
98046+ mpol_put(pol);
98047+ }
98048+ }
98049+#endif
98050+
98051 /* Success. */
98052 if (!err)
98053 return 0;
98054@@ -2502,10 +2938,18 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98055 new->vm_ops->close(new);
98056 if (new->vm_file)
98057 fput(new->vm_file);
98058- unlink_anon_vmas(new);
98059 out_free_mpol:
98060 mpol_put(vma_policy(new));
98061 out_free_vma:
98062+
98063+#ifdef CONFIG_PAX_SEGMEXEC
98064+ if (new_m) {
98065+ unlink_anon_vmas(new_m);
98066+ kmem_cache_free(vm_area_cachep, new_m);
98067+ }
98068+#endif
98069+
98070+ unlink_anon_vmas(new);
98071 kmem_cache_free(vm_area_cachep, new);
98072 out_err:
98073 return err;
98074@@ -2518,6 +2962,15 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98075 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98076 unsigned long addr, int new_below)
98077 {
98078+
98079+#ifdef CONFIG_PAX_SEGMEXEC
98080+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
98081+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
98082+ if (mm->map_count >= sysctl_max_map_count-1)
98083+ return -ENOMEM;
98084+ } else
98085+#endif
98086+
98087 if (mm->map_count >= sysctl_max_map_count)
98088 return -ENOMEM;
98089
98090@@ -2529,11 +2982,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98091 * work. This now handles partial unmappings.
98092 * Jeremy Fitzhardinge <jeremy@goop.org>
98093 */
98094+#ifdef CONFIG_PAX_SEGMEXEC
98095 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
98096 {
98097+ int ret = __do_munmap(mm, start, len);
98098+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
98099+ return ret;
98100+
98101+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
98102+}
98103+
98104+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
98105+#else
98106+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
98107+#endif
98108+{
98109 unsigned long end;
98110 struct vm_area_struct *vma, *prev, *last;
98111
98112+ /*
98113+ * mm->mmap_sem is required to protect against another thread
98114+ * changing the mappings in case we sleep.
98115+ */
98116+ verify_mm_writelocked(mm);
98117+
98118 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
98119 return -EINVAL;
98120
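Under SEGMEXEC the user address space is halved and every executable mapping gets a non-writable mirror in the upper half, which is why the do_munmap() wrapper above repeats the unmap at start + SEGMEXEC_TASK_SIZE. The address relationship in isolation (the 1.5 GiB split value is the usual i386 figure, assumed here for illustration):

#define SEGMEXEC_TASK_SIZE 0x60000000UL	/* assumed: half of the 3 GiB i386 TASK_SIZE */

static unsigned long mirror_of(unsigned long addr)
{
	return addr + SEGMEXEC_TASK_SIZE;
}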
98121@@ -2611,6 +3083,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
98122 /* Fix up all other VM information */
98123 remove_vma_list(mm, vma);
98124
98125+ track_exec_limit(mm, start, end, 0UL);
98126+
98127 return 0;
98128 }
98129
98130@@ -2619,6 +3093,13 @@ int vm_munmap(unsigned long start, size_t len)
98131 int ret;
98132 struct mm_struct *mm = current->mm;
98133
98134+
98135+#ifdef CONFIG_PAX_SEGMEXEC
98136+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
98137+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
98138+ return -EINVAL;
98139+#endif
98140+
98141 down_write(&mm->mmap_sem);
98142 ret = do_munmap(mm, start, len);
98143 up_write(&mm->mmap_sem);
98144@@ -2632,16 +3113,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
98145 return vm_munmap(addr, len);
98146 }
98147
98148-static inline void verify_mm_writelocked(struct mm_struct *mm)
98149-{
98150-#ifdef CONFIG_DEBUG_VM
98151- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
98152- WARN_ON(1);
98153- up_read(&mm->mmap_sem);
98154- }
98155-#endif
98156-}
98157-
98158 /*
98159 * this is really a simplified "do_mmap". it only handles
98160 * anonymous maps. eventually we may be able to do some
98161@@ -2655,6 +3126,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98162 struct rb_node **rb_link, *rb_parent;
98163 pgoff_t pgoff = addr >> PAGE_SHIFT;
98164 int error;
98165+ unsigned long charged;
98166
98167 len = PAGE_ALIGN(len);
98168 if (!len)
98169@@ -2662,10 +3134,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98170
98171 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
98172
98173+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
98174+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
98175+ flags &= ~VM_EXEC;
98176+
98177+#ifdef CONFIG_PAX_MPROTECT
98178+ if (mm->pax_flags & MF_PAX_MPROTECT)
98179+ flags &= ~VM_MAYEXEC;
98180+#endif
98181+
98182+ }
98183+#endif
98184+
98185 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
98186 if (error & ~PAGE_MASK)
98187 return error;
98188
98189+ charged = len >> PAGE_SHIFT;
98190+
98191 error = mlock_future_check(mm, mm->def_flags, len);
98192 if (error)
98193 return error;
98194@@ -2679,21 +3165,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98195 /*
98196 * Clear old maps. this also does some error checking for us
98197 */
98198- munmap_back:
98199 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
98200 if (do_munmap(mm, addr, len))
98201 return -ENOMEM;
98202- goto munmap_back;
98203+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
98204 }
98205
98206 /* Check against address space limits *after* clearing old maps... */
98207- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
98208+ if (!may_expand_vm(mm, charged))
98209 return -ENOMEM;
98210
98211 if (mm->map_count > sysctl_max_map_count)
98212 return -ENOMEM;
98213
98214- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
98215+ if (security_vm_enough_memory_mm(mm, charged))
98216 return -ENOMEM;
98217
98218 /* Can we just expand an old private anonymous mapping? */
98219@@ -2707,7 +3192,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98220 */
98221 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98222 if (!vma) {
98223- vm_unacct_memory(len >> PAGE_SHIFT);
98224+ vm_unacct_memory(charged);
98225 return -ENOMEM;
98226 }
98227
98228@@ -2721,10 +3206,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98229 vma_link(mm, vma, prev, rb_link, rb_parent);
98230 out:
98231 perf_event_mmap(vma);
98232- mm->total_vm += len >> PAGE_SHIFT;
98233+ mm->total_vm += charged;
98234 if (flags & VM_LOCKED)
98235- mm->locked_vm += (len >> PAGE_SHIFT);
98236+ mm->locked_vm += charged;
98237 vma->vm_flags |= VM_SOFTDIRTY;
98238+ track_exec_limit(mm, addr, addr + len, flags);
98239 return addr;
98240 }
98241
98242@@ -2786,6 +3272,7 @@ void exit_mmap(struct mm_struct *mm)
98243 while (vma) {
98244 if (vma->vm_flags & VM_ACCOUNT)
98245 nr_accounted += vma_pages(vma);
98246+ vma->vm_mirror = NULL;
98247 vma = remove_vma(vma);
98248 }
98249 vm_unacct_memory(nr_accounted);
98250@@ -2803,6 +3290,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
98251 struct vm_area_struct *prev;
98252 struct rb_node **rb_link, *rb_parent;
98253
98254+#ifdef CONFIG_PAX_SEGMEXEC
98255+ struct vm_area_struct *vma_m = NULL;
98256+#endif
98257+
98258+ if (security_mmap_addr(vma->vm_start))
98259+ return -EPERM;
98260+
98261 /*
98262 * The vm_pgoff of a purely anonymous vma should be irrelevant
98263 * until its first write fault, when page's anon_vma and index
98264@@ -2826,7 +3320,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
98265 security_vm_enough_memory_mm(mm, vma_pages(vma)))
98266 return -ENOMEM;
98267
98268+#ifdef CONFIG_PAX_SEGMEXEC
98269+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
98270+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98271+ if (!vma_m)
98272+ return -ENOMEM;
98273+ }
98274+#endif
98275+
98276 vma_link(mm, vma, prev, rb_link, rb_parent);
98277+
98278+#ifdef CONFIG_PAX_SEGMEXEC
98279+ if (vma_m)
98280+ BUG_ON(pax_mirror_vma(vma_m, vma));
98281+#endif
98282+
98283 return 0;
98284 }
98285
98286@@ -2845,6 +3353,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98287 struct rb_node **rb_link, *rb_parent;
98288 bool faulted_in_anon_vma = true;
98289
98290+ BUG_ON(vma->vm_mirror);
98291+
98292 /*
98293 * If anonymous vma has not yet been faulted, update new pgoff
98294 * to match new location, to increase its chance of merging.
98295@@ -2909,6 +3419,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98296 return NULL;
98297 }
98298
98299+#ifdef CONFIG_PAX_SEGMEXEC
98300+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
98301+{
98302+ struct vm_area_struct *prev_m;
98303+ struct rb_node **rb_link_m, *rb_parent_m;
98304+ struct mempolicy *pol_m;
98305+
98306+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
98307+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
98308+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
98309+ *vma_m = *vma;
98310+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
98311+ if (anon_vma_clone(vma_m, vma))
98312+ return -ENOMEM;
98313+ pol_m = vma_policy(vma_m);
98314+ mpol_get(pol_m);
98315+ set_vma_policy(vma_m, pol_m);
98316+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
98317+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
98318+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
98319+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
98320+ if (vma_m->vm_file)
98321+ get_file(vma_m->vm_file);
98322+ if (vma_m->vm_ops && vma_m->vm_ops->open)
98323+ vma_m->vm_ops->open(vma_m);
98324+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
98325+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
98326+ vma_m->vm_mirror = vma;
98327+ vma->vm_mirror = vma_m;
98328+ return 0;
98329+}
98330+#endif
98331+
98332 /*
98333 * Return true if the calling process may expand its vm space by the passed
98334 * number of pages
98335@@ -2920,6 +3463,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
98336
98337 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
98338
98339+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
98340 if (cur + npages > lim)
98341 return 0;
98342 return 1;
98343@@ -3002,6 +3546,22 @@ static struct vm_area_struct *__install_special_mapping(
98344 vma->vm_start = addr;
98345 vma->vm_end = addr + len;
98346
98347+#ifdef CONFIG_PAX_MPROTECT
98348+ if (mm->pax_flags & MF_PAX_MPROTECT) {
98349+#ifndef CONFIG_PAX_MPROTECT_COMPAT
98350+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
98351+ return ERR_PTR(-EPERM);
98352+ if (!(vm_flags & VM_EXEC))
98353+ vm_flags &= ~VM_MAYEXEC;
98354+#else
98355+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
98356+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
98357+#endif
98358+ else
98359+ vm_flags &= ~VM_MAYWRITE;
98360+ }
98361+#endif
98362+
98363 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
98364 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
98365
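check_heap_stack_gap() and skip_heap_stack_gap() above enforce one rule: a candidate mapping may not come within sysctl_heap_stack_gap bytes of an adjacent growing stack. A minimal user-space model of the downward-growing case (the names and the one-page gap are illustrative, not kernel API):

/*
 * Accept [addr, addr + len) below a VM_GROWSDOWN stack starting at
 * vm_start only if at least `gap` bytes of slack remain in between.
 */
#include <stdbool.h>
#include <stdio.h>

static bool gap_ok(unsigned long addr, unsigned long len,
		   unsigned long vm_start, unsigned long gap)
{
	if (addr + len > vm_start)		/* overlaps the stack vma */
		return false;
	return gap <= vm_start - addr - len;	/* enough slack left */
}

int main(void)
{
	unsigned long stack = 0x7f0000000000UL, gap = 0x1000;

	printf("%d\n", gap_ok(stack - 0x3000, 0x1000, stack, gap));	/* 1 */
	printf("%d\n", gap_ok(stack - 0x1000, 0x1000, stack, gap));	/* 0 */
	return 0;
}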
98366diff --git a/mm/mprotect.c b/mm/mprotect.c
98367index ace9345..63320dc 100644
98368--- a/mm/mprotect.c
98369+++ b/mm/mprotect.c
98370@@ -24,10 +24,18 @@
98371 #include <linux/migrate.h>
98372 #include <linux/perf_event.h>
98373 #include <linux/ksm.h>
98374+#include <linux/sched/sysctl.h>
98375+
98376+#ifdef CONFIG_PAX_MPROTECT
98377+#include <linux/elf.h>
98378+#include <linux/binfmts.h>
98379+#endif
98380+
98381 #include <asm/uaccess.h>
98382 #include <asm/pgtable.h>
98383 #include <asm/cacheflush.h>
98384 #include <asm/tlbflush.h>
98385+#include <asm/mmu_context.h>
98386
98387 /*
98388 * For a prot_numa update we only hold mmap_sem for read so there is a
98389@@ -251,6 +259,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
98390 return pages;
98391 }
98392
98393+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98394+/* called while holding the mmap semaphore for writing, except during stack expansion */
98395+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
98396+{
98397+ unsigned long oldlimit, newlimit = 0UL;
98398+
98399+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
98400+ return;
98401+
98402+ spin_lock(&mm->page_table_lock);
98403+ oldlimit = mm->context.user_cs_limit;
98404+ if ((prot & VM_EXEC) && oldlimit < end)
98405+ /* USER_CS limit moved up */
98406+ newlimit = end;
98407+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
98408+ /* USER_CS limit moved down */
98409+ newlimit = start;
98410+
98411+ if (newlimit) {
98412+ mm->context.user_cs_limit = newlimit;
98413+
98414+#ifdef CONFIG_SMP
98415+ wmb();
98416+ cpus_clear(mm->context.cpu_user_cs_mask);
98417+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
98418+#endif
98419+
98420+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
98421+ }
98422+ spin_unlock(&mm->page_table_lock);
98423+ if (newlimit == end) {
98424+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
98425+
98426+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
98427+ if (is_vm_hugetlb_page(vma))
98428+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
98429+ else
98430+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
98431+ }
98432+}
98433+#endif
98434+
98435 int
98436 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98437 unsigned long start, unsigned long end, unsigned long newflags)
98438@@ -263,11 +313,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98439 int error;
98440 int dirty_accountable = 0;
98441
98442+#ifdef CONFIG_PAX_SEGMEXEC
98443+ struct vm_area_struct *vma_m = NULL;
98444+ unsigned long start_m, end_m;
98445+
98446+ start_m = start + SEGMEXEC_TASK_SIZE;
98447+ end_m = end + SEGMEXEC_TASK_SIZE;
98448+#endif
98449+
98450 if (newflags == oldflags) {
98451 *pprev = vma;
98452 return 0;
98453 }
98454
98455+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
98456+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
98457+
98458+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
98459+ return -ENOMEM;
98460+
98461+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
98462+ return -ENOMEM;
98463+ }
98464+
98465 /*
98466 * If we make a private mapping writable we increase our commit;
98467 * but (without finer accounting) cannot reduce our commit if we
98468@@ -284,6 +352,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98469 }
98470 }
98471
98472+#ifdef CONFIG_PAX_SEGMEXEC
98473+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
98474+ if (start != vma->vm_start) {
98475+ error = split_vma(mm, vma, start, 1);
98476+ if (error)
98477+ goto fail;
98478+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
98479+ *pprev = (*pprev)->vm_next;
98480+ }
98481+
98482+ if (end != vma->vm_end) {
98483+ error = split_vma(mm, vma, end, 0);
98484+ if (error)
98485+ goto fail;
98486+ }
98487+
98488+ if (pax_find_mirror_vma(vma)) {
98489+ error = __do_munmap(mm, start_m, end_m - start_m);
98490+ if (error)
98491+ goto fail;
98492+ } else {
98493+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98494+ if (!vma_m) {
98495+ error = -ENOMEM;
98496+ goto fail;
98497+ }
98498+ vma->vm_flags = newflags;
98499+ error = pax_mirror_vma(vma_m, vma);
98500+ if (error) {
98501+ vma->vm_flags = oldflags;
98502+ goto fail;
98503+ }
98504+ }
98505+ }
98506+#endif
98507+
98508 /*
98509 * First try to merge with previous and/or next vma.
98510 */
98511@@ -314,7 +418,19 @@ success:
98512 * vm_flags and vm_page_prot are protected by the mmap_sem
98513 * held in write mode.
98514 */
98515+
98516+#ifdef CONFIG_PAX_SEGMEXEC
98517+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
98518+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
98519+#endif
98520+
98521 vma->vm_flags = newflags;
98522+
98523+#ifdef CONFIG_PAX_MPROTECT
98524+ if (mm->binfmt && mm->binfmt->handle_mprotect)
98525+ mm->binfmt->handle_mprotect(vma, newflags);
98526+#endif
98527+
98528 dirty_accountable = vma_wants_writenotify(vma);
98529 vma_set_page_prot(vma);
98530
98531@@ -350,6 +466,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98532 end = start + len;
98533 if (end <= start)
98534 return -ENOMEM;
98535+
98536+#ifdef CONFIG_PAX_SEGMEXEC
98537+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
98538+ if (end > SEGMEXEC_TASK_SIZE)
98539+ return -EINVAL;
98540+ } else
98541+#endif
98542+
98543+ if (end > TASK_SIZE)
98544+ return -EINVAL;
98545+
98546 if (!arch_validate_prot(prot))
98547 return -EINVAL;
98548
98549@@ -357,7 +484,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98550 /*
98551 * Does the application expect PROT_READ to imply PROT_EXEC:
98552 */
98553- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
98554+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
98555 prot |= PROT_EXEC;
98556
98557 vm_flags = calc_vm_prot_bits(prot);
98558@@ -389,6 +516,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98559 if (start > vma->vm_start)
98560 prev = vma;
98561
98562+#ifdef CONFIG_PAX_MPROTECT
98563+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
98564+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
98565+#endif
98566+
98567 for (nstart = start ; ; ) {
98568 unsigned long newflags;
98569
98570@@ -399,6 +531,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98571
98572 /* newflags >> 4 shift VM_MAY% in place of VM_% */
98573 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
98574+ if (prot & (PROT_WRITE | PROT_EXEC))
98575+ gr_log_rwxmprotect(vma);
98576+
98577+ error = -EACCES;
98578+ goto out;
98579+ }
98580+
98581+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
98582 error = -EACCES;
98583 goto out;
98584 }
98585@@ -413,6 +553,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98586 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
98587 if (error)
98588 goto out;
98589+
98590+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
98591+
98592 nstart = tmp;
98593
98594 if (nstart < prev->vm_end)
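track_exec_limit() above maintains the i386 USER_CS segment limit used by the PAGEEXEC NX emulation: the limit rises to the end of a range gaining VM_EXEC and falls to the start of a range losing it. The update rule in isolation (a sketch; the VM_EXEC value matches the kernel's flag encoding):

#define VM_EXEC 0x00000004UL

/* Returns the new code-segment limit after one protection change. */
static unsigned long new_cs_limit(unsigned long oldlimit,
				  unsigned long start, unsigned long end,
				  unsigned long prot)
{
	if ((prot & VM_EXEC) && oldlimit < end)
		return end;		/* USER_CS limit moved up */
	if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
		return start;		/* USER_CS limit moved down */
	return oldlimit;		/* unchanged */
}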
98595diff --git a/mm/mremap.c b/mm/mremap.c
98596index 17fa018..6f7892b 100644
98597--- a/mm/mremap.c
98598+++ b/mm/mremap.c
98599@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
98600 continue;
98601 pte = ptep_get_and_clear(mm, old_addr, old_pte);
98602 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
98603+
98604+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98605+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
98606+ pte = pte_exprotect(pte);
98607+#endif
98608+
98609 pte = move_soft_dirty_pte(pte);
98610 set_pte_at(mm, new_addr, new_pte, pte);
98611 }
98612@@ -346,6 +352,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
98613 if (is_vm_hugetlb_page(vma))
98614 goto Einval;
98615
98616+#ifdef CONFIG_PAX_SEGMEXEC
98617+ if (pax_find_mirror_vma(vma))
98618+ goto Einval;
98619+#endif
98620+
98621 /* We can't remap across vm area boundaries */
98622 if (old_len > vma->vm_end - addr)
98623 goto Efault;
98624@@ -401,20 +412,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
98625 unsigned long ret = -EINVAL;
98626 unsigned long charged = 0;
98627 unsigned long map_flags;
98628+ unsigned long pax_task_size = TASK_SIZE;
98629
98630 if (new_addr & ~PAGE_MASK)
98631 goto out;
98632
98633- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
98634+#ifdef CONFIG_PAX_SEGMEXEC
98635+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98636+ pax_task_size = SEGMEXEC_TASK_SIZE;
98637+#endif
98638+
98639+ pax_task_size -= PAGE_SIZE;
98640+
98641+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
98642 goto out;
98643
98644 /* Check if the location we're moving into overlaps the
98645 * old location at all, and fail if it does.
98646 */
98647- if ((new_addr <= addr) && (new_addr+new_len) > addr)
98648- goto out;
98649-
98650- if ((addr <= new_addr) && (addr+old_len) > new_addr)
98651+ if (addr + old_len > new_addr && new_addr + new_len > addr)
98652 goto out;
98653
98654 ret = do_munmap(mm, new_addr, new_len);
98655@@ -483,6 +499,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98656 unsigned long ret = -EINVAL;
98657 unsigned long charged = 0;
98658 bool locked = false;
98659+ unsigned long pax_task_size = TASK_SIZE;
98660
98661 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
98662 return ret;
98663@@ -504,6 +521,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98664 if (!new_len)
98665 return ret;
98666
98667+#ifdef CONFIG_PAX_SEGMEXEC
98668+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98669+ pax_task_size = SEGMEXEC_TASK_SIZE;
98670+#endif
98671+
98672+ pax_task_size -= PAGE_SIZE;
98673+
98674+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
98675+ old_len > pax_task_size || addr > pax_task_size-old_len)
98676+ return ret;
98677+
98678 down_write(&current->mm->mmap_sem);
98679
98680 if (flags & MREMAP_FIXED) {
98681@@ -554,6 +582,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98682 new_addr = addr;
98683 }
98684 ret = addr;
98685+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
98686 goto out;
98687 }
98688 }
98689@@ -577,7 +606,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98690 goto out;
98691 }
98692
98693+ map_flags = vma->vm_flags;
98694 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
98695+ if (!(ret & ~PAGE_MASK)) {
98696+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
98697+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
98698+ }
98699 }
98700 out:
98701 if (ret & ~PAGE_MASK)
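The rewritten check in mremap_to() above is the standard half-open interval intersection test: [addr, addr + old_len) and [new_addr, new_addr + new_len) overlap exactly when each range starts before the other one ends. For example:

#include <assert.h>

static int ranges_overlap(unsigned long a, unsigned long alen,
			  unsigned long b, unsigned long blen)
{
	return a + alen > b && b + blen > a;
}

int main(void)
{
	assert(ranges_overlap(0x1000, 0x1000, 0x1800, 0x1000));	/* partial */
	assert(!ranges_overlap(0x1000, 0x1000, 0x2000, 0x1000));	/* adjacent */
	return 0;
}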
98702diff --git a/mm/nommu.c b/mm/nommu.c
98703index ae5baae..cbb2ed5 100644
98704--- a/mm/nommu.c
98705+++ b/mm/nommu.c
98706@@ -71,7 +71,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
98707 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
98708 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
98709 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
98710-int heap_stack_gap = 0;
98711
98712 atomic_long_t mmap_pages_allocated;
98713
98714@@ -858,15 +857,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
98715 EXPORT_SYMBOL(find_vma);
98716
98717 /*
98718- * find a VMA
98719- * - we don't extend stack VMAs under NOMMU conditions
98720- */
98721-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
98722-{
98723- return find_vma(mm, addr);
98724-}
98725-
98726-/*
98727 * expand a stack to a given address
98728 * - not supported under NOMMU conditions
98729 */
98730@@ -1560,6 +1550,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98731
98732 /* most fields are the same, copy all, and then fixup */
98733 *new = *vma;
98734+ INIT_LIST_HEAD(&new->anon_vma_chain);
98735 *region = *vma->vm_region;
98736 new->vm_region = region;
98737
98738@@ -1990,8 +1981,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
98739 }
98740 EXPORT_SYMBOL(generic_file_remap_pages);
98741
98742-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98743- unsigned long addr, void *buf, int len, int write)
98744+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98745+ unsigned long addr, void *buf, size_t len, int write)
98746 {
98747 struct vm_area_struct *vma;
98748
98749@@ -2032,8 +2023,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98750 *
98751 * The caller must hold a reference on @mm.
98752 */
98753-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98754- void *buf, int len, int write)
98755+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
98756+ void *buf, size_t len, int write)
98757 {
98758 return __access_remote_vm(NULL, mm, addr, buf, len, write);
98759 }
98760@@ -2042,7 +2033,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98761 * Access another process' address space.
98762 * - source/target buffer must be kernel space
98763 */
98764-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
98765+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
98766 {
98767 struct mm_struct *mm;
98768
98769diff --git a/mm/page-writeback.c b/mm/page-writeback.c
98770index f24d4c9..77820e3 100644
98771--- a/mm/page-writeback.c
98772+++ b/mm/page-writeback.c
98773@@ -664,7 +664,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
98774 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
98775 * - the bdi dirty thresh drops quickly due to change of JBOD workload
98776 */
98777-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
98778+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
98779 unsigned long thresh,
98780 unsigned long bg_thresh,
98781 unsigned long dirty,
98782diff --git a/mm/page_alloc.c b/mm/page_alloc.c
98783index 8bbef06..a8d1989 100644
98784--- a/mm/page_alloc.c
98785+++ b/mm/page_alloc.c
98786@@ -60,6 +60,7 @@
98787 #include <linux/hugetlb.h>
98788 #include <linux/sched/rt.h>
98789 #include <linux/page_owner.h>
98790+#include <linux/random.h>
98791
98792 #include <asm/sections.h>
98793 #include <asm/tlbflush.h>
98794@@ -358,7 +359,7 @@ out:
98795 * This usage means that zero-order pages may not be compound.
98796 */
98797
98798-static void free_compound_page(struct page *page)
98799+void free_compound_page(struct page *page)
98800 {
98801 __free_pages_ok(page, compound_order(page));
98802 }
98803@@ -511,7 +512,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
98804 __mod_zone_freepage_state(zone, (1 << order), migratetype);
98805 }
98806 #else
98807-struct page_ext_operations debug_guardpage_ops = { NULL, };
98808+struct page_ext_operations debug_guardpage_ops = { .need = NULL, .init = NULL };
98809 static inline void set_page_guard(struct zone *zone, struct page *page,
98810 unsigned int order, int migratetype) {}
98811 static inline void clear_page_guard(struct zone *zone, struct page *page,
98812@@ -802,6 +803,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
98813 int i;
98814 int bad = 0;
98815
98816+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98817+ unsigned long index = 1UL << order;
98818+#endif
98819+
98820 VM_BUG_ON_PAGE(PageTail(page), page);
98821 VM_BUG_ON_PAGE(PageHead(page) && compound_order(page) != order, page);
98822
98823@@ -823,6 +828,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
98824 debug_check_no_obj_freed(page_address(page),
98825 PAGE_SIZE << order);
98826 }
98827+
98828+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98829+ for (; index; --index)
98830+ sanitize_highpage(page + index - 1);
98831+#endif
98832+
98833 arch_free_page(page, order);
98834 kernel_map_pages(page, 1 << order, 0);
98835
98836@@ -846,6 +857,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
98837 local_irq_restore(flags);
98838 }
98839
98840+#ifdef CONFIG_PAX_LATENT_ENTROPY
98841+bool __meminitdata extra_latent_entropy;
98842+
98843+static int __init setup_pax_extra_latent_entropy(char *str)
98844+{
98845+ extra_latent_entropy = true;
98846+ return 0;
98847+}
98848+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
98849+
98850+volatile u64 latent_entropy __latent_entropy;
98851+EXPORT_SYMBOL(latent_entropy);
98852+#endif
98853+
98854 void __init __free_pages_bootmem(struct page *page, unsigned int order)
98855 {
98856 unsigned int nr_pages = 1 << order;
98857@@ -861,6 +886,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
98858 __ClearPageReserved(p);
98859 set_page_count(p, 0);
98860
98861+#ifdef CONFIG_PAX_LATENT_ENTROPY
98862+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
98863+ u64 hash = 0;
98864+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
98865+ const u64 *data = lowmem_page_address(page);
98866+
98867+ for (index = 0; index < end; index++)
98868+ hash ^= hash + data[index];
98869+ latent_entropy ^= hash;
98870+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
98871+ }
98872+#endif
98873+
98874 page_zone(page)->managed_pages += nr_pages;
98875 set_page_refcounted(page);
98876 __free_pages(page, order);
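The pax_extra_latent_entropy block above folds the contents of pages freed at boot into a 64-bit accumulator and mixes the result into the random pool. The fold in isolation (a sketch over an arbitrary buffer, not the kernel code path):

#include <stdint.h>
#include <stddef.h>

static uint64_t fold_words(const uint64_t *data, size_t nwords)
{
	uint64_t hash = 0;
	size_t i;

	/* same mixing step as the patch: hash ^= hash + data[i] */
	for (i = 0; i < nwords; i++)
		hash ^= hash + data[i];
	return hash;
}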
98877@@ -986,8 +1024,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
98878 arch_alloc_page(page, order);
98879 kernel_map_pages(page, 1 << order, 1);
98880
98881+#ifndef CONFIG_PAX_MEMORY_SANITIZE
98882 if (gfp_flags & __GFP_ZERO)
98883 prep_zero_page(page, order, gfp_flags);
98884+#endif
98885
98886 if (order && (gfp_flags & __GFP_COMP))
98887 prep_compound_page(page, order);
98888@@ -1700,7 +1740,7 @@ again:
98889 }
98890
98891 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
98892- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
98893+ if (atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
98894 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
98895 set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
98896
98897@@ -2021,7 +2061,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
98898 do {
98899 mod_zone_page_state(zone, NR_ALLOC_BATCH,
98900 high_wmark_pages(zone) - low_wmark_pages(zone) -
98901- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
98902+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
98903 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
98904 } while (zone++ != preferred_zone);
98905 }
98906@@ -5781,7 +5821,7 @@ static void __setup_per_zone_wmarks(void)
98907
98908 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
98909 high_wmark_pages(zone) - low_wmark_pages(zone) -
98910- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
98911+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
98912
98913 setup_zone_migrate_reserve(zone);
98914 spin_unlock_irqrestore(&zone->lock, flags);
98915diff --git a/mm/percpu.c b/mm/percpu.c
98916index d39e2f4..de5f4b4 100644
98917--- a/mm/percpu.c
98918+++ b/mm/percpu.c
98919@@ -131,7 +131,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
98920 static unsigned int pcpu_high_unit_cpu __read_mostly;
98921
98922 /* the address of the first chunk which starts with the kernel static area */
98923-void *pcpu_base_addr __read_mostly;
98924+void *pcpu_base_addr __read_only;
98925 EXPORT_SYMBOL_GPL(pcpu_base_addr);
98926
98927 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
98928diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
98929index 5077afc..846c9ef 100644
98930--- a/mm/process_vm_access.c
98931+++ b/mm/process_vm_access.c
98932@@ -13,6 +13,7 @@
98933 #include <linux/uio.h>
98934 #include <linux/sched.h>
98935 #include <linux/highmem.h>
98936+#include <linux/security.h>
98937 #include <linux/ptrace.h>
98938 #include <linux/slab.h>
98939 #include <linux/syscalls.h>
98940@@ -157,19 +158,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
98941 ssize_t iov_len;
98942 size_t total_len = iov_iter_count(iter);
98943
98944+ return -ENOSYS; // PaX: until properly audited
98945+
98946 /*
98947 * Work out how many pages of struct pages we're going to need
98948 * when eventually calling get_user_pages
98949 */
98950 for (i = 0; i < riovcnt; i++) {
98951 iov_len = rvec[i].iov_len;
98952- if (iov_len > 0) {
98953- nr_pages_iov = ((unsigned long)rvec[i].iov_base
98954- + iov_len)
98955- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
98956- / PAGE_SIZE + 1;
98957- nr_pages = max(nr_pages, nr_pages_iov);
98958- }
98959+ if (iov_len <= 0)
98960+ continue;
98961+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
98962+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
98963+ nr_pages = max(nr_pages, nr_pages_iov);
98964 }
98965
98966 if (nr_pages == 0)
98967@@ -197,6 +198,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
98968 goto free_proc_pages;
98969 }
98970
98971+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
98972+ rc = -EPERM;
98973+ goto put_task_struct;
98974+ }
98975+
98976 mm = mm_access(task, PTRACE_MODE_ATTACH);
98977 if (!mm || IS_ERR(mm)) {
98978 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
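The restructured loop above (kept behind the -ENOSYS early return until the code is audited) computes, per iovec, an upper bound on the page frames a byte range can touch. Worked example, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Upper bound on pages touched by [base, base + len). */
static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
	return (base + len) / PAGE_SIZE - base / PAGE_SIZE + 1;
}

int main(void)
{
	/* 9000 bytes starting 100 bytes into page 1 reach into page 3 */
	printf("%lu\n", pages_spanned(0x1064, 9000));	/* prints 3 */
	return 0;
}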
98979diff --git a/mm/rmap.c b/mm/rmap.c
98980index ecb444a..0ff9eb3 100644
98981--- a/mm/rmap.c
98982+++ b/mm/rmap.c
98983@@ -166,6 +166,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98984 struct anon_vma *anon_vma = vma->anon_vma;
98985 struct anon_vma_chain *avc;
98986
98987+#ifdef CONFIG_PAX_SEGMEXEC
98988+ struct anon_vma_chain *avc_m = NULL;
98989+#endif
98990+
98991 might_sleep();
98992 if (unlikely(!anon_vma)) {
98993 struct mm_struct *mm = vma->vm_mm;
98994@@ -175,6 +179,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98995 if (!avc)
98996 goto out_enomem;
98997
98998+#ifdef CONFIG_PAX_SEGMEXEC
98999+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
99000+ if (!avc_m)
99001+ goto out_enomem_free_avc;
99002+#endif
99003+
99004 anon_vma = find_mergeable_anon_vma(vma);
99005 allocated = NULL;
99006 if (!anon_vma) {
99007@@ -188,6 +198,19 @@ int anon_vma_prepare(struct vm_area_struct *vma)
99008 /* page_table_lock to protect against threads */
99009 spin_lock(&mm->page_table_lock);
99010 if (likely(!vma->anon_vma)) {
99011+
99012+#ifdef CONFIG_PAX_SEGMEXEC
99013+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
99014+
99015+ if (vma_m) {
99016+ BUG_ON(vma_m->anon_vma);
99017+ vma_m->anon_vma = anon_vma;
99018+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
99019+ anon_vma->degree++;
99020+ avc_m = NULL;
99021+ }
99022+#endif
99023+
99024 vma->anon_vma = anon_vma;
99025 anon_vma_chain_link(vma, avc, anon_vma);
99026 /* vma reference or self-parent link for new root */
99027@@ -200,12 +223,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
99028
99029 if (unlikely(allocated))
99030 put_anon_vma(allocated);
99031+
99032+#ifdef CONFIG_PAX_SEGMEXEC
99033+ if (unlikely(avc_m))
99034+ anon_vma_chain_free(avc_m);
99035+#endif
99036+
99037 if (unlikely(avc))
99038 anon_vma_chain_free(avc);
99039 }
99040 return 0;
99041
99042 out_enomem_free_avc:
99043+
99044+#ifdef CONFIG_PAX_SEGMEXEC
99045+ if (avc_m)
99046+ anon_vma_chain_free(avc_m);
99047+#endif
99048+
99049 anon_vma_chain_free(avc);
99050 out_enomem:
99051 return -ENOMEM;
99052@@ -249,7 +284,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
99053 * good chance of avoiding scanning the whole hierarchy when it searches where
99054 * page is mapped.
99055 */
99056-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
99057+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
99058 {
99059 struct anon_vma_chain *avc, *pavc;
99060 struct anon_vma *root = NULL;
99061@@ -303,7 +338,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
99062 * the corresponding VMA in the parent process is attached to.
99063 * Returns 0 on success, non-zero on failure.
99064 */
99065-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
99066+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
99067 {
99068 struct anon_vma_chain *avc;
99069 struct anon_vma *anon_vma;
99070@@ -423,8 +458,10 @@ static void anon_vma_ctor(void *data)
99071 void __init anon_vma_init(void)
99072 {
99073 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
99074- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
99075- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
99076+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
99077+ anon_vma_ctor);
99078+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
99079+ SLAB_PANIC|SLAB_NO_SANITIZE);
99080 }
99081
99082 /*
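The anon_vma_prepare() changes above follow a standard kernel pattern for the extra SEGMEXEC chain entry: allocate the possibly-unneeded object with GFP_KERNEL before taking page_table_lock, consume it under the lock only if the mirror vma exists, and release it afterwards otherwise. The shape of the pattern in user-space form (illustrative names, not kernel API):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;

static int install_if_empty(void)
{
	void *obj = malloc(64);		/* may sleep/fail: done before locking */

	if (!obj)
		return -1;
	pthread_mutex_lock(&lock);
	if (!slot) {
		slot = obj;		/* consumed under the lock */
		obj = NULL;
	}
	pthread_mutex_unlock(&lock);
	free(obj);			/* no-op when ownership transferred */
	return 0;
}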
99083diff --git a/mm/shmem.c b/mm/shmem.c
99084index 993e6ba..a962ba3 100644
99085--- a/mm/shmem.c
99086+++ b/mm/shmem.c
99087@@ -33,7 +33,7 @@
99088 #include <linux/swap.h>
99089 #include <linux/aio.h>
99090
99091-static struct vfsmount *shm_mnt;
99092+struct vfsmount *shm_mnt;
99093
99094 #ifdef CONFIG_SHMEM
99095 /*
99096@@ -80,7 +80,7 @@ static struct vfsmount *shm_mnt;
99097 #define BOGO_DIRENT_SIZE 20
99098
99099 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
99100-#define SHORT_SYMLINK_LEN 128
99101+#define SHORT_SYMLINK_LEN 64
99102
99103 /*
99104 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
99105@@ -2558,6 +2558,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
99106 static int shmem_xattr_validate(const char *name)
99107 {
99108 struct { const char *prefix; size_t len; } arr[] = {
99109+
99110+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
99111+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
99112+#endif
99113+
99114 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
99115 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
99116 };
99117@@ -2613,6 +2618,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
99118 if (err)
99119 return err;
99120
99121+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
99122+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
99123+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
99124+ return -EOPNOTSUPP;
99125+ if (size > 8)
99126+ return -EINVAL;
99127+ }
99128+#endif
99129+
99130 return simple_xattr_set(&info->xattrs, name, value, size, flags);
99131 }
99132
99133@@ -2996,8 +3010,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
99134 int err = -ENOMEM;
99135
99136 /* Round up to L1_CACHE_BYTES to resist false sharing */
99137- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
99138- L1_CACHE_BYTES), GFP_KERNEL);
99139+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
99140 if (!sbinfo)
99141 return -ENOMEM;
99142
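The shmem_setxattr() hook above restricts the "user" xattr namespace to the PaX flags attribute and caps its value at 8 bytes. The policy in isolation (constants assumed to match the kernel's user.pax.flags definitions):

#include <string.h>
#include <errno.h>
#include <stddef.h>

#define XATTR_USER_PREFIX	"user."
#define XATTR_NAME_PAX_FLAGS	"user.pax.flags"

static int pax_xattr_ok(const char *name, size_t size)
{
	if (strncmp(name, XATTR_USER_PREFIX, strlen(XATTR_USER_PREFIX)))
		return 0;			/* not user.*: unaffected */
	if (strcmp(name, XATTR_NAME_PAX_FLAGS))
		return -EOPNOTSUPP;		/* only user.pax.flags */
	if (size > 8)
		return -EINVAL;			/* flags value is tiny */
	return 0;
}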
99143diff --git a/mm/slab.c b/mm/slab.c
99144index 65b5dcb..d53d866 100644
99145--- a/mm/slab.c
99146+++ b/mm/slab.c
99147@@ -314,10 +314,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
99148 if ((x)->max_freeable < i) \
99149 (x)->max_freeable = i; \
99150 } while (0)
99151-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
99152-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
99153-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
99154-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
99155+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
99156+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
99157+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
99158+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
99159+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
99160+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
99161 #else
99162 #define STATS_INC_ACTIVE(x) do { } while (0)
99163 #define STATS_DEC_ACTIVE(x) do { } while (0)
99164@@ -334,6 +336,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
99165 #define STATS_INC_ALLOCMISS(x) do { } while (0)
99166 #define STATS_INC_FREEHIT(x) do { } while (0)
99167 #define STATS_INC_FREEMISS(x) do { } while (0)
99168+#define STATS_INC_SANITIZED(x) do { } while (0)
99169+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
99170 #endif
99171
99172 #if DEBUG
99173@@ -450,7 +454,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
99174 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
99175 */
99176 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
99177- const struct page *page, void *obj)
99178+ const struct page *page, const void *obj)
99179 {
99180 u32 offset = (obj - page->s_mem);
99181 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
99182@@ -1438,7 +1442,7 @@ void __init kmem_cache_init(void)
99183 * structures first. Without this, further allocations will bug.
99184 */
99185 kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
99186- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
99187+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
99188 slab_state = PARTIAL_NODE;
99189
99190 slab_early_init = 0;
99191@@ -2059,7 +2063,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99192
99193 cachep = find_mergeable(size, align, flags, name, ctor);
99194 if (cachep) {
99195- cachep->refcount++;
99196+ atomic_inc(&cachep->refcount);
99197
99198 /*
99199 * Adjust the object sizes so that we clear
99200@@ -3357,6 +3361,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
99201 struct array_cache *ac = cpu_cache_get(cachep);
99202
99203 check_irq_off();
99204+
99205+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99206+ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
99207+ STATS_INC_NOT_SANITIZED(cachep);
99208+ else {
99209+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
99210+
99211+ if (cachep->ctor)
99212+ cachep->ctor(objp);
99213+
99214+ STATS_INC_SANITIZED(cachep);
99215+ }
99216+#endif
99217+
99218 kmemleak_free_recursive(objp, cachep->flags);
99219 objp = cache_free_debugcheck(cachep, objp, caller);
99220
99221@@ -3469,7 +3487,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
99222 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
99223 }
99224
99225-void *__kmalloc_node(size_t size, gfp_t flags, int node)
99226+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
99227 {
99228 return __do_kmalloc_node(size, flags, node, _RET_IP_);
99229 }
99230@@ -3489,7 +3507,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
99231 * @flags: the type of memory to allocate (see kmalloc).
99232 * @caller: function caller for debug tracking of the caller
99233 */
99234-static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
99235+static __always_inline void * __size_overflow(1) __do_kmalloc(size_t size, gfp_t flags,
99236 unsigned long caller)
99237 {
99238 struct kmem_cache *cachep;
99239@@ -3562,6 +3580,7 @@ void kfree(const void *objp)
99240
99241 if (unlikely(ZERO_OR_NULL_PTR(objp)))
99242 return;
99243+ VM_BUG_ON(!virt_addr_valid(objp));
99244 local_irq_save(flags);
99245 kfree_debugcheck(objp);
99246 c = virt_to_cache(objp);
99247@@ -3984,14 +4003,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
99248 }
99249 /* cpu stats */
99250 {
99251- unsigned long allochit = atomic_read(&cachep->allochit);
99252- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
99253- unsigned long freehit = atomic_read(&cachep->freehit);
99254- unsigned long freemiss = atomic_read(&cachep->freemiss);
99255+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
99256+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
99257+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
99258+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
99259
99260 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
99261 allochit, allocmiss, freehit, freemiss);
99262 }
99263+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99264+ {
99265+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
99266+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
99267+
99268+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
99269+ }
99270+#endif
99271 #endif
99272 }
99273
99274@@ -4199,13 +4226,69 @@ static const struct file_operations proc_slabstats_operations = {
99275 static int __init slab_proc_init(void)
99276 {
99277 #ifdef CONFIG_DEBUG_SLAB_LEAK
99278- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
99279+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
99280 #endif
99281 return 0;
99282 }
99283 module_init(slab_proc_init);
99284 #endif
99285
99286+bool is_usercopy_object(const void *ptr)
99287+{
99288+ struct page *page;
99289+ struct kmem_cache *cachep;
99290+
99291+ if (ZERO_OR_NULL_PTR(ptr))
99292+ return false;
99293+
99294+ if (!slab_is_available())
99295+ return false;
99296+
99297+ if (!virt_addr_valid(ptr))
99298+ return false;
99299+
99300+ page = virt_to_head_page(ptr);
99301+
99302+ if (!PageSlab(page))
99303+ return false;
99304+
99305+ cachep = page->slab_cache;
99306+ return cachep->flags & SLAB_USERCOPY;
99307+}
99308+
99309+#ifdef CONFIG_PAX_USERCOPY
99310+const char *check_heap_object(const void *ptr, unsigned long n)
99311+{
99312+ struct page *page;
99313+ struct kmem_cache *cachep;
99314+ unsigned int objnr;
99315+ unsigned long offset;
99316+
99317+ if (ZERO_OR_NULL_PTR(ptr))
99318+ return "<null>";
99319+
99320+ if (!virt_addr_valid(ptr))
99321+ return NULL;
99322+
99323+ page = virt_to_head_page(ptr);
99324+
99325+ if (!PageSlab(page))
99326+ return NULL;
99327+
99328+ cachep = page->slab_cache;
99329+ if (!(cachep->flags & SLAB_USERCOPY))
99330+ return cachep->name;
99331+
99332+ objnr = obj_to_index(cachep, page, ptr);
99333+ BUG_ON(objnr >= cachep->num);
99334+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
99335+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
99336+ return NULL;
99337+
99338+ return cachep->name;
99339+}
99340+#endif
99341+
99342 /**
99343 * ksize - get the actual amount of memory allocated for a given object
99344 * @objp: Pointer to the object
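
On the free path, CONFIG_PAX_MEMORY_SANITIZE wipes the whole object with a poison byte before it returns to the cache, then re-runs the cache's constructor so ctor-maintained invariants survive the wipe; caches that are poisoned anyway, or marked SLAB_NO_SANITIZE, only bump a statistics counter. A minimal sketch of that order of operations, with a toy descriptor standing in for struct kmem_cache and free() standing in for the slab's own recycling:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAX_MEMORY_SANITIZE_VALUE 0xfe  /* the x86_64 poison byte from mm/slab.h */

    struct toy_cache {
        size_t object_size;
        void (*ctor)(void *);   /* optional constructor, as in struct kmem_cache */
    };

    static void list_head_ctor(void *obj)
    {
        /* e.g. re-link a list head so the object is valid again after the wipe */
        void **p = obj;
        p[0] = p[1] = obj;
    }

    /* Mirror of the sanitized free: wipe first, then reconstruct. */
    static void toy_free(struct toy_cache *c, void *obj)
    {
        memset(obj, PAX_MEMORY_SANITIZE_VALUE, c->object_size);
        if (c->ctor)
            c->ctor(obj);
        free(obj);  /* the kernel returns obj to the slab instead */
    }

    int main(void)
    {
        struct toy_cache c = { .object_size = 2 * sizeof(void *),
                               .ctor = list_head_ctor };
        void *obj = malloc(c.object_size);

        if (obj)
            toy_free(&c, obj);
        puts("freed object sanitized, then reconstructed by ctor");
        return 0;
    }
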
99345diff --git a/mm/slab.h b/mm/slab.h
99346index 1cf40054..10ad563 100644
99347--- a/mm/slab.h
99348+++ b/mm/slab.h
99349@@ -22,7 +22,7 @@ struct kmem_cache {
99350 unsigned int align; /* Alignment as calculated */
99351 unsigned long flags; /* Active flags on the slab */
99352 const char *name; /* Slab name for sysfs */
99353- int refcount; /* Use counter */
99354+ atomic_t refcount; /* Use counter */
99355 void (*ctor)(void *); /* Called on object slot creation */
99356 struct list_head list; /* List of all slab caches on the system */
99357 };
99358@@ -66,6 +66,20 @@ extern struct list_head slab_caches;
99359 /* The slab cache that manages slab cache information */
99360 extern struct kmem_cache *kmem_cache;
99361
99362+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99363+#ifdef CONFIG_X86_64
99364+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
99365+#else
99366+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
99367+#endif
99368+enum pax_sanitize_mode {
99369+ PAX_SANITIZE_SLAB_OFF = 0,
99370+ PAX_SANITIZE_SLAB_FAST,
99371+ PAX_SANITIZE_SLAB_FULL,
99372+};
99373+extern enum pax_sanitize_mode pax_sanitize_slab;
99374+#endif
99375+
99376 unsigned long calculate_alignment(unsigned long flags,
99377 unsigned long align, unsigned long size);
99378
99379@@ -116,7 +130,8 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
99380
99381 /* Legal flag mask for kmem_cache_create(), for various configurations */
99382 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
99383- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
99384+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
99385+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
99386
99387 #if defined(CONFIG_DEBUG_SLAB)
99388 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
99389@@ -300,6 +315,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
99390 return s;
99391
99392 page = virt_to_head_page(x);
99393+
99394+ BUG_ON(!PageSlab(page));
99395+
99396 cachep = page->slab_cache;
99397 if (slab_equal_or_root(cachep, s))
99398 return cachep;
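
Turning int refcount into atomic_t lets kmem_cache_destroy() replace the racy decrement-then-test pair with a single atomic_dec_and_test(), so exactly one caller can observe the transition to zero. The same idiom in portable C11 atomics, as a sketch:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int refcount = 1;   /* stands in for atomic_set(&s->refcount, 1) */

    static void put_cache(void)
    {
        /* atomic_dec_and_test(): true only for the caller that hits zero */
        if (atomic_fetch_sub(&refcount, 1) == 1)
            puts("last reference dropped: destroy the cache");
        else
            puts("still referenced");
    }

    int main(void)
    {
        atomic_fetch_add(&refcount, 1);  /* __kmem_cache_alias-style extra user */
        put_cache();                     /* still referenced */
        put_cache();                     /* last reference dropped */
        return 0;
    }
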
99399diff --git a/mm/slab_common.c b/mm/slab_common.c
99400index e03dd6f..c475838 100644
99401--- a/mm/slab_common.c
99402+++ b/mm/slab_common.c
99403@@ -25,11 +25,35 @@
99404
99405 #include "slab.h"
99406
99407-enum slab_state slab_state;
99408+enum slab_state slab_state __read_only;
99409 LIST_HEAD(slab_caches);
99410 DEFINE_MUTEX(slab_mutex);
99411 struct kmem_cache *kmem_cache;
99412
99413+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99414+enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
99415+static int __init pax_sanitize_slab_setup(char *str)
99416+{
99417+ if (!str)
99418+ return 0;
99419+
99420+ if (!strcmp(str, "0") || !strcmp(str, "off")) {
99421+ pr_info("PaX slab sanitization: %s\n", "disabled");
99422+ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
99423+ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
99424+ pr_info("PaX slab sanitization: %s\n", "fast");
99425+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
99426+ } else if (!strcmp(str, "full")) {
99427+ pr_info("PaX slab sanitization: %s\n", "full");
99428+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
99429+ } else
99430+ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
99431+
99432+ return 0;
99433+}
99434+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
99435+#endif
99436+
99437 /*
99438 * Set of flags that will prevent slab merging
99439 */
99440@@ -44,7 +68,7 @@ struct kmem_cache *kmem_cache;
99441 * Merge control. If this is set then no merging of slab caches will occur.
99442 * (Could be removed. This was introduced to pacify the merge skeptics.)
99443 */
99444-static int slab_nomerge;
99445+static int slab_nomerge = 1;
99446
99447 static int __init setup_slab_nomerge(char *str)
99448 {
99449@@ -218,7 +242,7 @@ int slab_unmergeable(struct kmem_cache *s)
99450 /*
99451 * We may have set a slab to be unmergeable during bootstrap.
99452 */
99453- if (s->refcount < 0)
99454+ if (atomic_read(&s->refcount) < 0)
99455 return 1;
99456
99457 return 0;
99458@@ -322,7 +346,7 @@ do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
99459 if (err)
99460 goto out_free_cache;
99461
99462- s->refcount = 1;
99463+ atomic_set(&s->refcount, 1);
99464 list_add(&s->list, &slab_caches);
99465 out:
99466 if (err)
99467@@ -386,6 +410,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
99468 */
99469 flags &= CACHE_CREATE_MASK;
99470
99471+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99472+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
99473+ flags |= SLAB_NO_SANITIZE;
99474+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
99475+ flags &= ~SLAB_NO_SANITIZE;
99476+#endif
99477+
99478 s = __kmem_cache_alias(name, size, align, flags, ctor);
99479 if (s)
99480 goto out_unlock;
99481@@ -505,8 +536,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
99482
99483 mutex_lock(&slab_mutex);
99484
99485- s->refcount--;
99486- if (s->refcount)
99487+ if (!atomic_dec_and_test(&s->refcount))
99488 goto out_unlock;
99489
99490 if (memcg_cleanup_cache_params(s) != 0)
99491@@ -526,7 +556,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
99492 rcu_barrier();
99493
99494 memcg_free_cache_params(s);
99495-#ifdef SLAB_SUPPORTS_SYSFS
99496+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99497 sysfs_slab_remove(s);
99498 #else
99499 slab_kmem_cache_release(s);
99500@@ -582,7 +612,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
99501 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
99502 name, size, err);
99503
99504- s->refcount = -1; /* Exempt from merging for now */
99505+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
99506 }
99507
99508 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99509@@ -595,7 +625,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99510
99511 create_boot_cache(s, name, size, flags);
99512 list_add(&s->list, &slab_caches);
99513- s->refcount = 1;
99514+ atomic_set(&s->refcount, 1);
99515 return s;
99516 }
99517
99518@@ -607,6 +637,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
99519 EXPORT_SYMBOL(kmalloc_dma_caches);
99520 #endif
99521
99522+#ifdef CONFIG_PAX_USERCOPY_SLABS
99523+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
99524+EXPORT_SYMBOL(kmalloc_usercopy_caches);
99525+#endif
99526+
99527 /*
99528 * Conversion table for small slabs sizes / 8 to the index in the
99529 * kmalloc array. This is necessary for slabs < 192 since we have non power
99530@@ -671,6 +706,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
99531 return kmalloc_dma_caches[index];
99532
99533 #endif
99534+
99535+#ifdef CONFIG_PAX_USERCOPY_SLABS
99536+ if (unlikely((flags & GFP_USERCOPY)))
99537+ return kmalloc_usercopy_caches[index];
99538+
99539+#endif
99540+
99541 return kmalloc_caches[index];
99542 }
99543
99544@@ -727,7 +769,7 @@ void __init create_kmalloc_caches(unsigned long flags)
99545 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
99546 if (!kmalloc_caches[i]) {
99547 kmalloc_caches[i] = create_kmalloc_cache(NULL,
99548- 1 << i, flags);
99549+ 1 << i, SLAB_USERCOPY | flags);
99550 }
99551
99552 /*
99553@@ -736,10 +778,10 @@ void __init create_kmalloc_caches(unsigned long flags)
99554 * earlier power of two caches
99555 */
99556 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
99557- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
99558+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
99559
99560 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
99561- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
99562+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
99563 }
99564
99565 /* Kmalloc array is now usable */
99566@@ -772,6 +814,23 @@ void __init create_kmalloc_caches(unsigned long flags)
99567 }
99568 }
99569 #endif
99570+
99571+#ifdef CONFIG_PAX_USERCOPY_SLABS
99572+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
99573+ struct kmem_cache *s = kmalloc_caches[i];
99574+
99575+ if (s) {
99576+ int size = kmalloc_size(i);
99577+ char *n = kasprintf(GFP_NOWAIT,
99578+ "usercopy-kmalloc-%d", size);
99579+
99580+ BUG_ON(!n);
99581+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
99582+ size, SLAB_USERCOPY | flags);
99583+ }
99584+ }
99585+#endif
99586+
99587 }
99588 #endif /* !CONFIG_SLOB */
99589
99590@@ -830,6 +889,9 @@ static void print_slabinfo_header(struct seq_file *m)
99591 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
99592 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
99593 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
99594+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99595+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
99596+#endif
99597 #endif
99598 seq_putc(m, '\n');
99599 }
99600@@ -964,7 +1026,7 @@ static int __init slab_proc_init(void)
99601 module_init(slab_proc_init);
99602 #endif /* CONFIG_SLABINFO */
99603
99604-static __always_inline void *__do_krealloc(const void *p, size_t new_size,
99605+static __always_inline void * __size_overflow(2) __do_krealloc(const void *p, size_t new_size,
99606 gfp_t flags)
99607 {
99608 void *ret;
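
Two policy decisions live in mm/slab_common.c: the pax_sanitize_slab= early parameter selects off/fast/full, and kmem_cache_create() folds that mode into per-cache flags, never sanitizing RCU-freed caches in fast mode and overriding a cache's own SLAB_NO_SANITIZE request in full mode. A userspace sketch of both steps; the flag bit values are invented for the sketch, and unknown parameter strings simply fall back to the fast default here rather than printing the kernel's error:

    #include <stdio.h>
    #include <string.h>

    enum pax_sanitize_mode {
        PAX_SANITIZE_SLAB_OFF,
        PAX_SANITIZE_SLAB_FAST,
        PAX_SANITIZE_SLAB_FULL,
    };

    #define SLAB_DESTROY_BY_RCU 0x1UL  /* hypothetical bit values for the sketch */
    #define SLAB_NO_SANITIZE    0x2UL

    static enum pax_sanitize_mode parse_mode(const char *str)
    {
        if (!strcmp(str, "0") || !strcmp(str, "off"))
            return PAX_SANITIZE_SLAB_OFF;
        if (!strcmp(str, "full"))
            return PAX_SANITIZE_SLAB_FULL;
        return PAX_SANITIZE_SLAB_FAST;   /* "1", "fast", and the default */
    }

    static unsigned long resolve_flags(enum pax_sanitize_mode mode,
                                       unsigned long flags)
    {
        if (mode == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
            flags |= SLAB_NO_SANITIZE;   /* RCU readers may still touch objects */
        else if (mode == PAX_SANITIZE_SLAB_FULL)
            flags &= ~SLAB_NO_SANITIZE;  /* full mode overrides the cache's wish */
        return flags;
    }

    int main(void)
    {
        enum pax_sanitize_mode mode = parse_mode("fast");

        printf("rcu cache sanitized? %s\n",
               resolve_flags(mode, SLAB_DESTROY_BY_RCU) & SLAB_NO_SANITIZE ? "no" : "yes");
        printf("plain cache sanitized? %s\n",
               resolve_flags(mode, 0) & SLAB_NO_SANITIZE ? "no" : "yes");
        return 0;
    }
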
99609diff --git a/mm/slob.c b/mm/slob.c
99610index 96a8620..46b3f12 100644
99611--- a/mm/slob.c
99612+++ b/mm/slob.c
99613@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
99614 /*
99615 * Return the size of a slob block.
99616 */
99617-static slobidx_t slob_units(slob_t *s)
99618+static slobidx_t slob_units(const slob_t *s)
99619 {
99620 if (s->units > 0)
99621 return s->units;
99622@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
99623 /*
99624 * Return the next free slob block pointer after this one.
99625 */
99626-static slob_t *slob_next(slob_t *s)
99627+static slob_t *slob_next(const slob_t *s)
99628 {
99629 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
99630 slobidx_t next;
99631@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
99632 /*
99633 * Returns true if s is the last free block in its page.
99634 */
99635-static int slob_last(slob_t *s)
99636+static int slob_last(const slob_t *s)
99637 {
99638 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
99639 }
99640
99641-static void *slob_new_pages(gfp_t gfp, int order, int node)
99642+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
99643 {
99644- void *page;
99645+ struct page *page;
99646
99647 #ifdef CONFIG_NUMA
99648 if (node != NUMA_NO_NODE)
99649@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
99650 if (!page)
99651 return NULL;
99652
99653- return page_address(page);
99654+ __SetPageSlab(page);
99655+ return page;
99656 }
99657
99658-static void slob_free_pages(void *b, int order)
99659+static void slob_free_pages(struct page *sp, int order)
99660 {
99661 if (current->reclaim_state)
99662 current->reclaim_state->reclaimed_slab += 1 << order;
99663- free_pages((unsigned long)b, order);
99664+ __ClearPageSlab(sp);
99665+ page_mapcount_reset(sp);
99666+ sp->private = 0;
99667+ __free_pages(sp, order);
99668 }
99669
99670 /*
99671@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
99672
99673 /* Not enough space: must allocate a new page */
99674 if (!b) {
99675- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
99676- if (!b)
99677+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
99678+ if (!sp)
99679 return NULL;
99680- sp = virt_to_page(b);
99681- __SetPageSlab(sp);
99682+ b = page_address(sp);
99683
99684 spin_lock_irqsave(&slob_lock, flags);
99685 sp->units = SLOB_UNITS(PAGE_SIZE);
99686 sp->freelist = b;
99687+ sp->private = 0;
99688 INIT_LIST_HEAD(&sp->lru);
99689 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
99690 set_slob_page_free(sp, slob_list);
99691@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
99692 /*
99693 * slob_free: entry point into the slob allocator.
99694 */
99695-static void slob_free(void *block, int size)
99696+static void slob_free(struct kmem_cache *c, void *block, int size)
99697 {
99698 struct page *sp;
99699 slob_t *prev, *next, *b = (slob_t *)block;
99700@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
99701 if (slob_page_free(sp))
99702 clear_slob_page_free(sp);
99703 spin_unlock_irqrestore(&slob_lock, flags);
99704- __ClearPageSlab(sp);
99705- page_mapcount_reset(sp);
99706- slob_free_pages(b, 0);
99707+ slob_free_pages(sp, 0);
99708 return;
99709 }
99710
99711+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99712+ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
99713+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
99714+#endif
99715+
99716 if (!slob_page_free(sp)) {
99717 /* This slob page is about to become partially free. Easy! */
99718 sp->units = units;
99719@@ -424,11 +431,10 @@ out:
99720 */
99721
99722 static __always_inline void *
99723-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99724+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
99725 {
99726- unsigned int *m;
99727- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99728- void *ret;
99729+ slob_t *m;
99730+ void *ret = NULL;
99731
99732 gfp &= gfp_allowed_mask;
99733
99734@@ -442,27 +448,45 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99735
99736 if (!m)
99737 return NULL;
99738- *m = size;
99739+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
99740+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
99741+ m[0].units = size;
99742+ m[1].units = align;
99743 ret = (void *)m + align;
99744
99745 trace_kmalloc_node(caller, ret,
99746 size, size + align, gfp, node);
99747 } else {
99748 unsigned int order = get_order(size);
99749+ struct page *page;
99750
99751 if (likely(order))
99752 gfp |= __GFP_COMP;
99753- ret = slob_new_pages(gfp, order, node);
99754+ page = slob_new_pages(gfp, order, node);
99755+ if (page) {
99756+ ret = page_address(page);
99757+ page->private = size;
99758+ }
99759
99760 trace_kmalloc_node(caller, ret,
99761 size, PAGE_SIZE << order, gfp, node);
99762 }
99763
99764- kmemleak_alloc(ret, size, 1, gfp);
99765 return ret;
99766 }
99767
99768-void *__kmalloc(size_t size, gfp_t gfp)
99769+static __always_inline void *
99770+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99771+{
99772+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99773+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
99774+
99775+ if (!ZERO_OR_NULL_PTR(ret))
99776+ kmemleak_alloc(ret, size, 1, gfp);
99777+ return ret;
99778+}
99779+
99780+void * __size_overflow(1) __kmalloc(size_t size, gfp_t gfp)
99781 {
99782 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
99783 }
99784@@ -491,34 +515,112 @@ void kfree(const void *block)
99785 return;
99786 kmemleak_free(block);
99787
99788+ VM_BUG_ON(!virt_addr_valid(block));
99789 sp = virt_to_page(block);
99790- if (PageSlab(sp)) {
99791+ VM_BUG_ON(!PageSlab(sp));
99792+ if (!sp->private) {
99793 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99794- unsigned int *m = (unsigned int *)(block - align);
99795- slob_free(m, *m + align);
99796- } else
99797+ slob_t *m = (slob_t *)(block - align);
99798+ slob_free(NULL, m, m[0].units + align);
99799+ } else {
99800+ __ClearPageSlab(sp);
99801+ page_mapcount_reset(sp);
99802+ sp->private = 0;
99803 __free_pages(sp, compound_order(sp));
99804+ }
99805 }
99806 EXPORT_SYMBOL(kfree);
99807
99808+bool is_usercopy_object(const void *ptr)
99809+{
99810+ if (!slab_is_available())
99811+ return false;
99812+
99813+ // PAX: TODO
99814+
99815+ return false;
99816+}
99817+
99818+#ifdef CONFIG_PAX_USERCOPY
99819+const char *check_heap_object(const void *ptr, unsigned long n)
99820+{
99821+ struct page *page;
99822+ const slob_t *free;
99823+ const void *base;
99824+ unsigned long flags;
99825+
99826+ if (ZERO_OR_NULL_PTR(ptr))
99827+ return "<null>";
99828+
99829+ if (!virt_addr_valid(ptr))
99830+ return NULL;
99831+
99832+ page = virt_to_head_page(ptr);
99833+ if (!PageSlab(page))
99834+ return NULL;
99835+
99836+ if (page->private) {
99837+ base = page;
99838+ if (base <= ptr && n <= page->private - (ptr - base))
99839+ return NULL;
99840+ return "<slob>";
99841+ }
99842+
99843+ /* some tricky double walking to find the chunk */
99844+ spin_lock_irqsave(&slob_lock, flags);
99845+ base = (void *)((unsigned long)ptr & PAGE_MASK);
99846+ free = page->freelist;
99847+
99848+ while (!slob_last(free) && (void *)free <= ptr) {
99849+ base = free + slob_units(free);
99850+ free = slob_next(free);
99851+ }
99852+
99853+ while (base < (void *)free) {
99854+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
99855+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
99856+ int offset;
99857+
99858+ if (ptr < base + align)
99859+ break;
99860+
99861+ offset = ptr - base - align;
99862+ if (offset >= m) {
99863+ base += size;
99864+ continue;
99865+ }
99866+
99867+ if (n > m - offset)
99868+ break;
99869+
99870+ spin_unlock_irqrestore(&slob_lock, flags);
99871+ return NULL;
99872+ }
99873+
99874+ spin_unlock_irqrestore(&slob_lock, flags);
99875+ return "<slob>";
99876+}
99877+#endif
99878+
99879 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
99880 size_t ksize(const void *block)
99881 {
99882 struct page *sp;
99883 int align;
99884- unsigned int *m;
99885+ slob_t *m;
99886
99887 BUG_ON(!block);
99888 if (unlikely(block == ZERO_SIZE_PTR))
99889 return 0;
99890
99891 sp = virt_to_page(block);
99892- if (unlikely(!PageSlab(sp)))
99893- return PAGE_SIZE << compound_order(sp);
99894+ VM_BUG_ON(!PageSlab(sp));
99895+ if (sp->private)
99896+ return sp->private;
99897
99898 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99899- m = (unsigned int *)(block - align);
99900- return SLOB_UNITS(*m) * SLOB_UNIT;
99901+ m = (slob_t *)(block - align);
99902+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
99903 }
99904 EXPORT_SYMBOL(ksize);
99905
99906@@ -534,23 +636,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
99907
99908 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
99909 {
99910- void *b;
99911+ void *b = NULL;
99912
99913 flags &= gfp_allowed_mask;
99914
99915 lockdep_trace_alloc(flags);
99916
99917+#ifdef CONFIG_PAX_USERCOPY_SLABS
99918+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
99919+#else
99920 if (c->size < PAGE_SIZE) {
99921 b = slob_alloc(c->size, flags, c->align, node);
99922 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
99923 SLOB_UNITS(c->size) * SLOB_UNIT,
99924 flags, node);
99925 } else {
99926- b = slob_new_pages(flags, get_order(c->size), node);
99927+ struct page *sp;
99928+
99929+ sp = slob_new_pages(flags, get_order(c->size), node);
99930+ if (sp) {
99931+ b = page_address(sp);
99932+ sp->private = c->size;
99933+ }
99934 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
99935 PAGE_SIZE << get_order(c->size),
99936 flags, node);
99937 }
99938+#endif
99939
99940 if (b && c->ctor)
99941 c->ctor(b);
99942@@ -567,7 +679,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
99943 EXPORT_SYMBOL(kmem_cache_alloc);
99944
99945 #ifdef CONFIG_NUMA
99946-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
99947+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t gfp, int node)
99948 {
99949 return __do_kmalloc_node(size, gfp, node, _RET_IP_);
99950 }
99951@@ -580,12 +692,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
99952 EXPORT_SYMBOL(kmem_cache_alloc_node);
99953 #endif
99954
99955-static void __kmem_cache_free(void *b, int size)
99956+static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
99957 {
99958- if (size < PAGE_SIZE)
99959- slob_free(b, size);
99960+ struct page *sp;
99961+
99962+ sp = virt_to_page(b);
99963+ BUG_ON(!PageSlab(sp));
99964+ if (!sp->private)
99965+ slob_free(c, b, size);
99966 else
99967- slob_free_pages(b, get_order(size));
99968+ slob_free_pages(sp, get_order(size));
99969 }
99970
99971 static void kmem_rcu_free(struct rcu_head *head)
99972@@ -593,22 +709,36 @@ static void kmem_rcu_free(struct rcu_head *head)
99973 struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
99974 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
99975
99976- __kmem_cache_free(b, slob_rcu->size);
99977+ __kmem_cache_free(NULL, b, slob_rcu->size);
99978 }
99979
99980 void kmem_cache_free(struct kmem_cache *c, void *b)
99981 {
99982+ int size = c->size;
99983+
99984+#ifdef CONFIG_PAX_USERCOPY_SLABS
99985+ if (size + c->align < PAGE_SIZE) {
99986+ size += c->align;
99987+ b -= c->align;
99988+ }
99989+#endif
99990+
99991 kmemleak_free_recursive(b, c->flags);
99992 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
99993 struct slob_rcu *slob_rcu;
99994- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
99995- slob_rcu->size = c->size;
99996+ slob_rcu = b + (size - sizeof(struct slob_rcu));
99997+ slob_rcu->size = size;
99998 call_rcu(&slob_rcu->head, kmem_rcu_free);
99999 } else {
100000- __kmem_cache_free(b, c->size);
100001+ __kmem_cache_free(c, b, size);
100002 }
100003
100004+#ifdef CONFIG_PAX_USERCOPY_SLABS
100005+ trace_kfree(_RET_IP_, b);
100006+#else
100007 trace_kmem_cache_free(_RET_IP_, b);
100008+#endif
100009+
100010 }
100011 EXPORT_SYMBOL(kmem_cache_free);
100012
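
slob now stores two fields in front of every kmalloc object instead of one: m[0].units holds the request size and m[1].units the alignment, which is why the patch adds BUILD_BUG_ONs that the minimum alignment can hold two slob_t units. The size field serves kfree()/ksize(); the align field lets check_heap_object() walk allocated chunks. A userspace sketch of the same in-band header, with a fixed MINALIGN standing in for max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN):

    #include <stdio.h>
    #include <stdlib.h>

    /* User pointer sits MINALIGN past the block start, leaving room for the
     * two header fields (MINALIGN >= 2*sizeof(int), like the BUILD_BUG_ONs). */
    #define MINALIGN 16

    static void *hdr_alloc(int size)
    {
        char *raw = malloc(MINALIGN + (size_t)size);

        if (!raw)
            return NULL;
        ((int *)raw)[0] = size;       /* m[0].units = size  */
        ((int *)raw)[1] = MINALIGN;   /* m[1].units = align */
        return raw + MINALIGN;
    }

    static int hdr_ksize(const void *p)
    {
        return ((const int *)((const char *)p - MINALIGN))[0];
    }

    static void hdr_free(void *p)
    {
        free((char *)p - MINALIGN);
    }

    int main(void)
    {
        void *p = hdr_alloc(40);

        if (!p)
            return 1;
        printf("ksize = %d\n", hdr_ksize(p));  /* 40 */
        hdr_free(p);
        return 0;
    }
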
100013diff --git a/mm/slub.c b/mm/slub.c
100014index fe376fe..2f5757c 100644
100015--- a/mm/slub.c
100016+++ b/mm/slub.c
100017@@ -197,7 +197,7 @@ struct track {
100018
100019 enum track_item { TRACK_ALLOC, TRACK_FREE };
100020
100021-#ifdef CONFIG_SYSFS
100022+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100023 static int sysfs_slab_add(struct kmem_cache *);
100024 static int sysfs_slab_alias(struct kmem_cache *, const char *);
100025 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
100026@@ -535,7 +535,7 @@ static void print_track(const char *s, struct track *t)
100027 if (!t->addr)
100028 return;
100029
100030- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
100031+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
100032 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
100033 #ifdef CONFIG_STACKTRACE
100034 {
100035@@ -2652,6 +2652,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
100036
100037 slab_free_hook(s, x);
100038
100039+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100040+ if (!(s->flags & SLAB_NO_SANITIZE)) {
100041+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
100042+ if (s->ctor)
100043+ s->ctor(x);
100044+ }
100045+#endif
100046+
100047 redo:
100048 /*
100049 * Determine the currently cpus per cpu slab.
100050@@ -2989,6 +2997,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
100051 s->inuse = size;
100052
100053 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
100054+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100055+ (!(flags & SLAB_NO_SANITIZE)) ||
100056+#endif
100057 s->ctor)) {
100058 /*
100059 * Relocate free pointer after the object if it is not
100060@@ -3243,7 +3254,7 @@ static int __init setup_slub_min_objects(char *str)
100061
100062 __setup("slub_min_objects=", setup_slub_min_objects);
100063
100064-void *__kmalloc(size_t size, gfp_t flags)
100065+void * __size_overflow(1) __kmalloc(size_t size, gfp_t flags)
100066 {
100067 struct kmem_cache *s;
100068 void *ret;
100069@@ -3279,7 +3290,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
100070 return ptr;
100071 }
100072
100073-void *__kmalloc_node(size_t size, gfp_t flags, int node)
100074+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
100075 {
100076 struct kmem_cache *s;
100077 void *ret;
100078@@ -3308,6 +3319,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
100079 EXPORT_SYMBOL(__kmalloc_node);
100080 #endif
100081
100082+bool is_usercopy_object(const void *ptr)
100083+{
100084+ struct page *page;
100085+ struct kmem_cache *s;
100086+
100087+ if (ZERO_OR_NULL_PTR(ptr))
100088+ return false;
100089+
100090+ if (!slab_is_available())
100091+ return false;
100092+
100093+ if (!virt_addr_valid(ptr))
100094+ return false;
100095+
100096+ page = virt_to_head_page(ptr);
100097+
100098+ if (!PageSlab(page))
100099+ return false;
100100+
100101+ s = page->slab_cache;
100102+ return s->flags & SLAB_USERCOPY;
100103+}
100104+
100105+#ifdef CONFIG_PAX_USERCOPY
100106+const char *check_heap_object(const void *ptr, unsigned long n)
100107+{
100108+ struct page *page;
100109+ struct kmem_cache *s;
100110+ unsigned long offset;
100111+
100112+ if (ZERO_OR_NULL_PTR(ptr))
100113+ return "<null>";
100114+
100115+ if (!virt_addr_valid(ptr))
100116+ return NULL;
100117+
100118+ page = virt_to_head_page(ptr);
100119+
100120+ if (!PageSlab(page))
100121+ return NULL;
100122+
100123+ s = page->slab_cache;
100124+ if (!(s->flags & SLAB_USERCOPY))
100125+ return s->name;
100126+
100127+ offset = (ptr - page_address(page)) % s->size;
100128+ if (offset <= s->object_size && n <= s->object_size - offset)
100129+ return NULL;
100130+
100131+ return s->name;
100132+}
100133+#endif
100134+
100135 size_t ksize(const void *object)
100136 {
100137 struct page *page;
100138@@ -3336,6 +3400,7 @@ void kfree(const void *x)
100139 if (unlikely(ZERO_OR_NULL_PTR(x)))
100140 return;
100141
100142+ VM_BUG_ON(!virt_addr_valid(x));
100143 page = virt_to_head_page(x);
100144 if (unlikely(!PageSlab(page))) {
100145 BUG_ON(!PageCompound(page));
100146@@ -3631,7 +3696,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
100147 int i;
100148 struct kmem_cache *c;
100149
100150- s->refcount++;
100151+ atomic_inc(&s->refcount);
100152
100153 /*
100154 * Adjust the object sizes so that we clear
100155@@ -3650,7 +3715,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
100156 }
100157
100158 if (sysfs_slab_alias(s, name)) {
100159- s->refcount--;
100160+ atomic_dec(&s->refcount);
100161 s = NULL;
100162 }
100163 }
100164@@ -3767,7 +3832,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
100165 }
100166 #endif
100167
100168-#ifdef CONFIG_SYSFS
100169+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100170 static int count_inuse(struct page *page)
100171 {
100172 return page->inuse;
100173@@ -4048,7 +4113,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
100174 len += sprintf(buf + len, "%7ld ", l->count);
100175
100176 if (l->addr)
100177+#ifdef CONFIG_GRKERNSEC_HIDESYM
100178+ len += sprintf(buf + len, "%pS", NULL);
100179+#else
100180 len += sprintf(buf + len, "%pS", (void *)l->addr);
100181+#endif
100182 else
100183 len += sprintf(buf + len, "<not-available>");
100184
100185@@ -4150,12 +4219,12 @@ static void __init resiliency_test(void)
100186 validate_slab_cache(kmalloc_caches[9]);
100187 }
100188 #else
100189-#ifdef CONFIG_SYSFS
100190+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100191 static void resiliency_test(void) {};
100192 #endif
100193 #endif
100194
100195-#ifdef CONFIG_SYSFS
100196+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100197 enum slab_stat_type {
100198 SL_ALL, /* All slabs */
100199 SL_PARTIAL, /* Only partially allocated slabs */
100200@@ -4392,13 +4461,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
100201 {
100202 if (!s->ctor)
100203 return 0;
100204+#ifdef CONFIG_GRKERNSEC_HIDESYM
100205+ return sprintf(buf, "%pS\n", NULL);
100206+#else
100207 return sprintf(buf, "%pS\n", s->ctor);
100208+#endif
100209 }
100210 SLAB_ATTR_RO(ctor);
100211
100212 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
100213 {
100214- return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
100215+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) < 0 ? 0 : atomic_read(&s->refcount) - 1);
100216 }
100217 SLAB_ATTR_RO(aliases);
100218
100219@@ -4486,6 +4559,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
100220 SLAB_ATTR_RO(cache_dma);
100221 #endif
100222
100223+#ifdef CONFIG_PAX_USERCOPY_SLABS
100224+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
100225+{
100226+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
100227+}
100228+SLAB_ATTR_RO(usercopy);
100229+#endif
100230+
100231+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100232+static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
100233+{
100234+ return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
100235+}
100236+SLAB_ATTR_RO(sanitize);
100237+#endif
100238+
100239 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
100240 {
100241 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
100242@@ -4541,7 +4630,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
100243 * as well as cause other issues like converting a mergeable
100244 * cache into an umergeable one.
100245 */
100246- if (s->refcount > 1)
100247+ if (atomic_read(&s->refcount) > 1)
100248 return -EINVAL;
100249
100250 s->flags &= ~SLAB_TRACE;
100251@@ -4661,7 +4750,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
100252 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
100253 size_t length)
100254 {
100255- if (s->refcount > 1)
100256+ if (atomic_read(&s->refcount) > 1)
100257 return -EINVAL;
100258
100259 s->flags &= ~SLAB_FAILSLAB;
100260@@ -4831,6 +4920,12 @@ static struct attribute *slab_attrs[] = {
100261 #ifdef CONFIG_ZONE_DMA
100262 &cache_dma_attr.attr,
100263 #endif
100264+#ifdef CONFIG_PAX_USERCOPY_SLABS
100265+ &usercopy_attr.attr,
100266+#endif
100267+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100268+ &sanitize_attr.attr,
100269+#endif
100270 #ifdef CONFIG_NUMA
100271 &remote_node_defrag_ratio_attr.attr,
100272 #endif
100273@@ -5075,6 +5170,7 @@ static char *create_unique_id(struct kmem_cache *s)
100274 return name;
100275 }
100276
100277+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100278 static int sysfs_slab_add(struct kmem_cache *s)
100279 {
100280 int err;
100281@@ -5148,6 +5244,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
100282 kobject_del(&s->kobj);
100283 kobject_put(&s->kobj);
100284 }
100285+#endif
100286
100287 /*
100288 * Need to buffer aliases during bootup until sysfs becomes
100289@@ -5161,6 +5258,7 @@ struct saved_alias {
100290
100291 static struct saved_alias *alias_list;
100292
100293+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100294 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
100295 {
100296 struct saved_alias *al;
100297@@ -5183,6 +5281,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
100298 alias_list = al;
100299 return 0;
100300 }
100301+#endif
100302
100303 static int __init slab_sysfs_init(void)
100304 {
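
SLUB lays objects out at a fixed stride (s->size) from the start of the slab page, so check_heap_object() reduces to modular arithmetic: compute the pointer's offset inside its object, then require the n-byte copy to end within object_size. A standalone sketch of that bounds test, ignoring red zones and debug metadata as the hunk above does:

    #include <stdio.h>
    #include <stdbool.h>

    /* Reject a copy of n bytes at ptr unless it stays inside one object.
     * stride = s->size (object plus metadata); object_size = usable bytes. */
    static bool copy_ok(unsigned long ptr, unsigned long page_base,
                        unsigned long stride, unsigned long object_size,
                        unsigned long n)
    {
        unsigned long offset = (ptr - page_base) % stride;

        return offset <= object_size && n <= object_size - offset;
    }

    int main(void)
    {
        /* 64-byte usable objects at a 96-byte stride starting at 0x1000 */
        printf("%d\n", copy_ok(0x1000 + 96 + 8, 0x1000, 96, 64, 32)); /* 1: fits   */
        printf("%d\n", copy_ok(0x1000 + 96 + 8, 0x1000, 96, 64, 64)); /* 0: spills */
        return 0;
    }
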
100305diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
100306index 4cba9c2..b4f9fcc 100644
100307--- a/mm/sparse-vmemmap.c
100308+++ b/mm/sparse-vmemmap.c
100309@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
100310 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
100311 if (!p)
100312 return NULL;
100313- pud_populate(&init_mm, pud, p);
100314+ pud_populate_kernel(&init_mm, pud, p);
100315 }
100316 return pud;
100317 }
100318@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
100319 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
100320 if (!p)
100321 return NULL;
100322- pgd_populate(&init_mm, pgd, p);
100323+ pgd_populate_kernel(&init_mm, pgd, p);
100324 }
100325 return pgd;
100326 }
100327diff --git a/mm/sparse.c b/mm/sparse.c
100328index d1b48b6..6e8590e 100644
100329--- a/mm/sparse.c
100330+++ b/mm/sparse.c
100331@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
100332
100333 for (i = 0; i < PAGES_PER_SECTION; i++) {
100334 if (PageHWPoison(&memmap[i])) {
100335- atomic_long_sub(1, &num_poisoned_pages);
100336+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
100337 ClearPageHWPoison(&memmap[i]);
100338 }
100339 }
100340diff --git a/mm/swap.c b/mm/swap.c
100341index 8a12b33..7068e78 100644
100342--- a/mm/swap.c
100343+++ b/mm/swap.c
100344@@ -31,6 +31,7 @@
100345 #include <linux/memcontrol.h>
100346 #include <linux/gfp.h>
100347 #include <linux/uio.h>
100348+#include <linux/hugetlb.h>
100349
100350 #include "internal.h"
100351
100352@@ -77,6 +78,8 @@ static void __put_compound_page(struct page *page)
100353
100354 __page_cache_release(page);
100355 dtor = get_compound_page_dtor(page);
100356+ if (!PageHuge(page))
100357+ BUG_ON(dtor != free_compound_page);
100358 (*dtor)(page);
100359 }
100360
100361diff --git a/mm/swapfile.c b/mm/swapfile.c
100362index 63f55cc..31874e6 100644
100363--- a/mm/swapfile.c
100364+++ b/mm/swapfile.c
100365@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
100366
100367 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
100368 /* Activity counter to indicate that a swapon or swapoff has occurred */
100369-static atomic_t proc_poll_event = ATOMIC_INIT(0);
100370+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
100371
100372 static inline unsigned char swap_count(unsigned char ent)
100373 {
100374@@ -1944,7 +1944,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
100375 spin_unlock(&swap_lock);
100376
100377 err = 0;
100378- atomic_inc(&proc_poll_event);
100379+ atomic_inc_unchecked(&proc_poll_event);
100380 wake_up_interruptible(&proc_poll_wait);
100381
100382 out_dput:
100383@@ -1961,8 +1961,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
100384
100385 poll_wait(file, &proc_poll_wait, wait);
100386
100387- if (seq->poll_event != atomic_read(&proc_poll_event)) {
100388- seq->poll_event = atomic_read(&proc_poll_event);
100389+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
100390+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
100391 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
100392 }
100393
100394@@ -2060,7 +2060,7 @@ static int swaps_open(struct inode *inode, struct file *file)
100395 return ret;
100396
100397 seq = file->private_data;
100398- seq->poll_event = atomic_read(&proc_poll_event);
100399+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
100400 return 0;
100401 }
100402
100403@@ -2520,7 +2520,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
100404 (frontswap_map) ? "FS" : "");
100405
100406 mutex_unlock(&swapon_mutex);
100407- atomic_inc(&proc_poll_event);
100408+ atomic_inc_unchecked(&proc_poll_event);
100409 wake_up_interruptible(&proc_poll_wait);
100410
100411 if (S_ISREG(inode->i_mode))
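
The swapfile changes are mechanical (atomic_t becomes atomic_unchecked_t, since an event counter that wraps is harmless and need not trip PaX's refcount overflow detection), but the protocol around proc_poll_event is worth spelling out: every swapon/swapoff bumps a global counter, each /proc/swaps reader caches a snapshot, and poll() signals whenever the two differ. A sketch:

    #include <stdio.h>

    /* Global activity counter, bumped on every swapon/swapoff. */
    static unsigned long proc_poll_event;

    struct toy_seq {
        unsigned long poll_event;  /* snapshot taken at open() and each poll() */
    };

    static int swaps_poll(struct toy_seq *seq)
    {
        if (seq->poll_event != proc_poll_event) {
            seq->poll_event = proc_poll_event;
            return 1;  /* POLLIN | POLLRDNORM | POLLERR | POLLPRI in the kernel */
        }
        return 0;
    }

    int main(void)
    {
        struct toy_seq seq = { .poll_event = proc_poll_event };

        printf("event? %d\n", swaps_poll(&seq));  /* 0: nothing happened */
        proc_poll_event++;                        /* swapon() */
        printf("event? %d\n", swaps_poll(&seq));  /* 1: re-read /proc/swaps */
        printf("event? %d\n", swaps_poll(&seq));  /* 0: already consumed    */
        return 0;
    }
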
100412diff --git a/mm/util.c b/mm/util.c
100413index fec39d4..3e60325 100644
100414--- a/mm/util.c
100415+++ b/mm/util.c
100416@@ -195,6 +195,12 @@ struct task_struct *task_of_stack(struct task_struct *task,
100417 void arch_pick_mmap_layout(struct mm_struct *mm)
100418 {
100419 mm->mmap_base = TASK_UNMAPPED_BASE;
100420+
100421+#ifdef CONFIG_PAX_RANDMMAP
100422+ if (mm->pax_flags & MF_PAX_RANDMMAP)
100423+ mm->mmap_base += mm->delta_mmap;
100424+#endif
100425+
100426 mm->get_unmapped_area = arch_get_unmapped_area;
100427 }
100428 #endif
100429@@ -371,6 +377,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
100430 if (!mm->arg_end)
100431 goto out_mm; /* Shh! No looking before we're done */
100432
100433+ if (gr_acl_handle_procpidmem(task))
100434+ goto out_mm;
100435+
100436 len = mm->arg_end - mm->arg_start;
100437
100438 if (len > buflen)
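
With PAX_RANDMMAP, the bottom-up mmap base is no longer fixed at TASK_UNMAPPED_BASE; a per-exec random delta is added before get_unmapped_area is installed. A sketch of the layout decision; MF_PAX_RANDMMAP's value, the delta width, and the base address here are illustrative, not the patch's constants:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define TASK_UNMAPPED_BASE 0x40000000UL  /* illustrative */
    #define MF_PAX_RANDMMAP    0x1UL         /* illustrative flag bit */

    struct toy_mm {
        unsigned long pax_flags;
        unsigned long delta_mmap;  /* chosen once per exec in the real patch */
        unsigned long mmap_base;
    };

    static void pick_mmap_layout(struct toy_mm *mm)
    {
        mm->mmap_base = TASK_UNMAPPED_BASE;
        if (mm->pax_flags & MF_PAX_RANDMMAP)
            mm->mmap_base += mm->delta_mmap;
    }

    int main(void)
    {
        struct toy_mm mm = { .pax_flags = MF_PAX_RANDMMAP };

        srand((unsigned)time(NULL));
        /* a page-aligned delta inside an illustrative 256 MiB window */
        mm.delta_mmap = ((unsigned long)rand() % (1UL << 16)) << 12;
        pick_mmap_layout(&mm);
        printf("mmap_base = %#lx\n", mm.mmap_base);
        return 0;
    }
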
100439diff --git a/mm/vmalloc.c b/mm/vmalloc.c
100440index 39c3388..7d976d4 100644
100441--- a/mm/vmalloc.c
100442+++ b/mm/vmalloc.c
100443@@ -39,20 +39,65 @@ struct vfree_deferred {
100444 struct work_struct wq;
100445 };
100446 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
100447+static DEFINE_PER_CPU(struct vfree_deferred, vunmap_deferred);
100448+
100449+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100450+struct stack_deferred_llist {
100451+ struct llist_head list;
100452+ void *stack;
100453+ void *lowmem_stack;
100454+};
100455+
100456+struct stack_deferred {
100457+ struct stack_deferred_llist list;
100458+ struct work_struct wq;
100459+};
100460+
100461+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
100462+#endif
100463
100464 static void __vunmap(const void *, int);
100465
100466-static void free_work(struct work_struct *w)
100467+static void vfree_work(struct work_struct *w)
100468+{
100469+ struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
100470+ struct llist_node *llnode = llist_del_all(&p->list);
100471+ while (llnode) {
100472+ void *x = llnode;
100473+ llnode = llist_next(llnode);
100474+ __vunmap(x, 1);
100475+ }
100476+}
100477+
100478+static void vunmap_work(struct work_struct *w)
100479 {
100480 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
100481 struct llist_node *llnode = llist_del_all(&p->list);
100482 while (llnode) {
100483 void *p = llnode;
100484 llnode = llist_next(llnode);
100485- __vunmap(p, 1);
100486+ __vunmap(p, 0);
100487 }
100488 }
100489
100490+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100491+static void unmap_work(struct work_struct *w)
100492+{
100493+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
100494+ struct llist_node *llnode = llist_del_all(&p->list.list);
100495+ while (llnode) {
100496+ struct stack_deferred_llist *x =
100497+ llist_entry((struct llist_head *)llnode,
100498+ struct stack_deferred_llist, list);
100499+ void *stack = ACCESS_ONCE(x->stack);
100500+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
100501+ llnode = llist_next(llnode);
100502+ __vunmap(stack, 0);
100503+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
100504+ }
100505+}
100506+#endif
100507+
100508 /*** Page table manipulation functions ***/
100509
100510 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100511@@ -61,8 +106,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100512
100513 pte = pte_offset_kernel(pmd, addr);
100514 do {
100515- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100516- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100517+
100518+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100519+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
100520+ BUG_ON(!pte_exec(*pte));
100521+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
100522+ continue;
100523+ }
100524+#endif
100525+
100526+ {
100527+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100528+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100529+ }
100530 } while (pte++, addr += PAGE_SIZE, addr != end);
100531 }
100532
100533@@ -122,16 +178,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
100534 pte = pte_alloc_kernel(pmd, addr);
100535 if (!pte)
100536 return -ENOMEM;
100537+
100538+ pax_open_kernel();
100539 do {
100540 struct page *page = pages[*nr];
100541
100542- if (WARN_ON(!pte_none(*pte)))
100543+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100544+ if (pgprot_val(prot) & _PAGE_NX)
100545+#endif
100546+
100547+ if (!pte_none(*pte)) {
100548+ pax_close_kernel();
100549+ WARN_ON(1);
100550 return -EBUSY;
100551- if (WARN_ON(!page))
100552+ }
100553+ if (!page) {
100554+ pax_close_kernel();
100555+ WARN_ON(1);
100556 return -ENOMEM;
100557+ }
100558 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
100559 (*nr)++;
100560 } while (pte++, addr += PAGE_SIZE, addr != end);
100561+ pax_close_kernel();
100562 return 0;
100563 }
100564
100565@@ -141,7 +210,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
100566 pmd_t *pmd;
100567 unsigned long next;
100568
100569- pmd = pmd_alloc(&init_mm, pud, addr);
100570+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
100571 if (!pmd)
100572 return -ENOMEM;
100573 do {
100574@@ -158,7 +227,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
100575 pud_t *pud;
100576 unsigned long next;
100577
100578- pud = pud_alloc(&init_mm, pgd, addr);
100579+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
100580 if (!pud)
100581 return -ENOMEM;
100582 do {
100583@@ -218,6 +287,12 @@ int is_vmalloc_or_module_addr(const void *x)
100584 if (addr >= MODULES_VADDR && addr < MODULES_END)
100585 return 1;
100586 #endif
100587+
100588+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100589+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
100590+ return 1;
100591+#endif
100592+
100593 return is_vmalloc_addr(x);
100594 }
100595
100596@@ -238,8 +313,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
100597
100598 if (!pgd_none(*pgd)) {
100599 pud_t *pud = pud_offset(pgd, addr);
100600+#ifdef CONFIG_X86
100601+ if (!pud_large(*pud))
100602+#endif
100603 if (!pud_none(*pud)) {
100604 pmd_t *pmd = pmd_offset(pud, addr);
100605+#ifdef CONFIG_X86
100606+ if (!pmd_large(*pmd))
100607+#endif
100608 if (!pmd_none(*pmd)) {
100609 pte_t *ptep, pte;
100610
100611@@ -341,7 +422,7 @@ static void purge_vmap_area_lazy(void);
100612 * Allocate a region of KVA of the specified size and alignment, within the
100613 * vstart and vend.
100614 */
100615-static struct vmap_area *alloc_vmap_area(unsigned long size,
100616+static struct vmap_area * __size_overflow(1) alloc_vmap_area(unsigned long size,
100617 unsigned long align,
100618 unsigned long vstart, unsigned long vend,
100619 int node, gfp_t gfp_mask)
100620@@ -1182,13 +1263,27 @@ void __init vmalloc_init(void)
100621 for_each_possible_cpu(i) {
100622 struct vmap_block_queue *vbq;
100623 struct vfree_deferred *p;
100624+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100625+ struct stack_deferred *p2;
100626+#endif
100627
100628 vbq = &per_cpu(vmap_block_queue, i);
100629 spin_lock_init(&vbq->lock);
100630 INIT_LIST_HEAD(&vbq->free);
100631+
100632 p = &per_cpu(vfree_deferred, i);
100633 init_llist_head(&p->list);
100634- INIT_WORK(&p->wq, free_work);
100635+ INIT_WORK(&p->wq, vfree_work);
100636+
100637+ p = &per_cpu(vunmap_deferred, i);
100638+ init_llist_head(&p->list);
100639+ INIT_WORK(&p->wq, vunmap_work);
100640+
100641+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100642+ p2 = &per_cpu(stack_deferred, i);
100643+ init_llist_head(&p2->list.list);
100644+ INIT_WORK(&p2->wq, unmap_work);
100645+#endif
100646 }
100647
100648 /* Import existing vmlist entries. */
100649@@ -1313,6 +1408,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
100650 struct vm_struct *area;
100651
100652 BUG_ON(in_interrupt());
100653+
100654+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100655+ if (flags & VM_KERNEXEC) {
100656+ if (start != VMALLOC_START || end != VMALLOC_END)
100657+ return NULL;
100658+ start = (unsigned long)MODULES_EXEC_VADDR;
100659+ end = (unsigned long)MODULES_EXEC_END;
100660+ }
100661+#endif
100662+
100663 if (flags & VM_IOREMAP)
100664 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
100665
100666@@ -1511,13 +1616,37 @@ EXPORT_SYMBOL(vfree);
100667 */
100668 void vunmap(const void *addr)
100669 {
100670- BUG_ON(in_interrupt());
100671- might_sleep();
100672- if (addr)
100673+ if (!addr)
100674+ return;
100675+
100676+ if (unlikely(in_interrupt())) {
100677+ struct vfree_deferred *p = this_cpu_ptr(&vunmap_deferred);
100678+ if (llist_add((struct llist_node *)addr, &p->list))
100679+ schedule_work(&p->wq);
100680+ } else {
100681+ might_sleep();
100682 __vunmap(addr, 0);
100683+ }
100684 }
100685 EXPORT_SYMBOL(vunmap);
100686
100687+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100688+void unmap_process_stacks(struct task_struct *task)
100689+{
100690+ if (unlikely(in_interrupt())) {
100691+ struct stack_deferred *p = this_cpu_ptr(&stack_deferred);
100692+ struct stack_deferred_llist *list = task->stack;
100693+ list->stack = task->stack;
100694+ list->lowmem_stack = task->lowmem_stack;
100695+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
100696+ schedule_work(&p->wq);
100697+ } else {
100698+ __vunmap(task->stack, 0);
100699+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
100700+ }
100701+}
100702+#endif
100703+
100704 /**
100705 * vmap - map an array of pages into virtually contiguous space
100706 * @pages: array of page pointers
100707@@ -1538,6 +1667,11 @@ void *vmap(struct page **pages, unsigned int count,
100708 if (count > totalram_pages)
100709 return NULL;
100710
100711+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100712+ if (!(pgprot_val(prot) & _PAGE_NX))
100713+ flags |= VM_KERNEXEC;
100714+#endif
100715+
100716 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
100717 __builtin_return_address(0));
100718 if (!area)
100719@@ -1640,6 +1774,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
100720 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
100721 goto fail;
100722
100723+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100724+ if (!(pgprot_val(prot) & _PAGE_NX))
100725+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
100726+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
100727+ else
100728+#endif
100729+
100730 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
100731 start, end, node, gfp_mask, caller);
100732 if (!area)
100733@@ -1816,10 +1957,9 @@ EXPORT_SYMBOL(vzalloc_node);
100734 * For tight control over page level allocator and protection flags
100735 * use __vmalloc() instead.
100736 */
100737-
100738 void *vmalloc_exec(unsigned long size)
100739 {
100740- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
100741+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
100742 NUMA_NO_NODE, __builtin_return_address(0));
100743 }
100744
100745@@ -2126,6 +2266,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
100746 {
100747 struct vm_struct *area;
100748
100749+ BUG_ON(vma->vm_mirror);
100750+
100751 size = PAGE_ALIGN(size);
100752
100753 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
100754@@ -2608,7 +2750,11 @@ static int s_show(struct seq_file *m, void *p)
100755 v->addr, v->addr + v->size, v->size);
100756
100757 if (v->caller)
100758+#ifdef CONFIG_GRKERNSEC_HIDESYM
100759+ seq_printf(m, " %pK", v->caller);
100760+#else
100761 seq_printf(m, " %pS", v->caller);
100762+#endif
100763
100764 if (v->nr_pages)
100765 seq_printf(m, " pages=%d", v->nr_pages);
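
vunmap() can now be reached from interrupt context, where __vunmap() must not run; the pointer is instead pushed onto a per-CPU lock-free list and a workqueue item drains it later. llist_add() reports whether the list was empty, which is exactly the "schedule the work item once" condition used above. A portable C11 sketch of that push/drain pair:

    #include <stdatomic.h>
    #include <stdio.h>

    struct llist_node {
        struct llist_node *next;
    };

    static _Atomic(struct llist_node *) deferred_head;

    /* llist_add(): returns nonzero if the list was empty (time to schedule_work) */
    static int toy_llist_add(struct llist_node *n)
    {
        struct llist_node *first = atomic_load(&deferred_head);

        do {
            n->next = first;  /* re-link on every CAS retry */
        } while (!atomic_compare_exchange_weak(&deferred_head, &first, n));
        return first == NULL;
    }

    /* llist_del_all(): detach the entire list in one exchange */
    static struct llist_node *toy_llist_del_all(void)
    {
        return atomic_exchange(&deferred_head, NULL);
    }

    int main(void)
    {
        struct llist_node a, b;

        printf("schedule work? %d\n", toy_llist_add(&a));  /* 1: list was empty */
        printf("schedule work? %d\n", toy_llist_add(&b));  /* 0: already pending */

        for (struct llist_node *n = toy_llist_del_all(); n; n = n->next)
            printf("draining node %p\n", (void *)n);       /* LIFO: b, then a */
        return 0;
    }
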
100766diff --git a/mm/vmstat.c b/mm/vmstat.c
100767index cdac773..7dd324e 100644
100768--- a/mm/vmstat.c
100769+++ b/mm/vmstat.c
100770@@ -24,6 +24,7 @@
100771 #include <linux/mm_inline.h>
100772 #include <linux/page_ext.h>
100773 #include <linux/page_owner.h>
100774+#include <linux/grsecurity.h>
100775
100776 #include "internal.h"
100777
100778@@ -83,7 +84,7 @@ void vm_events_fold_cpu(int cpu)
100779 *
100780 * vm_stat contains the global counters
100781 */
100782-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
100783+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
100784 EXPORT_SYMBOL(vm_stat);
100785
100786 #ifdef CONFIG_SMP
100787@@ -435,7 +436,7 @@ static int fold_diff(int *diff)
100788
100789 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
100790 if (diff[i]) {
100791- atomic_long_add(diff[i], &vm_stat[i]);
100792+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
100793 changes++;
100794 }
100795 return changes;
100796@@ -473,7 +474,7 @@ static int refresh_cpu_vm_stats(void)
100797 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
100798 if (v) {
100799
100800- atomic_long_add(v, &zone->vm_stat[i]);
100801+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100802 global_diff[i] += v;
100803 #ifdef CONFIG_NUMA
100804 /* 3 seconds idle till flush */
100805@@ -537,7 +538,7 @@ void cpu_vm_stats_fold(int cpu)
100806
100807 v = p->vm_stat_diff[i];
100808 p->vm_stat_diff[i] = 0;
100809- atomic_long_add(v, &zone->vm_stat[i]);
100810+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100811 global_diff[i] += v;
100812 }
100813 }
100814@@ -557,8 +558,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
100815 if (pset->vm_stat_diff[i]) {
100816 int v = pset->vm_stat_diff[i];
100817 pset->vm_stat_diff[i] = 0;
100818- atomic_long_add(v, &zone->vm_stat[i]);
100819- atomic_long_add(v, &vm_stat[i]);
100820+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100821+ atomic_long_add_unchecked(v, &vm_stat[i]);
100822 }
100823 }
100824 #endif
100825@@ -1291,10 +1292,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
100826 stat_items_size += sizeof(struct vm_event_state);
100827 #endif
100828
100829- v = kmalloc(stat_items_size, GFP_KERNEL);
100830+ v = kzalloc(stat_items_size, GFP_KERNEL);
100831 m->private = v;
100832 if (!v)
100833 return ERR_PTR(-ENOMEM);
100834+
100835+#ifdef CONFIG_GRKERNSEC_PROC_ADD
100836+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
100837+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
100838+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
100839+ && !in_group_p(grsec_proc_gid)
100840+#endif
100841+ )
100842+ return (unsigned long *)m->private + *pos;
100843+#endif
100844+#endif
100845+
100846 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
100847 v[i] = global_page_state(i);
100848 v += NR_VM_ZONE_STAT_ITEMS;
100849@@ -1526,10 +1539,16 @@ static int __init setup_vmstat(void)
100850 cpu_notifier_register_done();
100851 #endif
100852 #ifdef CONFIG_PROC_FS
100853- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
100854- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
100855- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
100856- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
100857+ {
100858+ mode_t gr_mode = S_IRUGO;
100859+#ifdef CONFIG_GRKERNSEC_PROC_ADD
100860+ gr_mode = S_IRUSR;
100861+#endif
100862+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
100863+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
100864+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
100865+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
100866+ }
100867 #endif
100868 return 0;
100869 }
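
Under GRKERNSEC_PROC_ADD, buddyinfo/pagetypeinfo/zoneinfo drop from world-readable to root-only, while /proc/vmstat keeps its mode and instead hands unprivileged readers a zeroed buffer (kzalloc plus an early uid/group bailout in vmstat_start), so tools that merely open the file keep working. A sketch of the two gates; grsec_proc_gid's value is illustrative, and the kernel's supplementary-group check (in_group_p) is simplified here to an effective-gid compare:

    #include <stdio.h>
    #include <sys/types.h>
    #include <unistd.h>

    #define S_IRUGO 0444
    #define S_IRUSR 0400

    static int grkernsec_proc_add = 1;   /* stands in for CONFIG_GRKERNSEC_PROC_ADD */
    static gid_t grsec_proc_gid = 1001;  /* illustrative GID */

    static unsigned int proc_mode(void)
    {
        return grkernsec_proc_add ? S_IRUSR : S_IRUGO;
    }

    /* vmstat_start()-style gate: other readers get zeroed statistics */
    static int may_see_stats(void)
    {
        return getuid() == 0 || getegid() == grsec_proc_gid;
    }

    int main(void)
    {
        printf("proc file mode: %04o\n", proc_mode());
        printf("full stats visible: %d\n", may_see_stats());
        return 0;
    }
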
100870diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
100871index 64c6bed..b79a5de 100644
100872--- a/net/8021q/vlan.c
100873+++ b/net/8021q/vlan.c
100874@@ -481,7 +481,7 @@ out:
100875 return NOTIFY_DONE;
100876 }
100877
100878-static struct notifier_block vlan_notifier_block __read_mostly = {
100879+static struct notifier_block vlan_notifier_block = {
100880 .notifier_call = vlan_device_event,
100881 };
100882
100883@@ -556,8 +556,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
100884 err = -EPERM;
100885 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
100886 break;
100887- if ((args.u.name_type >= 0) &&
100888- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
100889+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
100890 struct vlan_net *vn;
100891
100892 vn = net_generic(net, vlan_net_id);
100893diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
100894index 8ac8a5c..991defc 100644
100895--- a/net/8021q/vlan_netlink.c
100896+++ b/net/8021q/vlan_netlink.c
100897@@ -238,7 +238,7 @@ nla_put_failure:
100898 return -EMSGSIZE;
100899 }
100900
100901-struct rtnl_link_ops vlan_link_ops __read_mostly = {
100902+struct rtnl_link_ops vlan_link_ops = {
100903 .kind = "vlan",
100904 .maxtype = IFLA_VLAN_MAX,
100905 .policy = vlan_policy,
100906diff --git a/net/9p/client.c b/net/9p/client.c
100907index e86a9bea..e91f70e 100644
100908--- a/net/9p/client.c
100909+++ b/net/9p/client.c
100910@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
100911 len - inline_len);
100912 } else {
100913 err = copy_from_user(ename + inline_len,
100914- uidata, len - inline_len);
100915+ (char __force_user *)uidata, len - inline_len);
100916 if (err) {
100917 err = -EFAULT;
100918 goto out_err;
100919@@ -1570,7 +1570,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
100920 kernel_buf = 1;
100921 indata = data;
100922 } else
100923- indata = (__force char *)udata;
100924+ indata = (__force_kernel char *)udata;
100925 /*
100926 * response header len is 11
100927 * PDU Header(7) + IO Size (4)
100928@@ -1645,7 +1645,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
100929 kernel_buf = 1;
100930 odata = data;
100931 } else
100932- odata = (char *)udata;
100933+ odata = (char __force_kernel *)udata;
100934 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
100935 P9_ZC_HDR_SZ, kernel_buf, "dqd",
100936 fid->fid, offset, rsize);
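/*
 * [annotation, not part of the patch] The 9p client passes kernel buffers
 * through interfaces typed for user pointers (and vice versa), and the
 * plain (__force char *) casts are rewritten to the patch-provided
 * __force_user / __force_kernel macros so PaX's usercopy checking can tell
 * a deliberate, audited address-space crossing from an accidental one.  A
 * sketch of the underlying sparse mechanism using stock annotations
 * (expands to nothing outside sparse's __CHECKER__ mode):
 */
#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* a kernel buffer deliberately handed to an API that takes a user pointer */
static inline const char __user *pretend_user(const char *kbuf)
{
	return (__force const char __user *)kbuf; /* explicit and greppable */
}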
100937diff --git a/net/9p/mod.c b/net/9p/mod.c
100938index 6ab36ae..6f1841b 100644
100939--- a/net/9p/mod.c
100940+++ b/net/9p/mod.c
100941@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
100942 void v9fs_register_trans(struct p9_trans_module *m)
100943 {
100944 spin_lock(&v9fs_trans_lock);
100945- list_add_tail(&m->list, &v9fs_trans_list);
100946+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
100947 spin_unlock(&v9fs_trans_lock);
100948 }
100949 EXPORT_SYMBOL(v9fs_register_trans);
100950@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
100951 void v9fs_unregister_trans(struct p9_trans_module *m)
100952 {
100953 spin_lock(&v9fs_trans_lock);
100954- list_del_init(&m->list);
100955+ pax_list_del_init((struct list_head *)&m->list);
100956 spin_unlock(&v9fs_trans_lock);
100957 }
100958 EXPORT_SYMBOL(v9fs_unregister_trans);
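/*
 * [annotation, not part of the patch] struct p9_trans_module is constified
 * elsewhere in the patch, so its embedded list_head ends up in read-only
 * memory and the stock list helpers would fault on registration.  The
 * pax_list_* wrappers (provided by the patch) briefly lift the write
 * protection around the single list operation.  Their shape, sketched with
 * the patch-provided pax_open_kernel()/pax_close_kernel() pair:
 */
#include <linux/list.h>

static inline void pax_list_add_tail_sketch(struct list_head *new,
					    struct list_head *head)
{
	pax_open_kernel();	/* make the read-only region writable */
	list_add_tail(new, head);
	pax_close_kernel();	/* re-seal it immediately */
}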
100959diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
100960index 80d08f6..de63fd1 100644
100961--- a/net/9p/trans_fd.c
100962+++ b/net/9p/trans_fd.c
100963@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
100964 oldfs = get_fs();
100965 set_fs(get_ds());
100966 /* The cast to a user pointer is valid due to the set_fs() */
100967- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
100968+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
100969 set_fs(oldfs);
100970
100971 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
100972diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
100973index af46bc4..f9adfcd 100644
100974--- a/net/appletalk/atalk_proc.c
100975+++ b/net/appletalk/atalk_proc.c
100976@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
100977 struct proc_dir_entry *p;
100978 int rc = -ENOMEM;
100979
100980- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
100981+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
100982 if (!atalk_proc_dir)
100983 goto out;
100984
100985diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
100986index 876fbe8..8bbea9f 100644
100987--- a/net/atm/atm_misc.c
100988+++ b/net/atm/atm_misc.c
100989@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
100990 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
100991 return 1;
100992 atm_return(vcc, truesize);
100993- atomic_inc(&vcc->stats->rx_drop);
100994+ atomic_inc_unchecked(&vcc->stats->rx_drop);
100995 return 0;
100996 }
100997 EXPORT_SYMBOL(atm_charge);
100998@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
100999 }
101000 }
101001 atm_return(vcc, guess);
101002- atomic_inc(&vcc->stats->rx_drop);
101003+ atomic_inc_unchecked(&vcc->stats->rx_drop);
101004 return NULL;
101005 }
101006 EXPORT_SYMBOL(atm_alloc_charge);
101007@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
101008
101009 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
101010 {
101011-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
101012+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
101013 __SONET_ITEMS
101014 #undef __HANDLE_ITEM
101015 }
101016@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
101017
101018 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
101019 {
101020-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
101021+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
101022 __SONET_ITEMS
101023 #undef __HANDLE_ITEM
101024 }
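/*
 * [annotation, not part of the patch] The __HANDLE_ITEM/__SONET_ITEMS and
 * __HANDLE_ITEM/__AAL_STAT_ITEMS pairs are X-macros: the field list is
 * written once and every user redefines __HANDLE_ITEM to stamp out
 * per-field code.  The patch only swaps the macro body to the *_unchecked
 * atomic ops, since these are pure statistics that may wrap.  A minimal,
 * self-contained illustration of the X-macro shape (demo_stats and its
 * fields are hypothetical):
 */
#define __DEMO_ITEMS __HANDLE_ITEM(rx); __HANDLE_ITEM(tx)

struct demo_stats { long rx, tx; };

static void demo_copy_stats(struct demo_stats *from, struct demo_stats *to)
{
#define __HANDLE_ITEM(i) to->i = from->i
	__DEMO_ITEMS;
#undef __HANDLE_ITEM
}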
101025diff --git a/net/atm/lec.c b/net/atm/lec.c
101026index 4b98f89..5a2f6cb 100644
101027--- a/net/atm/lec.c
101028+++ b/net/atm/lec.c
101029@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
101030 }
101031
101032 static struct lane2_ops lane2_ops = {
101033- lane2_resolve, /* resolve, spec 3.1.3 */
101034- lane2_associate_req, /* associate_req, spec 3.1.4 */
101035- NULL /* associate indicator, spec 3.1.5 */
101036+ .resolve = lane2_resolve,
101037+ .associate_req = lane2_associate_req,
101038+ .associate_indicator = NULL
101039 };
101040
101041 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
101042diff --git a/net/atm/lec.h b/net/atm/lec.h
101043index 4149db1..f2ab682 100644
101044--- a/net/atm/lec.h
101045+++ b/net/atm/lec.h
101046@@ -48,7 +48,7 @@ struct lane2_ops {
101047 const u8 *tlvs, u32 sizeoftlvs);
101048 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
101049 const u8 *tlvs, u32 sizeoftlvs);
101050-};
101051+} __no_const;
101052
101053 /*
101054 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
101055diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
101056index d1b2d9a..d549f7f 100644
101057--- a/net/atm/mpoa_caches.c
101058+++ b/net/atm/mpoa_caches.c
101059@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
101060
101061
101062 static struct in_cache_ops ingress_ops = {
101063- in_cache_add_entry, /* add_entry */
101064- in_cache_get, /* get */
101065- in_cache_get_with_mask, /* get_with_mask */
101066- in_cache_get_by_vcc, /* get_by_vcc */
101067- in_cache_put, /* put */
101068- in_cache_remove_entry, /* remove_entry */
101069- cache_hit, /* cache_hit */
101070- clear_count_and_expired, /* clear_count */
101071- check_resolving_entries, /* check_resolving */
101072- refresh_entries, /* refresh */
101073- in_destroy_cache /* destroy_cache */
101074+ .add_entry = in_cache_add_entry,
101075+ .get = in_cache_get,
101076+ .get_with_mask = in_cache_get_with_mask,
101077+ .get_by_vcc = in_cache_get_by_vcc,
101078+ .put = in_cache_put,
101079+ .remove_entry = in_cache_remove_entry,
101080+ .cache_hit = cache_hit,
101081+ .clear_count = clear_count_and_expired,
101082+ .check_resolving = check_resolving_entries,
101083+ .refresh = refresh_entries,
101084+ .destroy_cache = in_destroy_cache
101085 };
101086
101087 static struct eg_cache_ops egress_ops = {
101088- eg_cache_add_entry, /* add_entry */
101089- eg_cache_get_by_cache_id, /* get_by_cache_id */
101090- eg_cache_get_by_tag, /* get_by_tag */
101091- eg_cache_get_by_vcc, /* get_by_vcc */
101092- eg_cache_get_by_src_ip, /* get_by_src_ip */
101093- eg_cache_put, /* put */
101094- eg_cache_remove_entry, /* remove_entry */
101095- update_eg_cache_entry, /* update */
101096- clear_expired, /* clear_expired */
101097- eg_destroy_cache /* destroy_cache */
101098+ .add_entry = eg_cache_add_entry,
101099+ .get_by_cache_id = eg_cache_get_by_cache_id,
101100+ .get_by_tag = eg_cache_get_by_tag,
101101+ .get_by_vcc = eg_cache_get_by_vcc,
101102+ .get_by_src_ip = eg_cache_get_by_src_ip,
101103+ .put = eg_cache_put,
101104+ .remove_entry = eg_cache_remove_entry,
101105+ .update = update_eg_cache_entry,
101106+ .clear_expired = clear_expired,
101107+ .destroy_cache = eg_destroy_cache
101108 };
101109
101110
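/*
 * [annotation, not part of the patch] Rewriting the mpoa ops tables from
 * positional to C99 designated initializers is a mechanical cleanup the
 * patch applies wherever it constifies an ops structure: the initializer
 * no longer depends on member order, unnamed members are zeroed explicitly,
 * and the per-field comments become unnecessary.  The before/after shape,
 * on a hypothetical ops struct:
 */
struct demo_cache_ops {
	int  (*add_entry)(void);
	void (*destroy_cache)(void);
};

static int  demo_add_entry(void)     { return 0; }
static void demo_destroy_cache(void) { }

/* before: { demo_add_entry, demo_destroy_cache }  -- order-dependent */
static const struct demo_cache_ops demo_ops = {
	.add_entry     = demo_add_entry,
	.destroy_cache = demo_destroy_cache,
};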
101111diff --git a/net/atm/proc.c b/net/atm/proc.c
101112index bbb6461..cf04016 100644
101113--- a/net/atm/proc.c
101114+++ b/net/atm/proc.c
101115@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
101116 const struct k_atm_aal_stats *stats)
101117 {
101118 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
101119- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
101120- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
101121- atomic_read(&stats->rx_drop));
101122+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
101123+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
101124+ atomic_read_unchecked(&stats->rx_drop));
101125 }
101126
101127 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
101128diff --git a/net/atm/resources.c b/net/atm/resources.c
101129index 0447d5d..3cf4728 100644
101130--- a/net/atm/resources.c
101131+++ b/net/atm/resources.c
101132@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
101133 static void copy_aal_stats(struct k_atm_aal_stats *from,
101134 struct atm_aal_stats *to)
101135 {
101136-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
101137+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
101138 __AAL_STAT_ITEMS
101139 #undef __HANDLE_ITEM
101140 }
101141@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
101142 static void subtract_aal_stats(struct k_atm_aal_stats *from,
101143 struct atm_aal_stats *to)
101144 {
101145-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
101146+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
101147 __AAL_STAT_ITEMS
101148 #undef __HANDLE_ITEM
101149 }
101150diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
101151index 919a5ce..cc6b444 100644
101152--- a/net/ax25/sysctl_net_ax25.c
101153+++ b/net/ax25/sysctl_net_ax25.c
101154@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
101155 {
101156 char path[sizeof("net/ax25/") + IFNAMSIZ];
101157 int k;
101158- struct ctl_table *table;
101159+ ctl_table_no_const *table;
101160
101161 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
101162 if (!table)
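/*
 * [annotation, not part of the patch] With the constification plugin
 * active, struct ctl_table instances are treated as const, but ax25 builds
 * its per-device table at runtime from a template via kmemdup() and must
 * patch fields afterwards.  ctl_table_no_const is the patch-provided escape
 * hatch for exactly this case.  The pattern, sketched (registration and
 * cleanup elided):
 */
static int register_dev_sysctl_sketch(const struct ctl_table *template,
				      size_t n)
{
	ctl_table_no_const *table;	/* writable duplicate of a const template */

	table = kmemdup(template, n * sizeof(*template), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	table[0].data = NULL;		/* fixed up per device before registering */
	return 0;
}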
101163diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
101164index 1e80539..676c37a 100644
101165--- a/net/batman-adv/bat_iv_ogm.c
101166+++ b/net/batman-adv/bat_iv_ogm.c
101167@@ -313,7 +313,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
101168
101169 /* randomize initial seqno to avoid collision */
101170 get_random_bytes(&random_seqno, sizeof(random_seqno));
101171- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
101172+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
101173
101174 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
101175 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
101176@@ -918,9 +918,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
101177 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
101178
101179 /* change sequence number to network order */
101180- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
101181+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
101182 batadv_ogm_packet->seqno = htonl(seqno);
101183- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
101184+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
101185
101186 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
101187
101188@@ -1597,7 +1597,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
101189 return;
101190
101191 /* could be changed by schedule_own_packet() */
101192- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
101193+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
101194
101195 if (ogm_packet->flags & BATADV_DIRECTLINK)
101196 has_directlink_flag = true;
101197diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
101198index 00f9e14..e1c7203 100644
101199--- a/net/batman-adv/fragmentation.c
101200+++ b/net/batman-adv/fragmentation.c
101201@@ -450,7 +450,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
101202 frag_header.packet_type = BATADV_UNICAST_FRAG;
101203 frag_header.version = BATADV_COMPAT_VERSION;
101204 frag_header.ttl = BATADV_TTL;
101205- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
101206+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
101207 frag_header.reserved = 0;
101208 frag_header.no = 0;
101209 frag_header.total_size = htons(skb->len);
101210diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
101211index 5467955..75ad4e3 100644
101212--- a/net/batman-adv/soft-interface.c
101213+++ b/net/batman-adv/soft-interface.c
101214@@ -296,7 +296,7 @@ send:
101215 primary_if->net_dev->dev_addr);
101216
101217 /* set broadcast sequence number */
101218- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
101219+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
101220 bcast_packet->seqno = htonl(seqno);
101221
101222 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
101223@@ -761,7 +761,7 @@ static int batadv_softif_init_late(struct net_device *dev)
101224 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
101225
101226 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
101227- atomic_set(&bat_priv->bcast_seqno, 1);
101228+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
101229 atomic_set(&bat_priv->tt.vn, 0);
101230 atomic_set(&bat_priv->tt.local_changes, 0);
101231 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
101232@@ -775,7 +775,7 @@ static int batadv_softif_init_late(struct net_device *dev)
101233
101234 /* randomize initial seqno to avoid collision */
101235 get_random_bytes(&random_seqno, sizeof(random_seqno));
101236- atomic_set(&bat_priv->frag_seqno, random_seqno);
101237+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
101238
101239 bat_priv->primary_if = NULL;
101240 bat_priv->num_ifaces = 0;
101241@@ -983,7 +983,7 @@ int batadv_softif_is_valid(const struct net_device *net_dev)
101242 return 0;
101243 }
101244
101245-struct rtnl_link_ops batadv_link_ops __read_mostly = {
101246+struct rtnl_link_ops batadv_link_ops = {
101247 .kind = "batadv",
101248 .priv_size = sizeof(struct batadv_priv),
101249 .setup = batadv_softif_init_early,
101250diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
101251index 8854c05..ee5d5497 100644
101252--- a/net/batman-adv/types.h
101253+++ b/net/batman-adv/types.h
101254@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
101255 struct batadv_hard_iface_bat_iv {
101256 unsigned char *ogm_buff;
101257 int ogm_buff_len;
101258- atomic_t ogm_seqno;
101259+ atomic_unchecked_t ogm_seqno;
101260 };
101261
101262 /**
101263@@ -768,7 +768,7 @@ struct batadv_priv {
101264 atomic_t bonding;
101265 atomic_t fragmentation;
101266 atomic_t packet_size_max;
101267- atomic_t frag_seqno;
101268+ atomic_unchecked_t frag_seqno;
101269 #ifdef CONFIG_BATMAN_ADV_BLA
101270 atomic_t bridge_loop_avoidance;
101271 #endif
101272@@ -787,7 +787,7 @@ struct batadv_priv {
101273 #endif
101274 uint32_t isolation_mark;
101275 uint32_t isolation_mark_mask;
101276- atomic_t bcast_seqno;
101277+ atomic_unchecked_t bcast_seqno;
101278 atomic_t bcast_queue_left;
101279 atomic_t batman_queue_left;
101280 char num_ifaces;
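/*
 * [annotation, not part of the patch] batman-adv's OGM, fragment and
 * broadcast sequence numbers wrap by design, so the patch retypes them as
 * atomic_unchecked_t (a PaX-provided type) and converts every accessor in
 * lockstep; a checked op on an unchecked field will not compile, which is
 * what keeps the conversion honest.  The declaration/use pairing:
 */
struct demo_priv {
	atomic_unchecked_t seqno;	/* wrapping counter; overflow is expected */
};

static unsigned int demo_next_seqno(struct demo_priv *priv)
{
	return (unsigned int)atomic_inc_return_unchecked(&priv->seqno);
}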
101281diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
101282index 2c245fd..dccf543 100644
101283--- a/net/bluetooth/hci_sock.c
101284+++ b/net/bluetooth/hci_sock.c
101285@@ -1067,7 +1067,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
101286 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
101287 }
101288
101289- len = min_t(unsigned int, len, sizeof(uf));
101290+ len = min((size_t)len, sizeof(uf));
101291 if (copy_from_user(&uf, optval, len)) {
101292 err = -EFAULT;
101293 break;
101294diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
101295index d04dc00..d25d576 100644
101296--- a/net/bluetooth/l2cap_core.c
101297+++ b/net/bluetooth/l2cap_core.c
101298@@ -3524,8 +3524,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
101299 break;
101300
101301 case L2CAP_CONF_RFC:
101302- if (olen == sizeof(rfc))
101303- memcpy(&rfc, (void *)val, olen);
101304+ if (olen != sizeof(rfc))
101305+ break;
101306+
101307+ memcpy(&rfc, (void *)val, olen);
101308
101309 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
101310 rfc.mode != chan->mode)
101311diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
101312index f65caf4..c07110c 100644
101313--- a/net/bluetooth/l2cap_sock.c
101314+++ b/net/bluetooth/l2cap_sock.c
101315@@ -634,7 +634,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
101316 struct sock *sk = sock->sk;
101317 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
101318 struct l2cap_options opts;
101319- int len, err = 0;
101320+ int err = 0;
101321+ size_t len = optlen;
101322 u32 opt;
101323
101324 BT_DBG("sk %p", sk);
101325@@ -661,7 +662,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
101326 opts.max_tx = chan->max_tx;
101327 opts.txwin_size = chan->tx_win;
101328
101329- len = min_t(unsigned int, sizeof(opts), optlen);
101330+ len = min(sizeof(opts), len);
101331 if (copy_from_user((char *) &opts, optval, len)) {
101332 err = -EFAULT;
101333 break;
101334@@ -748,7 +749,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101335 struct bt_security sec;
101336 struct bt_power pwr;
101337 struct l2cap_conn *conn;
101338- int len, err = 0;
101339+ int err = 0;
101340+ size_t len = optlen;
101341 u32 opt;
101342
101343 BT_DBG("sk %p", sk);
101344@@ -772,7 +774,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101345
101346 sec.level = BT_SECURITY_LOW;
101347
101348- len = min_t(unsigned int, sizeof(sec), optlen);
101349+ len = min(sizeof(sec), len);
101350 if (copy_from_user((char *) &sec, optval, len)) {
101351 err = -EFAULT;
101352 break;
101353@@ -868,7 +870,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101354
101355 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
101356
101357- len = min_t(unsigned int, sizeof(pwr), optlen);
101358+ len = min(sizeof(pwr), len);
101359 if (copy_from_user((char *) &pwr, optval, len)) {
101360 err = -EFAULT;
101361 break;
101362diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
101363index 2348176..b9b6cf2 100644
101364--- a/net/bluetooth/rfcomm/sock.c
101365+++ b/net/bluetooth/rfcomm/sock.c
101366@@ -687,7 +687,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
101367 struct sock *sk = sock->sk;
101368 struct bt_security sec;
101369 int err = 0;
101370- size_t len;
101371+ size_t len = optlen;
101372 u32 opt;
101373
101374 BT_DBG("sk %p", sk);
101375@@ -709,7 +709,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
101376
101377 sec.level = BT_SECURITY_LOW;
101378
101379- len = min_t(unsigned int, sizeof(sec), optlen);
101380+ len = min(sizeof(sec), len);
101381 if (copy_from_user((char *) &sec, optval, len)) {
101382 err = -EFAULT;
101383 break;
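/*
 * [annotation, not part of the patch] Across the bluetooth setsockopt paths
 * the "int len" plus min_t(unsigned int, ...) pattern is replaced by a
 * size_t clamped once against the destination struct, so the length that
 * reaches copy_from_user() never passes through a signed or narrower
 * intermediate type.  A self-contained demo of the class of truncation bug
 * this style forecloses (values are illustrative, LP64 assumed):
 */
#include <stdio.h>

int main(void)
{
	size_t optlen = 0x100000001UL;		/* > UINT_MAX on LP64 */
	unsigned int narrowed = (unsigned int)optlen;

	/* after narrowing, a huge length looks tiny and passes any check */
	printf("optlen=%zu narrowed=%u\n", optlen, narrowed);
	return 0;
}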
101384diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
101385index 8e385a0..a5bdd8e 100644
101386--- a/net/bluetooth/rfcomm/tty.c
101387+++ b/net/bluetooth/rfcomm/tty.c
101388@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
101389 BT_DBG("tty %p id %d", tty, tty->index);
101390
101391 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
101392- dev->channel, dev->port.count);
101393+ dev->channel, atomic_read(&dev->port.count));
101394
101395 err = tty_port_open(&dev->port, tty, filp);
101396 if (err)
101397@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
101398 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
101399
101400 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
101401- dev->port.count);
101402+ atomic_read(&dev->port.count));
101403
101404 tty_port_close(&dev->port, tty, filp);
101405 }
101406diff --git a/net/bridge/br.c b/net/bridge/br.c
101407index 44425af..4ee730e 100644
101408--- a/net/bridge/br.c
101409+++ b/net/bridge/br.c
101410@@ -147,6 +147,8 @@ static int __init br_init(void)
101411 {
101412 int err;
101413
101414+ BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
101415+
101416 err = stp_proto_register(&br_stp_proto);
101417 if (err < 0) {
101418 pr_err("bridge: can't register sap for STP\n");
101419diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
101420index 9f5eb55..45ab9c5 100644
101421--- a/net/bridge/br_netlink.c
101422+++ b/net/bridge/br_netlink.c
101423@@ -566,7 +566,7 @@ static struct rtnl_af_ops br_af_ops = {
101424 .get_link_af_size = br_get_link_af_size,
101425 };
101426
101427-struct rtnl_link_ops br_link_ops __read_mostly = {
101428+struct rtnl_link_ops br_link_ops = {
101429 .kind = "bridge",
101430 .priv_size = sizeof(struct net_bridge),
101431 .setup = br_dev_setup,
101432diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
101433index d9a8c05..8dadc6c6 100644
101434--- a/net/bridge/netfilter/ebtables.c
101435+++ b/net/bridge/netfilter/ebtables.c
101436@@ -1533,7 +1533,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
101437 tmp.valid_hooks = t->table->valid_hooks;
101438 }
101439 mutex_unlock(&ebt_mutex);
101440- if (copy_to_user(user, &tmp, *len) != 0) {
101441+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101442 BUGPRINT("c2u Didn't work\n");
101443 ret = -EFAULT;
101444 break;
101445@@ -2339,7 +2339,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101446 goto out;
101447 tmp.valid_hooks = t->valid_hooks;
101448
101449- if (copy_to_user(user, &tmp, *len) != 0) {
101450+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101451 ret = -EFAULT;
101452 break;
101453 }
101454@@ -2350,7 +2350,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101455 tmp.entries_size = t->table->entries_size;
101456 tmp.valid_hooks = t->table->valid_hooks;
101457
101458- if (copy_to_user(user, &tmp, *len) != 0) {
101459+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101460 ret = -EFAULT;
101461 break;
101462 }
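/*
 * [annotation, not part of the patch] In the ebtables get handlers the
 * user-controlled *len was handed straight to copy_to_user() from an
 * on-stack struct, so a *len larger than sizeof(tmp) read adjacent kernel
 * stack back to userspace.  The added "*len > sizeof(tmp)" bound turns that
 * into -EFAULT.  The general guard, as a sketch:
 */
static long bounded_copy_out_sketch(void __user *user, const void *obj,
				    size_t objlen, size_t reqlen)
{
	if (reqlen > objlen)		/* never copy past the kernel object */
		return -EFAULT;
	return copy_to_user(user, obj, reqlen) ? -EFAULT : 0;
}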
101463diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
101464index f5afda1..dcf770a 100644
101465--- a/net/caif/cfctrl.c
101466+++ b/net/caif/cfctrl.c
101467@@ -10,6 +10,7 @@
101468 #include <linux/spinlock.h>
101469 #include <linux/slab.h>
101470 #include <linux/pkt_sched.h>
101471+#include <linux/sched.h>
101472 #include <net/caif/caif_layer.h>
101473 #include <net/caif/cfpkt.h>
101474 #include <net/caif/cfctrl.h>
101475@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
101476 memset(&dev_info, 0, sizeof(dev_info));
101477 dev_info.id = 0xff;
101478 cfsrvl_init(&this->serv, 0, &dev_info, false);
101479- atomic_set(&this->req_seq_no, 1);
101480- atomic_set(&this->rsp_seq_no, 1);
101481+ atomic_set_unchecked(&this->req_seq_no, 1);
101482+ atomic_set_unchecked(&this->rsp_seq_no, 1);
101483 this->serv.layer.receive = cfctrl_recv;
101484 sprintf(this->serv.layer.name, "ctrl");
101485 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
101486@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
101487 struct cfctrl_request_info *req)
101488 {
101489 spin_lock_bh(&ctrl->info_list_lock);
101490- atomic_inc(&ctrl->req_seq_no);
101491- req->sequence_no = atomic_read(&ctrl->req_seq_no);
101492+ atomic_inc_unchecked(&ctrl->req_seq_no);
101493+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
101494 list_add_tail(&req->list, &ctrl->list);
101495 spin_unlock_bh(&ctrl->info_list_lock);
101496 }
101497@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
101498 if (p != first)
101499 pr_warn("Requests are not received in order\n");
101500
101501- atomic_set(&ctrl->rsp_seq_no,
101502+ atomic_set_unchecked(&ctrl->rsp_seq_no,
101503 p->sequence_no);
101504 list_del(&p->list);
101505 goto out;
101506diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
101507index 67a4a36..8d28068 100644
101508--- a/net/caif/chnl_net.c
101509+++ b/net/caif/chnl_net.c
101510@@ -515,7 +515,7 @@ static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
101511 };
101512
101513
101514-static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
101515+static struct rtnl_link_ops ipcaif_link_ops = {
101516 .kind = "caif",
101517 .priv_size = sizeof(struct chnl_net),
101518 .setup = ipcaif_net_setup,
101519diff --git a/net/can/af_can.c b/net/can/af_can.c
101520index 32d710e..93bcf05 100644
101521--- a/net/can/af_can.c
101522+++ b/net/can/af_can.c
101523@@ -884,7 +884,7 @@ static const struct net_proto_family can_family_ops = {
101524 };
101525
101526 /* notifier block for netdevice event */
101527-static struct notifier_block can_netdev_notifier __read_mostly = {
101528+static struct notifier_block can_netdev_notifier = {
101529 .notifier_call = can_notifier,
101530 };
101531
101532diff --git a/net/can/bcm.c b/net/can/bcm.c
101533index ee9ffd9..dfdf3d4 100644
101534--- a/net/can/bcm.c
101535+++ b/net/can/bcm.c
101536@@ -1619,7 +1619,7 @@ static int __init bcm_module_init(void)
101537 }
101538
101539 /* create /proc/net/can-bcm directory */
101540- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
101541+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
101542 return 0;
101543 }
101544
101545diff --git a/net/can/gw.c b/net/can/gw.c
101546index 295f62e..0c3b09e 100644
101547--- a/net/can/gw.c
101548+++ b/net/can/gw.c
101549@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
101550 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
101551
101552 static HLIST_HEAD(cgw_list);
101553-static struct notifier_block notifier;
101554
101555 static struct kmem_cache *cgw_cache __read_mostly;
101556
101557@@ -947,6 +946,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
101558 return err;
101559 }
101560
101561+static struct notifier_block notifier = {
101562+ .notifier_call = cgw_notifier
101563+};
101564+
101565 static __init int cgw_module_init(void)
101566 {
101567 /* sanitize given module parameter */
101568@@ -962,7 +965,6 @@ static __init int cgw_module_init(void)
101569 return -ENOMEM;
101570
101571 /* set notifier */
101572- notifier.notifier_call = cgw_notifier;
101573 register_netdevice_notifier(&notifier);
101574
101575 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
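/*
 * [annotation, not part of the patch] The can/gw change moves the notifier
 * from a writable static struct whose .notifier_call was assigned at init
 * time to a statically initialized one: with no runtime store needed, the
 * object can be placed in write-protected memory by the constification
 * machinery.  The preferred shape:
 */
static int demo_notifier_cb(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	return NOTIFY_DONE;
}

static struct notifier_block demo_notifier = {
	.notifier_call = demo_notifier_cb,	/* bound at compile time */
};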
101576diff --git a/net/can/proc.c b/net/can/proc.c
101577index 1a19b98..df2b4ec 100644
101578--- a/net/can/proc.c
101579+++ b/net/can/proc.c
101580@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
101581 void can_init_proc(void)
101582 {
101583 /* create /proc/net/can directory */
101584- can_dir = proc_mkdir("can", init_net.proc_net);
101585+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
101586
101587 if (!can_dir) {
101588 printk(KERN_INFO "can: failed to create /proc/net/can . "
101589diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
101590index 74d30ec..25df678 100644
101591--- a/net/ceph/messenger.c
101592+++ b/net/ceph/messenger.c
101593@@ -188,7 +188,7 @@ static void con_fault(struct ceph_connection *con);
101594 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
101595
101596 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
101597-static atomic_t addr_str_seq = ATOMIC_INIT(0);
101598+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
101599
101600 static struct page *zero_page; /* used in certain error cases */
101601
101602@@ -199,7 +199,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
101603 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
101604 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
101605
101606- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
101607+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
101608 s = addr_str[i];
101609
101610 switch (ss->ss_family) {
101611diff --git a/net/compat.c b/net/compat.c
101612index f7bd286..76ea56a 100644
101613--- a/net/compat.c
101614+++ b/net/compat.c
101615@@ -100,20 +100,20 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
101616
101617 #define CMSG_COMPAT_FIRSTHDR(msg) \
101618 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
101619- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
101620+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
101621 (struct compat_cmsghdr __user *)NULL)
101622
101623 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
101624 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
101625 (ucmlen) <= (unsigned long) \
101626 ((mhdr)->msg_controllen - \
101627- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
101628+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
101629
101630 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
101631 struct compat_cmsghdr __user *cmsg, int cmsg_len)
101632 {
101633 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
101634- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
101635+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
101636 msg->msg_controllen)
101637 return NULL;
101638 return (struct compat_cmsghdr __user *)ptr;
101639@@ -203,7 +203,7 @@ Efault:
101640
101641 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
101642 {
101643- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
101644+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
101645 struct compat_cmsghdr cmhdr;
101646 struct compat_timeval ctv;
101647 struct compat_timespec cts[3];
101648@@ -259,7 +259,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
101649
101650 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
101651 {
101652- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
101653+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
101654 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
101655 int fdnum = scm->fp->count;
101656 struct file **fp = scm->fp->fp;
101657@@ -347,7 +347,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
101658 return -EFAULT;
101659 old_fs = get_fs();
101660 set_fs(KERNEL_DS);
101661- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
101662+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
101663 set_fs(old_fs);
101664
101665 return err;
101666@@ -408,7 +408,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
101667 len = sizeof(ktime);
101668 old_fs = get_fs();
101669 set_fs(KERNEL_DS);
101670- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
101671+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
101672 set_fs(old_fs);
101673
101674 if (!err) {
101675@@ -551,7 +551,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101676 case MCAST_JOIN_GROUP:
101677 case MCAST_LEAVE_GROUP:
101678 {
101679- struct compat_group_req __user *gr32 = (void *)optval;
101680+ struct compat_group_req __user *gr32 = (void __user *)optval;
101681 struct group_req __user *kgr =
101682 compat_alloc_user_space(sizeof(struct group_req));
101683 u32 interface;
101684@@ -572,7 +572,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101685 case MCAST_BLOCK_SOURCE:
101686 case MCAST_UNBLOCK_SOURCE:
101687 {
101688- struct compat_group_source_req __user *gsr32 = (void *)optval;
101689+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
101690 struct group_source_req __user *kgsr = compat_alloc_user_space(
101691 sizeof(struct group_source_req));
101692 u32 interface;
101693@@ -593,7 +593,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101694 }
101695 case MCAST_MSFILTER:
101696 {
101697- struct compat_group_filter __user *gf32 = (void *)optval;
101698+ struct compat_group_filter __user *gf32 = (void __user *)optval;
101699 struct group_filter __user *kgf;
101700 u32 interface, fmode, numsrc;
101701
101702@@ -631,7 +631,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
101703 char __user *optval, int __user *optlen,
101704 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
101705 {
101706- struct compat_group_filter __user *gf32 = (void *)optval;
101707+ struct compat_group_filter __user *gf32 = (void __user *)optval;
101708 struct group_filter __user *kgf;
101709 int __user *koptlen;
101710 u32 interface, fmode, numsrc;
101711@@ -775,7 +775,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
101712
101713 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
101714 return -EINVAL;
101715- if (copy_from_user(a, args, nas[call]))
101716+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
101717 return -EFAULT;
101718 a0 = a[0];
101719 a1 = a[1];
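/*
 * [annotation, not part of the patch] compat socketcall copies nas[call]
 * bytes of argument words into a fixed on-stack array; the added
 * "nas[call] > sizeof a" test makes the lookup fail closed if the per-call
 * size could ever exceed the destination.  The shape of the guard, with a
 * hypothetical size table:
 */
static int copy_call_args_sketch(u32 __user *uargs, unsigned int call)
{
	static const unsigned char arg_bytes[] = { 12, 16, 24 }; /* demo sizes */
	u32 a[6];

	if (call >= ARRAY_SIZE(arg_bytes))
		return -EINVAL;
	if (arg_bytes[call] > sizeof(a) ||
	    copy_from_user(a, uargs, arg_bytes[call]))
		return -EFAULT;
	return 0;			/* a[0..] now safely populated */
}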
101720diff --git a/net/core/datagram.c b/net/core/datagram.c
101721index df493d6..1145766 100644
101722--- a/net/core/datagram.c
101723+++ b/net/core/datagram.c
101724@@ -302,7 +302,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
101725 }
101726
101727 kfree_skb(skb);
101728- atomic_inc(&sk->sk_drops);
101729+ atomic_inc_unchecked(&sk->sk_drops);
101730 sk_mem_reclaim_partial(sk);
101731
101732 return err;
101733diff --git a/net/core/dev.c b/net/core/dev.c
101734index 4ff46f8..e877e78 100644
101735--- a/net/core/dev.c
101736+++ b/net/core/dev.c
101737@@ -1680,14 +1680,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
101738 {
101739 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
101740 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
101741- atomic_long_inc(&dev->rx_dropped);
101742+ atomic_long_inc_unchecked(&dev->rx_dropped);
101743 kfree_skb(skb);
101744 return NET_RX_DROP;
101745 }
101746 }
101747
101748 if (unlikely(!is_skb_forwardable(dev, skb))) {
101749- atomic_long_inc(&dev->rx_dropped);
101750+ atomic_long_inc_unchecked(&dev->rx_dropped);
101751 kfree_skb(skb);
101752 return NET_RX_DROP;
101753 }
101754@@ -2958,7 +2958,7 @@ recursion_alert:
101755 drop:
101756 rcu_read_unlock_bh();
101757
101758- atomic_long_inc(&dev->tx_dropped);
101759+ atomic_long_inc_unchecked(&dev->tx_dropped);
101760 kfree_skb_list(skb);
101761 return rc;
101762 out:
101763@@ -3301,7 +3301,7 @@ enqueue:
101764
101765 local_irq_restore(flags);
101766
101767- atomic_long_inc(&skb->dev->rx_dropped);
101768+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
101769 kfree_skb(skb);
101770 return NET_RX_DROP;
101771 }
101772@@ -3378,7 +3378,7 @@ int netif_rx_ni(struct sk_buff *skb)
101773 }
101774 EXPORT_SYMBOL(netif_rx_ni);
101775
101776-static void net_tx_action(struct softirq_action *h)
101777+static __latent_entropy void net_tx_action(void)
101778 {
101779 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
101780
101781@@ -3711,7 +3711,7 @@ ncls:
101782 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
101783 } else {
101784 drop:
101785- atomic_long_inc(&skb->dev->rx_dropped);
101786+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
101787 kfree_skb(skb);
101788 /* Jamal, now you will not able to escape explaining
101789 * me how you were going to use this. :-)
101790@@ -4599,7 +4599,7 @@ out_unlock:
101791 return work;
101792 }
101793
101794-static void net_rx_action(struct softirq_action *h)
101795+static __latent_entropy void net_rx_action(void)
101796 {
101797 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
101798 unsigned long time_limit = jiffies + 2;
101799@@ -6610,8 +6610,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
101800 } else {
101801 netdev_stats_to_stats64(storage, &dev->stats);
101802 }
101803- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
101804- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
101805+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
101806+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
101807 return storage;
101808 }
101809 EXPORT_SYMBOL(dev_get_stats);
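/*
 * [annotation, not part of the patch] Beyond the usual *_unchecked counter
 * conversions, net_tx_action()/net_rx_action() lose their unused
 * softirq_action argument and gain __latent_entropy, the marker used by
 * PaX's latent-entropy gcc plugin to have frequently executed code mix a
 * little state into the entropy pool.  Only the annotation matters; the
 * function bodies are untouched:
 */
static __latent_entropy void demo_softirq_action(void)
{
	/* plugin-instrumented: control flow here perturbs the entropy pool */
}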
101810diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
101811index b94b1d2..da3ed7c 100644
101812--- a/net/core/dev_ioctl.c
101813+++ b/net/core/dev_ioctl.c
101814@@ -368,8 +368,13 @@ void dev_load(struct net *net, const char *name)
101815 no_module = !dev;
101816 if (no_module && capable(CAP_NET_ADMIN))
101817 no_module = request_module("netdev-%s", name);
101818- if (no_module && capable(CAP_SYS_MODULE))
101819+ if (no_module && capable(CAP_SYS_MODULE)) {
101820+#ifdef CONFIG_GRKERNSEC_MODHARDEN
101821+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
101822+#else
101823 request_module("%s", name);
101824+#endif
101825+ }
101826 }
101827 EXPORT_SYMBOL(dev_load);
101828
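/*
 * [annotation, not part of the patch] Under CONFIG_GRKERNSEC_MODHARDEN,
 * module auto-loading triggered by a user-supplied interface name is routed
 * through ___request_module() with the "grsec_modharden_netdev" tag instead
 * of plain request_module(), so the patch's module-loading policy can
 * restrict loads that unprivileged socket activity can provoke.  Condensed
 * from the hunk above (the surrounding capability checks are elided):
 */
static void demo_dev_load(const char *name)
{
	if (capable(CAP_SYS_MODULE)) {
#ifdef CONFIG_GRKERNSEC_MODHARDEN
		___request_module(true, "grsec_modharden_netdev", "%s", name);
#else
		request_module("%s", name);
#endif
	}
}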
101829diff --git a/net/core/filter.c b/net/core/filter.c
101830index ec9baea..dd6195d 100644
101831--- a/net/core/filter.c
101832+++ b/net/core/filter.c
101833@@ -533,7 +533,11 @@ do_pass:
101834
101835 /* Unkown instruction. */
101836 default:
101837- goto err;
101838+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u tf:%u k:%u\n",
101839+ fp->code, fp->jt, fp->jf, fp->k);
101840+ kfree(addrs);
101841+ BUG();
101842+ return -EINVAL;
101843 }
101844
101845 insn++;
101846@@ -577,7 +581,7 @@ static int check_load_and_stores(const struct sock_filter *filter, int flen)
101847 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
101848 int pc, ret = 0;
101849
101850- BUILD_BUG_ON(BPF_MEMWORDS > 16);
101851+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
101852
101853 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
101854 if (!masks)
101855@@ -992,7 +996,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
101856 if (!fp)
101857 return -ENOMEM;
101858
101859- memcpy(fp->insns, fprog->filter, fsize);
101860+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
101861
101862 fp->len = fprog->len;
101863 /* Since unattached filters are not copied back to user
101864diff --git a/net/core/flow.c b/net/core/flow.c
101865index 1033725..340f65d 100644
101866--- a/net/core/flow.c
101867+++ b/net/core/flow.c
101868@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
101869 static int flow_entry_valid(struct flow_cache_entry *fle,
101870 struct netns_xfrm *xfrm)
101871 {
101872- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
101873+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
101874 return 0;
101875 if (fle->object && !fle->object->ops->check(fle->object))
101876 return 0;
101877@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
101878 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
101879 fcp->hash_count++;
101880 }
101881- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
101882+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
101883 flo = fle->object;
101884 if (!flo)
101885 goto ret_object;
101886@@ -263,7 +263,7 @@ nocache:
101887 }
101888 flo = resolver(net, key, family, dir, flo, ctx);
101889 if (fle) {
101890- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
101891+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
101892 if (!IS_ERR(flo))
101893 fle->object = flo;
101894 else
101895diff --git a/net/core/neighbour.c b/net/core/neighbour.c
101896index 8d614c9..55752ea 100644
101897--- a/net/core/neighbour.c
101898+++ b/net/core/neighbour.c
101899@@ -2802,7 +2802,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
101900 void __user *buffer, size_t *lenp, loff_t *ppos)
101901 {
101902 int size, ret;
101903- struct ctl_table tmp = *ctl;
101904+ ctl_table_no_const tmp = *ctl;
101905
101906 tmp.extra1 = &zero;
101907 tmp.extra2 = &unres_qlen_max;
101908@@ -2864,7 +2864,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
101909 void __user *buffer,
101910 size_t *lenp, loff_t *ppos)
101911 {
101912- struct ctl_table tmp = *ctl;
101913+ ctl_table_no_const tmp = *ctl;
101914 int ret;
101915
101916 tmp.extra1 = &zero;
101917diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
101918index 2bf8329..2eb1423 100644
101919--- a/net/core/net-procfs.c
101920+++ b/net/core/net-procfs.c
101921@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
101922 struct rtnl_link_stats64 temp;
101923 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
101924
101925- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101926+ if (gr_proc_is_restricted())
101927+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101928+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
101929+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
101930+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
101931+ else
101932+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101933 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
101934 dev->name, stats->rx_bytes, stats->rx_packets,
101935 stats->rx_errors,
101936@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
101937 return 0;
101938 }
101939
101940-static const struct seq_operations dev_seq_ops = {
101941+const struct seq_operations dev_seq_ops = {
101942 .start = dev_seq_start,
101943 .next = dev_seq_next,
101944 .stop = dev_seq_stop,
101945@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
101946
101947 static int softnet_seq_open(struct inode *inode, struct file *file)
101948 {
101949- return seq_open(file, &softnet_seq_ops);
101950+ return seq_open_restrict(file, &softnet_seq_ops);
101951 }
101952
101953 static const struct file_operations softnet_seq_fops = {
101954@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
101955 else
101956 seq_printf(seq, "%04x", ntohs(pt->type));
101957
101958+#ifdef CONFIG_GRKERNSEC_HIDESYM
101959+ seq_printf(seq, " %-8s %pf\n",
101960+ pt->dev ? pt->dev->name : "", NULL);
101961+#else
101962 seq_printf(seq, " %-8s %pf\n",
101963 pt->dev ? pt->dev->name : "", pt->func);
101964+#endif
101965 }
101966
101967 return 0;
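/*
 * [annotation, not part of the patch] Two /proc information-leak idioms in
 * net-procfs.c: per-device statistics are printed as all zeros when the
 * patch-provided gr_proc_is_restricted() says the reader is unprivileged,
 * and under CONFIG_GRKERNSEC_HIDESYM the packet-type dump feeds NULL to the
 * %pf format so kernel text addresses never reach a world-readable file.
 * Shape of the latter:
 */
static void demo_show_handler(struct seq_file *seq, void *func)
{
#ifdef CONFIG_GRKERNSEC_HIDESYM
	seq_printf(seq, " %pf\n", NULL);	/* hide kernel pointers */
#else
	seq_printf(seq, " %pf\n", func);
#endif
}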
101968diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
101969index 9993412..2a4672b 100644
101970--- a/net/core/net-sysfs.c
101971+++ b/net/core/net-sysfs.c
101972@@ -279,7 +279,7 @@ static ssize_t carrier_changes_show(struct device *dev,
101973 {
101974 struct net_device *netdev = to_net_dev(dev);
101975 return sprintf(buf, fmt_dec,
101976- atomic_read(&netdev->carrier_changes));
101977+ atomic_read_unchecked(&netdev->carrier_changes));
101978 }
101979 static DEVICE_ATTR_RO(carrier_changes);
101980
101981diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
101982index ce780c7..6d296b3 100644
101983--- a/net/core/net_namespace.c
101984+++ b/net/core/net_namespace.c
101985@@ -448,7 +448,7 @@ static int __register_pernet_operations(struct list_head *list,
101986 int error;
101987 LIST_HEAD(net_exit_list);
101988
101989- list_add_tail(&ops->list, list);
101990+ pax_list_add_tail((struct list_head *)&ops->list, list);
101991 if (ops->init || (ops->id && ops->size)) {
101992 for_each_net(net) {
101993 error = ops_init(ops, net);
101994@@ -461,7 +461,7 @@ static int __register_pernet_operations(struct list_head *list,
101995
101996 out_undo:
101997 /* If I have an error cleanup all namespaces I initialized */
101998- list_del(&ops->list);
101999+ pax_list_del((struct list_head *)&ops->list);
102000 ops_exit_list(ops, &net_exit_list);
102001 ops_free_list(ops, &net_exit_list);
102002 return error;
102003@@ -472,7 +472,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
102004 struct net *net;
102005 LIST_HEAD(net_exit_list);
102006
102007- list_del(&ops->list);
102008+ pax_list_del((struct list_head *)&ops->list);
102009 for_each_net(net)
102010 list_add_tail(&net->exit_list, &net_exit_list);
102011 ops_exit_list(ops, &net_exit_list);
102012@@ -606,7 +606,7 @@ int register_pernet_device(struct pernet_operations *ops)
102013 mutex_lock(&net_mutex);
102014 error = register_pernet_operations(&pernet_list, ops);
102015 if (!error && (first_device == &pernet_list))
102016- first_device = &ops->list;
102017+ first_device = (struct list_head *)&ops->list;
102018 mutex_unlock(&net_mutex);
102019 return error;
102020 }
102021diff --git a/net/core/netpoll.c b/net/core/netpoll.c
102022index e0ad5d1..04fa7f7 100644
102023--- a/net/core/netpoll.c
102024+++ b/net/core/netpoll.c
102025@@ -377,7 +377,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
102026 struct udphdr *udph;
102027 struct iphdr *iph;
102028 struct ethhdr *eth;
102029- static atomic_t ip_ident;
102030+ static atomic_unchecked_t ip_ident;
102031 struct ipv6hdr *ip6h;
102032
102033 udp_len = len + sizeof(*udph);
102034@@ -448,7 +448,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
102035 put_unaligned(0x45, (unsigned char *)iph);
102036 iph->tos = 0;
102037 put_unaligned(htons(ip_len), &(iph->tot_len));
102038- iph->id = htons(atomic_inc_return(&ip_ident));
102039+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
102040 iph->frag_off = 0;
102041 iph->ttl = 64;
102042 iph->protocol = IPPROTO_UDP;
102043diff --git a/net/core/pktgen.c b/net/core/pktgen.c
102044index 352d183..1bddfaf 100644
102045--- a/net/core/pktgen.c
102046+++ b/net/core/pktgen.c
102047@@ -3755,7 +3755,7 @@ static int __net_init pg_net_init(struct net *net)
102048 pn->net = net;
102049 INIT_LIST_HEAD(&pn->pktgen_threads);
102050 pn->pktgen_exiting = false;
102051- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
102052+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
102053 if (!pn->proc_dir) {
102054 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
102055 return -ENODEV;
102056diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
102057index 76ec6c5..9cfb81c 100644
102058--- a/net/core/rtnetlink.c
102059+++ b/net/core/rtnetlink.c
102060@@ -60,7 +60,7 @@ struct rtnl_link {
102061 rtnl_doit_func doit;
102062 rtnl_dumpit_func dumpit;
102063 rtnl_calcit_func calcit;
102064-};
102065+} __no_const;
102066
102067 static DEFINE_MUTEX(rtnl_mutex);
102068
102069@@ -306,10 +306,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
102070 * to use the ops for creating device. So do not
102071 * fill up dellink as well. That disables rtnl_dellink.
102072 */
102073- if (ops->setup && !ops->dellink)
102074- ops->dellink = unregister_netdevice_queue;
102075+ if (ops->setup && !ops->dellink) {
102076+ pax_open_kernel();
102077+ *(void **)&ops->dellink = unregister_netdevice_queue;
102078+ pax_close_kernel();
102079+ }
102080
102081- list_add_tail(&ops->list, &link_ops);
102082+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
102083 return 0;
102084 }
102085 EXPORT_SYMBOL_GPL(__rtnl_link_register);
102086@@ -356,7 +359,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
102087 for_each_net(net) {
102088 __rtnl_kill_links(net, ops);
102089 }
102090- list_del(&ops->list);
102091+ pax_list_del((struct list_head *)&ops->list);
102092 }
102093 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
102094
102095@@ -1035,7 +1038,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
102096 (dev->ifalias &&
102097 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
102098 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
102099- atomic_read(&dev->carrier_changes)))
102100+ atomic_read_unchecked(&dev->carrier_changes)))
102101 goto nla_put_failure;
102102
102103 if (1) {
102104@@ -2094,6 +2097,10 @@ replay:
102105 if (IS_ERR(dest_net))
102106 return PTR_ERR(dest_net);
102107
102108+ err = -EPERM;
102109+ if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
102110+ goto out;
102111+
102112 dev = rtnl_create_link(dest_net, ifname, name_assign_type, ops, tb);
102113 if (IS_ERR(dev)) {
102114 err = PTR_ERR(dev);
102115diff --git a/net/core/scm.c b/net/core/scm.c
102116index 3b6899b..cf36238 100644
102117--- a/net/core/scm.c
102118+++ b/net/core/scm.c
102119@@ -209,7 +209,7 @@ EXPORT_SYMBOL(__scm_send);
102120 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
102121 {
102122 struct cmsghdr __user *cm
102123- = (__force struct cmsghdr __user *)msg->msg_control;
102124+ = (struct cmsghdr __force_user *)msg->msg_control;
102125 struct cmsghdr cmhdr;
102126 int cmlen = CMSG_LEN(len);
102127 int err;
102128@@ -232,7 +232,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
102129 err = -EFAULT;
102130 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
102131 goto out;
102132- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
102133+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
102134 goto out;
102135 cmlen = CMSG_SPACE(len);
102136 if (msg->msg_controllen < cmlen)
102137@@ -248,7 +248,7 @@ EXPORT_SYMBOL(put_cmsg);
102138 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
102139 {
102140 struct cmsghdr __user *cm
102141- = (__force struct cmsghdr __user*)msg->msg_control;
102142+ = (struct cmsghdr __force_user *)msg->msg_control;
102143
102144 int fdmax = 0;
102145 int fdnum = scm->fp->count;
102146@@ -268,7 +268,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
102147 if (fdnum < fdmax)
102148 fdmax = fdnum;
102149
102150- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
102151+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
102152 i++, cmfptr++)
102153 {
102154 struct socket *sock;
102155diff --git a/net/core/skbuff.c b/net/core/skbuff.c
102156index 62c67be..361c354 100644
102157--- a/net/core/skbuff.c
102158+++ b/net/core/skbuff.c
102159@@ -2123,7 +2123,7 @@ EXPORT_SYMBOL(__skb_checksum);
102160 __wsum skb_checksum(const struct sk_buff *skb, int offset,
102161 int len, __wsum csum)
102162 {
102163- const struct skb_checksum_ops ops = {
102164+ static const struct skb_checksum_ops ops = {
102165 .update = csum_partial_ext,
102166 .combine = csum_block_add_ext,
102167 };
102168@@ -3363,12 +3363,14 @@ void __init skb_init(void)
102169 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
102170 sizeof(struct sk_buff),
102171 0,
102172- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
102173+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
102174+ SLAB_NO_SANITIZE,
102175 NULL);
102176 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
102177 sizeof(struct sk_buff_fclones),
102178 0,
102179- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
102180+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
102181+ SLAB_NO_SANITIZE,
102182 NULL);
102183 }
102184
102185@@ -4141,18 +4143,20 @@ EXPORT_SYMBOL(skb_try_coalesce);
102186 */
102187 void skb_scrub_packet(struct sk_buff *skb, bool xnet)
102188 {
102189- if (xnet)
102190- skb_orphan(skb);
102191 skb->tstamp.tv64 = 0;
102192 skb->pkt_type = PACKET_HOST;
102193 skb->skb_iif = 0;
102194 skb->ignore_df = 0;
102195 skb_dst_drop(skb);
102196- skb->mark = 0;
102197- skb_init_secmark(skb);
102198 secpath_reset(skb);
102199 nf_reset(skb);
102200 nf_reset_trace(skb);
102201+
102202+ if (!xnet)
102203+ return;
102204+
102205+ skb_orphan(skb);
102206+ skb->mark = 0;
102207 }
102208 EXPORT_SYMBOL_GPL(skb_scrub_packet);
102209
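/*
 * [annotation, not part of the patch] Two separate changes in skbuff.c: the
 * skb head caches are created with SLAB_NO_SANITIZE, the patch-provided
 * flag exempting these extremely hot caches from PaX's sanitize-on-free,
 * and skb_scrub_packet() is restructured so the actions that only apply
 * when a packet crosses a network-namespace boundary sit together behind
 * one early return:
 */
static void demo_scrub_sketch(struct sk_buff *skb, bool xnet)
{
	/* ... unconditional scrubbing (tstamp, dst, conntrack state) ... */

	if (!xnet)
		return;

	skb_orphan(skb);		/* cross-netns only */
	skb->mark = 0;
}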
102210diff --git a/net/core/sock.c b/net/core/sock.c
102211index 1c7a33d..a3817e2 100644
102212--- a/net/core/sock.c
102213+++ b/net/core/sock.c
102214@@ -441,7 +441,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
102215 struct sk_buff_head *list = &sk->sk_receive_queue;
102216
102217 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
102218- atomic_inc(&sk->sk_drops);
102219+ atomic_inc_unchecked(&sk->sk_drops);
102220 trace_sock_rcvqueue_full(sk, skb);
102221 return -ENOMEM;
102222 }
102223@@ -451,7 +451,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
102224 return err;
102225
102226 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
102227- atomic_inc(&sk->sk_drops);
102228+ atomic_inc_unchecked(&sk->sk_drops);
102229 return -ENOBUFS;
102230 }
102231
102232@@ -464,7 +464,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
102233 skb_dst_force(skb);
102234
102235 spin_lock_irqsave(&list->lock, flags);
102236- skb->dropcount = atomic_read(&sk->sk_drops);
102237+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
102238 __skb_queue_tail(list, skb);
102239 spin_unlock_irqrestore(&list->lock, flags);
102240
102241@@ -484,7 +484,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
102242 skb->dev = NULL;
102243
102244 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
102245- atomic_inc(&sk->sk_drops);
102246+ atomic_inc_unchecked(&sk->sk_drops);
102247 goto discard_and_relse;
102248 }
102249 if (nested)
102250@@ -502,7 +502,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
102251 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
102252 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
102253 bh_unlock_sock(sk);
102254- atomic_inc(&sk->sk_drops);
102255+ atomic_inc_unchecked(&sk->sk_drops);
102256 goto discard_and_relse;
102257 }
102258
102259@@ -888,6 +888,7 @@ set_rcvbuf:
102260 }
102261 break;
102262
102263+#ifndef GRKERNSEC_BPF_HARDEN
102264 case SO_ATTACH_BPF:
102265 ret = -EINVAL;
102266 if (optlen == sizeof(u32)) {
102267@@ -900,7 +901,7 @@ set_rcvbuf:
102268 ret = sk_attach_bpf(ufd, sk);
102269 }
102270 break;
102271-
102272+#endif
102273 case SO_DETACH_FILTER:
102274 ret = sk_detach_filter(sk);
102275 break;
102276@@ -1004,12 +1005,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102277 struct timeval tm;
102278 } v;
102279
102280- int lv = sizeof(int);
102281- int len;
102282+ unsigned int lv = sizeof(int);
102283+ unsigned int len;
102284
102285 if (get_user(len, optlen))
102286 return -EFAULT;
102287- if (len < 0)
102288+ if (len > INT_MAX)
102289 return -EINVAL;
102290
102291 memset(&v, 0, sizeof(v));
102292@@ -1147,11 +1148,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102293
102294 case SO_PEERNAME:
102295 {
102296- char address[128];
102297+ char address[_K_SS_MAXSIZE];
102298
102299 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
102300 return -ENOTCONN;
102301- if (lv < len)
102302+ if (lv < len || sizeof address < len)
102303 return -EINVAL;
102304 if (copy_to_user(optval, address, len))
102305 return -EFAULT;
102306@@ -1236,7 +1237,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102307
102308 if (len > lv)
102309 len = lv;
102310- if (copy_to_user(optval, &v, len))
102311+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
102312 return -EFAULT;
102313 lenout:
102314 if (put_user(len, optlen))
102315@@ -2349,7 +2350,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
102316 */
102317 smp_wmb();
102318 atomic_set(&sk->sk_refcnt, 1);
102319- atomic_set(&sk->sk_drops, 0);
102320+ atomic_set_unchecked(&sk->sk_drops, 0);
102321 }
102322 EXPORT_SYMBOL(sock_init_data);
102323
102324@@ -2477,6 +2478,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
102325 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
102326 int level, int type)
102327 {
102328+ struct sock_extended_err ee;
102329 struct sock_exterr_skb *serr;
102330 struct sk_buff *skb;
102331 int copied, err;
102332@@ -2498,7 +2500,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
102333 sock_recv_timestamp(msg, sk, skb);
102334
102335 serr = SKB_EXT_ERR(skb);
102336- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
102337+ ee = serr->ee;
102338+ put_cmsg(msg, level, type, sizeof ee, &ee);
102339
102340 msg->msg_flags |= MSG_ERRQUEUE;
102341 err = copied;
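
The net/core/sock.c hunks above switch the sk_drops statistics counter to the PaX "unchecked" atomics: under REFCOUNT hardening, plain atomic_t operations trap on overflow, so counters that may legitimately wrap have to be opted out explicitly. A minimal stand-alone model of that split, using GCC atomic builtins rather than the kernel's real primitives (illustrative only):

/* Illustrative model -- not the kernel implementation. Checked atomics
 * guard reference counts; unchecked ones serve statistics like
 * sk_drops, where wrapping is harmless and must not trap. */
typedef struct { int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        /* no overflow trap: wrap-around is acceptable here */
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
        return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

The same file also hardens sock_getsockopt(): the signed len/lv pair becomes unsigned with a len > INT_MAX rejection replacing the len < 0 test, every copy_to_user() is bounded by the size of its source object (len > sizeof(v), and _K_SS_MAXSIZE for SO_PEERNAME), and sock_recv_errqueue() stages serr->ee in a stack copy so the put_cmsg() source is a plain stack object rather than skb control-buffer memory.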
102342diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
102343index ad704c7..ca48aff 100644
102344--- a/net/core/sock_diag.c
102345+++ b/net/core/sock_diag.c
102346@@ -9,26 +9,33 @@
102347 #include <linux/inet_diag.h>
102348 #include <linux/sock_diag.h>
102349
102350-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
102351+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
102352 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
102353 static DEFINE_MUTEX(sock_diag_table_mutex);
102354
102355 int sock_diag_check_cookie(void *sk, __u32 *cookie)
102356 {
102357+#ifndef CONFIG_GRKERNSEC_HIDESYM
102358 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
102359 cookie[1] != INET_DIAG_NOCOOKIE) &&
102360 ((u32)(unsigned long)sk != cookie[0] ||
102361 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
102362 return -ESTALE;
102363 else
102364+#endif
102365 return 0;
102366 }
102367 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
102368
102369 void sock_diag_save_cookie(void *sk, __u32 *cookie)
102370 {
102371+#ifdef CONFIG_GRKERNSEC_HIDESYM
102372+ cookie[0] = 0;
102373+ cookie[1] = 0;
102374+#else
102375 cookie[0] = (u32)(unsigned long)sk;
102376 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
102377+#endif
102378 }
102379 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
102380
102381@@ -110,8 +117,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
102382 mutex_lock(&sock_diag_table_mutex);
102383 if (sock_diag_handlers[hndl->family])
102384 err = -EBUSY;
102385- else
102386+ else {
102387+ pax_open_kernel();
102388 sock_diag_handlers[hndl->family] = hndl;
102389+ pax_close_kernel();
102390+ }
102391 mutex_unlock(&sock_diag_table_mutex);
102392
102393 return err;
102394@@ -127,7 +137,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
102395
102396 mutex_lock(&sock_diag_table_mutex);
102397 BUG_ON(sock_diag_handlers[family] != hnld);
102398+ pax_open_kernel();
102399 sock_diag_handlers[family] = NULL;
102400+ pax_close_kernel();
102401 mutex_unlock(&sock_diag_table_mutex);
102402 }
102403 EXPORT_SYMBOL_GPL(sock_diag_unregister);
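
The sock_diag.c changes mark the handler table __read_only and route its two legitimate writes through pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection around the assignment. The cookie hunks serve GRKERNSEC_HIDESYM: the cookie encodes the socket's kernel address, so with symbol hiding enabled the kernel hands out zeros and skips the staleness check. A rough user-space analogue of the write-protection discipline, using mprotect() (illustrative only; assumes a 4 KiB page, no error handling, and is not the kernel mechanism):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* One page of "registered handlers", read-only except while a
 * registration is in flight. */
static void **table;

static void table_open(void)  { mprotect(table, 4096, PROT_READ | PROT_WRITE); }
static void table_close(void) { mprotect(table, 4096, PROT_READ); }

int main(void)
{
        table = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        memset(table, 0, 4096);
        table_close();                  /* steady state: read-only */

        table_open();                   /* pax_open_kernel() analogue */
        table[2] = "sock_diag handler";
        table_close();                  /* pax_close_kernel() analogue */

        printf("%s\n", (char *)table[2]);
        return 0;
}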
102404diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
102405index bbb1d5a..754e2e5 100644
102406--- a/net/core/sysctl_net_core.c
102407+++ b/net/core/sysctl_net_core.c
102408@@ -36,7 +36,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
102409 {
102410 unsigned int orig_size, size;
102411 int ret, i;
102412- struct ctl_table tmp = {
102413+ ctl_table_no_const tmp = {
102414 .data = &size,
102415 .maxlen = sizeof(size),
102416 .mode = table->mode
102417@@ -204,7 +204,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
102418 void __user *buffer, size_t *lenp, loff_t *ppos)
102419 {
102420 char id[IFNAMSIZ];
102421- struct ctl_table tbl = {
102422+ ctl_table_no_const tbl = {
102423 .data = id,
102424 .maxlen = IFNAMSIZ,
102425 };
102426@@ -222,7 +222,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
102427 static int proc_do_rss_key(struct ctl_table *table, int write,
102428 void __user *buffer, size_t *lenp, loff_t *ppos)
102429 {
102430- struct ctl_table fake_table;
102431+ ctl_table_no_const fake_table;
102432 char buf[NETDEV_RSS_KEY_LEN * 3];
102433
102434 snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
102435@@ -286,7 +286,7 @@ static struct ctl_table net_core_table[] = {
102436 .mode = 0444,
102437 .proc_handler = proc_do_rss_key,
102438 },
102439-#ifdef CONFIG_BPF_JIT
102440+#if defined(CONFIG_BPF_JIT) && !defined(CONFIG_GRKERNSEC_BPF_HARDEN)
102441 {
102442 .procname = "bpf_jit_enable",
102443 .data = &bpf_jit_enable,
102444@@ -402,13 +402,12 @@ static struct ctl_table netns_core_table[] = {
102445
102446 static __net_init int sysctl_core_net_init(struct net *net)
102447 {
102448- struct ctl_table *tbl;
102449+ ctl_table_no_const *tbl = NULL;
102450
102451 net->core.sysctl_somaxconn = SOMAXCONN;
102452
102453- tbl = netns_core_table;
102454 if (!net_eq(net, &init_net)) {
102455- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
102456+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
102457 if (tbl == NULL)
102458 goto err_dup;
102459
102460@@ -418,17 +417,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
102461 if (net->user_ns != &init_user_ns) {
102462 tbl[0].procname = NULL;
102463 }
102464- }
102465-
102466- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102467+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102468+ } else
102469+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
102470 if (net->core.sysctl_hdr == NULL)
102471 goto err_reg;
102472
102473 return 0;
102474
102475 err_reg:
102476- if (tbl != netns_core_table)
102477- kfree(tbl);
102478+ kfree(tbl);
102479 err_dup:
102480 return -ENOMEM;
102481 }
102482@@ -443,7 +441,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
102483 kfree(tbl);
102484 }
102485
102486-static __net_initdata struct pernet_operations sysctl_core_ops = {
102487+static __net_initconst struct pernet_operations sysctl_core_ops = {
102488 .init = sysctl_core_net_init,
102489 .exit = sysctl_core_net_exit,
102490 };
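
The sysctl_net_core.c rewrite is the template for a pattern this patch repeats in many files below: the static table stays const (ctl_table_no_const exists only for the writable per-namespace duplicate), init_net registers the const table directly, and every other namespace registers a kmemdup() copy. Because tbl now starts as NULL and is assigned only on the duplicate path, the shared error path may call kfree(tbl) unconditionally. A generic sketch of the shape (names invented for illustration):

static struct ctl_table example_table[] = { /* ... */ { } };

static __net_init int example_net_init(struct net *net)
{
        ctl_table_no_const *tbl = NULL;   /* stays NULL for init_net */
        struct ctl_table_header *hdr;

        if (!net_eq(net, &init_net)) {
                tbl = kmemdup(example_table, sizeof(example_table),
                              GFP_KERNEL);
                if (tbl == NULL)
                        goto err_dup;
                /* per-namespace tweaks to tbl[] go here */
                hdr = register_net_sysctl(net, "net/example", tbl);
        } else
                hdr = register_net_sysctl(net, "net/example", example_table);
        if (hdr == NULL)
                goto err_reg;
        /* store hdr for later unregistration */
        return 0;

err_reg:
        kfree(tbl);                       /* kfree(NULL) is a no-op */
err_dup:
        return -ENOMEM;
}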
102491diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
102492index 8102286..a0c2755 100644
102493--- a/net/decnet/af_decnet.c
102494+++ b/net/decnet/af_decnet.c
102495@@ -466,6 +466,7 @@ static struct proto dn_proto = {
102496 .sysctl_rmem = sysctl_decnet_rmem,
102497 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
102498 .obj_size = sizeof(struct dn_sock),
102499+ .slab_flags = SLAB_USERCOPY,
102500 };
102501
102502 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
102503diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
102504index 4400da7..3429972 100644
102505--- a/net/decnet/dn_dev.c
102506+++ b/net/decnet/dn_dev.c
102507@@ -201,7 +201,7 @@ static struct dn_dev_sysctl_table {
102508 .extra1 = &min_t3,
102509 .extra2 = &max_t3
102510 },
102511- {0}
102512+ { }
102513 },
102514 };
102515
102516diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
102517index 5325b54..a0d4d69 100644
102518--- a/net/decnet/sysctl_net_decnet.c
102519+++ b/net/decnet/sysctl_net_decnet.c
102520@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
102521
102522 if (len > *lenp) len = *lenp;
102523
102524- if (copy_to_user(buffer, addr, len))
102525+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
102526 return -EFAULT;
102527
102528 *lenp = len;
102529@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
102530
102531 if (len > *lenp) len = *lenp;
102532
102533- if (copy_to_user(buffer, devname, len))
102534+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
102535 return -EFAULT;
102536
102537 *lenp = len;
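
The decnet sysctl handlers gain the same defensive shape used in sock_getsockopt() above: clamp the requested length, then refuse outright if it could still exceed the source buffer before copy_to_user() runs. Reduced to its essentials (variable names as in the hunk above):

/* Defensive bound in front of each copy_to_user(): even after
 * clamping to *lenp, never copy more than the source object. */
if (len > *lenp)
        len = *lenp;
if (len > sizeof(addr) || copy_to_user(buffer, addr, len))
        return -EFAULT;
*lenp = len;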
102538diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
102539index a2c7e4c..3dc9f67 100644
102540--- a/net/hsr/hsr_netlink.c
102541+++ b/net/hsr/hsr_netlink.c
102542@@ -102,7 +102,7 @@ nla_put_failure:
102543 return -EMSGSIZE;
102544 }
102545
102546-static struct rtnl_link_ops hsr_link_ops __read_mostly = {
102547+static struct rtnl_link_ops hsr_link_ops = {
102548 .kind = "hsr",
102549 .maxtype = IFLA_HSR_MAX,
102550 .policy = hsr_policy,
102551diff --git a/net/ieee802154/6lowpan_rtnl.c b/net/ieee802154/6lowpan_rtnl.c
102552index 27eaa65..7083217 100644
102553--- a/net/ieee802154/6lowpan_rtnl.c
102554+++ b/net/ieee802154/6lowpan_rtnl.c
102555@@ -642,7 +642,7 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
102556 dev_put(real_dev);
102557 }
102558
102559-static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
102560+static struct rtnl_link_ops lowpan_link_ops = {
102561 .kind = "lowpan",
102562 .priv_size = sizeof(struct lowpan_dev_info),
102563 .setup = lowpan_setup,
102564diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
102565index 9d980ed..7d01e12 100644
102566--- a/net/ieee802154/reassembly.c
102567+++ b/net/ieee802154/reassembly.c
102568@@ -435,14 +435,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
102569
102570 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102571 {
102572- struct ctl_table *table;
102573+ ctl_table_no_const *table = NULL;
102574 struct ctl_table_header *hdr;
102575 struct netns_ieee802154_lowpan *ieee802154_lowpan =
102576 net_ieee802154_lowpan(net);
102577
102578- table = lowpan_frags_ns_ctl_table;
102579 if (!net_eq(net, &init_net)) {
102580- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
102581+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
102582 GFP_KERNEL);
102583 if (table == NULL)
102584 goto err_alloc;
102585@@ -457,9 +456,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102586 /* Don't export sysctls to unprivileged users */
102587 if (net->user_ns != &init_user_ns)
102588 table[0].procname = NULL;
102589- }
102590-
102591- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102592+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102593+ } else
102594+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
102595 if (hdr == NULL)
102596 goto err_reg;
102597
102598@@ -467,8 +466,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102599 return 0;
102600
102601 err_reg:
102602- if (!net_eq(net, &init_net))
102603- kfree(table);
102604+ kfree(table);
102605 err_alloc:
102606 return -ENOMEM;
102607 }
102608diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
102609index a44773c..a6ae415 100644
102610--- a/net/ipv4/af_inet.c
102611+++ b/net/ipv4/af_inet.c
102612@@ -1392,7 +1392,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
102613 return ip_recv_error(sk, msg, len, addr_len);
102614 #if IS_ENABLED(CONFIG_IPV6)
102615 if (sk->sk_family == AF_INET6)
102616- return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
102617+ return pingv6_ops->ipv6_recv_error(sk, msg, len, addr_len);
102618 #endif
102619 return -EINVAL;
102620 }
102621diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
102622index 214882e..ec032f6 100644
102623--- a/net/ipv4/devinet.c
102624+++ b/net/ipv4/devinet.c
102625@@ -69,7 +69,8 @@
102626
102627 static struct ipv4_devconf ipv4_devconf = {
102628 .data = {
102629- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
102630+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
102631+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
102632 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
102633 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
102634 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
102635@@ -80,7 +81,8 @@ static struct ipv4_devconf ipv4_devconf = {
102636
102637 static struct ipv4_devconf ipv4_devconf_dflt = {
102638 .data = {
102639- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
102640+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
102641+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
102642 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
102643 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
102644 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
102645@@ -1548,7 +1550,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
102646 idx = 0;
102647 head = &net->dev_index_head[h];
102648 rcu_read_lock();
102649- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
102650+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
102651 net->dev_base_seq;
102652 hlist_for_each_entry_rcu(dev, head, index_hlist) {
102653 if (idx < s_idx)
102654@@ -1866,7 +1868,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
102655 idx = 0;
102656 head = &net->dev_index_head[h];
102657 rcu_read_lock();
102658- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
102659+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
102660 net->dev_base_seq;
102661 hlist_for_each_entry_rcu(dev, head, index_hlist) {
102662 if (idx < s_idx)
102663@@ -2101,7 +2103,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
102664 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
102665 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
102666
102667-static struct devinet_sysctl_table {
102668+static const struct devinet_sysctl_table {
102669 struct ctl_table_header *sysctl_header;
102670 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
102671 } devinet_sysctl = {
102672@@ -2233,7 +2235,7 @@ static __net_init int devinet_init_net(struct net *net)
102673 int err;
102674 struct ipv4_devconf *all, *dflt;
102675 #ifdef CONFIG_SYSCTL
102676- struct ctl_table *tbl = ctl_forward_entry;
102677+ ctl_table_no_const *tbl = NULL;
102678 struct ctl_table_header *forw_hdr;
102679 #endif
102680
102681@@ -2251,7 +2253,7 @@ static __net_init int devinet_init_net(struct net *net)
102682 goto err_alloc_dflt;
102683
102684 #ifdef CONFIG_SYSCTL
102685- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
102686+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
102687 if (tbl == NULL)
102688 goto err_alloc_ctl;
102689
102690@@ -2271,7 +2273,10 @@ static __net_init int devinet_init_net(struct net *net)
102691 goto err_reg_dflt;
102692
102693 err = -ENOMEM;
102694- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
102695+ if (!net_eq(net, &init_net))
102696+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
102697+ else
102698+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
102699 if (forw_hdr == NULL)
102700 goto err_reg_ctl;
102701 net->ipv4.forw_hdr = forw_hdr;
102702@@ -2287,8 +2292,7 @@ err_reg_ctl:
102703 err_reg_dflt:
102704 __devinet_sysctl_unregister(all);
102705 err_reg_all:
102706- if (tbl != ctl_forward_entry)
102707- kfree(tbl);
102708+ kfree(tbl);
102709 err_alloc_ctl:
102710 #endif
102711 if (dflt != &ipv4_devconf_dflt)
102712diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
102713index 23104a3..9f5570b 100644
102714--- a/net/ipv4/fib_frontend.c
102715+++ b/net/ipv4/fib_frontend.c
102716@@ -1017,12 +1017,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
102717 #ifdef CONFIG_IP_ROUTE_MULTIPATH
102718 fib_sync_up(dev);
102719 #endif
102720- atomic_inc(&net->ipv4.dev_addr_genid);
102721+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102722 rt_cache_flush(dev_net(dev));
102723 break;
102724 case NETDEV_DOWN:
102725 fib_del_ifaddr(ifa, NULL);
102726- atomic_inc(&net->ipv4.dev_addr_genid);
102727+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102728 if (ifa->ifa_dev->ifa_list == NULL) {
102729 /* Last address was deleted from this interface.
102730 * Disable IP.
102731@@ -1060,7 +1060,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
102732 #ifdef CONFIG_IP_ROUTE_MULTIPATH
102733 fib_sync_up(dev);
102734 #endif
102735- atomic_inc(&net->ipv4.dev_addr_genid);
102736+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102737 rt_cache_flush(net);
102738 break;
102739 case NETDEV_DOWN:
102740diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
102741index f99f41b..1879da9 100644
102742--- a/net/ipv4/fib_semantics.c
102743+++ b/net/ipv4/fib_semantics.c
102744@@ -770,7 +770,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
102745 nh->nh_saddr = inet_select_addr(nh->nh_dev,
102746 nh->nh_gw,
102747 nh->nh_parent->fib_scope);
102748- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
102749+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
102750
102751 return nh->nh_saddr;
102752 }
102753diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
102754index b986298..7e726da 100644
102755--- a/net/ipv4/fou.c
102756+++ b/net/ipv4/fou.c
102757@@ -765,12 +765,12 @@ EXPORT_SYMBOL(gue_build_header);
102758
102759 #ifdef CONFIG_NET_FOU_IP_TUNNELS
102760
102761-static const struct ip_tunnel_encap_ops __read_mostly fou_iptun_ops = {
102762+static const struct ip_tunnel_encap_ops fou_iptun_ops = {
102763 .encap_hlen = fou_encap_hlen,
102764 .build_header = fou_build_header,
102765 };
102766
102767-static const struct ip_tunnel_encap_ops __read_mostly gue_iptun_ops = {
102768+static const struct ip_tunnel_encap_ops gue_iptun_ops = {
102769 .encap_hlen = gue_encap_hlen,
102770 .build_header = gue_build_header,
102771 };
102772diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
102773index 9111a4e..3576905 100644
102774--- a/net/ipv4/inet_hashtables.c
102775+++ b/net/ipv4/inet_hashtables.c
102776@@ -18,6 +18,7 @@
102777 #include <linux/sched.h>
102778 #include <linux/slab.h>
102779 #include <linux/wait.h>
102780+#include <linux/security.h>
102781
102782 #include <net/inet_connection_sock.h>
102783 #include <net/inet_hashtables.h>
102784@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
102785 return inet_ehashfn(net, laddr, lport, faddr, fport);
102786 }
102787
102788+extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
102789+
102790 /*
102791 * Allocate and initialize a new local port bind bucket.
102792 * The bindhash mutex for snum's hash chain must be held here.
102793@@ -554,6 +557,8 @@ ok:
102794 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
102795 spin_unlock(&head->lock);
102796
102797+ gr_update_task_in_ip_table(inet_sk(sk));
102798+
102799 if (tw) {
102800 inet_twsk_deschedule(tw, death_row);
102801 while (twrefcnt) {
102802diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
102803index 241afd7..31b95d5 100644
102804--- a/net/ipv4/inetpeer.c
102805+++ b/net/ipv4/inetpeer.c
102806@@ -461,7 +461,7 @@ relookup:
102807 if (p) {
102808 p->daddr = *daddr;
102809 atomic_set(&p->refcnt, 1);
102810- atomic_set(&p->rid, 0);
102811+ atomic_set_unchecked(&p->rid, 0);
102812 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
102813 p->rate_tokens = 0;
102814 /* 60*HZ is arbitrary, but chosen enough high so that the first
102815diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
102816index 145a50c..5dd8cc5 100644
102817--- a/net/ipv4/ip_fragment.c
102818+++ b/net/ipv4/ip_fragment.c
102819@@ -268,7 +268,7 @@ static int ip_frag_too_far(struct ipq *qp)
102820 return 0;
102821
102822 start = qp->rid;
102823- end = atomic_inc_return(&peer->rid);
102824+ end = atomic_inc_return_unchecked(&peer->rid);
102825 qp->rid = end;
102826
102827 rc = qp->q.fragments && (end - start) > max;
102828@@ -748,12 +748,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
102829
102830 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102831 {
102832- struct ctl_table *table;
102833+ ctl_table_no_const *table = NULL;
102834 struct ctl_table_header *hdr;
102835
102836- table = ip4_frags_ns_ctl_table;
102837 if (!net_eq(net, &init_net)) {
102838- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
102839+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
102840 if (table == NULL)
102841 goto err_alloc;
102842
102843@@ -767,9 +766,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102844 /* Don't export sysctls to unprivileged users */
102845 if (net->user_ns != &init_user_ns)
102846 table[0].procname = NULL;
102847- }
102848+ hdr = register_net_sysctl(net, "net/ipv4", table);
102849+ } else
102850+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
102851
102852- hdr = register_net_sysctl(net, "net/ipv4", table);
102853 if (hdr == NULL)
102854 goto err_reg;
102855
102856@@ -777,8 +777,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102857 return 0;
102858
102859 err_reg:
102860- if (!net_eq(net, &init_net))
102861- kfree(table);
102862+ kfree(table);
102863 err_alloc:
102864 return -ENOMEM;
102865 }
102866diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
102867index 4f4bf5b..2c936fe 100644
102868--- a/net/ipv4/ip_gre.c
102869+++ b/net/ipv4/ip_gre.c
102870@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
102871 module_param(log_ecn_error, bool, 0644);
102872 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
102873
102874-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
102875+static struct rtnl_link_ops ipgre_link_ops;
102876 static int ipgre_tunnel_init(struct net_device *dev);
102877
102878 static int ipgre_net_id __read_mostly;
102879@@ -816,7 +816,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
102880 [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
102881 };
102882
102883-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
102884+static struct rtnl_link_ops ipgre_link_ops = {
102885 .kind = "gre",
102886 .maxtype = IFLA_GRE_MAX,
102887 .policy = ipgre_policy,
102888@@ -830,7 +830,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
102889 .fill_info = ipgre_fill_info,
102890 };
102891
102892-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
102893+static struct rtnl_link_ops ipgre_tap_ops = {
102894 .kind = "gretap",
102895 .maxtype = IFLA_GRE_MAX,
102896 .policy = ipgre_policy,
102897diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
102898index 3d4da2c..40f9c29 100644
102899--- a/net/ipv4/ip_input.c
102900+++ b/net/ipv4/ip_input.c
102901@@ -147,6 +147,10 @@
102902 #include <linux/mroute.h>
102903 #include <linux/netlink.h>
102904
102905+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
102906+extern int grsec_enable_blackhole;
102907+#endif
102908+
102909 /*
102910 * Process Router Attention IP option (RFC 2113)
102911 */
102912@@ -223,6 +227,9 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
102913 if (!raw) {
102914 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
102915 IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
102916+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
102917+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
102918+#endif
102919 icmp_send(skb, ICMP_DEST_UNREACH,
102920 ICMP_PROT_UNREACH, 0);
102921 }
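
This is the first of several CONFIG_GRKERNSEC_BLACKHOLE sites in the patch: with the feature enabled, the stack stops answering unsolicited traffic (ICMP protocol/port unreachable here, TCP resets further down) unless the packet arrived on a loopback device, so local diagnostics keep working. The guard is the same one-line predicate at every site:

#ifdef CONFIG_GRKERNSEC_BLACKHOLE
        if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
#endif
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PROT_UNREACH, 0);

In tcp_v4_rcv() below the same idea needs the small ret = 1 / ret = 2 bookkeeping, because by the time the reset would be sent the code has to remember whether the packet ever matched a socket.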
102922diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
102923index 6b85adb..cd7e5d3 100644
102924--- a/net/ipv4/ip_sockglue.c
102925+++ b/net/ipv4/ip_sockglue.c
102926@@ -1193,7 +1193,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
102927 len = min_t(unsigned int, len, opt->optlen);
102928 if (put_user(len, optlen))
102929 return -EFAULT;
102930- if (copy_to_user(optval, opt->__data, len))
102931+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
102932+ copy_to_user(optval, opt->__data, len))
102933 return -EFAULT;
102934 return 0;
102935 }
102936@@ -1324,7 +1325,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
102937 if (sk->sk_type != SOCK_STREAM)
102938 return -ENOPROTOOPT;
102939
102940- msg.msg_control = (__force void *) optval;
102941+ msg.msg_control = (__force_kernel void *) optval;
102942 msg.msg_controllen = len;
102943 msg.msg_flags = flags;
102944
102945diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
102946index 1a7e979..fd05aa4 100644
102947--- a/net/ipv4/ip_vti.c
102948+++ b/net/ipv4/ip_vti.c
102949@@ -45,7 +45,7 @@
102950 #include <net/net_namespace.h>
102951 #include <net/netns/generic.h>
102952
102953-static struct rtnl_link_ops vti_link_ops __read_mostly;
102954+static struct rtnl_link_ops vti_link_ops;
102955
102956 static int vti_net_id __read_mostly;
102957 static int vti_tunnel_init(struct net_device *dev);
102958@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
102959 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
102960 };
102961
102962-static struct rtnl_link_ops vti_link_ops __read_mostly = {
102963+static struct rtnl_link_ops vti_link_ops = {
102964 .kind = "vti",
102965 .maxtype = IFLA_VTI_MAX,
102966 .policy = vti_policy,
102967diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
102968index 7fa18bc..bea16af 100644
102969--- a/net/ipv4/ipconfig.c
102970+++ b/net/ipv4/ipconfig.c
102971@@ -333,7 +333,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
102972
102973 mm_segment_t oldfs = get_fs();
102974 set_fs(get_ds());
102975- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
102976+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
102977 set_fs(oldfs);
102978 return res;
102979 }
102980@@ -344,7 +344,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
102981
102982 mm_segment_t oldfs = get_fs();
102983 set_fs(get_ds());
102984- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
102985+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
102986 set_fs(oldfs);
102987 return res;
102988 }
102989@@ -355,7 +355,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
102990
102991 mm_segment_t oldfs = get_fs();
102992 set_fs(get_ds());
102993- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
102994+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
102995 set_fs(oldfs);
102996 return res;
102997 }
102998diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
102999index 40403114..c35c647 100644
103000--- a/net/ipv4/ipip.c
103001+++ b/net/ipv4/ipip.c
103002@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
103003 static int ipip_net_id __read_mostly;
103004
103005 static int ipip_tunnel_init(struct net_device *dev);
103006-static struct rtnl_link_ops ipip_link_ops __read_mostly;
103007+static struct rtnl_link_ops ipip_link_ops;
103008
103009 static int ipip_err(struct sk_buff *skb, u32 info)
103010 {
103011@@ -487,7 +487,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
103012 [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
103013 };
103014
103015-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
103016+static struct rtnl_link_ops ipip_link_ops = {
103017 .kind = "ipip",
103018 .maxtype = IFLA_IPTUN_MAX,
103019 .policy = ipip_policy,
103020diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
103021index f95b6f9..2ee2097 100644
103022--- a/net/ipv4/netfilter/arp_tables.c
103023+++ b/net/ipv4/netfilter/arp_tables.c
103024@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
103025 #endif
103026
103027 static int get_info(struct net *net, void __user *user,
103028- const int *len, int compat)
103029+ int len, int compat)
103030 {
103031 char name[XT_TABLE_MAXNAMELEN];
103032 struct xt_table *t;
103033 int ret;
103034
103035- if (*len != sizeof(struct arpt_getinfo)) {
103036- duprintf("length %u != %Zu\n", *len,
103037+ if (len != sizeof(struct arpt_getinfo)) {
103038+ duprintf("length %u != %Zu\n", len,
103039 sizeof(struct arpt_getinfo));
103040 return -EINVAL;
103041 }
103042@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
103043 info.size = private->size;
103044 strcpy(info.name, name);
103045
103046- if (copy_to_user(user, &info, *len) != 0)
103047+ if (copy_to_user(user, &info, len) != 0)
103048 ret = -EFAULT;
103049 else
103050 ret = 0;
103051@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
103052
103053 switch (cmd) {
103054 case ARPT_SO_GET_INFO:
103055- ret = get_info(sock_net(sk), user, len, 1);
103056+ ret = get_info(sock_net(sk), user, *len, 1);
103057 break;
103058 case ARPT_SO_GET_ENTRIES:
103059 ret = compat_get_entries(sock_net(sk), user, len);
103060@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
103061
103062 switch (cmd) {
103063 case ARPT_SO_GET_INFO:
103064- ret = get_info(sock_net(sk), user, len, 0);
103065+ ret = get_info(sock_net(sk), user, *len, 0);
103066 break;
103067
103068 case ARPT_SO_GET_ENTRIES:
103069diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
103070index 99e810f..3711b81 100644
103071--- a/net/ipv4/netfilter/ip_tables.c
103072+++ b/net/ipv4/netfilter/ip_tables.c
103073@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
103074 #endif
103075
103076 static int get_info(struct net *net, void __user *user,
103077- const int *len, int compat)
103078+ int len, int compat)
103079 {
103080 char name[XT_TABLE_MAXNAMELEN];
103081 struct xt_table *t;
103082 int ret;
103083
103084- if (*len != sizeof(struct ipt_getinfo)) {
103085- duprintf("length %u != %zu\n", *len,
103086+ if (len != sizeof(struct ipt_getinfo)) {
103087+ duprintf("length %u != %zu\n", len,
103088 sizeof(struct ipt_getinfo));
103089 return -EINVAL;
103090 }
103091@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
103092 info.size = private->size;
103093 strcpy(info.name, name);
103094
103095- if (copy_to_user(user, &info, *len) != 0)
103096+ if (copy_to_user(user, &info, len) != 0)
103097 ret = -EFAULT;
103098 else
103099 ret = 0;
103100@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103101
103102 switch (cmd) {
103103 case IPT_SO_GET_INFO:
103104- ret = get_info(sock_net(sk), user, len, 1);
103105+ ret = get_info(sock_net(sk), user, *len, 1);
103106 break;
103107 case IPT_SO_GET_ENTRIES:
103108 ret = compat_get_entries(sock_net(sk), user, len);
103109@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103110
103111 switch (cmd) {
103112 case IPT_SO_GET_INFO:
103113- ret = get_info(sock_net(sk), user, len, 0);
103114+ ret = get_info(sock_net(sk), user, *len, 0);
103115 break;
103116
103117 case IPT_SO_GET_ENTRIES:
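
In both arp_tables.c and ip_tables.c, get_info() now receives the length by value: the caller dereferences *len exactly once, so the value validated against sizeof(struct ipt_getinfo) is by construction the same value later handed to copy_to_user(); passing the pointer around invited the checked and used lengths to diverge. A reduced sketch of the single-fetch discipline (struct name hypothetical, compat parameter omitted):

static int get_info(struct net *net, void __user *user, int len)
{
        struct xt_getinfo info;             /* hypothetical name */

        if (len != sizeof(info))
                return -EINVAL;
        /* ... fill info under the table lock ... */
        if (copy_to_user(user, &info, len)) /* same len, no refetch */
                return -EFAULT;
        return 0;
}

/* call site does the single dereference:
 *      ret = get_info(sock_net(sk), user, *len);
 */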
103118diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
103119index e90f83a..3e6acca 100644
103120--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
103121+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
103122@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
103123 spin_lock_init(&cn->lock);
103124
103125 #ifdef CONFIG_PROC_FS
103126- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
103127+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
103128 if (!cn->procdir) {
103129 pr_err("Unable to proc dir entry\n");
103130 return -ENOMEM;
103131diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
103132index 0ae28f5..d32b565 100644
103133--- a/net/ipv4/ping.c
103134+++ b/net/ipv4/ping.c
103135@@ -59,7 +59,7 @@ struct ping_table {
103136 };
103137
103138 static struct ping_table ping_table;
103139-struct pingv6_ops pingv6_ops;
103140+struct pingv6_ops *pingv6_ops;
103141 EXPORT_SYMBOL_GPL(pingv6_ops);
103142
103143 static u16 ping_port_rover;
103144@@ -358,7 +358,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
103145 return -ENODEV;
103146 }
103147 }
103148- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
103149+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
103150 scoped);
103151 rcu_read_unlock();
103152
103153@@ -566,7 +566,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
103154 }
103155 #if IS_ENABLED(CONFIG_IPV6)
103156 } else if (skb->protocol == htons(ETH_P_IPV6)) {
103157- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
103158+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
103159 #endif
103160 }
103161
103162@@ -584,7 +584,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
103163 info, (u8 *)icmph);
103164 #if IS_ENABLED(CONFIG_IPV6)
103165 } else if (family == AF_INET6) {
103166- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
103167+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
103168 info, (u8 *)icmph);
103169 #endif
103170 }
103171@@ -919,10 +919,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
103172 }
103173
103174 if (inet6_sk(sk)->rxopt.all)
103175- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
103176+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
103177 if (skb->protocol == htons(ETH_P_IPV6) &&
103178 inet6_sk(sk)->rxopt.all)
103179- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
103180+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
103181 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
103182 ip_cmsg_recv(msg, skb);
103183 #endif
103184@@ -1117,7 +1117,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
103185 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
103186 0, sock_i_ino(sp),
103187 atomic_read(&sp->sk_refcnt), sp,
103188- atomic_read(&sp->sk_drops));
103189+ atomic_read_unchecked(&sp->sk_drops));
103190 }
103191
103192 static int ping_v4_seq_show(struct seq_file *seq, void *v)
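
ping.c turns pingv6_ops from a global struct that the ipv6 side fills in field by field into a single pointer, presumably so the ops object itself can live in read-only memory once the constify plugin has its way; the visible effect in these hunks is pointer indirection at every IPv4-side call. The binding, as the patch leaves it:

struct pingv6_ops *pingv6_ops;      /* published by the ipv6 side at init */
EXPORT_SYMBOL_GPL(pingv6_ops);

/* each former member access becomes a dereference, e.g.: */
if (sk->sk_family == AF_INET6)
        return pingv6_ops->ipv6_recv_error(sk, msg, len, addr_len);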
103193diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
103194index 0bb68df..59405fc 100644
103195--- a/net/ipv4/raw.c
103196+++ b/net/ipv4/raw.c
103197@@ -324,7 +324,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
103198 int raw_rcv(struct sock *sk, struct sk_buff *skb)
103199 {
103200 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
103201- atomic_inc(&sk->sk_drops);
103202+ atomic_inc_unchecked(&sk->sk_drops);
103203 kfree_skb(skb);
103204 return NET_RX_DROP;
103205 }
103206@@ -774,16 +774,20 @@ static int raw_init(struct sock *sk)
103207
103208 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
103209 {
103210+ struct icmp_filter filter;
103211+
103212 if (optlen > sizeof(struct icmp_filter))
103213 optlen = sizeof(struct icmp_filter);
103214- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
103215+ if (copy_from_user(&filter, optval, optlen))
103216 return -EFAULT;
103217+ raw_sk(sk)->filter = filter;
103218 return 0;
103219 }
103220
103221 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
103222 {
103223 int len, ret = -EFAULT;
103224+ struct icmp_filter filter;
103225
103226 if (get_user(len, optlen))
103227 goto out;
103228@@ -793,8 +797,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
103229 if (len > sizeof(struct icmp_filter))
103230 len = sizeof(struct icmp_filter);
103231 ret = -EFAULT;
103232- if (put_user(len, optlen) ||
103233- copy_to_user(optval, &raw_sk(sk)->filter, len))
103234+ filter = raw_sk(sk)->filter;
103235+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
103236 goto out;
103237 ret = 0;
103238 out: return ret;
103239@@ -1023,7 +1027,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
103240 0, 0L, 0,
103241 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
103242 0, sock_i_ino(sp),
103243- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
103244+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
103245 }
103246
103247 static int raw_seq_show(struct seq_file *seq, void *v)
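
raw_seticmpfilter()/raw_geticmpfilter() stop copying user data directly into (or out of) the filter embedded in the socket object; both directions now bounce through a struct icmp_filter on the stack plus a plain assignment. Under hardened usercopy semantics a stack-local of the exact type is trivially validated, whereas the in-object target would need a slab whitelist (compare the SLAB_USERCOPY flag added to dn_proto earlier). The set path, reduced:

struct icmp_filter filter;                  /* stack bounce buffer */

if (optlen > sizeof(struct icmp_filter))
        optlen = sizeof(struct icmp_filter);
if (copy_from_user(&filter, optval, optlen))
        return -EFAULT;
raw_sk(sk)->filter = filter;                /* plain struct copy */
return 0;

The get path mirrors it, with the extra len > sizeof filter bound before copy_to_user().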
103248diff --git a/net/ipv4/route.c b/net/ipv4/route.c
103249index 52e1f2b..e736cb4 100644
103250--- a/net/ipv4/route.c
103251+++ b/net/ipv4/route.c
103252@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
103253
103254 static int rt_cache_seq_open(struct inode *inode, struct file *file)
103255 {
103256- return seq_open(file, &rt_cache_seq_ops);
103257+ return seq_open_restrict(file, &rt_cache_seq_ops);
103258 }
103259
103260 static const struct file_operations rt_cache_seq_fops = {
103261@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
103262
103263 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
103264 {
103265- return seq_open(file, &rt_cpu_seq_ops);
103266+ return seq_open_restrict(file, &rt_cpu_seq_ops);
103267 }
103268
103269 static const struct file_operations rt_cpu_seq_fops = {
103270@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
103271
103272 static int rt_acct_proc_open(struct inode *inode, struct file *file)
103273 {
103274- return single_open(file, rt_acct_proc_show, NULL);
103275+ return single_open_restrict(file, rt_acct_proc_show, NULL);
103276 }
103277
103278 static const struct file_operations rt_acct_proc_fops = {
103279@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
103280
103281 #define IP_IDENTS_SZ 2048u
103282 struct ip_ident_bucket {
103283- atomic_t id;
103284+ atomic_unchecked_t id;
103285 u32 stamp32;
103286 };
103287
103288-static struct ip_ident_bucket *ip_idents __read_mostly;
103289+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
103290
103291 /* In order to protect privacy, we add a perturbation to identifiers
103292 * if one generator is seldom used. This makes hard for an attacker
103293@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
103294 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
103295 delta = prandom_u32_max(now - old);
103296
103297- return atomic_add_return(segs + delta, &bucket->id) - segs;
103298+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
103299 }
103300 EXPORT_SYMBOL(ip_idents_reserve);
103301
103302@@ -2628,34 +2628,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
103303 .maxlen = sizeof(int),
103304 .mode = 0200,
103305 .proc_handler = ipv4_sysctl_rtcache_flush,
103306+ .extra1 = &init_net,
103307 },
103308 { },
103309 };
103310
103311 static __net_init int sysctl_route_net_init(struct net *net)
103312 {
103313- struct ctl_table *tbl;
103314+ ctl_table_no_const *tbl = NULL;
103315
103316- tbl = ipv4_route_flush_table;
103317 if (!net_eq(net, &init_net)) {
103318- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
103319+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
103320 if (tbl == NULL)
103321 goto err_dup;
103322
103323 /* Don't export sysctls to unprivileged users */
103324 if (net->user_ns != &init_user_ns)
103325 tbl[0].procname = NULL;
103326- }
103327- tbl[0].extra1 = net;
103328+ tbl[0].extra1 = net;
103329+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
103330+ } else
103331+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
103332
103333- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
103334 if (net->ipv4.route_hdr == NULL)
103335 goto err_reg;
103336 return 0;
103337
103338 err_reg:
103339- if (tbl != ipv4_route_flush_table)
103340- kfree(tbl);
103341+ kfree(tbl);
103342 err_dup:
103343 return -ENOMEM;
103344 }
103345@@ -2678,8 +2678,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
103346
103347 static __net_init int rt_genid_init(struct net *net)
103348 {
103349- atomic_set(&net->ipv4.rt_genid, 0);
103350- atomic_set(&net->fnhe_genid, 0);
103351+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
103352+ atomic_set_unchecked(&net->fnhe_genid, 0);
103353 get_random_bytes(&net->ipv4.dev_addr_genid,
103354 sizeof(net->ipv4.dev_addr_genid));
103355 return 0;
103356@@ -2722,11 +2722,7 @@ int __init ip_rt_init(void)
103357 {
103358 int rc = 0;
103359
103360- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
103361- if (!ip_idents)
103362- panic("IP: failed to allocate ip_idents\n");
103363-
103364- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
103365+ prandom_bytes(ip_idents, sizeof(ip_idents));
103366
103367 #ifdef CONFIG_IP_ROUTE_CLASSID
103368 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
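
route.c replaces the boot-time kmalloc of ip_idents with a static array, which removes the allocation-failure panic() and changes what sizeof means at the prandom_bytes() call:

/* Old: pointer, so the byte count had to be spelled out. */
ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));

/* New: static array, so sizeof covers the whole object. */
static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
prandom_bytes(ip_idents, sizeof(ip_idents));

The bucket's id field also becomes atomic_unchecked_t, for the same reason as sk_drops above: IP identifiers are expected to wrap.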
103369diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
103370index e0ee384..e2688d9 100644
103371--- a/net/ipv4/sysctl_net_ipv4.c
103372+++ b/net/ipv4/sysctl_net_ipv4.c
103373@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
103374 container_of(table->data, struct net, ipv4.ip_local_ports.range);
103375 int ret;
103376 int range[2];
103377- struct ctl_table tmp = {
103378+ ctl_table_no_const tmp = {
103379 .data = &range,
103380 .maxlen = sizeof(range),
103381 .mode = table->mode,
103382@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
103383 int ret;
103384 gid_t urange[2];
103385 kgid_t low, high;
103386- struct ctl_table tmp = {
103387+ ctl_table_no_const tmp = {
103388 .data = &urange,
103389 .maxlen = sizeof(urange),
103390 .mode = table->mode,
103391@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
103392 void __user *buffer, size_t *lenp, loff_t *ppos)
103393 {
103394 char val[TCP_CA_NAME_MAX];
103395- struct ctl_table tbl = {
103396+ ctl_table_no_const tbl = {
103397 .data = val,
103398 .maxlen = TCP_CA_NAME_MAX,
103399 };
103400@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
103401 void __user *buffer, size_t *lenp,
103402 loff_t *ppos)
103403 {
103404- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
103405+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
103406 int ret;
103407
103408 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103409@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
103410 void __user *buffer, size_t *lenp,
103411 loff_t *ppos)
103412 {
103413- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
103414+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
103415 int ret;
103416
103417 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103418@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
103419 void __user *buffer, size_t *lenp,
103420 loff_t *ppos)
103421 {
103422- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103423+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103424 struct tcp_fastopen_context *ctxt;
103425 int ret;
103426 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
103427@@ -881,13 +881,12 @@ static struct ctl_table ipv4_net_table[] = {
103428
103429 static __net_init int ipv4_sysctl_init_net(struct net *net)
103430 {
103431- struct ctl_table *table;
103432+ ctl_table_no_const *table = NULL;
103433
103434- table = ipv4_net_table;
103435 if (!net_eq(net, &init_net)) {
103436 int i;
103437
103438- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
103439+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
103440 if (table == NULL)
103441 goto err_alloc;
103442
103443@@ -896,7 +895,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
103444 table[i].data += (void *)net - (void *)&init_net;
103445 }
103446
103447- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103448+ if (!net_eq(net, &init_net))
103449+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103450+ else
103451+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
103452 if (net->ipv4.ipv4_hdr == NULL)
103453 goto err_reg;
103454
103455diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
103456index 3075723..aa6f6e5 100644
103457--- a/net/ipv4/tcp.c
103458+++ b/net/ipv4/tcp.c
103459@@ -520,8 +520,10 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
103460
103461 /* Race breaker. If space is freed after
103462 * wspace test but before the flags are set,
103463- * IO signal will be lost.
103464+ * IO signal will be lost. Memory barrier
103465+ * pairs with the input side.
103466 */
103467+ smp_mb__after_atomic();
103468 if (sk_stream_is_writeable(sk))
103469 mask |= POLLOUT | POLLWRNORM;
103470 }
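
The tcp.c hunk is one half of a barrier pairing completed in tcp_check_space() in tcp_input.c below: the poller sets SOCK_NOSPACE and then re-tests writeability; the input side clears SOCK_QUEUE_SHRUNK and then tests SOCK_NOSPACE. Each smp_mb__after_atomic() forbids the reordering that would let both sides read stale state and lose the wakeup. A store-buffering model of the pairing (user-space sketch with C11 atomics, not the kernel code; flag polarity simplified):

#include <stdatomic.h>

/* Each side publishes its flag, fences, then reads the other flag.
 * The fences guarantee at least one side observes the other's store,
 * so the wakeup cannot be lost. */
atomic_int nospace, queue_shrunk;

void poll_side(void)                    /* models tcp_poll() */
{
        atomic_store(&nospace, 1);      /* set_bit(SOCK_NOSPACE, ...) */
        atomic_thread_fence(memory_order_seq_cst); /* smp_mb__after_atomic() */
        if (atomic_load(&queue_shrunk)) {
                /* space already freed: report POLLOUT, don't sleep */
        }
}

void input_side(void)                   /* models tcp_check_space() */
{
        atomic_store(&queue_shrunk, 1); /* SOCK_QUEUE_SHRUNK transition */
        atomic_thread_fence(memory_order_seq_cst); /* smp_mb__after_atomic() */
        if (atomic_load(&nospace)) {
                /* a writer is (or may be) waiting: wake it */
        }
}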
103471diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
103472index 075ab4d..8d0580a 100644
103473--- a/net/ipv4/tcp_input.c
103474+++ b/net/ipv4/tcp_input.c
103475@@ -766,7 +766,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
103476 * without any lock. We want to make sure compiler wont store
103477 * intermediate values in this location.
103478 */
103479- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
103480+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
103481 sk->sk_max_pacing_rate);
103482 }
103483
103484@@ -4528,7 +4528,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
103485 * simplifies code)
103486 */
103487 static void
103488-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103489+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103490 struct sk_buff *head, struct sk_buff *tail,
103491 u32 start, u32 end)
103492 {
103493@@ -4786,6 +4786,8 @@ static void tcp_check_space(struct sock *sk)
103494 {
103495 if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
103496 sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
103497+ /* pairs with tcp_poll() */
103498+ smp_mb__after_atomic();
103499 if (sk->sk_socket &&
103500 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
103501 tcp_new_space(sk);
103502@@ -5506,6 +5508,7 @@ discard:
103503 tcp_paws_reject(&tp->rx_opt, 0))
103504 goto discard_and_undo;
103505
103506+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
103507 if (th->syn) {
103508 /* We see SYN without ACK. It is attempt of
103509 * simultaneous connect with crossed SYNs.
103510@@ -5556,6 +5559,7 @@ discard:
103511 goto discard;
103512 #endif
103513 }
103514+#endif
103515 /* "fifth, if neither of the SYN or RST bits is set then
103516 * drop the segment and return."
103517 */
103518@@ -5602,7 +5606,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
103519 goto discard;
103520
103521 if (th->syn) {
103522- if (th->fin)
103523+ if (th->fin || th->urg || th->psh)
103524 goto discard;
103525 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
103526 return 1;
103527diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
103528index d22f544..62f6787 100644
103529--- a/net/ipv4/tcp_ipv4.c
103530+++ b/net/ipv4/tcp_ipv4.c
103531@@ -89,6 +89,10 @@ int sysctl_tcp_tw_reuse __read_mostly;
103532 int sysctl_tcp_low_latency __read_mostly;
103533 EXPORT_SYMBOL(sysctl_tcp_low_latency);
103534
103535+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103536+extern int grsec_enable_blackhole;
103537+#endif
103538+
103539 #ifdef CONFIG_TCP_MD5SIG
103540 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
103541 __be32 daddr, __be32 saddr, const struct tcphdr *th);
103542@@ -1473,6 +1477,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
103543 return 0;
103544
103545 reset:
103546+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103547+ if (!grsec_enable_blackhole)
103548+#endif
103549 tcp_v4_send_reset(rsk, skb);
103550 discard:
103551 kfree_skb(skb);
103552@@ -1637,12 +1644,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
103553 TCP_SKB_CB(skb)->sacked = 0;
103554
103555 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
103556- if (!sk)
103557+ if (!sk) {
103558+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103559+ ret = 1;
103560+#endif
103561 goto no_tcp_socket;
103562-
103563+ }
103564 process:
103565- if (sk->sk_state == TCP_TIME_WAIT)
103566+ if (sk->sk_state == TCP_TIME_WAIT) {
103567+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103568+ ret = 2;
103569+#endif
103570 goto do_time_wait;
103571+ }
103572
103573 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
103574 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
103575@@ -1698,6 +1712,10 @@ csum_error:
103576 bad_packet:
103577 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
103578 } else {
103579+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103580+ if (!grsec_enable_blackhole || (ret == 1 &&
103581+ (skb->dev->flags & IFF_LOOPBACK)))
103582+#endif
103583 tcp_v4_send_reset(NULL, skb);
103584 }
103585
103586diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
103587index 63d2680..2db9d6b 100644
103588--- a/net/ipv4/tcp_minisocks.c
103589+++ b/net/ipv4/tcp_minisocks.c
103590@@ -27,6 +27,10 @@
103591 #include <net/inet_common.h>
103592 #include <net/xfrm.h>
103593
103594+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103595+extern int grsec_enable_blackhole;
103596+#endif
103597+
103598 int sysctl_tcp_syncookies __read_mostly = 1;
103599 EXPORT_SYMBOL(sysctl_tcp_syncookies);
103600
103601@@ -739,7 +743,10 @@ embryonic_reset:
103602 * avoid becoming vulnerable to outside attack aiming at
103603 * resetting legit local connections.
103604 */
103605- req->rsk_ops->send_reset(sk, skb);
103606+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103607+ if (!grsec_enable_blackhole)
103608+#endif
103609+ req->rsk_ops->send_reset(sk, skb);
103610 } else if (fastopen) { /* received a valid RST pkt */
103611 reqsk_fastopen_remove(sk, req, true);
103612 tcp_reset(sk);
103613diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
103614index 9790f39..9f29453 100644
103615--- a/net/ipv4/tcp_output.c
103616+++ b/net/ipv4/tcp_output.c
103617@@ -2931,6 +2931,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
103618 }
103619 #endif
103620
103621+ /* Do not fool tcpdump (if any), clean our debris */
103622+ skb->tstamp.tv64 = 0;
103623 return skb;
103624 }
103625 EXPORT_SYMBOL(tcp_make_synack);
103626diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
103627index ebf5ff5..4d1ff32 100644
103628--- a/net/ipv4/tcp_probe.c
103629+++ b/net/ipv4/tcp_probe.c
103630@@ -236,7 +236,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
103631 if (cnt + width >= len)
103632 break;
103633
103634- if (copy_to_user(buf + cnt, tbuf, width))
103635+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
103636 return -EFAULT;
103637 cnt += width;
103638 }
103639diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
103640index 1829c7f..c0b3d52 100644
103641--- a/net/ipv4/tcp_timer.c
103642+++ b/net/ipv4/tcp_timer.c
103643@@ -22,6 +22,10 @@
103644 #include <linux/gfp.h>
103645 #include <net/tcp.h>
103646
103647+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103648+extern int grsec_lastack_retries;
103649+#endif
103650+
103651 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
103652 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
103653 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
103654@@ -191,6 +195,13 @@ static int tcp_write_timeout(struct sock *sk)
103655 }
103656 }
103657
103658+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103659+ if ((sk->sk_state == TCP_LAST_ACK) &&
103660+ (grsec_lastack_retries > 0) &&
103661+ (grsec_lastack_retries < retry_until))
103662+ retry_until = grsec_lastack_retries;
103663+#endif
103664+
103665 if (retransmits_timed_out(sk, retry_until,
103666 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
103667 /* Has it gone just too far? */
103668diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
103669index 13b4dcf..b866a2a 100644
103670--- a/net/ipv4/udp.c
103671+++ b/net/ipv4/udp.c
103672@@ -87,6 +87,7 @@
103673 #include <linux/types.h>
103674 #include <linux/fcntl.h>
103675 #include <linux/module.h>
103676+#include <linux/security.h>
103677 #include <linux/socket.h>
103678 #include <linux/sockios.h>
103679 #include <linux/igmp.h>
103680@@ -114,6 +115,10 @@
103681 #include <net/busy_poll.h>
103682 #include "udp_impl.h"
103683
103684+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103685+extern int grsec_enable_blackhole;
103686+#endif
103687+
103688 struct udp_table udp_table __read_mostly;
103689 EXPORT_SYMBOL(udp_table);
103690
103691@@ -608,6 +613,9 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
103692 return true;
103693 }
103694
103695+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
103696+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
103697+
103698 /*
103699 * This routine is called by the ICMP module when it gets some
103700 * sort of error condition. If err < 0 then the socket should
103701@@ -945,9 +953,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
103702 dport = usin->sin_port;
103703 if (dport == 0)
103704 return -EINVAL;
103705+
103706+ err = gr_search_udp_sendmsg(sk, usin);
103707+ if (err)
103708+ return err;
103709 } else {
103710 if (sk->sk_state != TCP_ESTABLISHED)
103711 return -EDESTADDRREQ;
103712+
103713+ err = gr_search_udp_sendmsg(sk, NULL);
103714+ if (err)
103715+ return err;
103716+
103717 daddr = inet->inet_daddr;
103718 dport = inet->inet_dport;
103719 /* Open fast path for connected socket.
103720@@ -1195,7 +1212,7 @@ static unsigned int first_packet_length(struct sock *sk)
103721 IS_UDPLITE(sk));
103722 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
103723 IS_UDPLITE(sk));
103724- atomic_inc(&sk->sk_drops);
103725+ atomic_inc_unchecked(&sk->sk_drops);
103726 __skb_unlink(skb, rcvq);
103727 __skb_queue_tail(&list_kill, skb);
103728 }
103729@@ -1275,6 +1292,10 @@ try_again:
103730 if (!skb)
103731 goto out;
103732
103733+ err = gr_search_udp_recvmsg(sk, skb);
103734+ if (err)
103735+ goto out_free;
103736+
103737 ulen = skb->len - sizeof(struct udphdr);
103738 copied = len;
103739 if (copied > ulen)
103740@@ -1307,7 +1328,7 @@ try_again:
103741 if (unlikely(err)) {
103742 trace_kfree_skb(skb, udp_recvmsg);
103743 if (!peeked) {
103744- atomic_inc(&sk->sk_drops);
103745+ atomic_inc_unchecked(&sk->sk_drops);
103746 UDP_INC_STATS_USER(sock_net(sk),
103747 UDP_MIB_INERRORS, is_udplite);
103748 }
103749@@ -1605,7 +1626,7 @@ csum_error:
103750 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
103751 drop:
103752 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
103753- atomic_inc(&sk->sk_drops);
103754+ atomic_inc_unchecked(&sk->sk_drops);
103755 kfree_skb(skb);
103756 return -1;
103757 }
103758@@ -1624,7 +1645,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
103759 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
103760
103761 if (!skb1) {
103762- atomic_inc(&sk->sk_drops);
103763+ atomic_inc_unchecked(&sk->sk_drops);
103764 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
103765 IS_UDPLITE(sk));
103766 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
103767@@ -1830,6 +1851,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
103768 goto csum_error;
103769
103770 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
103771+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103772+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
103773+#endif
103774 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
103775
103776 /*
103777@@ -2416,7 +2440,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
103778 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
103779 0, sock_i_ino(sp),
103780 atomic_read(&sp->sk_refcnt), sp,
103781- atomic_read(&sp->sk_drops));
103782+ atomic_read_unchecked(&sp->sk_drops));
103783 }
103784
103785 int udp4_seq_show(struct seq_file *seq, void *v)
103786diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
103787index 6156f68..d6ab46d 100644
103788--- a/net/ipv4/xfrm4_policy.c
103789+++ b/net/ipv4/xfrm4_policy.c
103790@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
103791 fl4->flowi4_tos = iph->tos;
103792 }
103793
103794-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
103795+static int xfrm4_garbage_collect(struct dst_ops *ops)
103796 {
103797 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
103798
103799- xfrm4_policy_afinfo.garbage_collect(net);
103800+ xfrm_garbage_collect_deferred(net);
103801 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
103802 }
103803
103804@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
103805
103806 static int __net_init xfrm4_net_init(struct net *net)
103807 {
103808- struct ctl_table *table;
103809+ ctl_table_no_const *table = NULL;
103810 struct ctl_table_header *hdr;
103811
103812- table = xfrm4_policy_table;
103813 if (!net_eq(net, &init_net)) {
103814- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
103815+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
103816 if (!table)
103817 goto err_alloc;
103818
103819 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
103820- }
103821-
103822- hdr = register_net_sysctl(net, "net/ipv4", table);
103823+ hdr = register_net_sysctl(net, "net/ipv4", table);
103824+ } else
103825+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
103826 if (!hdr)
103827 goto err_reg;
103828
103829@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
103830 return 0;
103831
103832 err_reg:
103833- if (!net_eq(net, &init_net))
103834- kfree(table);
103835+ kfree(table);
103836 err_alloc:
103837 return -ENOMEM;
103838 }
103839diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
103840index dac9419..534fa31 100644
103841--- a/net/ipv6/addrconf.c
103842+++ b/net/ipv6/addrconf.c
103843@@ -171,7 +171,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
103844 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
103845 .mtu6 = IPV6_MIN_MTU,
103846 .accept_ra = 1,
103847- .accept_redirects = 1,
103848+ .accept_redirects = 0,
103849 .autoconf = 1,
103850 .force_mld_version = 0,
103851 .mldv1_unsolicited_report_interval = 10 * HZ,
103852@@ -208,7 +208,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
103853 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
103854 .mtu6 = IPV6_MIN_MTU,
103855 .accept_ra = 1,
103856- .accept_redirects = 1,
103857+ .accept_redirects = 0,
103858 .autoconf = 1,
103859 .force_mld_version = 0,
103860 .mldv1_unsolicited_report_interval = 10 * HZ,
103861@@ -604,7 +604,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
103862 idx = 0;
103863 head = &net->dev_index_head[h];
103864 rcu_read_lock();
103865- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
103866+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
103867 net->dev_base_seq;
103868 hlist_for_each_entry_rcu(dev, head, index_hlist) {
103869 if (idx < s_idx)
103870@@ -2420,7 +2420,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
103871 p.iph.ihl = 5;
103872 p.iph.protocol = IPPROTO_IPV6;
103873 p.iph.ttl = 64;
103874- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
103875+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
103876
103877 if (ops->ndo_do_ioctl) {
103878 mm_segment_t oldfs = get_fs();
103879@@ -3569,16 +3569,23 @@ static const struct file_operations if6_fops = {
103880 .release = seq_release_net,
103881 };
103882
103883+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
103884+extern void unregister_ipv6_seq_ops_addr(void);
103885+
103886 static int __net_init if6_proc_net_init(struct net *net)
103887 {
103888- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
103889+ register_ipv6_seq_ops_addr(&if6_seq_ops);
103890+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
103891+ unregister_ipv6_seq_ops_addr();
103892 return -ENOMEM;
103893+ }
103894 return 0;
103895 }
103896
103897 static void __net_exit if6_proc_net_exit(struct net *net)
103898 {
103899 remove_proc_entry("if_inet6", net->proc_net);
103900+ unregister_ipv6_seq_ops_addr();
103901 }
103902
103903 static struct pernet_operations if6_proc_net_ops = {
103904@@ -4194,7 +4201,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
103905 s_ip_idx = ip_idx = cb->args[2];
103906
103907 rcu_read_lock();
103908- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
103909+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
103910 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
103911 idx = 0;
103912 head = &net->dev_index_head[h];
103913@@ -4840,7 +4847,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
103914 rt_genid_bump_ipv6(net);
103915 break;
103916 }
103917- atomic_inc(&net->ipv6.dev_addr_genid);
103918+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
103919 }
103920
103921 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
103922@@ -4860,7 +4867,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
103923 int *valp = ctl->data;
103924 int val = *valp;
103925 loff_t pos = *ppos;
103926- struct ctl_table lctl;
103927+ ctl_table_no_const lctl;
103928 int ret;
103929
103930 /*
103931@@ -4945,7 +4952,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
103932 int *valp = ctl->data;
103933 int val = *valp;
103934 loff_t pos = *ppos;
103935- struct ctl_table lctl;
103936+ ctl_table_no_const lctl;
103937 int ret;
103938
103939 /*
103940diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
103941index e8c4400..a4cd5da 100644
103942--- a/net/ipv6/af_inet6.c
103943+++ b/net/ipv6/af_inet6.c
103944@@ -766,7 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
103945 net->ipv6.sysctl.icmpv6_time = 1*HZ;
103946 net->ipv6.sysctl.flowlabel_consistency = 1;
103947 net->ipv6.sysctl.auto_flowlabels = 0;
103948- atomic_set(&net->ipv6.fib6_sernum, 1);
103949+ atomic_set_unchecked(&net->ipv6.fib6_sernum, 1);
103950
103951 err = ipv6_init_mibs(net);
103952 if (err)
103953diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
103954index 49f5e73..ae02d54 100644
103955--- a/net/ipv6/datagram.c
103956+++ b/net/ipv6/datagram.c
103957@@ -941,5 +941,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
103958 0,
103959 sock_i_ino(sp),
103960 atomic_read(&sp->sk_refcnt), sp,
103961- atomic_read(&sp->sk_drops));
103962+ atomic_read_unchecked(&sp->sk_drops));
103963 }
103964diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
103965index d674152..fb5a01d 100644
103966--- a/net/ipv6/icmp.c
103967+++ b/net/ipv6/icmp.c
103968@@ -1005,7 +1005,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
103969
103970 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
103971 {
103972- struct ctl_table *table;
103973+ ctl_table_no_const *table;
103974
103975 table = kmemdup(ipv6_icmp_table_template,
103976 sizeof(ipv6_icmp_table_template),
103977diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
103978index f1c6d5e..faabef6 100644
103979--- a/net/ipv6/ip6_fib.c
103980+++ b/net/ipv6/ip6_fib.c
103981@@ -99,9 +99,9 @@ static int fib6_new_sernum(struct net *net)
103982 int new, old;
103983
103984 do {
103985- old = atomic_read(&net->ipv6.fib6_sernum);
103986+ old = atomic_read_unchecked(&net->ipv6.fib6_sernum);
103987 new = old < INT_MAX ? old + 1 : 1;
103988- } while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
103989+ } while (atomic_cmpxchg_unchecked(&net->ipv6.fib6_sernum,
103990 old, new) != old);
103991 return new;
103992 }
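fib6_new_sernum() above keeps its compare-exchange loop and only switches to the unchecked atomics, since the serial number intentionally wraps. A C11 sketch of the same loop, assuming nothing beyond <stdatomic.h>: it hands out values 1..INT_MAX and wraps back to 1, so 0 keeps its "unset" meaning.

#include <limits.h>
#include <stdatomic.h>

static atomic_int sernum = 1;

static int new_sernum(void)
{
	int old = atomic_load(&sernum);
	int new;

	do {
		new = old < INT_MAX ? old + 1 : 1;
		/* on failure, 'old' is refreshed with the current value */
	} while (!atomic_compare_exchange_weak(&sernum, &old, new));
	return new;
}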
103993diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
103994index 01ccc28..66861c7 100644
103995--- a/net/ipv6/ip6_gre.c
103996+++ b/net/ipv6/ip6_gre.c
103997@@ -71,8 +71,8 @@ struct ip6gre_net {
103998 struct net_device *fb_tunnel_dev;
103999 };
104000
104001-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
104002-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
104003+static struct rtnl_link_ops ip6gre_link_ops;
104004+static struct rtnl_link_ops ip6gre_tap_ops;
104005 static int ip6gre_tunnel_init(struct net_device *dev);
104006 static void ip6gre_tunnel_setup(struct net_device *dev);
104007 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
104008@@ -1289,7 +1289,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
104009 }
104010
104011
104012-static struct inet6_protocol ip6gre_protocol __read_mostly = {
104013+static struct inet6_protocol ip6gre_protocol = {
104014 .handler = ip6gre_rcv,
104015 .err_handler = ip6gre_err,
104016 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
104017@@ -1650,7 +1650,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
104018 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
104019 };
104020
104021-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
104022+static struct rtnl_link_ops ip6gre_link_ops = {
104023 .kind = "ip6gre",
104024 .maxtype = IFLA_GRE_MAX,
104025 .policy = ip6gre_policy,
104026@@ -1664,7 +1664,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
104027 .fill_info = ip6gre_fill_info,
104028 };
104029
104030-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
104031+static struct rtnl_link_ops ip6gre_tap_ops = {
104032 .kind = "ip6gretap",
104033 .maxtype = IFLA_GRE_MAX,
104034 .policy = ip6gre_policy,
104035diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
104036index 92b3da5..77837b8 100644
104037--- a/net/ipv6/ip6_tunnel.c
104038+++ b/net/ipv6/ip6_tunnel.c
104039@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
104040
104041 static int ip6_tnl_dev_init(struct net_device *dev);
104042 static void ip6_tnl_dev_setup(struct net_device *dev);
104043-static struct rtnl_link_ops ip6_link_ops __read_mostly;
104044+static struct rtnl_link_ops ip6_link_ops;
104045
104046 static int ip6_tnl_net_id __read_mostly;
104047 struct ip6_tnl_net {
104048@@ -1771,7 +1771,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
104049 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
104050 };
104051
104052-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
104053+static struct rtnl_link_ops ip6_link_ops = {
104054 .kind = "ip6tnl",
104055 .maxtype = IFLA_IPTUN_MAX,
104056 .policy = ip6_tnl_policy,
104057diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
104058index ace10d0..97a8b49 100644
104059--- a/net/ipv6/ip6_vti.c
104060+++ b/net/ipv6/ip6_vti.c
104061@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
104062
104063 static int vti6_dev_init(struct net_device *dev);
104064 static void vti6_dev_setup(struct net_device *dev);
104065-static struct rtnl_link_ops vti6_link_ops __read_mostly;
104066+static struct rtnl_link_ops vti6_link_ops;
104067
104068 static int vti6_net_id __read_mostly;
104069 struct vti6_net {
104070@@ -1004,7 +1004,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
104071 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
104072 };
104073
104074-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
104075+static struct rtnl_link_ops vti6_link_ops = {
104076 .kind = "vti6",
104077 .maxtype = IFLA_VTI_MAX,
104078 .policy = vti6_policy,
104079diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
104080index 66980d8d..8aef0d1 100644
104081--- a/net/ipv6/ipv6_sockglue.c
104082+++ b/net/ipv6/ipv6_sockglue.c
104083@@ -989,7 +989,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
104084 if (sk->sk_type != SOCK_STREAM)
104085 return -ENOPROTOOPT;
104086
104087- msg.msg_control = optval;
104088+ msg.msg_control = (void __force_kernel *)optval;
104089 msg.msg_controllen = len;
104090 msg.msg_flags = flags;
104091
104092diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
104093index e080fbb..412b3cf 100644
104094--- a/net/ipv6/netfilter/ip6_tables.c
104095+++ b/net/ipv6/netfilter/ip6_tables.c
104096@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
104097 #endif
104098
104099 static int get_info(struct net *net, void __user *user,
104100- const int *len, int compat)
104101+ int len, int compat)
104102 {
104103 char name[XT_TABLE_MAXNAMELEN];
104104 struct xt_table *t;
104105 int ret;
104106
104107- if (*len != sizeof(struct ip6t_getinfo)) {
104108- duprintf("length %u != %zu\n", *len,
104109+ if (len != sizeof(struct ip6t_getinfo)) {
104110+ duprintf("length %u != %zu\n", len,
104111 sizeof(struct ip6t_getinfo));
104112 return -EINVAL;
104113 }
104114@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
104115 info.size = private->size;
104116 strcpy(info.name, name);
104117
104118- if (copy_to_user(user, &info, *len) != 0)
104119+ if (copy_to_user(user, &info, len) != 0)
104120 ret = -EFAULT;
104121 else
104122 ret = 0;
104123@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
104124
104125 switch (cmd) {
104126 case IP6T_SO_GET_INFO:
104127- ret = get_info(sock_net(sk), user, len, 1);
104128+ ret = get_info(sock_net(sk), user, *len, 1);
104129 break;
104130 case IP6T_SO_GET_ENTRIES:
104131 ret = compat_get_entries(sock_net(sk), user, len);
104132@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
104133
104134 switch (cmd) {
104135 case IP6T_SO_GET_INFO:
104136- ret = get_info(sock_net(sk), user, len, 0);
104137+ ret = get_info(sock_net(sk), user, *len, 0);
104138 break;
104139
104140 case IP6T_SO_GET_ENTRIES:
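The get_info() change above is a double-fetch fix rather than a hardening annotation: with `const int *len`, the handler dereferenced a length that userland could change between the caller's check and the later copy_to_user(); passing `int len` by value means the user-supplied number is fetched exactly once (the `*len` at the two call sites), and every subsequent check and copy sees the same value. A hedged sketch of the rule, with stand-in names:

#include <string.h>

struct getinfo { char name[32]; unsigned int size; };

/* 'len' was captured from user memory once by the caller; reusing the
 * captured value (never re-reading the user pointer) closes the race. */
static int handle_get_info(void *dst, size_t len)
{
	struct getinfo info;

	if (len != sizeof(info))
		return -1;
	memset(&info, 0, sizeof(info));
	/* ... fill in info ... */
	memcpy(dst, &info, len);	/* same captured value as the check */
	return 0;
}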
104141diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
104142index 6f187c8..34b367f 100644
104143--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
104144+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
104145@@ -96,12 +96,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
104146
104147 static int nf_ct_frag6_sysctl_register(struct net *net)
104148 {
104149- struct ctl_table *table;
104150+ ctl_table_no_const *table = NULL;
104151 struct ctl_table_header *hdr;
104152
104153- table = nf_ct_frag6_sysctl_table;
104154 if (!net_eq(net, &init_net)) {
104155- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
104156+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
104157 GFP_KERNEL);
104158 if (table == NULL)
104159 goto err_alloc;
104160@@ -112,9 +111,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
104161 table[2].data = &net->nf_frag.frags.high_thresh;
104162 table[2].extra1 = &net->nf_frag.frags.low_thresh;
104163 table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
104164- }
104165-
104166- hdr = register_net_sysctl(net, "net/netfilter", table);
104167+ hdr = register_net_sysctl(net, "net/netfilter", table);
104168+ } else
104169+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
104170 if (hdr == NULL)
104171 goto err_reg;
104172
104173@@ -122,8 +121,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
104174 return 0;
104175
104176 err_reg:
104177- if (!net_eq(net, &init_net))
104178- kfree(table);
104179+ kfree(table);
104180 err_alloc:
104181 return -ENOMEM;
104182 }
104183diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
104184index fe7e3e4..47aba96 100644
104185--- a/net/ipv6/ping.c
104186+++ b/net/ipv6/ping.c
104187@@ -242,6 +242,24 @@ static struct pernet_operations ping_v6_net_ops = {
104188 };
104189 #endif
104190
104191+static struct pingv6_ops real_pingv6_ops = {
104192+ .ipv6_recv_error = ipv6_recv_error,
104193+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
104194+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
104195+ .icmpv6_err_convert = icmpv6_err_convert,
104196+ .ipv6_icmp_error = ipv6_icmp_error,
104197+ .ipv6_chk_addr = ipv6_chk_addr,
104198+};
104199+
104200+static struct pingv6_ops dummy_pingv6_ops = {
104201+ .ipv6_recv_error = dummy_ipv6_recv_error,
104202+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
104203+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
104204+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
104205+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
104206+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
104207+};
104208+
104209 int __init pingv6_init(void)
104210 {
104211 #ifdef CONFIG_PROC_FS
104212@@ -249,13 +267,7 @@ int __init pingv6_init(void)
104213 if (ret)
104214 return ret;
104215 #endif
104216- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
104217- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
104218- pingv6_ops.ip6_datagram_recv_specific_ctl =
104219- ip6_datagram_recv_specific_ctl;
104220- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
104221- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
104222- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
104223+ pingv6_ops = &real_pingv6_ops;
104224 return inet6_register_protosw(&pingv6_protosw);
104225 }
104226
104227@@ -264,14 +276,9 @@ int __init pingv6_init(void)
104228 */
104229 void pingv6_exit(void)
104230 {
104231- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
104232- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
104233- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
104234- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
104235- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
104236- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
104237 #ifdef CONFIG_PROC_FS
104238 unregister_pernet_subsys(&ping_v6_net_ops);
104239 #endif
104240+ pingv6_ops = &dummy_pingv6_ops;
104241 inet6_unregister_protosw(&pingv6_protosw);
104242 }
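The pingv6_ops rework above replaces field-by-field writes into one writable global ops struct with two fixed tables and a single pointer flip, so the block of function pointers itself never has to stay writable. A compilable miniature of the shape (all names invented):

struct ops { int (*recv_error)(void); };

static int real_recv_error(void)  { return 0; }
static int dummy_recv_error(void) { return -1; }

static const struct ops real_ops  = { .recv_error = real_recv_error };
static const struct ops dummy_ops = { .recv_error = dummy_recv_error };

static const struct ops *active_ops = &dummy_ops;

static void subsys_init(void) { active_ops = &real_ops; }	/* cf. pingv6_init() */
static void subsys_exit(void) { active_ops = &dummy_ops; }	/* cf. pingv6_exit() */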
104243diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
104244index 679253d0..70b653c 100644
104245--- a/net/ipv6/proc.c
104246+++ b/net/ipv6/proc.c
104247@@ -310,7 +310,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
104248 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
104249 goto proc_snmp6_fail;
104250
104251- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
104252+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
104253 if (!net->mib.proc_net_devsnmp6)
104254 goto proc_dev_snmp6_fail;
104255 return 0;
104256diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
104257index ee25631..3c3ac5d 100644
104258--- a/net/ipv6/raw.c
104259+++ b/net/ipv6/raw.c
104260@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
104261 {
104262 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
104263 skb_checksum_complete(skb)) {
104264- atomic_inc(&sk->sk_drops);
104265+ atomic_inc_unchecked(&sk->sk_drops);
104266 kfree_skb(skb);
104267 return NET_RX_DROP;
104268 }
104269@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
104270 struct raw6_sock *rp = raw6_sk(sk);
104271
104272 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
104273- atomic_inc(&sk->sk_drops);
104274+ atomic_inc_unchecked(&sk->sk_drops);
104275 kfree_skb(skb);
104276 return NET_RX_DROP;
104277 }
104278@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
104279
104280 if (inet->hdrincl) {
104281 if (skb_checksum_complete(skb)) {
104282- atomic_inc(&sk->sk_drops);
104283+ atomic_inc_unchecked(&sk->sk_drops);
104284 kfree_skb(skb);
104285 return NET_RX_DROP;
104286 }
104287@@ -609,7 +609,7 @@ out:
104288 return err;
104289 }
104290
104291-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
104292+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
104293 struct flowi6 *fl6, struct dst_entry **dstp,
104294 unsigned int flags)
104295 {
104296@@ -916,12 +916,15 @@ do_confirm:
104297 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
104298 char __user *optval, int optlen)
104299 {
104300+ struct icmp6_filter filter;
104301+
104302 switch (optname) {
104303 case ICMPV6_FILTER:
104304 if (optlen > sizeof(struct icmp6_filter))
104305 optlen = sizeof(struct icmp6_filter);
104306- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
104307+ if (copy_from_user(&filter, optval, optlen))
104308 return -EFAULT;
104309+ raw6_sk(sk)->filter = filter;
104310 return 0;
104311 default:
104312 return -ENOPROTOOPT;
104313@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
104314 char __user *optval, int __user *optlen)
104315 {
104316 int len;
104317+ struct icmp6_filter filter;
104318
104319 switch (optname) {
104320 case ICMPV6_FILTER:
104321@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
104322 len = sizeof(struct icmp6_filter);
104323 if (put_user(len, optlen))
104324 return -EFAULT;
104325- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
104326+ filter = raw6_sk(sk)->filter;
104327+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
104328 return -EFAULT;
104329 return 0;
104330 default:
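The rawv6_seticmpfilter()/rawv6_geticmpfilter() changes above route the user copy through a stack temporary instead of copying directly into (or out of) the socket's live filter: a fault mid-copy then leaves the live filter untouched, and the kernel object is only ever updated as a whole. A user-space analogue, with copy_in() standing in for copy_from_user():

#include <string.h>

struct filter { unsigned int data[8]; };

/* stand-in for copy_from_user(); returns nonzero if any byte faults */
extern int copy_in(void *dst, const void *user_src, size_t len);

static int set_filter(struct filter *live, const void *user_src, size_t optlen)
{
	struct filter tmp;

	memset(&tmp, 0, sizeof(tmp));
	if (optlen > sizeof(tmp))
		optlen = sizeof(tmp);
	if (copy_in(&tmp, user_src, optlen))
		return -1;	/* partial copy: live filter untouched */
	*live = tmp;		/* commit only after a full, successful copy */
	return 0;
}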
104331diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
104332index d7d70e6..bd5e9fc 100644
104333--- a/net/ipv6/reassembly.c
104334+++ b/net/ipv6/reassembly.c
104335@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
104336
104337 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104338 {
104339- struct ctl_table *table;
104340+ ctl_table_no_const *table = NULL;
104341 struct ctl_table_header *hdr;
104342
104343- table = ip6_frags_ns_ctl_table;
104344 if (!net_eq(net, &init_net)) {
104345- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
104346+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
104347 if (table == NULL)
104348 goto err_alloc;
104349
104350@@ -645,9 +644,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104351 /* Don't export sysctls to unprivileged users */
104352 if (net->user_ns != &init_user_ns)
104353 table[0].procname = NULL;
104354- }
104355+ hdr = register_net_sysctl(net, "net/ipv6", table);
104356+ } else
104357+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
104358
104359- hdr = register_net_sysctl(net, "net/ipv6", table);
104360 if (hdr == NULL)
104361 goto err_reg;
104362
104363@@ -655,8 +655,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104364 return 0;
104365
104366 err_reg:
104367- if (!net_eq(net, &init_net))
104368- kfree(table);
104369+ kfree(table);
104370 err_alloc:
104371 return -ENOMEM;
104372 }
104373diff --git a/net/ipv6/route.c b/net/ipv6/route.c
104374index 1528d84..f393960 100644
104375--- a/net/ipv6/route.c
104376+++ b/net/ipv6/route.c
104377@@ -2978,7 +2978,7 @@ struct ctl_table ipv6_route_table_template[] = {
104378
104379 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
104380 {
104381- struct ctl_table *table;
104382+ ctl_table_no_const *table;
104383
104384 table = kmemdup(ipv6_route_table_template,
104385 sizeof(ipv6_route_table_template),
104386diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
104387index cdbfe5a..e13eb31 100644
104388--- a/net/ipv6/sit.c
104389+++ b/net/ipv6/sit.c
104390@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
104391 static void ipip6_dev_free(struct net_device *dev);
104392 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
104393 __be32 *v4dst);
104394-static struct rtnl_link_ops sit_link_ops __read_mostly;
104395+static struct rtnl_link_ops sit_link_ops;
104396
104397 static int sit_net_id __read_mostly;
104398 struct sit_net {
104399@@ -1751,7 +1751,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
104400 unregister_netdevice_queue(dev, head);
104401 }
104402
104403-static struct rtnl_link_ops sit_link_ops __read_mostly = {
104404+static struct rtnl_link_ops sit_link_ops = {
104405 .kind = "sit",
104406 .maxtype = IFLA_IPTUN_MAX,
104407 .policy = ipip6_policy,
104408diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
104409index c5c10fa..2577d51 100644
104410--- a/net/ipv6/sysctl_net_ipv6.c
104411+++ b/net/ipv6/sysctl_net_ipv6.c
104412@@ -78,7 +78,7 @@ static struct ctl_table ipv6_rotable[] = {
104413
104414 static int __net_init ipv6_sysctl_net_init(struct net *net)
104415 {
104416- struct ctl_table *ipv6_table;
104417+ ctl_table_no_const *ipv6_table;
104418 struct ctl_table *ipv6_route_table;
104419 struct ctl_table *ipv6_icmp_table;
104420 int err;
104421diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
104422index 9c0b54e..5e7bd8f 100644
104423--- a/net/ipv6/tcp_ipv6.c
104424+++ b/net/ipv6/tcp_ipv6.c
104425@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
104426 }
104427 }
104428
104429+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104430+extern int grsec_enable_blackhole;
104431+#endif
104432+
104433 static void tcp_v6_hash(struct sock *sk)
104434 {
104435 if (sk->sk_state != TCP_CLOSE) {
104436@@ -1343,6 +1347,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
104437 return 0;
104438
104439 reset:
104440+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104441+ if (!grsec_enable_blackhole)
104442+#endif
104443 tcp_v6_send_reset(sk, skb);
104444 discard:
104445 if (opt_skb)
104446@@ -1443,12 +1450,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
104447
104448 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
104449 inet6_iif(skb));
104450- if (!sk)
104451+ if (!sk) {
104452+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104453+ ret = 1;
104454+#endif
104455 goto no_tcp_socket;
104456+ }
104457
104458 process:
104459- if (sk->sk_state == TCP_TIME_WAIT)
104460+ if (sk->sk_state == TCP_TIME_WAIT) {
104461+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104462+ ret = 2;
104463+#endif
104464 goto do_time_wait;
104465+ }
104466
104467 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
104468 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
104469@@ -1499,6 +1514,10 @@ csum_error:
104470 bad_packet:
104471 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
104472 } else {
104473+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104474+ if (!grsec_enable_blackhole || (ret == 1 &&
104475+ (skb->dev->flags & IFF_LOOPBACK)))
104476+#endif
104477 tcp_v6_send_reset(NULL, skb);
104478 }
104479
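All of the GRKERNSEC_BLACKHOLE hunks in this file (and in the UDP paths before and after it) reduce to one guard: when the grsec_enable_blackhole flag is set, suppress the TCP RST or ICMP unreachable that would normally advertise a closed port, except for loopback traffic, which keeps normal error semantics. The `ret = 1` / `ret = 2` markers simply record which lookup failed so the loopback exemption is applied only to the no-socket case. Distilled into a plain predicate (a sketch; only grsec_enable_blackhole and IFF_LOOPBACK are real names):

#include <stdbool.h>

#define IFF_LOOPBACK 0x8	/* as in <linux/if.h> */

static bool may_send_reset(int blackhole_enabled, unsigned int dev_flags,
			   bool no_socket)
{
	/* reply when blackholing is off, or for traffic on lo when the
	 * drop was only "no listener on that port" */
	return !blackhole_enabled ||
	       (no_socket && (dev_flags & IFF_LOOPBACK));
}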
104480diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
104481index 189dc4a..458bec0 100644
104482--- a/net/ipv6/udp.c
104483+++ b/net/ipv6/udp.c
104484@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
104485 udp_ipv6_hash_secret + net_hash_mix(net));
104486 }
104487
104488+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104489+extern int grsec_enable_blackhole;
104490+#endif
104491+
104492 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
104493 {
104494 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
104495@@ -448,7 +452,7 @@ try_again:
104496 if (unlikely(err)) {
104497 trace_kfree_skb(skb, udpv6_recvmsg);
104498 if (!peeked) {
104499- atomic_inc(&sk->sk_drops);
104500+ atomic_inc_unchecked(&sk->sk_drops);
104501 if (is_udp4)
104502 UDP_INC_STATS_USER(sock_net(sk),
104503 UDP_MIB_INERRORS,
104504@@ -714,7 +718,7 @@ csum_error:
104505 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
104506 drop:
104507 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
104508- atomic_inc(&sk->sk_drops);
104509+ atomic_inc_unchecked(&sk->sk_drops);
104510 kfree_skb(skb);
104511 return -1;
104512 }
104513@@ -753,7 +757,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
104514 if (likely(skb1 == NULL))
104515 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
104516 if (!skb1) {
104517- atomic_inc(&sk->sk_drops);
104518+ atomic_inc_unchecked(&sk->sk_drops);
104519 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
104520 IS_UDPLITE(sk));
104521 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
104522@@ -937,6 +941,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
104523 goto csum_error;
104524
104525 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
104526+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104527+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
104528+#endif
104529 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
104530
104531 kfree_skb(skb);
104532diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
104533index 48bf5a0..691985a 100644
104534--- a/net/ipv6/xfrm6_policy.c
104535+++ b/net/ipv6/xfrm6_policy.c
104536@@ -223,11 +223,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
104537 }
104538 }
104539
104540-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
104541+static int xfrm6_garbage_collect(struct dst_ops *ops)
104542 {
104543 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
104544
104545- xfrm6_policy_afinfo.garbage_collect(net);
104546+ xfrm_garbage_collect_deferred(net);
104547 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
104548 }
104549
104550@@ -340,19 +340,19 @@ static struct ctl_table xfrm6_policy_table[] = {
104551
104552 static int __net_init xfrm6_net_init(struct net *net)
104553 {
104554- struct ctl_table *table;
104555+ ctl_table_no_const *table = NULL;
104556 struct ctl_table_header *hdr;
104557
104558- table = xfrm6_policy_table;
104559 if (!net_eq(net, &init_net)) {
104560- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104561+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104562 if (!table)
104563 goto err_alloc;
104564
104565 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
104566- }
104567+ hdr = register_net_sysctl(net, "net/ipv6", table);
104568+ } else
104569+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
104570
104571- hdr = register_net_sysctl(net, "net/ipv6", table);
104572 if (!hdr)
104573 goto err_reg;
104574
104575@@ -360,8 +360,7 @@ static int __net_init xfrm6_net_init(struct net *net)
104576 return 0;
104577
104578 err_reg:
104579- if (!net_eq(net, &init_net))
104580- kfree(table);
104581+ kfree(table);
104582 err_alloc:
104583 return -ENOMEM;
104584 }
104585diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
104586index c1d247e..9e5949d 100644
104587--- a/net/ipx/ipx_proc.c
104588+++ b/net/ipx/ipx_proc.c
104589@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
104590 struct proc_dir_entry *p;
104591 int rc = -ENOMEM;
104592
104593- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
104594+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
104595
104596 if (!ipx_proc_dir)
104597 goto out;
104598diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
104599index 4efe486..dee966e 100644
104600--- a/net/irda/ircomm/ircomm_tty.c
104601+++ b/net/irda/ircomm/ircomm_tty.c
104602@@ -310,10 +310,10 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104603 add_wait_queue(&port->open_wait, &wait);
104604
104605 pr_debug("%s(%d):block_til_ready before block on %s open_count=%d\n",
104606- __FILE__, __LINE__, tty->driver->name, port->count);
104607+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104608
104609 spin_lock_irqsave(&port->lock, flags);
104610- port->count--;
104611+ atomic_dec(&port->count);
104612 port->blocked_open++;
104613 spin_unlock_irqrestore(&port->lock, flags);
104614
104615@@ -348,7 +348,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104616 }
104617
104618 pr_debug("%s(%d):block_til_ready blocking on %s open_count=%d\n",
104619- __FILE__, __LINE__, tty->driver->name, port->count);
104620+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104621
104622 schedule();
104623 }
104624@@ -358,12 +358,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104625
104626 spin_lock_irqsave(&port->lock, flags);
104627 if (!tty_hung_up_p(filp))
104628- port->count++;
104629+ atomic_inc(&port->count);
104630 port->blocked_open--;
104631 spin_unlock_irqrestore(&port->lock, flags);
104632
104633 pr_debug("%s(%d):block_til_ready after blocking on %s open_count=%d\n",
104634- __FILE__, __LINE__, tty->driver->name, port->count);
104635+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104636
104637 if (!retval)
104638 port->flags |= ASYNC_NORMAL_ACTIVE;
104639@@ -433,12 +433,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
104640
104641 /* ++ is not atomic, so this should be protected - Jean II */
104642 spin_lock_irqsave(&self->port.lock, flags);
104643- self->port.count++;
104644+ atomic_inc(&self->port.count);
104645 spin_unlock_irqrestore(&self->port.lock, flags);
104646 tty_port_tty_set(&self->port, tty);
104647
104648 pr_debug("%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
104649- self->line, self->port.count);
104650+ self->line, atomic_read(&self->port.count));
104651
104652 /* Not really used by us, but lets do it anyway */
104653 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
104654@@ -961,7 +961,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
104655 tty_kref_put(port->tty);
104656 }
104657 port->tty = NULL;
104658- port->count = 0;
104659+ atomic_set(&port->count, 0);
104660 spin_unlock_irqrestore(&port->lock, flags);
104661
104662 wake_up_interruptible(&port->open_wait);
104663@@ -1308,7 +1308,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
104664 seq_putc(m, '\n');
104665
104666 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
104667- seq_printf(m, "Open count: %d\n", self->port.count);
104668+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
104669 seq_printf(m, "Max data size: %d\n", self->max_data_size);
104670 seq_printf(m, "Max header size: %d\n", self->max_header_size);
104671
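The ircomm_tty conversion above turns tty_port.count from a lock-protected int into an atomic_t because several of the touched paths (the pr_debug() calls, the /proc line_info dump) read it without taking port->lock; once any reader is lockless, the counter needs atomic accessors to stay well-defined. The shape in C11 terms, with invented names:

#include <stdatomic.h>

struct port {
	atomic_int count;	/* was: int count, guarded by port->lock */
};

static void port_open(struct port *p)  { atomic_fetch_add(&p->count, 1); }
static void port_drop(struct port *p)  { atomic_fetch_sub(&p->count, 1); }
/* safe to call without holding the port lock */
static int  port_count(struct port *p) { return atomic_load(&p->count); }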
104672diff --git a/net/irda/irproc.c b/net/irda/irproc.c
104673index b9ac598..f88cc56 100644
104674--- a/net/irda/irproc.c
104675+++ b/net/irda/irproc.c
104676@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
104677 {
104678 int i;
104679
104680- proc_irda = proc_mkdir("irda", init_net.proc_net);
104681+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
104682 if (proc_irda == NULL)
104683 return;
104684
104685diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
104686index 2e9953b..ed06350 100644
104687--- a/net/iucv/af_iucv.c
104688+++ b/net/iucv/af_iucv.c
104689@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
104690 {
104691 char name[12];
104692
104693- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
104694+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
104695 while (__iucv_get_sock_by_name(name)) {
104696 sprintf(name, "%08x",
104697- atomic_inc_return(&iucv_sk_list.autobind_name));
104698+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
104699 }
104700 memcpy(iucv->src_name, name, 8);
104701 }
104702diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
104703index 2a6a1fd..6c112b0 100644
104704--- a/net/iucv/iucv.c
104705+++ b/net/iucv/iucv.c
104706@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
104707 return NOTIFY_OK;
104708 }
104709
104710-static struct notifier_block __refdata iucv_cpu_notifier = {
104711+static struct notifier_block iucv_cpu_notifier = {
104712 .notifier_call = iucv_cpu_notify,
104713 };
104714
104715diff --git a/net/key/af_key.c b/net/key/af_key.c
104716index f8ac939..1e189bf 100644
104717--- a/net/key/af_key.c
104718+++ b/net/key/af_key.c
104719@@ -3049,10 +3049,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
104720 static u32 get_acqseq(void)
104721 {
104722 u32 res;
104723- static atomic_t acqseq;
104724+ static atomic_unchecked_t acqseq;
104725
104726 do {
104727- res = atomic_inc_return(&acqseq);
104728+ res = atomic_inc_return_unchecked(&acqseq);
104729 } while (!res);
104730 return res;
104731 }
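get_acqseq() above keeps its contract (a 32-bit sequence number that is never 0) and only swaps in the unchecked increment, since wrapping is the intended behaviour here, not a bug to trap on. The same generator in C11; atomic_fetch_add returns the old value, so adding 1 reproduces atomic_inc_return():

#include <stdatomic.h>
#include <stdint.h>

static atomic_uint acqseq;

static uint32_t get_acqseq(void)
{
	uint32_t res;

	do {
		res = atomic_fetch_add(&acqseq, 1) + 1;
	} while (!res);		/* retry across the wrap so 0 is never handed out */
	return res;
}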
104732diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
104733index 781b3a2..73a7434 100644
104734--- a/net/l2tp/l2tp_eth.c
104735+++ b/net/l2tp/l2tp_eth.c
104736@@ -42,12 +42,12 @@ struct l2tp_eth {
104737 struct sock *tunnel_sock;
104738 struct l2tp_session *session;
104739 struct list_head list;
104740- atomic_long_t tx_bytes;
104741- atomic_long_t tx_packets;
104742- atomic_long_t tx_dropped;
104743- atomic_long_t rx_bytes;
104744- atomic_long_t rx_packets;
104745- atomic_long_t rx_errors;
104746+ atomic_long_unchecked_t tx_bytes;
104747+ atomic_long_unchecked_t tx_packets;
104748+ atomic_long_unchecked_t tx_dropped;
104749+ atomic_long_unchecked_t rx_bytes;
104750+ atomic_long_unchecked_t rx_packets;
104751+ atomic_long_unchecked_t rx_errors;
104752 };
104753
104754 /* via l2tp_session_priv() */
104755@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
104756 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
104757
104758 if (likely(ret == NET_XMIT_SUCCESS)) {
104759- atomic_long_add(len, &priv->tx_bytes);
104760- atomic_long_inc(&priv->tx_packets);
104761+ atomic_long_add_unchecked(len, &priv->tx_bytes);
104762+ atomic_long_inc_unchecked(&priv->tx_packets);
104763 } else {
104764- atomic_long_inc(&priv->tx_dropped);
104765+ atomic_long_inc_unchecked(&priv->tx_dropped);
104766 }
104767 return NETDEV_TX_OK;
104768 }
104769@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
104770 {
104771 struct l2tp_eth *priv = netdev_priv(dev);
104772
104773- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
104774- stats->tx_packets = atomic_long_read(&priv->tx_packets);
104775- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
104776- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
104777- stats->rx_packets = atomic_long_read(&priv->rx_packets);
104778- stats->rx_errors = atomic_long_read(&priv->rx_errors);
104779+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
104780+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
104781+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
104782+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
104783+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
104784+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
104785 return stats;
104786 }
104787
104788@@ -167,15 +167,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
104789 nf_reset(skb);
104790
104791 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
104792- atomic_long_inc(&priv->rx_packets);
104793- atomic_long_add(data_len, &priv->rx_bytes);
104794+ atomic_long_inc_unchecked(&priv->rx_packets);
104795+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
104796 } else {
104797- atomic_long_inc(&priv->rx_errors);
104798+ atomic_long_inc_unchecked(&priv->rx_errors);
104799 }
104800 return;
104801
104802 error:
104803- atomic_long_inc(&priv->rx_errors);
104804+ atomic_long_inc_unchecked(&priv->rx_errors);
104805 kfree_skb(skb);
104806 }
104807
104808diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
104809index 1a3c7e0..80f8b0c 100644
104810--- a/net/llc/llc_proc.c
104811+++ b/net/llc/llc_proc.c
104812@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
104813 int rc = -ENOMEM;
104814 struct proc_dir_entry *p;
104815
104816- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
104817+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
104818 if (!llc_proc_dir)
104819 goto out;
104820
104821diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
104822index e75d5c5..429fc95 100644
104823--- a/net/mac80211/cfg.c
104824+++ b/net/mac80211/cfg.c
104825@@ -543,7 +543,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
104826 ret = ieee80211_vif_use_channel(sdata, chandef,
104827 IEEE80211_CHANCTX_EXCLUSIVE);
104828 }
104829- } else if (local->open_count == local->monitors) {
104830+ } else if (local_read(&local->open_count) == local->monitors) {
104831 local->_oper_chandef = *chandef;
104832 ieee80211_hw_config(local, 0);
104833 }
104834@@ -3416,7 +3416,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
104835 else
104836 local->probe_req_reg--;
104837
104838- if (!local->open_count)
104839+ if (!local_read(&local->open_count))
104840 break;
104841
104842 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
104843@@ -3551,8 +3551,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
104844 if (chanctx_conf) {
104845 *chandef = sdata->vif.bss_conf.chandef;
104846 ret = 0;
104847- } else if (local->open_count > 0 &&
104848- local->open_count == local->monitors &&
104849+ } else if (local_read(&local->open_count) > 0 &&
104850+ local_read(&local->open_count) == local->monitors &&
104851 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
104852 if (local->use_chanctx)
104853 *chandef = local->monitor_chandef;
104854diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
104855index fa7568c..74c815c 100644
104856--- a/net/mac80211/ieee80211_i.h
104857+++ b/net/mac80211/ieee80211_i.h
104858@@ -29,6 +29,7 @@
104859 #include <net/ieee80211_radiotap.h>
104860 #include <net/cfg80211.h>
104861 #include <net/mac80211.h>
104862+#include <asm/local.h>
104863 #include "key.h"
104864 #include "sta_info.h"
104865 #include "debug.h"
104866@@ -1125,7 +1126,7 @@ struct ieee80211_local {
104867 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
104868 spinlock_t queue_stop_reason_lock;
104869
104870- int open_count;
104871+ local_t open_count;
104872 int monitors, cooked_mntrs;
104873 /* number of interfaces with corresponding FIF_ flags */
104874 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
104875diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
104876index 4173553..e3b5a3f 100644
104877--- a/net/mac80211/iface.c
104878+++ b/net/mac80211/iface.c
104879@@ -543,7 +543,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104880 break;
104881 }
104882
104883- if (local->open_count == 0) {
104884+ if (local_read(&local->open_count) == 0) {
104885 res = drv_start(local);
104886 if (res)
104887 goto err_del_bss;
104888@@ -590,7 +590,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104889 res = drv_add_interface(local, sdata);
104890 if (res)
104891 goto err_stop;
104892- } else if (local->monitors == 0 && local->open_count == 0) {
104893+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
104894 res = ieee80211_add_virtual_monitor(local);
104895 if (res)
104896 goto err_stop;
104897@@ -700,7 +700,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104898 atomic_inc(&local->iff_promiscs);
104899
104900 if (coming_up)
104901- local->open_count++;
104902+ local_inc(&local->open_count);
104903
104904 if (hw_reconf_flags)
104905 ieee80211_hw_config(local, hw_reconf_flags);
104906@@ -738,7 +738,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104907 err_del_interface:
104908 drv_remove_interface(local, sdata);
104909 err_stop:
104910- if (!local->open_count)
104911+ if (!local_read(&local->open_count))
104912 drv_stop(local);
104913 err_del_bss:
104914 sdata->bss = NULL;
104915@@ -906,7 +906,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104916 }
104917
104918 if (going_down)
104919- local->open_count--;
104920+ local_dec(&local->open_count);
104921
104922 switch (sdata->vif.type) {
104923 case NL80211_IFTYPE_AP_VLAN:
104924@@ -968,7 +968,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104925 }
104926 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
104927
104928- if (local->open_count == 0)
104929+ if (local_read(&local->open_count) == 0)
104930 ieee80211_clear_tx_pending(local);
104931
104932 /*
104933@@ -1011,7 +1011,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104934 if (cancel_scan)
104935 flush_delayed_work(&local->scan_work);
104936
104937- if (local->open_count == 0) {
104938+ if (local_read(&local->open_count) == 0) {
104939 ieee80211_stop_device(local);
104940
104941 /* no reconfiguring after stop! */
104942@@ -1022,7 +1022,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104943 ieee80211_configure_filter(local);
104944 ieee80211_hw_config(local, hw_reconf_flags);
104945
104946- if (local->monitors == local->open_count)
104947+ if (local->monitors == local_read(&local->open_count))
104948 ieee80211_add_virtual_monitor(local);
104949 }
104950
104951diff --git a/net/mac80211/main.c b/net/mac80211/main.c
104952index 6ab99da..f9502d4 100644
104953--- a/net/mac80211/main.c
104954+++ b/net/mac80211/main.c
104955@@ -175,7 +175,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
104956 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
104957 IEEE80211_CONF_CHANGE_POWER);
104958
104959- if (changed && local->open_count) {
104960+ if (changed && local_read(&local->open_count)) {
104961 ret = drv_config(local, changed);
104962 /*
104963 * Goal:
104964diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
104965index 4a95fe3..0bfd713 100644
104966--- a/net/mac80211/pm.c
104967+++ b/net/mac80211/pm.c
104968@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104969 struct ieee80211_sub_if_data *sdata;
104970 struct sta_info *sta;
104971
104972- if (!local->open_count)
104973+ if (!local_read(&local->open_count))
104974 goto suspend;
104975
104976 ieee80211_scan_cancel(local);
104977@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104978 cancel_work_sync(&local->dynamic_ps_enable_work);
104979 del_timer_sync(&local->dynamic_ps_timer);
104980
104981- local->wowlan = wowlan && local->open_count;
104982+ local->wowlan = wowlan && local_read(&local->open_count);
104983 if (local->wowlan) {
104984 int err = drv_suspend(local, wowlan);
104985 if (err < 0) {
104986@@ -126,7 +126,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104987 WARN_ON(!list_empty(&local->chanctx_list));
104988
104989 /* stop hardware - this must stop RX */
104990- if (local->open_count)
104991+ if (local_read(&local->open_count))
104992 ieee80211_stop_device(local);
104993
104994 suspend:
104995diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
104996index d53355b..21f583a 100644
104997--- a/net/mac80211/rate.c
104998+++ b/net/mac80211/rate.c
104999@@ -724,7 +724,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
105000
105001 ASSERT_RTNL();
105002
105003- if (local->open_count)
105004+ if (local_read(&local->open_count))
105005 return -EBUSY;
105006
105007 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
105008diff --git a/net/mac80211/util.c b/net/mac80211/util.c
105009index 1ce38e7..77267ad 100644
105010--- a/net/mac80211/util.c
105011+++ b/net/mac80211/util.c
105012@@ -1757,7 +1757,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
105013 }
105014 #endif
105015 /* everything else happens only if HW was up & running */
105016- if (!local->open_count)
105017+ if (!local_read(&local->open_count))
105018 goto wake_up;
105019
105020 /*
105021@@ -1987,7 +1987,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
105022 local->in_reconfig = false;
105023 barrier();
105024
105025- if (local->monitors == local->open_count && local->monitors > 0)
105026+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
105027 ieee80211_add_virtual_monitor(local);
105028
105029 /*
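The mac80211 hunks above all serve one conversion: local->open_count becomes a local_t, and every access goes through local_read()/local_inc()/local_dec(). A C11 stand-in for the accessor trio (local_t itself is an arch-specific kernel type optimized for CPU-local updates; the demo names are invented):

#include <stdatomic.h>

typedef struct { atomic_long a; } local_demo_t;

#define local_demo_read(l)	atomic_load(&(l)->a)
#define local_demo_inc(l)	((void)atomic_fetch_add(&(l)->a, 1))
#define local_demo_dec(l)	((void)atomic_fetch_sub(&(l)->a, 1))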
105030diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
105031index b02660f..c0f791c 100644
105032--- a/net/netfilter/Kconfig
105033+++ b/net/netfilter/Kconfig
105034@@ -1122,6 +1122,16 @@ config NETFILTER_XT_MATCH_ESP
105035
105036 To compile it as a module, choose M here. If unsure, say N.
105037
105038+config NETFILTER_XT_MATCH_GRADM
105039+ tristate '"gradm" match support'
105040+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
105041+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
105042+ ---help---
105043+	  The gradm match allows matching on whether grsecurity RBAC is enabled.
105044+	  It is useful when iptables rules are applied early during boot to
105045+	  prevent connections to the machine (except from a trusted host)
105046+	  while the RBAC system is still disabled.
105047+
105048 config NETFILTER_XT_MATCH_HASHLIMIT
105049 tristate '"hashlimit" match support'
105050 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
105051diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
105052index 89f73a9..e4e5bd9 100644
105053--- a/net/netfilter/Makefile
105054+++ b/net/netfilter/Makefile
105055@@ -139,6 +139,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
105056 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
105057 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
105058 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
105059+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
105060 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
105061 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
105062 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
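The two hunks above are the standard wiring for a new xtables match: a Kconfig symbol plus an obj-$(CONFIG_...) Makefile line, both expecting a source file (xt_gradm.c) added elsewhere in this patch. For orientation, a minimal skeleton of what such a module registers; this placeholder always matches and is not the actual gradm implementation, which tests grsecurity's RBAC state:

#include <linux/module.h>
#include <linux/netfilter/x_tables.h>

static bool gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	return true;	/* placeholder; the real match checks RBAC state */
}

static struct xt_match gradm_mt_reg = {
	.name     = "gradm",
	.revision = 0,
	.family   = NFPROTO_UNSPEC,
	.match    = gradm_mt,
	.me       = THIS_MODULE,
};

static int __init gradm_mt_init(void)
{
	return xt_register_match(&gradm_mt_reg);
}

static void __exit gradm_mt_exit(void)
{
	xt_unregister_match(&gradm_mt_reg);
}

module_init(gradm_mt_init);
module_exit(gradm_mt_exit);
MODULE_LICENSE("GPL");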
105063diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
105064index d259da3..6a32b2c 100644
105065--- a/net/netfilter/ipset/ip_set_core.c
105066+++ b/net/netfilter/ipset/ip_set_core.c
105067@@ -1952,7 +1952,7 @@ done:
105068 return ret;
105069 }
105070
105071-static struct nf_sockopt_ops so_set __read_mostly = {
105072+static struct nf_sockopt_ops so_set = {
105073 .pf = PF_INET,
105074 .get_optmin = SO_IP_SET,
105075 .get_optmax = SO_IP_SET + 1,
105076diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
105077index b0f7b62..0541842 100644
105078--- a/net/netfilter/ipvs/ip_vs_conn.c
105079+++ b/net/netfilter/ipvs/ip_vs_conn.c
105080@@ -572,7 +572,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
105081 /* Increase the refcnt counter of the dest */
105082 ip_vs_dest_hold(dest);
105083
105084- conn_flags = atomic_read(&dest->conn_flags);
105085+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
105086 if (cp->protocol != IPPROTO_UDP)
105087 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
105088 flags = cp->flags;
105089@@ -922,7 +922,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
105090
105091 cp->control = NULL;
105092 atomic_set(&cp->n_control, 0);
105093- atomic_set(&cp->in_pkts, 0);
105094+ atomic_set_unchecked(&cp->in_pkts, 0);
105095
105096 cp->packet_xmit = NULL;
105097 cp->app = NULL;
105098@@ -1229,7 +1229,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
105099
105100 /* Don't drop the entry if its number of incoming packets is not
105101 located in [0, 8] */
105102- i = atomic_read(&cp->in_pkts);
105103+ i = atomic_read_unchecked(&cp->in_pkts);
105104 if (i > 8 || i < 0) return 0;
105105
105106 if (!todrop_rate[i]) return 0;
105107diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
105108index b87ca32..76c7799 100644
105109--- a/net/netfilter/ipvs/ip_vs_core.c
105110+++ b/net/netfilter/ipvs/ip_vs_core.c
105111@@ -568,7 +568,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
105112 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
105113 /* do not touch skb anymore */
105114
105115- atomic_inc(&cp->in_pkts);
105116+ atomic_inc_unchecked(&cp->in_pkts);
105117 ip_vs_conn_put(cp);
105118 return ret;
105119 }
105120@@ -1723,7 +1723,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
105121 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
105122 pkts = sysctl_sync_threshold(ipvs);
105123 else
105124- pkts = atomic_add_return(1, &cp->in_pkts);
105125+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
105126
105127 if (ipvs->sync_state & IP_VS_STATE_MASTER)
105128 ip_vs_sync_conn(net, cp, pkts);
105129diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
105130index fdcda8b..dbc1979 100644
105131--- a/net/netfilter/ipvs/ip_vs_ctl.c
105132+++ b/net/netfilter/ipvs/ip_vs_ctl.c
105133@@ -799,7 +799,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
105134 */
105135 ip_vs_rs_hash(ipvs, dest);
105136 }
105137- atomic_set(&dest->conn_flags, conn_flags);
105138+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
105139
105140 /* bind the service */
105141 old_svc = rcu_dereference_protected(dest->svc, 1);
105142@@ -1664,7 +1664,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
105143 * align with netns init in ip_vs_control_net_init()
105144 */
105145
105146-static struct ctl_table vs_vars[] = {
105147+static ctl_table_no_const vs_vars[] __read_only = {
105148 {
105149 .procname = "amemthresh",
105150 .maxlen = sizeof(int),
105151@@ -1999,7 +1999,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
105152 " %-7s %-6d %-10d %-10d\n",
105153 &dest->addr.in6,
105154 ntohs(dest->port),
105155- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
105156+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
105157 atomic_read(&dest->weight),
105158 atomic_read(&dest->activeconns),
105159 atomic_read(&dest->inactconns));
105160@@ -2010,7 +2010,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
105161 "%-7s %-6d %-10d %-10d\n",
105162 ntohl(dest->addr.ip),
105163 ntohs(dest->port),
105164- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
105165+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
105166 atomic_read(&dest->weight),
105167 atomic_read(&dest->activeconns),
105168 atomic_read(&dest->inactconns));
105169@@ -2499,7 +2499,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
105170
105171 entry.addr = dest->addr.ip;
105172 entry.port = dest->port;
105173- entry.conn_flags = atomic_read(&dest->conn_flags);
105174+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
105175 entry.weight = atomic_read(&dest->weight);
105176 entry.u_threshold = dest->u_threshold;
105177 entry.l_threshold = dest->l_threshold;
105178@@ -3039,7 +3039,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
105179 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
105180 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
105181 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
105182- (atomic_read(&dest->conn_flags) &
105183+ (atomic_read_unchecked(&dest->conn_flags) &
105184 IP_VS_CONN_F_FWD_MASK)) ||
105185 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
105186 atomic_read(&dest->weight)) ||
105187@@ -3672,7 +3672,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
105188 {
105189 int idx;
105190 struct netns_ipvs *ipvs = net_ipvs(net);
105191- struct ctl_table *tbl;
105192+ ctl_table_no_const *tbl;
105193
105194 atomic_set(&ipvs->dropentry, 0);
105195 spin_lock_init(&ipvs->dropentry_lock);
105196diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
105197index 127f140..553d652 100644
105198--- a/net/netfilter/ipvs/ip_vs_lblc.c
105199+++ b/net/netfilter/ipvs/ip_vs_lblc.c
105200@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
105201 * IPVS LBLC sysctl table
105202 */
105203 #ifdef CONFIG_SYSCTL
105204-static struct ctl_table vs_vars_table[] = {
105205+static ctl_table_no_const vs_vars_table[] __read_only = {
105206 {
105207 .procname = "lblc_expiration",
105208 .data = NULL,
105209diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
105210index 2229d2d..b32b785 100644
105211--- a/net/netfilter/ipvs/ip_vs_lblcr.c
105212+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
105213@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
105214 * IPVS LBLCR sysctl table
105215 */
105216
105217-static struct ctl_table vs_vars_table[] = {
105218+static ctl_table_no_const vs_vars_table[] __read_only = {
105219 {
105220 .procname = "lblcr_expiration",
105221 .data = NULL,
105222diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
105223index d93ceeb..4556144 100644
105224--- a/net/netfilter/ipvs/ip_vs_sync.c
105225+++ b/net/netfilter/ipvs/ip_vs_sync.c
105226@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
105227 cp = cp->control;
105228 if (cp) {
105229 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
105230- pkts = atomic_add_return(1, &cp->in_pkts);
105231+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
105232 else
105233 pkts = sysctl_sync_threshold(ipvs);
105234 ip_vs_sync_conn(net, cp->control, pkts);
105235@@ -771,7 +771,7 @@ control:
105236 if (!cp)
105237 return;
105238 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
105239- pkts = atomic_add_return(1, &cp->in_pkts);
105240+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
105241 else
105242 pkts = sysctl_sync_threshold(ipvs);
105243 goto sloop;
105244@@ -902,7 +902,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
105245
105246 if (opt)
105247 memcpy(&cp->in_seq, opt, sizeof(*opt));
105248- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
105249+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
105250 cp->state = state;
105251 cp->old_state = cp->state;
105252 /*
105253diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
105254index 3aedbda..6a63567 100644
105255--- a/net/netfilter/ipvs/ip_vs_xmit.c
105256+++ b/net/netfilter/ipvs/ip_vs_xmit.c
105257@@ -1214,7 +1214,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
105258 else
105259 rc = NF_ACCEPT;
105260 /* do not touch skb anymore */
105261- atomic_inc(&cp->in_pkts);
105262+ atomic_inc_unchecked(&cp->in_pkts);
105263 goto out;
105264 }
105265
105266@@ -1307,7 +1307,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
105267 else
105268 rc = NF_ACCEPT;
105269 /* do not touch skb anymore */
105270- atomic_inc(&cp->in_pkts);
105271+ atomic_inc_unchecked(&cp->in_pkts);
105272 goto out;
105273 }
105274
105275diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
105276index a4b5e2a..13b1de3 100644
105277--- a/net/netfilter/nf_conntrack_acct.c
105278+++ b/net/netfilter/nf_conntrack_acct.c
105279@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
105280 #ifdef CONFIG_SYSCTL
105281 static int nf_conntrack_acct_init_sysctl(struct net *net)
105282 {
105283- struct ctl_table *table;
105284+ ctl_table_no_const *table;
105285
105286 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
105287 GFP_KERNEL);
105288diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
105289index 46d1b26..b7f3b76 100644
105290--- a/net/netfilter/nf_conntrack_core.c
105291+++ b/net/netfilter/nf_conntrack_core.c
105292@@ -1734,6 +1734,10 @@ void nf_conntrack_init_end(void)
105293 #define DYING_NULLS_VAL ((1<<30)+1)
105294 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
105295
105296+#ifdef CONFIG_GRKERNSEC_HIDESYM
105297+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
105298+#endif
105299+
105300 int nf_conntrack_init_net(struct net *net)
105301 {
105302 int ret = -ENOMEM;
105303@@ -1759,7 +1763,11 @@ int nf_conntrack_init_net(struct net *net)
105304 if (!net->ct.stat)
105305 goto err_pcpu_lists;
105306
105307+#ifdef CONFIG_GRKERNSEC_HIDESYM
105308+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
105309+#else
105310 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
105311+#endif
105312 if (!net->ct.slabname)
105313 goto err_slabname;
105314
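
[annotation] The per-netns conntrack slab cache is named "nf_conntrack_%p", embedding the net namespace pointer, and slab cache names are user-visible through /proc/slabinfo. Under CONFIG_GRKERNSEC_HIDESYM the patch therefore substitutes a sequential counter, which identifies the cache just as well without leaking a kernel address. A userspace model, with asprintf() standing in for kasprintf() and a plain counter for the atomic:

    #define _GNU_SOURCE     /* for asprintf() */
    #include <stdio.h>
    #include <stdlib.h>

    static unsigned int conntrack_cache_id;  /* atomic_unchecked_t in the patch */

    static char *make_slabname(const void *net, int hidesym)
    {
        char *name;

        if (hidesym) {
            /* sequential id: reveals nothing about kernel memory layout */
            if (asprintf(&name, "nf_conntrack_%08x", ++conntrack_cache_id) < 0)
                return NULL;
        } else {
            /* old form: the netns pointer ends up in /proc/slabinfo */
            if (asprintf(&name, "nf_conntrack_%p", net) < 0)
                return NULL;
        }
        return name;
    }

    int main(void)
    {
        int net;
        char *leaky  = make_slabname(&net, 0);
        char *hidden = make_slabname(&net, 1);

        printf("without HIDESYM: %s\nwith HIDESYM:    %s\n", leaky, hidden);
        free(leaky);
        free(hidden);
        return 0;
    }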
105315diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
105316index 4e78c57..ec8fb74 100644
105317--- a/net/netfilter/nf_conntrack_ecache.c
105318+++ b/net/netfilter/nf_conntrack_ecache.c
105319@@ -264,7 +264,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
105320 #ifdef CONFIG_SYSCTL
105321 static int nf_conntrack_event_init_sysctl(struct net *net)
105322 {
105323- struct ctl_table *table;
105324+ ctl_table_no_const *table;
105325
105326 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
105327 GFP_KERNEL);
105328diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
105329index bd9d315..989947e 100644
105330--- a/net/netfilter/nf_conntrack_helper.c
105331+++ b/net/netfilter/nf_conntrack_helper.c
105332@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
105333
105334 static int nf_conntrack_helper_init_sysctl(struct net *net)
105335 {
105336- struct ctl_table *table;
105337+ ctl_table_no_const *table;
105338
105339 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
105340 GFP_KERNEL);
105341diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
105342index b65d586..beec902 100644
105343--- a/net/netfilter/nf_conntrack_proto.c
105344+++ b/net/netfilter/nf_conntrack_proto.c
105345@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
105346
105347 static void
105348 nf_ct_unregister_sysctl(struct ctl_table_header **header,
105349- struct ctl_table **table,
105350+ ctl_table_no_const **table,
105351 unsigned int users)
105352 {
105353 if (users > 0)
105354diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
105355index fc823fa..8311af3 100644
105356--- a/net/netfilter/nf_conntrack_standalone.c
105357+++ b/net/netfilter/nf_conntrack_standalone.c
105358@@ -468,7 +468,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
105359
105360 static int nf_conntrack_standalone_init_sysctl(struct net *net)
105361 {
105362- struct ctl_table *table;
105363+ ctl_table_no_const *table;
105364
105365 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
105366 GFP_KERNEL);
105367diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
105368index 7a394df..bd91a8a 100644
105369--- a/net/netfilter/nf_conntrack_timestamp.c
105370+++ b/net/netfilter/nf_conntrack_timestamp.c
105371@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
105372 #ifdef CONFIG_SYSCTL
105373 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
105374 {
105375- struct ctl_table *table;
105376+ ctl_table_no_const *table;
105377
105378 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
105379 GFP_KERNEL);
105380diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
105381index 43c926c..a5731d8 100644
105382--- a/net/netfilter/nf_log.c
105383+++ b/net/netfilter/nf_log.c
105384@@ -362,7 +362,7 @@ static const struct file_operations nflog_file_ops = {
105385
105386 #ifdef CONFIG_SYSCTL
105387 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
105388-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
105389+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
105390
105391 static int nf_log_proc_dostring(struct ctl_table *table, int write,
105392 void __user *buffer, size_t *lenp, loff_t *ppos)
105393@@ -393,13 +393,15 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
105394 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
105395 mutex_unlock(&nf_log_mutex);
105396 } else {
105397+ ctl_table_no_const nf_log_table = *table;
105398+
105399 mutex_lock(&nf_log_mutex);
105400 logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
105401 if (!logger)
105402- table->data = "NONE";
105403+ nf_log_table.data = "NONE";
105404 else
105405- table->data = logger->name;
105406- r = proc_dostring(table, write, buffer, lenp, ppos);
105407+ nf_log_table.data = logger->name;
105408+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
105409 mutex_unlock(&nf_log_mutex);
105410 }
105411
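
[annotation] nf_log_sysctl_table is now read-only after boot, so nf_log_proc_dostring() can no longer stash the current logger name in table->data before handing the entry to proc_dostring(); the patch instead takes a writable copy on the stack and mutates that. A compact model of the pattern, with proc_dostring() reduced to a reader:

    #include <stdio.h>

    typedef struct ctl_table {
        const char *procname;
        const void *data;
    } ctl_table_no_const;

    static int proc_dostring(const struct ctl_table *t)
    {
        /* stand-in: the real helper copies t->data out to userspace */
        printf("%s = %s\n", t->procname, (const char *)t->data);
        return 0;
    }

    static int nf_log_read(const struct ctl_table *table, const char *logger_name)
    {
        ctl_table_no_const nf_log_table = *table;   /* writable stack copy */

        nf_log_table.data = logger_name ? logger_name : "NONE";
        return proc_dostring(&nf_log_table);        /* global entry never written */
    }

    int main(void)
    {
        static const struct ctl_table nf_log_entry = { "net.netfilter.nf_log.2", NULL };

        nf_log_read(&nf_log_entry, "nfnetlink_log");
        nf_log_read(&nf_log_entry, NULL);
        return 0;
    }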
105412diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
105413index c68c1e5..8b5d670 100644
105414--- a/net/netfilter/nf_sockopt.c
105415+++ b/net/netfilter/nf_sockopt.c
105416@@ -43,7 +43,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
105417 }
105418 }
105419
105420- list_add(&reg->list, &nf_sockopts);
105421+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
105422 out:
105423 mutex_unlock(&nf_sockopt_mutex);
105424 return ret;
105425@@ -53,7 +53,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
105426 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
105427 {
105428 mutex_lock(&nf_sockopt_mutex);
105429- list_del(&reg->list);
105430+ pax_list_del((struct list_head *)&reg->list);
105431 mutex_unlock(&nf_sockopt_mutex);
105432 }
105433 EXPORT_SYMBOL(nf_unregister_sockopt);
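
[annotation] nf_sockopt_ops instances are constified by grsecurity, so the embedded list_head being linked sits in read-only memory and a plain list_add()/list_del() would fault; pax_list_add()/pax_list_del() perform the link updates inside a window where write protection is lifted, and the explicit cast discards the enforced const. A userspace analogy, with mprotect() standing in for the CR0.WP toggle of pax_open_kernel()/pax_close_kernel(); all names below are our stand-ins:

    #define _DEFAULT_SOURCE
    #include <sys/mman.h>

    struct list_head { struct list_head *next, *prev; };

    static void pax_open_kernel(void *page)  { mprotect(page, 4096, PROT_READ | PROT_WRITE); }
    static void pax_close_kernel(void *page) { mprotect(page, 4096, PROT_READ); }

    static void pax_list_add(struct list_head *new, struct list_head *head, void *ro_page)
    {
        pax_open_kernel(ro_page);   /* make the read-only node writable */
        new->next = head->next;
        new->prev = head;
        head->next->prev = new;
        head->next = new;
        pax_close_kernel(ro_page);
    }

    int main(void)
    {
        static struct list_head head = { &head, &head };
        void *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        struct list_head *node = page;

        if (page == MAP_FAILED)
            return 1;
        mprotect(page, 4096, PROT_READ);   /* simulate __read_only placement */
        pax_list_add(node, &head, page);
        return head.next == node ? 0 : 1;
    }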
105434diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
105435index 11d85b3..7fcc420 100644
105436--- a/net/netfilter/nfnetlink_log.c
105437+++ b/net/netfilter/nfnetlink_log.c
105438@@ -83,7 +83,7 @@ static int nfnl_log_net_id __read_mostly;
105439 struct nfnl_log_net {
105440 spinlock_t instances_lock;
105441 struct hlist_head instance_table[INSTANCE_BUCKETS];
105442- atomic_t global_seq;
105443+ atomic_unchecked_t global_seq;
105444 };
105445
105446 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
105447@@ -563,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
105448 /* global sequence number */
105449 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
105450 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
105451- htonl(atomic_inc_return(&log->global_seq))))
105452+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
105453 goto nla_put_failure;
105454
105455 if (data_len) {
105456diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
105457index b636486..9898807 100644
105458--- a/net/netfilter/nft_compat.c
105459+++ b/net/netfilter/nft_compat.c
105460@@ -274,14 +274,7 @@ static void nft_match_eval(const struct nft_expr *expr,
105461 return;
105462 }
105463
105464- switch(ret) {
105465- case true:
105466- data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
105467- break;
105468- case false:
105469- data[NFT_REG_VERDICT].verdict = NFT_BREAK;
105470- break;
105471- }
105472+ data[NFT_REG_VERDICT].verdict = ret ? NFT_CONTINUE : NFT_BREAK;
105473 }
105474
105475 static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
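
[annotation] nft_match_eval() used to switch on a bool with case true/case false; the replacement ternary is behaviorally identical and, presumably, also sidesteps gcc 5's -Wswitch-bool warning. The before/after side by side; NFT_CONTINUE/NFT_BREAK are stand-in values here, not the kernel's:

    #include <stdbool.h>

    #define NFT_CONTINUE 1   /* stand-in values */
    #define NFT_BREAK    0

    static int verdict_switch(bool ret)
    {
        switch (ret) {          /* old form: -Wswitch-bool fires on gcc >= 5 */
        case true:  return NFT_CONTINUE;
        case false: return NFT_BREAK;
        }
        return NFT_BREAK;       /* unreachable; keeps -Wreturn-type quiet */
    }

    static int verdict_ternary(bool ret)
    {
        return ret ? NFT_CONTINUE : NFT_BREAK;   /* new form */
    }

    int main(void)
    {
        return (verdict_switch(true)  == verdict_ternary(true) &&
                verdict_switch(false) == verdict_ternary(false)) ? 0 : 1;
    }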
105476diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
105477new file mode 100644
105478index 0000000..c566332
105479--- /dev/null
105480+++ b/net/netfilter/xt_gradm.c
105481@@ -0,0 +1,51 @@
105482+/*
105483+ * gradm match for netfilter
105484