diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 9de9813..1462492 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -3,9 +3,11 @@
 *.bc
 *.bin
 *.bz2
+*.c.[012]*.*
 *.cis
 *.cpio
 *.csp
+*.dbg
 *.dsp
 *.dvi
 *.elf
@@ -15,6 +17,7 @@
 *.gcov
 *.gen.S
 *.gif
+*.gmo
 *.grep
 *.grp
 *.gz
@@ -51,14 +54,17 @@
 *.tab.h
 *.tex
 *.ver
+*.vim
 *.xml
 *.xz
 *_MODULES
+*_reg_safe.h
 *_vga16.c
 *~
 \#*#
 *.9
-.*
+.[^g]*
+.gen*
 .*.d
 .mm
 53c700_d.h
@@ -72,9 +78,11 @@ Image
 Module.markers
 Module.symvers
 PENDING
+PERF*
 SCCS
 System.map*
 TAGS
+TRACEEVENT-CFLAGS
 aconf
 af_names.h
 aic7*reg.h*
@@ -83,6 +91,7 @@ aic7*seq.h*
 aicasm
 aicdb.h*
 altivec*.c
+ashldi3.S
 asm-offsets.h
 asm_offsets.h
 autoconf.h*
@@ -95,32 +104,40 @@ bounds.h
 bsetup
 btfixupprep
 build
+builtin-policy.h
 bvmlinux
 bzImage*
 capability_names.h
 capflags.c
 classlist.h*
+clut_vga16.c
+common-cmds.h
 comp*.log
 compile.h*
 conf
 config
 config-*
 config_data.h*
+config.c
 config.mak
 config.mak.autogen
+config.tmp
 conmakehash
 consolemap_deftbl.c*
 cpustr.h
 crc32table.h*
 cscope.*
 defkeymap.c
+devicetable-offsets.h
 devlist.h*
 dnotify_test
 docproc
 dslm
+dtc-lexer.lex.c
 elf2ecoff
 elfconfig.h*
 evergreen_reg_safe.h
+exception_policy.conf
 fixdep
 flask.h
 fore200e_mkfirm
@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
 gconf
 gconf.glade.h
 gen-devlist
+gen-kdb_cmds.c
 gen_crc32table
 gen_init_cpio
 generated
 genheaders
 genksyms
 *_gray256.c
+hash
+hid-example
 hpet_example
 hugepage-mmap
 hugepage-shm
@@ -148,14 +168,14 @@ int32.c
 int4.c
 int8.c
 kallsyms
-kconfig
+kern_constants.h
 keywords.c
 ksym.c*
 ksym.h*
 kxgettext
 lex.c
 lex.*.c
-linux
+lib1funcs.S
 logo_*.c
 logo_*_clut224.c
 logo_*_mono.c
@@ -165,14 +185,15 @@ mach-types.h
 machtypes.h
 map
 map_hugetlb
-media
 mconf
+mdp
 miboot*
 mk_elfconfig
 mkboot
 mkbugboot
 mkcpustr
 mkdep
+mkpiggy
 mkprep
 mkregtable
 mktables
@@ -188,6 +209,8 @@ oui.c*
 page-types
 parse.c
 parse.h
+parse-events*
+pasyms.h
 patches*
 pca200e.bin
 pca200e_ecd.bin2
@@ -197,6 +220,7 @@ perf-archive
 piggyback
 piggy.gzip
 piggy.S
+pmu-*
 pnmtologo
 ppc_defs.h*
 pss_boot.h
@@ -206,7 +230,12 @@ r200_reg_safe.h
 r300_reg_safe.h
 r420_reg_safe.h
 r600_reg_safe.h
+randomize_layout_hash.h
+randomize_layout_seed.h
+realmode.lds
+realmode.relocs
 recordmcount
+regdb.c
 relocs
 rlim_names.h
 rn50_reg_safe.h
@@ -216,8 +245,12 @@ series
 setup
 setup.bin
 setup.elf
+signing_key*
+size_overflow_hash.h
 sImage
+slabinfo
 sm_tbl*
+sortextable
 split-include
 syscalltab.h
 tables.c
@@ -227,6 +260,7 @@ tftpboot.img
 timeconst.h
 times.h*
 trix_boot.h
+user_constants.h
 utsrelease.h*
 vdso-syms.lds
 vdso.lds
@@ -238,13 +272,17 @@ vdso32.lds
 vdso32.so.dbg
 vdso64.lds
 vdso64.so.dbg
+vdsox32.lds
+vdsox32-syms.lds
 version.h*
 vmImage
 vmlinux
 vmlinux-*
 vmlinux.aout
 vmlinux.bin.all
+vmlinux.bin.bz2
 vmlinux.lds
+vmlinux.relocs
 vmlinuz
 voffset.h
 vsyscall.lds
@@ -252,9 +290,12 @@ vsyscall_32.lds
 wanxlfw.inc
 uImage
 unifdef
+utsrelease.h
 wakeup.bin
 wakeup.elf
 wakeup.lds
+x509*
 zImage*
 zconf.hash.c
+zconf.lex.c
 zoffset.h
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index a311db8..415b28c 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
 === 4 Host Program support
 --- 4.1 Simple Host Program
 --- 4.2 Composite Host Programs
- --- 4.3 Using C++ for host programs
- --- 4.4 Controlling compiler options for host programs
- --- 4.5 When host programs are actually built
- --- 4.6 Using hostprogs-$(CONFIG_FOO)
+ --- 4.3 Defining shared libraries
+ --- 4.4 Using C++ for host programs
+ --- 4.5 Controlling compiler options for host programs
+ --- 4.6 When host programs are actually built
+ --- 4.7 Using hostprogs-$(CONFIG_FOO)

 === 5 Kbuild clean infrastructure

@@ -642,7 +643,29 @@ Both possibilities are described in the following.
 Finally, the two .o files are linked to the executable, lxdialog.
 Note: The syntax <executable>-y is not permitted for host-programs.

---- 4.3 Using C++ for host programs
+--- 4.3 Defining shared libraries
+
+ Objects with extension .so are considered shared libraries, and
+ will be compiled as position independent objects.
+ Kbuild provides support for shared libraries, but the usage
+ shall be restricted.
+ In the following example the libkconfig.so shared library is used
+ to link the executable conf.
+
+ Example:
+ #scripts/kconfig/Makefile
+ hostprogs-y := conf
+ conf-objs := conf.o libkconfig.so
+ libkconfig-objs := expr.o type.o
+
+ Shared libraries always require a corresponding -objs line, and
+ in the example above the shared library libkconfig is composed by
+ the two objects expr.o and type.o.
+ expr.o and type.o will be built as position independent code and
+ linked as a shared library libkconfig.so. C++ is not supported for
+ shared libraries.
+
+--- 4.4 Using C++ for host programs

 kbuild offers support for host programs written in C++. This was
 introduced solely to support kconfig, and is not recommended
@@ -665,7 +688,7 @@ Both possibilities are described in the following.
 qconf-cxxobjs := qconf.o
 qconf-objs := check.o

---- 4.4 Controlling compiler options for host programs
+--- 4.5 Controlling compiler options for host programs

 When compiling host programs, it is possible to set specific flags.
 The programs will always be compiled utilising $(HOSTCC) passed
@@ -693,7 +716,7 @@ Both possibilities are described in the following.
 When linking qconf, it will be passed the extra option
 "-L$(QTDIR)/lib".

---- 4.5 When host programs are actually built
+--- 4.6 When host programs are actually built

 Kbuild will only build host-programs when they are referenced
 as a prerequisite.
@@ -724,7 +747,7 @@ Both possibilities are described in the following.
 This will tell kbuild to build lxdialog even if not referenced in
 any rule.

---- 4.6 Using hostprogs-$(CONFIG_FOO)
+--- 4.7 Using hostprogs-$(CONFIG_FOO)

 A typical pattern in a Kbuild file looks like this:

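The hunk above stops at the sentence introducing section 4.7, so the pattern itself is not shown here. For reference, the hostprogs-$(CONFIG_FOO) idiom it names is the usual kbuild one (a sketch assuming the standard convention, not text from this patch): the host program is built only when its Kconfig symbol evaluates to y:

	#scripts/foo/Makefile (hypothetical path)
	hostprogs-$(CONFIG_FOO) += foo
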
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 176d4fe..6eabd3c 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1191,6 +1191,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
 Default: 1024

+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
+ ignore grsecurity's /proc restrictions
+
+ grsec_sysfs_restrict= Format: 0 | 1
+ Default: 1
+ Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
+
 hashdist= [KNL,NUMA] Large hashes allocated during boot
 are distributed across NUMA nodes. Defaults on
 for 64-bit NUMA, off otherwise.
@@ -2283,6 +2290,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 noexec=on: enable non-executable mappings (default)
 noexec=off: disable non-executable mappings

+ nopcid [X86-64]
+ Disable PCID (Process-Context IDentifier) even if it
+ is supported by the processor.
+
 nosmap [X86]
 Disable SMAP (Supervisor Mode Access Prevention)
 even if it is supported by processor.
@@ -2584,6 +2595,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 the specified number of seconds. This is to be used if
 your oopses keep scrolling off the screen.

+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
+ virtualization environments that don't cope well with the
+ expand down segment used by UDEREF on X86-32 or the frequent
+ page table updates on X86-64.
+
+ pax_sanitize_slab=
+ Format: { 0 | 1 | off | fast | full }
+ Options '0' and '1' are only provided for backward
+ compatibility, 'off' or 'fast' should be used instead.
+ 0|off : disable slab object sanitization
+ 1|fast: enable slab object sanitization excluding
+ whitelisted slabs (default)
+ full : sanitize all slabs, even the whitelisted ones
+
+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
+
+ pax_extra_latent_entropy
+ Enable a very simple form of latent entropy extraction
+ from the first 4GB of memory as the bootmem allocator
+ passes the memory pages to the buddy allocator.
+
+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
+ when the processor supports PCID.
+
 pcbit= [HW,ISDN]

 pcd. [PARIDE]
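The grsecurity and PaX options documented above are ordinary boot parameters; a hypothetical kernel command line exercising several of them (values illustrative only, drawn from the formats listed above) might read:

	vmlinuz root=/dev/sda1 pax_sanitize_slab=full pax_softmode=0 grsec_sysfs_restrict=0 grsec_proc_gid=1001
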
diff --git a/Makefile b/Makefile
index 633b5f0..10aa54f 100644
--- a/Makefile
+++ b/Makefile
@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
 HOSTCC = gcc
 HOSTCXX = g++
-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
-HOSTCXXFLAGS = -O2
+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -std=gnu89 -fno-delete-null-pointer-checks
+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds

 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
@@ -446,8 +448,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
 # Rules shared between *config targets and build targets

 # Basic helpers built in scripts/
-PHONY += scripts_basic
-scripts_basic:
+PHONY += scripts_basic gcc-plugins
+scripts_basic: gcc-plugins
 $(Q)$(MAKE) $(build)=scripts/basic
 $(Q)rm -f .tmp_quiet_recordmcount

@@ -622,6 +624,72 @@ endif
 # Tell gcc to never replace conditional load with a non-conditional one
 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)

+ifndef DISABLE_PAX_PLUGINS
+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
+else
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
+endif
+ifneq ($(PLUGINCC),)
+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STACKLEAK
+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
+endif
+ifdef CONFIG_KALLOCSTAT_PLUGIN
+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
+endif
+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
+endif
+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
+endif
+endif
+ifdef CONFIG_CHECKER_PLUGIN
+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
+endif
+endif
+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
+ifdef CONFIG_PAX_SIZE_OVERFLOW
+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
+endif
+ifdef CONFIG_PAX_LATENT_ENTROPY
+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
+endif
+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
+ifeq ($(KBUILD_EXTMOD),)
+gcc-plugins:
+ $(Q)$(MAKE) $(build)=tools/gcc
+else
+gcc-plugins: ;
+endif
+else
+gcc-plugins:
+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
+else
+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
+endif
+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
+endif
+endif
+
 ifdef CONFIG_READABLE_ASM
 # Disable optimizations that make assembler listings hard to read.
 # reorder blocks reorders the control in the function
@@ -714,7 +782,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
 else
 KBUILD_CFLAGS += -g
 endif
-KBUILD_AFLAGS += -Wa,-gdwarf-2
+KBUILD_AFLAGS += -Wa,--gdwarf-2
 endif
 ifdef CONFIG_DEBUG_INFO_DWARF4
 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
@@ -879,7 +947,7 @@ export mod_sign_cmd


 ifeq ($(KBUILD_EXTMOD),)
-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/

 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
@@ -926,6 +994,8 @@ endif

 # The actual objects are generated when descending,
 # make sure no implicit rule kicks in
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;

 # Handle descending into subdirectories listed in $(vmlinux-dirs)
@@ -935,7 +1005,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
 # Error messages still appears in the original language

 PHONY += $(vmlinux-dirs)
-$(vmlinux-dirs): prepare scripts
+$(vmlinux-dirs): gcc-plugins prepare scripts
 $(Q)$(MAKE) $(build)=$@

 define filechk_kernel.release
@@ -978,10 +1048,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \

 archprepare: archheaders archscripts prepare1 scripts_basic

+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 prepare0: archprepare FORCE
 $(Q)$(MAKE) $(build)=.

 # All the preparing..
+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
 prepare: prepare0

 # Generate some files
@@ -1095,6 +1168,8 @@ all: modules
 # using awk while concatenating to the final file.

 PHONY += modules
+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
 @$(kecho) ' Building modules, stage 2.';
@@ -1110,7 +1185,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)

 # Target to prepare building external modules
 PHONY += modules_prepare
-modules_prepare: prepare scripts
+modules_prepare: gcc-plugins prepare scripts

 # Target to install modules
 PHONY += modules_install
@@ -1176,7 +1251,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
 signing_key.priv signing_key.x509 x509.genkey \
 extra_certificates signing_key.x509.keyid \
- signing_key.x509.signer
+ signing_key.x509.signer \
+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
+ tools/gcc/randomize_layout_seed.h

 # clean - Delete most, but leave enough to build external modules
 #
@@ -1215,7 +1293,7 @@ distclean: mrproper
 @find $(srctree) $(RCS_FIND_IGNORE) \
 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
 -type f -print | xargs rm -f


@@ -1381,6 +1459,8 @@ PHONY += $(module-dirs) modules
 $(module-dirs): crmodverdir $(objtree)/Module.symvers
 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)

+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(module-dirs)
 @$(kecho) ' Building modules, stage 2.';
 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
@@ -1521,17 +1601,21 @@ else
 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
 endif

-%.s: %.c prepare scripts FORCE
+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.s: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.i: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.c prepare scripts FORCE
+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.o: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.lst: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.s: %.S prepare scripts FORCE
+%.s: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.S prepare scripts FORCE
+%.o: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.symtypes: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
@@ -1543,11 +1627,15 @@ endif
 $(build)=$(build-dir)
 # Make sure the latest headers are built for Documentation
 Documentation/: headers_install
-%/: prepare scripts FORCE
+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%/: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir)
-%.ko: prepare scripts FORCE
+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.ko: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir) $(@:.ko=.o)
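As the $(error ...) message in the hunk above states, the whole plugin machinery can be bypassed when no plugin-capable gcc is available; the corresponding invocation is simply:

	make DISABLE_PAX_PLUGINS=y

This skips the tools/gcc plugin build and leaves GCC_PLUGINS_CFLAGS/GCC_PLUGINS_AFLAGS empty in the per-target flag additions above, at the cost of the plugin-provided hardening.
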
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 8f8eafb..3405f46 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic_dec(v) atomic_sub(1,(v))
 #define atomic64_dec(v) atomic64_sub(1,(v))

+#define atomic64_read_unchecked(v) atomic64_read(v)
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
+
 #endif /* _ALPHA_ATOMIC_H */
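On alpha the *_unchecked variants are plain aliases, since this architecture gets no PAX_REFCOUNT overflow trapping; the split exists so that code shared with trapping architectures still compiles. A minimal sketch of the intended division of labor (names illustrative, not from the patch):

	/* a reference count: under PAX_REFCOUNT, overflow must trap */
	atomic64_t refcount;
	atomic64_inc(&refcount);

	/* a statistics counter: wrap-around is harmless, so opt out */
	atomic64_unchecked_t stat_counter;
	atomic64_inc_unchecked(&stat_counter);
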
diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
index ad368a9..fbe0f25 100644
--- a/arch/alpha/include/asm/cache.h
+++ b/arch/alpha/include/asm/cache.h
@@ -4,19 +4,19 @@
 #ifndef __ARCH_ALPHA_CACHE_H
 #define __ARCH_ALPHA_CACHE_H

+#include <linux/const.h>

 /* Bytes per L1 (data) cache line. */
 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
-# define L1_CACHE_BYTES 64
 # define L1_CACHE_SHIFT 6
 #else
 /* Both EV4 and EV5 are write-through, read-allocate,
 direct-mapped, physical.
 */
-# define L1_CACHE_BYTES 32
 # define L1_CACHE_SHIFT 5
 #endif

+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES L1_CACHE_BYTES

 #endif
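The rewrite derives the byte count from the shift instead of duplicating it: _AC(1,UL) expands to 1UL in C (and a bare 1 in assembly), so L1_CACHE_SHIFT 6 gives (1UL << 6) = 64 bytes on EV6/generic and shift 5 gives (1UL << 5) = 32 bytes on EV4/EV5 — the same values as the removed #defines, now impossible to get out of sync.
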
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index 968d999..d36b2df 100644
--- a/arch/alpha/include/asm/elf.h
+++ b/arch/alpha/include/asm/elf.h
@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];

 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)

+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
+
+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
+#endif
+
 /* $0 is set by ld.so to a pointer to a function which might be
 registered using atexit. This provides a mean for the dynamic
 linker to call DT_FINI functions for shared libraries that have
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index aab14a0..b4fa3e7 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 pgd_set(pgd, pmd);
 }

+static inline void
+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+ pgd_populate(mm, pgd, pmd);
+}
+
 extern pgd_t *pgd_alloc(struct mm_struct *mm);

 static inline void
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index d8f9b7e..f6222fa 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -102,6 +102,17 @@ struct vm_area_struct;
 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+
+#ifdef CONFIG_PAX_PAGEEXEC
+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+#else
+# define PAGE_SHARED_NOEXEC PAGE_SHARED
+# define PAGE_COPY_NOEXEC PAGE_COPY
+# define PAGE_READONLY_NOEXEC PAGE_READONLY
+#endif
+
 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index 2fd00b7..cfd5069 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,

 /* The small sections were sorted to the end of the segment.
 The following should definitely cover them. */
- gp = (u64)me->module_core + me->core_size - 0x8000;
+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
 got = sechdrs[me->arch.gotsecindex].sh_addr;

 for (i = 0; i < n; i++) {
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index e51f578..16c64a3 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1296,10 +1296,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
 generic version except that we know how to honor ADDR_LIMIT_32BIT. */

 static unsigned long
-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
- unsigned long limit)
+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
+ unsigned long limit, unsigned long flags)
 {
 struct vm_unmapped_area_info info;
+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);

 info.flags = 0;
 info.length = len;
@@ -1307,6 +1308,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
 info.high_limit = limit;
 info.align_mask = 0;
 info.align_offset = 0;
+ info.threadstack_offset = offset;
 return vm_unmapped_area(&info);
 }

@@ -1339,20 +1341,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 merely specific addresses, but regions of memory -- perhaps
 this feature should be incorporated into all ports? */

+#ifdef CONFIG_PAX_RANDMMAP
+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 if (addr) {
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
 if (addr != (unsigned long) -ENOMEM)
 return addr;
 }

 /* Next, try allocating at TASK_UNMAPPED_BASE. */
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
- len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
+
 if (addr != (unsigned long) -ENOMEM)
 return addr;

 /* Finally, try allocating in low memory. */
- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);

 return addr;
 }
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 9d0ac09..479a962 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
 __reload_thread(pcb);
 }

+#ifdef CONFIG_PAX_PAGEEXEC
+/*
+ * PaX: decide what to do with offenders (regs->pc = fault address)
+ *
+ * returns 1 when task should be killed
+ * 2 when patched PLT trampoline was detected
+ * 3 when unpatched PLT trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+
+#ifdef CONFIG_PAX_EMUPLT
+ int err;
+
+ do { /* PaX: patched PLT emulation #1 */
+ unsigned int ldah, ldq, jmp;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
+ jmp == 0x6BFB0000U)
+ {
+ unsigned long r27, addr;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
+
+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ err = get_user(r27, (unsigned long *)addr);
+ if (err)
+ break;
+
+ regs->r27 = r27;
+ regs->pc = r27;
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: patched PLT emulation #2 */
+ unsigned int ldah, lda, br;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
+ err |= get_user(br, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
+ (br & 0xFFE00000U) == 0xC3E00000U)
+ {
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
+
+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: unpatched PLT emulation */
+ unsigned int br;
+
+ err = get_user(br, (unsigned int *)regs->pc);
+
+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
+ unsigned int br2, ldq, nop, jmp;
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
+
+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ err = get_user(br2, (unsigned int *)addr);
+ err |= get_user(ldq, (unsigned int *)(addr+4));
+ err |= get_user(nop, (unsigned int *)(addr+8));
+ err |= get_user(jmp, (unsigned int *)(addr+12));
+ err |= get_user(resolver, (unsigned long *)(addr+16));
+
+ if (err)
+ break;
+
+ if (br2 == 0xC3600000U &&
+ ldq == 0xA77B000CU &&
+ nop == 0x47FF041FU &&
+ jmp == 0x6B7B0000U)
+ {
+ regs->r28 = regs->pc+4;
+ regs->r27 = addr+16;
+ regs->pc = resolver;
+ return 3;
+ }
+ }
+ } while (0);
+#endif
+
+ return 1;
+}
+
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+ unsigned long i;
+
+ printk(KERN_ERR "PAX: bytes at PC: ");
+ for (i = 0; i < 5; i++) {
+ unsigned int c;
+ if (get_user(c, (unsigned int *)pc+i))
+ printk(KERN_CONT "???????? ");
+ else
+ printk(KERN_CONT "%08x ", c);
+ }
+ printk("\n");
+}
+#endif

 /*
 * This routine handles page faults. It determines the address,
@@ -133,8 +251,29 @@ retry:
 good_area:
 si_code = SEGV_ACCERR;
 if (cause < 0) {
- if (!(vma->vm_flags & VM_EXEC))
+ if (!(vma->vm_flags & VM_EXEC)) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
+ goto bad_area;
+
+ up_read(&mm->mmap_sem);
+ switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_PAX_EMUPLT
+ case 2:
+ case 3:
+ return;
+#endif
+
+ }
+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
+ do_group_exit(SIGKILL);
+#else
 goto bad_area;
+#endif
+
+ }
 } else if (!cause) {
 /* Allow reads even for write-only mappings */
 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
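The displacement arithmetic in pax_handle_fetch_fault() above leans on a branch-free idiom: each immediate field is first OR-ed with all-ones upper bits, and ((x ^ SIGN) + SIGN) then yields the proper 64-bit sign extension — a negative field is left untouched, while for a positive one the carry flushes the pre-set upper bits back to zero. A standalone C sketch of the 16-bit case (illustrative, not part of the patch):

	#include <inttypes.h>
	#include <stdio.h>

	/* Sign-extend the low 16 bits of an instruction word using the
	 * same trick as the PLT emulation: pre-set the upper bits, then
	 * (x ^ 0x8000) + 0x8000 fixes them up according to bit 15. */
	static uint64_t sext16(uint32_t insn)
	{
		uint64_t x = insn | 0xFFFFFFFFFFFF0000ULL;
		return (x ^ 0x8000UL) + 0x8000UL;
	}

	int main(void)
	{
		printf("%" PRIx64 "\n", sext16(0x7FFF)); /* 7fff */
		printf("%" PRIx64 "\n", sext16(0x8000)); /* ffffffffffff8000 */
		return 0;
	}
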
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 97d07ed..2931f2b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1727,7 +1727,7 @@ config ALIGNMENT_TRAP

 config UACCESS_WITH_MEMCPY
 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
- depends on MMU
+ depends on MMU && !PAX_MEMORY_UDEREF
 default y if CPU_FEROCEON
 help
 Implement faster copy_to_user and clear_user methods for CPU
@@ -1991,6 +1991,7 @@ config XIP_PHYS_ADDR
 config KEXEC
 bool "Kexec system call (EXPERIMENTAL)"
 depends on (!SMP || PM_SLEEP_SMP)
+ depends on !GRKERNSEC_KMEM
 help
 kexec is a system call that implements the ability to shutdown your
 current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index e22c119..eaa807d 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -18,17 +18,41 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>

+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
 #define ATOMIC_INIT(i) { (i) }

 #ifdef __KERNEL__

+#ifdef CONFIG_THUMB2_KERNEL
+#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
+#else
+#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
+#endif
+
+#define _ASM_EXTABLE(from, to) \
+" .pushsection __ex_table,\"a\"\n"\
+" .align 3\n" \
+" .long " #from ", " #to"\n" \
+" .popsection"
+
 /*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
 #define atomic_read(v) ACCESS_ONCE((v)->counter)
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
+{
+ return ACCESS_ONCE(v->counter);
+}
 #define atomic_set(v,i) (((v)->counter) = (i))
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+ v->counter = i;
+}

 #if __LINUX_ARM_ARCH__ >= 6

@@ -38,26 +62,50 @@
 * to ensure that the update happens.
 */

-#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+#ifdef CONFIG_PAX_REFCOUNT
+#define __OVERFLOW_POST \
+ " bvc 3f\n" \
+ "2: " REFCOUNT_TRAP_INSN "\n"\
+ "3:\n"
+#define __OVERFLOW_POST_RETURN \
+ " bvc 3f\n" \
+" mov %0, %1\n" \
+ "2: " REFCOUNT_TRAP_INSN "\n"\
+ "3:\n"
+#define __OVERFLOW_EXTABLE \
+ "4:\n" \
+ _ASM_EXTABLE(2b, 4b)
+#else
+#define __OVERFLOW_POST
+#define __OVERFLOW_POST_RETURN
+#define __OVERFLOW_EXTABLE
+#endif
+
+#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable) \
+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
 { \
 unsigned long tmp; \
 int result; \
 \
 prefetchw(&v->counter); \
- __asm__ __volatile__("@ atomic_" #op "\n" \
+ __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
 "1: ldrex %0, [%3]\n" \
 " " #asm_op " %0, %0, %4\n" \
+ post_op \
 " strex %1, %0, [%3]\n" \
 " teq %1, #0\n" \
-" bne 1b" \
+" bne 1b\n" \
+ extable \
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
 : "r" (&v->counter), "Ir" (i) \
 : "cc"); \
 } \

-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op, , )\
+ __ATOMIC_OP(op, _unchecked, c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
+
+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
 { \
 unsigned long tmp; \
 int result; \
@@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 smp_mb(); \
 prefetchw(&v->counter); \
 \
- __asm__ __volatile__("@ atomic_" #op "_return\n" \
+ __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
 "1: ldrex %0, [%3]\n" \
 " " #asm_op " %0, %0, %4\n" \
+ post_op \
 " strex %1, %0, [%3]\n" \
 " teq %1, #0\n" \
-" bne 1b" \
+" bne 1b\n" \
+ extable \
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
 : "r" (&v->counter), "Ir" (i) \
 : "cc"); \
@@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 return result; \
 }

+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op, , )\
+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
+
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
 int oldval;
@@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 __asm__ __volatile__ ("@ atomic_add_unless\n"
 "1: ldrex %0, [%4]\n"
 " teq %0, %5\n"
-" beq 2f\n"
-" add %1, %0, %6\n"
+" beq 4f\n"
+" adds %1, %0, %6\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
 " strex %2, %1, [%4]\n"
 " teq %2, #0\n"
 " bne 1b\n"
-"2:"
+"4:"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "r" (u), "r" (a)
 : "cc");
@@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 return oldval;
 }

+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
+{
+ unsigned long oldval, res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
+ "ldrex %1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "strexeq %0, %5, [%3]\n"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
 #else /* ARM_ARCH_6 */

 #ifdef CONFIG_SMP
 #error SMP not supported on pre-ARMv6 CPUs
 #endif

-#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
 { \
 unsigned long flags; \
 \
@@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
 raw_local_irq_restore(flags); \
 } \

-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
+ __ATOMIC_OP(op, _unchecked, c_op, asm_op)
+
+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
 { \
 unsigned long flags; \
 int val; \
@@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
 return val; \
 }

+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
+
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 int ret;
@@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 return ret;
 }

+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
+{
+ return atomic_cmpxchg((atomic_t *)v, old, new);
+}
+
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 int c, old;
@@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub)

 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
+#undef __ATOMIC_OP_RETURN
 #undef ATOMIC_OP
+#undef __ATOMIC_OP

 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
+{
+ return xchg(&v->counter, new);
+}

 #define atomic_inc(v) atomic_add(1, v)
+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
+ atomic_add_unchecked(1, v);
+}
 #define atomic_dec(v) atomic_sub(1, v)
+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
+ atomic_sub_unchecked(1, v);
+}

 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v) == 0;
+}
 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
 #define atomic_inc_return(v) (atomic_add_return(1, v))
+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v);
+}
 #define atomic_dec_return(v) (atomic_sub_return(1, v))
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

@@ -216,6 +336,14 @@ typedef struct {
 long long counter;
 } atomic64_t;

+#ifdef CONFIG_PAX_REFCOUNT
+typedef struct {
+ long long counter;
+} atomic64_unchecked_t;
+#else
+typedef atomic64_t atomic64_unchecked_t;
+#endif
+
 #define ATOMIC64_INIT(i) { (i) }

 #ifdef CONFIG_ARM_LPAE
@@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
 return result;
 }

+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+ long long result;
+
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
+" ldrd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
 __asm__ __volatile__("@ atomic64_set\n"
@@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 : "r" (&v->counter), "r" (i)
 );
 }
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
+{
+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
+" strd %2, %H2, [%1]"
+ : "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ );
+}
 #else
 static inline long long atomic64_read(const atomic64_t *v)
 {
@@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
 return result;
 }

+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+ long long result;
+
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
+" ldrexd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
 long long tmp;
@@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 : "r" (&v->counter), "r" (i)
 : "cc");
 }
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
+{
+ long long tmp;
+
+ prefetchw(&v->counter);
+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" strexd %0, %3, %H3, [%2]\n"
+" teq %0, #0\n"
+" bne 1b"
+ : "=&r" (tmp), "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
 #endif

-#define ATOMIC64_OP(op, op1, op2) \
-static inline void atomic64_##op(long long i, atomic64_t *v) \
+#undef __OVERFLOW_POST_RETURN
+#define __OVERFLOW_POST_RETURN \
+ " bvc 3f\n" \
+" mov %0, %1\n" \
+" mov %H0, %H1\n" \
+ "2: " REFCOUNT_TRAP_INSN "\n"\
+ "3:\n"
+
+#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable) \
+static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
 { \
 long long result; \
 unsigned long tmp; \
 \
 prefetchw(&v->counter); \
- __asm__ __volatile__("@ atomic64_" #op "\n" \
+ __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
 "1: ldrexd %0, %H0, [%3]\n" \
 " " #op1 " %Q0, %Q0, %Q4\n" \
 " " #op2 " %R0, %R0, %R4\n" \
+ post_op \
 " strexd %1, %0, %H0, [%3]\n" \
 " teq %1, #0\n" \
-" bne 1b" \
+" bne 1b\n" \
+ extable \
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
 : "r" (&v->counter), "r" (i) \
 : "cc"); \
 } \

-#define ATOMIC64_OP_RETURN(op, op1, op2) \
-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
+#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, , op1, op2, , ) \
+ __ATOMIC64_OP(op, _unchecked, op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
+
+#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \
+static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \
 { \
 long long result; \
 unsigned long tmp; \
@@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
 smp_mb(); \
 prefetchw(&v->counter); \
 \
- __asm__ __volatile__("@ atomic64_" #op "_return\n" \
+ __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
 "1: ldrexd %0, %H0, [%3]\n" \
 " " #op1 " %Q0, %Q0, %Q4\n" \
 " " #op2 " %R0, %R0, %R4\n" \
+ post_op \
 " strexd %1, %0, %H0, [%3]\n" \
 " teq %1, #0\n" \
-" bne 1b" \
+" bne 1b\n" \
+ extable \
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
 : "r" (&v->counter), "r" (i) \
 : "cc"); \
@@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
 return result; \
 }

+#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, , op1, op2, , ) \
+ __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
+
 #define ATOMIC64_OPS(op, op1, op2) \
 ATOMIC64_OP(op, op1, op2) \
 ATOMIC64_OP_RETURN(op, op1, op2)
@@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc)

 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
+#undef __ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
+#undef __ATOMIC64_OP
+#undef __OVERFLOW_EXTABLE
+#undef __OVERFLOW_POST_RETURN
+#undef __OVERFLOW_POST

 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
 long long new)
@@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
 return oldval;
 }

+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
+ long long new)
+{
+ long long oldval;
+ unsigned long res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
+ "ldrexd %1, %H1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "teqeq %H1, %H4\n"
+ "strexdeq %0, %5, %H5, [%3]"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "r" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 {
 long long result;
@@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
 long long result;
- unsigned long tmp;
+ u64 tmp;

 smp_mb();
 prefetchw(&v->counter);

 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" subs %Q0, %Q0, #1\n"
-" sbc %R0, %R0, #0\n"
+"1: ldrexd %1, %H1, [%3]\n"
+" subs %Q0, %Q1, #1\n"
+" sbcs %R0, %R1, #0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %Q0, %Q1\n"
+" mov %R0, %R1\n"
+"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
 " teq %R0, #0\n"
-" bmi 2f\n"
+" bmi 4f\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter)
 : "cc");
@@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 " teq %0, %5\n"
 " teqeq %H0, %H5\n"
 " moveq %1, #0\n"
-" beq 2f\n"
+" beq 4f\n"
 " adds %Q0, %Q0, %Q6\n"
-" adc %R0, %R0, %R6\n"
+" adcs %R0, %R0, %R6\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: " REFCOUNT_TRAP_INSN "\n"
+"3:\n"
+#endif
+
 " strexd %2, %0, %H0, [%4]\n"
 " teq %2, #0\n"
 " bne 1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "r" (u), "r" (a)
 : "cc");
@@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)

 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
 #define atomic64_inc(v) atomic64_add(1LL, (v))
+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
 #define atomic64_dec(v) atomic64_sub(1LL, (v))
+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
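The macro split above generates two families from one template: the checked one substitutes the flag-setting ALU forms (adds/adcs), branches over a bkpt when the V flag is clear (bvc), and registers the bkpt site in __ex_table so the trap handler can resume past the store. As a rough C analog of the property being enforced — not of the implementation, which has to stay inside a single ldrex/strex sequence (a sketch using the GCC/clang builtin):

	/* what the PAX_REFCOUNT variants guard against: silent signed wrap */
	static int checked_add(int a, int b)
	{
		int r;
		if (__builtin_add_overflow(a, b, &r))
			__builtin_trap(); /* analogous to the REFCOUNT_TRAP_INSN bkpt */
		return r;
	}
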
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index d2f81e6..3c4dba5 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -67,7 +67,7 @@
 do { \
 compiletime_assert_atomic_type(*p); \
 smp_mb(); \
- ACCESS_ONCE(*p) = (v); \
+ ACCESS_ONCE_RW(*p) = (v); \
 } while (0)

 #define smp_load_acquire(p) \
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 75fe66b..ba3dee4 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -4,8 +4,10 @@
 #ifndef __ASMARM_CACHE_H
 #define __ASMARM_CACHE_H

+#include <linux/const.h>
+
 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

 /*
 * Memory returned by kmalloc() may be used for DMA, so we must make
@@ -24,5 +26,6 @@
 #endif

 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_only __attribute__ ((__section__(".data..read_only")))

 #endif
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 2d46862..a35415b 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -116,7 +116,7 @@ struct cpu_cache_fns {
 void (*dma_unmap_area)(const void *, size_t, int);

 void (*dma_flush_range)(const void *, const void *);
-};
+} __no_const;

 /*
 * Select the calling method
diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
index 5233151..87a71fa 100644
--- a/arch/arm/include/asm/checksum.h
+++ b/arch/arm/include/asm/checksum.h
@@ -37,7 +37,19 @@ __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);

 __wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+
+static inline __wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
+{
+ __wsum ret;
+ pax_open_userland();
+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
+ pax_close_userland();
+ return ret;
+}
+
+

 /*
 * Fold a partial checksum without adding pseudo headers
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index abb2c37..96db950 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size

 #define xchg(ptr,x) \
 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg_unchecked(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

 #include <asm-generic/cmpxchg-local.h>

diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe44..b5e38b1a 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -48,18 +48,37 @@
 * Domain types
 */
 #define DOMAIN_NOACCESS 0
-#define DOMAIN_CLIENT 1
 #ifdef CONFIG_CPU_USE_DOMAINS
+#define DOMAIN_USERCLIENT 1
+#define DOMAIN_KERNELCLIENT 1
 #define DOMAIN_MANAGER 3
+#define DOMAIN_VECTORS DOMAIN_USER
 #else
+
+#ifdef CONFIG_PAX_KERNEXEC
 #define DOMAIN_MANAGER 1
+#define DOMAIN_KERNEXEC 3
+#else
+#define DOMAIN_MANAGER 1
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+#define DOMAIN_USERCLIENT 0
+#define DOMAIN_UDEREF 1
+#define DOMAIN_VECTORS DOMAIN_KERNEL
+#else
+#define DOMAIN_USERCLIENT 1
+#define DOMAIN_VECTORS DOMAIN_USER
+#endif
+#define DOMAIN_KERNELCLIENT 1
+
 #endif

 #define domain_val(dom,type) ((type) << (2*(dom)))

 #ifndef __ASSEMBLY__

-#ifdef CONFIG_CPU_USE_DOMAINS
+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
 static inline void set_domain(unsigned val)
 {
 asm volatile(
@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
 isb();
 }

-#define modify_domain(dom,type) \
- do { \
- struct thread_info *thread = current_thread_info(); \
- unsigned int domain = thread->cpu_domain; \
- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
- thread->cpu_domain = domain | domain_val(dom, type); \
- set_domain(thread->cpu_domain); \
- } while (0)
-
+extern void modify_domain(unsigned int dom, unsigned int type);
 #else
 static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type) { }
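Each ARM protection domain takes two bits in the DACR, which is exactly what domain_val() encodes: domain_val(dom, type) == type << (2 * dom), so granting DOMAIN_MANAGER (3) to domain 1 works out to 3 << 2 = 0xc. The patch's split of the old single DOMAIN_CLIENT into DOMAIN_USERCLIENT and DOMAIN_KERNELCLIENT is what lets UDEREF flip userland mappings to no-access while the kernel runs, and turning modify_domain() into a real function keeps those transitions in one place.
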
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index afb9caf..9a0bac0 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 the loader. We need to make sure that it is out of the way of the program
 that it will "exec", and that there is sufficient room for the brk. */

-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
+
+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#endif

 /* When the program starts, a1 contains a pointer to a function to be
 registered with atexit, as per the SVR4 ABI. A value of 0 means we
@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 extern void elf_set_personality(const struct elf32_hdr *);
 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))

-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 #ifdef CONFIG_MMU
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
 struct linux_binprm;
diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
index de53547..52b9a28 100644
--- a/arch/arm/include/asm/fncpy.h
+++ b/arch/arm/include/asm/fncpy.h
@@ -81,7 +81,9 @@
 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
 \
+ pax_open_kernel(); \
 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
+ pax_close_kernel(); \
 flush_icache_range((unsigned long)(dest_buf), \
 (unsigned long)(dest_buf) + (size)); \
 \
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 53e69da..3fdc896 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 return -EFAULT;

+ pax_open_userland();
+
 smp_mb();
 /* Prefetching cannot fault */
 prefetchw(uaddr);
@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 : "cc", "memory");
 smp_mb();

+ pax_close_userland();
+
 *uval = val;
 return ret;
 }
@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 return -EFAULT;

+ pax_open_userland();
+
 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 "1: " TUSER(ldr) " %1, [%4]\n"
 " teq %1, %2\n"
@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 : "cc", "memory");

+ pax_close_userland();
+
 *uval = val;
 return ret;
 }
@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 return -EFAULT;

 pagefault_disable(); /* implies preempt_disable() */
+ pax_open_userland();

 switch (op) {
 case FUTEX_OP_SET:
@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 ret = -ENOSYS;
 }

+ pax_close_userland();
 pagefault_enable(); /* subsumes preempt_enable() */

 if (!ret) {
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index 83eb2f7..ed77159 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -4,6 +4,6 @@
 /*
 * This is the "bare minimum". AIO seems to require this.
 */
-#define KM_TYPE_NR 16
+#define KM_TYPE_NR 17

 #endif
diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
index 9e614a1..3302cca 100644
--- a/arch/arm/include/asm/mach/dma.h
+++ b/arch/arm/include/asm/mach/dma.h
@@ -22,7 +22,7 @@ struct dma_ops {
 int (*residue)(unsigned int, dma_t *); /* optional */
 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
 const char *type;
-};
+} __do_const;

 struct dma_struct {
 void *addr; /* single DMA address */
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index f98c7f3..e5c626d 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -23,17 +23,19 @@ struct map_desc {

 /* types 0-3 are defined in asm/io.h */
 enum {
- MT_UNCACHED = 4,
- MT_CACHECLEAN,
- MT_MINICLEAN,
+ MT_UNCACHED_RW = 4,
+ MT_CACHECLEAN_RO,
+ MT_MINICLEAN_RO,
 MT_LOW_VECTORS,
 MT_HIGH_VECTORS,
- MT_MEMORY_RWX,
+ __MT_MEMORY_RWX,
 MT_MEMORY_RW,
- MT_ROM,
- MT_MEMORY_RWX_NONCACHED,
+ MT_MEMORY_RX,
+ MT_ROM_RX,
1829+ MT_MEMORY_RW_NONCACHED,
1830+ MT_MEMORY_RX_NONCACHED,
1831 MT_MEMORY_RW_DTCM,
1832- MT_MEMORY_RWX_ITCM,
1833+ MT_MEMORY_RX_ITCM,
1834 MT_MEMORY_RW_SO,
1835 MT_MEMORY_DMA_READY,
1836 };
1837diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1838index 891a56b..48f337e 100644
1839--- a/arch/arm/include/asm/outercache.h
1840+++ b/arch/arm/include/asm/outercache.h
1841@@ -36,7 +36,7 @@ struct outer_cache_fns {
1842
1843 /* This is an ARM L2C thing */
1844 void (*write_sec)(unsigned long, unsigned);
1845-};
1846+} __no_const;
1847
1848 extern struct outer_cache_fns outer_cache;
1849
1850diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1851index 4355f0e..cd9168e 100644
1852--- a/arch/arm/include/asm/page.h
1853+++ b/arch/arm/include/asm/page.h
1854@@ -23,6 +23,7 @@
1855
1856 #else
1857
1858+#include <linux/compiler.h>
1859 #include <asm/glue.h>
1860
1861 /*
1862@@ -114,7 +115,7 @@ struct cpu_user_fns {
1863 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1864 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1865 unsigned long vaddr, struct vm_area_struct *vma);
1866-};
1867+} __no_const;
1868
1869 #ifdef MULTI_USER
1870 extern struct cpu_user_fns cpu_user;
1871diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1872index 19cfab5..3f5c7e9 100644
1873--- a/arch/arm/include/asm/pgalloc.h
1874+++ b/arch/arm/include/asm/pgalloc.h
1875@@ -17,6 +17,7 @@
1876 #include <asm/processor.h>
1877 #include <asm/cacheflush.h>
1878 #include <asm/tlbflush.h>
1879+#include <asm/system_info.h>
1880
1881 #define check_pgt_cache() do { } while (0)
1882
1883@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1884 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1885 }
1886
1887+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1888+{
1889+ pud_populate(mm, pud, pmd);
1890+}
1891+
1892 #else /* !CONFIG_ARM_LPAE */
1893
1894 /*
1895@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1896 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1897 #define pmd_free(mm, pmd) do { } while (0)
1898 #define pud_populate(mm,pmd,pte) BUG()
1899+#define pud_populate_kernel(mm,pmd,pte) BUG()
1900
1901 #endif /* CONFIG_ARM_LPAE */
1902
1903@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1904 __free_page(pte);
1905 }
1906
1907+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1908+{
1909+#ifdef CONFIG_ARM_LPAE
1910+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1911+#else
1912+ if (addr & SECTION_SIZE)
1913+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1914+ else
1915+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1916+#endif
1917+ flush_pmd_entry(pmdp);
1918+}
1919+
1920 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1921 pmdval_t prot)
1922 {
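
__section_update() above deals with the classic (non-LPAE) layout, where one Linux PMD covers a pair of 1MB hardware sections: the SECTION_SIZE bit of the address selects which of the two entries to modify. A usage sketch (the wrapper is hypothetical; PMD_SECT_RDONLY is defined by this patch for both page-table formats):

	/* mark the 1MB section containing addr read-only */
	static void section_mkro(pmd_t *pmd, unsigned long addr)
	{
		__section_update(pmd, addr, PMD_SECT_RDONLY);
	}
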
1923diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1924index 5e68278..1869bae 100644
1925--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1926+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1927@@ -27,7 +27,7 @@
1928 /*
1929 * - section
1930 */
1931-#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1932+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1933 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1934 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1935 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1936@@ -39,6 +39,7 @@
1937 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1938 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1939 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1940+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1941
1942 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1943 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1944@@ -68,6 +69,7 @@
1945 * - extended small page/tiny page
1946 */
1947 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1948+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1949 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1950 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1951 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1952diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1953index f027941..f36ce30 100644
1954--- a/arch/arm/include/asm/pgtable-2level.h
1955+++ b/arch/arm/include/asm/pgtable-2level.h
1956@@ -126,6 +126,9 @@
1957 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1958 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1959
1960+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1961+#define L_PTE_PXN (_AT(pteval_t, 0))
1962+
1963 /*
1964 * These are the memory types, defined to be compatible with
1965 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1966diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1967index a31ecdad..95e98d4 100644
1968--- a/arch/arm/include/asm/pgtable-3level.h
1969+++ b/arch/arm/include/asm/pgtable-3level.h
1970@@ -81,6 +81,7 @@
1971 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
1972 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1973 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1974+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1975 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1976 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
1977 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
1978@@ -92,10 +93,12 @@
1979 #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
1980 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
1981 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
1982+#define PMD_SECT_RDONLY PMD_SECT_AP2
1983
1984 /*
1985 * To be used in assembly code with the upper page attributes.
1986 */
1987+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1988 #define L_PTE_XN_HIGH (1 << (54 - 32))
1989 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1990
1991diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1992index d5cac54..906ea3e 100644
1993--- a/arch/arm/include/asm/pgtable.h
1994+++ b/arch/arm/include/asm/pgtable.h
1995@@ -33,6 +33,9 @@
1996 #include <asm/pgtable-2level.h>
1997 #endif
1998
1999+#define ktla_ktva(addr) (addr)
2000+#define ktva_ktla(addr) (addr)
2001+
2002 /*
2003 * Just any arbitrary offset to the start of the vmalloc VM area: the
2004 * current 8MB value just means that there will be a 8MB "hole" after the
2005@@ -48,6 +51,9 @@
2006 #define LIBRARY_TEXT_START 0x0c000000
2007
2008 #ifndef __ASSEMBLY__
2009+extern pteval_t __supported_pte_mask;
2010+extern pmdval_t __supported_pmd_mask;
2011+
2012 extern void __pte_error(const char *file, int line, pte_t);
2013 extern void __pmd_error(const char *file, int line, pmd_t);
2014 extern void __pgd_error(const char *file, int line, pgd_t);
2015@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2016 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2017 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2018
2019+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2020+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2021+
2022+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2023+#include <asm/domain.h>
2024+#include <linux/thread_info.h>
2025+#include <linux/preempt.h>
2026+
2027+static inline int test_domain(int domain, int domaintype)
2028+{
2029+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2030+}
2031+#endif
2032+
2033+#ifdef CONFIG_PAX_KERNEXEC
2034+static inline unsigned long pax_open_kernel(void) {
2035+#ifdef CONFIG_ARM_LPAE
2036+ /* TODO */
2037+#else
2038+ preempt_disable();
2039+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2040+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2041+#endif
2042+ return 0;
2043+}
2044+
2045+static inline unsigned long pax_close_kernel(void) {
2046+#ifdef CONFIG_ARM_LPAE
2047+ /* TODO */
2048+#else
2049+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2050+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2051+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2052+ preempt_enable_no_resched();
2053+#endif
2054+ return 0;
2055+}
2056+#else
2057+static inline unsigned long pax_open_kernel(void) { return 0; }
2058+static inline unsigned long pax_close_kernel(void) { return 0; }
2059+#endif
2060+
2061 /*
2062 * This is the lowest virtual address we can permit any user space
2063 * mapping to be mapped at. This is particularly important for
2064@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2065 /*
2066 * The pgprot_* and protection_map entries will be fixed up in runtime
2067 * to include the cachable and bufferable bits based on memory policy,
2068- * as well as any architecture dependent bits like global/ASID and SMP
2069- * shared mapping bits.
2070+ * as well as any architecture dependent bits like global/ASID, PXN,
2071+ * and SMP shared mapping bits.
2072 */
2073 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2074
2075@@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
2076 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2077 {
2078 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2079- L_PTE_NONE | L_PTE_VALID;
2080+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2081 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2082 return pte;
2083 }
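
This open/close pair is the bracket used throughout the patch wherever a normally read-only kernel mapping must be written briefly; compare the fncpy.h hunk above and the fiq.c and tcm.c hunks below. The canonical pattern:

	pax_open_kernel();	/* DOMAIN_KERNEL -> manager (KERNEXEC) */
	memcpy(dst, src, len);	/* dst may live in an RX mapping */
	pax_close_kernel();	/* back to client; preemption re-enabled */
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
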
2084diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2085index c25ef3e..735f14b 100644
2086--- a/arch/arm/include/asm/psci.h
2087+++ b/arch/arm/include/asm/psci.h
2088@@ -32,7 +32,7 @@ struct psci_operations {
2089 int (*affinity_info)(unsigned long target_affinity,
2090 unsigned long lowest_affinity_level);
2091 int (*migrate_info_type)(void);
2092-};
2093+} __no_const;
2094
2095 extern struct psci_operations psci_ops;
2096 extern struct smp_operations psci_smp_ops;
2097diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2098index 18f5a55..5072a40 100644
2099--- a/arch/arm/include/asm/smp.h
2100+++ b/arch/arm/include/asm/smp.h
2101@@ -107,7 +107,7 @@ struct smp_operations {
2102 int (*cpu_disable)(unsigned int cpu);
2103 #endif
2104 #endif
2105-};
2106+} __no_const;
2107
2108 struct of_cpu_method {
2109 const char *method;
2110diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2111index d890e41..3921292 100644
2112--- a/arch/arm/include/asm/thread_info.h
2113+++ b/arch/arm/include/asm/thread_info.h
2114@@ -78,9 +78,9 @@ struct thread_info {
2115 .flags = 0, \
2116 .preempt_count = INIT_PREEMPT_COUNT, \
2117 .addr_limit = KERNEL_DS, \
2118- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2119- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2120- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2121+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2122+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2123+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2124 .restart_block = { \
2125 .fn = do_no_restart_syscall, \
2126 }, \
2127@@ -159,7 +159,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2128 #define TIF_SYSCALL_AUDIT 9
2129 #define TIF_SYSCALL_TRACEPOINT 10
2130 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2131-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2132+/* must stay within 8 bits of TIF_SYSCALL_TRACE so the resulting mask
2133+ * still fits an ARM flexible second operand (8-bit rotated immediate)
2134+ */
2135+#define TIF_GRSEC_SETXID 12
2136+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2137 #define TIF_USING_IWMMXT 17
2138 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2139 #define TIF_RESTORE_SIGMASK 20
2140@@ -173,10 +177,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2141 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2142 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2143 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2144+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2145
2146 /* Checks for any syscall work in entry-common.S */
2147 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2148- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2149+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2150
2151 /*
2152 * Change these and you break ASM code in entry-common.S
2153diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2154index 5f833f7..76e6644 100644
2155--- a/arch/arm/include/asm/tls.h
2156+++ b/arch/arm/include/asm/tls.h
2157@@ -3,6 +3,7 @@
2158
2159 #include <linux/compiler.h>
2160 #include <asm/thread_info.h>
2161+#include <asm/pgtable.h>
2162
2163 #ifdef __ASSEMBLY__
2164 #include <asm/asm-offsets.h>
2165@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2166 * at 0xffff0fe0 must be used instead. (see
2167 * entry-armv.S for details)
2168 */
2169+ pax_open_kernel();
2170 *((unsigned int *)0xffff0ff0) = val;
2171+ pax_close_kernel();
2172 #endif
2173 }
2174
2175diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2176index 4767eb9..bf00668 100644
2177--- a/arch/arm/include/asm/uaccess.h
2178+++ b/arch/arm/include/asm/uaccess.h
2179@@ -18,6 +18,7 @@
2180 #include <asm/domain.h>
2181 #include <asm/unified.h>
2182 #include <asm/compiler.h>
2183+#include <asm/pgtable.h>
2184
2185 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2186 #include <asm-generic/uaccess-unaligned.h>
2187@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2188 static inline void set_fs(mm_segment_t fs)
2189 {
2190 current_thread_info()->addr_limit = fs;
2191- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2192+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2193 }
2194
2195 #define segment_eq(a,b) ((a) == (b))
2196
2197+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2198+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2199+
2200+static inline void pax_open_userland(void)
2201+{
2202+
2203+#ifdef CONFIG_PAX_MEMORY_UDEREF
2204+ if (segment_eq(get_fs(), USER_DS)) {
2205+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2206+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2207+ }
2208+#endif
2209+
2210+}
2211+
2212+static inline void pax_close_userland(void)
2213+{
2214+
2215+#ifdef CONFIG_PAX_MEMORY_UDEREF
2216+ if (segment_eq(get_fs(), USER_DS)) {
2217+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2218+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2219+ }
2220+#endif
2221+
2222+}
2223+
2224 #define __addr_ok(addr) ({ \
2225 unsigned long flag; \
2226 __asm__("cmp %2, %0; movlo %0, #0" \
2227@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
2228
2229 #define get_user(x,p) \
2230 ({ \
2231+ int __e; \
2232 might_fault(); \
2233- __get_user_check(x,p); \
2234+ pax_open_userland(); \
2235+ __e = __get_user_check(x,p); \
2236+ pax_close_userland(); \
2237+ __e; \
2238 })
2239
2240 extern int __put_user_1(void *, unsigned int);
2241@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
2242
2243 #define put_user(x,p) \
2244 ({ \
2245+ int __e; \
2246 might_fault(); \
2247- __put_user_check(x,p); \
2248+ pax_open_userland(); \
2249+ __e = __put_user_check(x,p); \
2250+ pax_close_userland(); \
2251+ __e; \
2252 })
2253
2254 #else /* CONFIG_MMU */
2255@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
2256
2257 #endif /* CONFIG_MMU */
2258
2259+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
2260 #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
2261
2262 #define user_addr_max() \
2263@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
2264 #define __get_user(x,ptr) \
2265 ({ \
2266 long __gu_err = 0; \
2267+ pax_open_userland(); \
2268 __get_user_err((x),(ptr),__gu_err); \
2269+ pax_close_userland(); \
2270 __gu_err; \
2271 })
2272
2273 #define __get_user_error(x,ptr,err) \
2274 ({ \
2275+ pax_open_userland(); \
2276 __get_user_err((x),(ptr),err); \
2277+ pax_close_userland(); \
2278 (void) 0; \
2279 })
2280
2281@@ -368,13 +409,17 @@ do { \
2282 #define __put_user(x,ptr) \
2283 ({ \
2284 long __pu_err = 0; \
2285+ pax_open_userland(); \
2286 __put_user_err((x),(ptr),__pu_err); \
2287+ pax_close_userland(); \
2288 __pu_err; \
2289 })
2290
2291 #define __put_user_error(x,ptr,err) \
2292 ({ \
2293+ pax_open_userland(); \
2294 __put_user_err((x),(ptr),err); \
2295+ pax_close_userland(); \
2296 (void) 0; \
2297 })
2298
2299@@ -474,11 +519,44 @@ do { \
2300
2301
2302 #ifdef CONFIG_MMU
2303-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2304-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2305+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2306+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2307+
2308+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2309+{
2310+ unsigned long ret;
2311+
2312+ check_object_size(to, n, false);
2313+ pax_open_userland();
2314+ ret = ___copy_from_user(to, from, n);
2315+ pax_close_userland();
2316+ return ret;
2317+}
2318+
2319+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2320+{
2321+ unsigned long ret;
2322+
2323+ check_object_size(from, n, true);
2324+ pax_open_userland();
2325+ ret = ___copy_to_user(to, from, n);
2326+ pax_close_userland();
2327+ return ret;
2328+}
2329+
2330 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2331-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2332+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2333 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2334+
2335+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2336+{
2337+ unsigned long ret;
2338+ pax_open_userland();
2339+ ret = ___clear_user(addr, n);
2340+ pax_close_userland();
2341+ return ret;
2342+}
2343+
2344 #else
2345 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2346 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2347@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2348
2349 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2350 {
2351+ if ((long)n < 0)
2352+ return n;
2353+
2354 if (access_ok(VERIFY_READ, from, n))
2355 n = __copy_from_user(to, from, n);
2356 else /* security hole - plug it */
2357@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2358
2359 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2360 {
2361+ if ((long)n < 0)
2362+ return n;
2363+
2364 if (access_ok(VERIFY_WRITE, to, n))
2365 n = __copy_to_user(to, from, n);
2366 return n;
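
The added "(long)n < 0" checks turn a length underflow into a clean failure: a size that wrapped to a huge unsigned value is negative when viewed as signed, so the copy is refused before access_ok() and the copy loop ever see it. An illustrative scenario (not patch code):

	size_t len = user_len - sizeof(hdr);	/* wraps if user_len < sizeof(hdr) */
	if (copy_from_user(buf, ubuf, len))	/* fails fast instead of
						   attempting a ~4GB copy */
		return -EFAULT;
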
2367diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2368index 5af0ed1..cea83883 100644
2369--- a/arch/arm/include/uapi/asm/ptrace.h
2370+++ b/arch/arm/include/uapi/asm/ptrace.h
2371@@ -92,7 +92,7 @@
2372 * ARMv7 groups of PSR bits
2373 */
2374 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2375-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2376+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2377 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2378 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2379
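
The PSR_ISET_MASK change is a straight bug fix: in the ARM PSR the Thumb (T) bit is bit 5 (0x20) and the Jazelle (J) bit is bit 24 (0x01000000), whereas the old mask used bit 4, which belongs to the mode field. Spelled out:

	#define PSR_J_BIT	0x01000000	/* bit 24 */
	#define PSR_T_BIT	0x00000020	/* bit 5 */
	/* PSR_J_BIT | PSR_T_BIT == 0x01000020, the corrected mask */
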
2380diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2381index a88671c..1cc895e 100644
2382--- a/arch/arm/kernel/armksyms.c
2383+++ b/arch/arm/kernel/armksyms.c
2384@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2385
2386 /* networking */
2387 EXPORT_SYMBOL(csum_partial);
2388-EXPORT_SYMBOL(csum_partial_copy_from_user);
2389+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2390 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2391 EXPORT_SYMBOL(__csum_ipv6_magic);
2392
2393@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2394 #ifdef CONFIG_MMU
2395 EXPORT_SYMBOL(copy_page);
2396
2397-EXPORT_SYMBOL(__copy_from_user);
2398-EXPORT_SYMBOL(__copy_to_user);
2399-EXPORT_SYMBOL(__clear_user);
2400+EXPORT_SYMBOL(___copy_from_user);
2401+EXPORT_SYMBOL(___copy_to_user);
2402+EXPORT_SYMBOL(___clear_user);
2403
2404 EXPORT_SYMBOL(__get_user_1);
2405 EXPORT_SYMBOL(__get_user_2);
2406diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2407index 2f5555d..d493c91 100644
2408--- a/arch/arm/kernel/entry-armv.S
2409+++ b/arch/arm/kernel/entry-armv.S
2410@@ -47,6 +47,87 @@
2411 9997:
2412 .endm
2413
2414+ .macro pax_enter_kernel
2415+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2416+ @ make aligned space for saved DACR
2417+ sub sp, sp, #8
2418+ @ save regs
2419+ stmdb sp!, {r1, r2}
2420+ @ read DACR from cpu_domain into r1
2421+ mov r2, sp
2422+ @ assume 8K pages, since we have to split the immediate in two
2423+ bic r2, r2, #(0x1fc0)
2424+ bic r2, r2, #(0x3f)
2425+ ldr r1, [r2, #TI_CPU_DOMAIN]
2426+ @ store old DACR on stack
2427+ str r1, [sp, #8]
2428+#ifdef CONFIG_PAX_KERNEXEC
2429+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2430+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2431+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2432+#endif
2433+#ifdef CONFIG_PAX_MEMORY_UDEREF
2434+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2435+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2436+#endif
2437+ @ write r1 to current_thread_info()->cpu_domain
2438+ str r1, [r2, #TI_CPU_DOMAIN]
2439+ @ write r1 to DACR
2440+ mcr p15, 0, r1, c3, c0, 0
2441+ @ instruction sync
2442+ instr_sync
2443+ @ restore regs
2444+ ldmia sp!, {r1, r2}
2445+#endif
2446+ .endm
2447+
2448+ .macro pax_open_userland
2449+#ifdef CONFIG_PAX_MEMORY_UDEREF
2450+ @ save regs
2451+ stmdb sp!, {r0, r1}
2452+ @ read DACR from cpu_domain into r1
2453+ mov r0, sp
2454+ @ assume 8K pages, since we have to split the immediate in two
2455+ bic r0, r0, #(0x1fc0)
2456+ bic r0, r0, #(0x3f)
2457+ ldr r1, [r0, #TI_CPU_DOMAIN]
2458+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2459+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2460+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2461+ @ write r1 to current_thread_info()->cpu_domain
2462+ str r1, [r0, #TI_CPU_DOMAIN]
2463+ @ write r1 to DACR
2464+ mcr p15, 0, r1, c3, c0, 0
2465+ @ instruction sync
2466+ instr_sync
2467+ @ restore regs
2468+ ldmia sp!, {r0, r1}
2469+#endif
2470+ .endm
2471+
2472+ .macro pax_close_userland
2473+#ifdef CONFIG_PAX_MEMORY_UDEREF
2474+ @ save regs
2475+ stmdb sp!, {r0, r1}
2476+ @ read DACR from cpu_domain into r1
2477+ mov r0, sp
2478+ @ assume 8K pages, since we have to split the immediate in two
2479+ bic r0, r0, #(0x1fc0)
2480+ bic r0, r0, #(0x3f)
2481+ ldr r1, [r0, #TI_CPU_DOMAIN]
2482+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2483+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2484+ @ write r1 to current_thread_info()->cpu_domain
2485+ str r1, [r0, #TI_CPU_DOMAIN]
2486+ @ write r1 to DACR
2487+ mcr p15, 0, r1, c3, c0, 0
2488+ @ instruction sync
2489+ instr_sync
2490+ @ restore regs
2491+ ldmia sp!, {r0, r1}
2492+#endif
2493+ .endm
2494+
2495 .macro pabt_helper
2496 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2497 #ifdef MULTI_PABORT
2498@@ -89,11 +170,15 @@
2499 * Invalid mode handlers
2500 */
2501 .macro inv_entry, reason
2502+
2503+ pax_enter_kernel
2504+
2505 sub sp, sp, #S_FRAME_SIZE
2506 ARM( stmib sp, {r1 - lr} )
2507 THUMB( stmia sp, {r0 - r12} )
2508 THUMB( str sp, [sp, #S_SP] )
2509 THUMB( str lr, [sp, #S_LR] )
2510+
2511 mov r1, #\reason
2512 .endm
2513
2514@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2515 .macro svc_entry, stack_hole=0, trace=1
2516 UNWIND(.fnstart )
2517 UNWIND(.save {r0 - pc} )
2518+
2519+ pax_enter_kernel
2520+
2521 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2522+
2523 #ifdef CONFIG_THUMB2_KERNEL
2524 SPFIX( str r0, [sp] ) @ temporarily saved
2525 SPFIX( mov r0, sp )
2526@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2527 ldmia r0, {r3 - r5}
2528 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2529 mov r6, #-1 @ "" "" "" ""
2530+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2531+ @ offset sp by 8 as done in pax_enter_kernel
2532+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2533+#else
2534 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2535+#endif
2536 SPFIX( addeq r2, r2, #4 )
2537 str r3, [sp, #-4]! @ save the "real" r0 copied
2538 @ from the exception stack
2539@@ -368,6 +462,9 @@ ENDPROC(__fiq_abt)
2540 .macro usr_entry, trace=1
2541 UNWIND(.fnstart )
2542 UNWIND(.cantunwind ) @ don't unwind the user space
2543+
2544+ pax_enter_kernel_user
2545+
2546 sub sp, sp, #S_FRAME_SIZE
2547 ARM( stmib sp, {r1 - r12} )
2548 THUMB( stmia sp, {r0 - r12} )
2549@@ -478,7 +575,9 @@ __und_usr:
2550 tst r3, #PSR_T_BIT @ Thumb mode?
2551 bne __und_usr_thumb
2552 sub r4, r2, #4 @ ARM instr at LR - 4
2553+ pax_open_userland
2554 1: ldrt r0, [r4]
2555+ pax_close_userland
2556 ARM_BE8(rev r0, r0) @ little endian instruction
2557
2558 @ r0 = 32-bit ARM instruction which caused the exception
2559@@ -512,11 +611,15 @@ __und_usr_thumb:
2560 */
2561 .arch armv6t2
2562 #endif
2563+ pax_open_userland
2564 2: ldrht r5, [r4]
2565+ pax_close_userland
2566 ARM_BE8(rev16 r5, r5) @ little endian instruction
2567 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2568 blo __und_usr_fault_16 @ 16bit undefined instruction
2569+ pax_open_userland
2570 3: ldrht r0, [r2]
2571+ pax_close_userland
2572 ARM_BE8(rev16 r0, r0) @ little endian instruction
2573 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2574 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2575@@ -546,7 +649,8 @@ ENDPROC(__und_usr)
2576 */
2577 .pushsection .fixup, "ax"
2578 .align 2
2579-4: str r4, [sp, #S_PC] @ retry current instruction
2580+4: pax_close_userland
2581+ str r4, [sp, #S_PC] @ retry current instruction
2582 ret r9
2583 .popsection
2584 .pushsection __ex_table,"a"
2585@@ -766,7 +870,7 @@ ENTRY(__switch_to)
2586 THUMB( str lr, [ip], #4 )
2587 ldr r4, [r2, #TI_TP_VALUE]
2588 ldr r5, [r2, #TI_TP_VALUE + 4]
2589-#ifdef CONFIG_CPU_USE_DOMAINS
2590+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2591 ldr r6, [r2, #TI_CPU_DOMAIN]
2592 #endif
2593 switch_tls r1, r4, r5, r3, r7
2594@@ -775,7 +879,7 @@ ENTRY(__switch_to)
2595 ldr r8, =__stack_chk_guard
2596 ldr r7, [r7, #TSK_STACK_CANARY]
2597 #endif
2598-#ifdef CONFIG_CPU_USE_DOMAINS
2599+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2600 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2601 #endif
2602 mov r5, r0
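
A note on the "bic rX, rX, #(0x1fc0); bic rX, rX, #(0x3f)" idiom the new macros repeat: thread_info sits at the base of the 8KB kernel stack, so clearing the low 13 bits of sp yields its address, and the mask is split across two BICs because an ARM immediate is an 8-bit value rotated by an even amount, so 0x1fff cannot be encoded in one instruction. The C equivalent, assuming 8KB stacks:

	/* 0x1fc0 | 0x3f == 0x1fff == THREAD_SIZE - 1 */
	static inline struct thread_info *ti_from_sp(unsigned long sp)
	{
		return (struct thread_info *)(sp & ~0x1fffUL);
	}
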
2603diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2604index f8ccc21..83d192f 100644
2605--- a/arch/arm/kernel/entry-common.S
2606+++ b/arch/arm/kernel/entry-common.S
2607@@ -11,18 +11,46 @@
2608 #include <asm/assembler.h>
2609 #include <asm/unistd.h>
2610 #include <asm/ftrace.h>
2611+#include <asm/domain.h>
2612 #include <asm/unwind.h>
2613
2614+#include "entry-header.S"
2615+
2616 #ifdef CONFIG_NEED_RET_TO_USER
2617 #include <mach/entry-macro.S>
2618 #else
2619 .macro arch_ret_to_user, tmp1, tmp2
2620+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2621+ @ save regs
2622+ stmdb sp!, {r1, r2}
2623+ @ read DACR from cpu_domain into r1
2624+ mov r2, sp
2625+ @ assume 8K pages, since we have to split the immediate in two
2626+ bic r2, r2, #(0x1fc0)
2627+ bic r2, r2, #(0x3f)
2628+ ldr r1, [r2, #TI_CPU_DOMAIN]
2629+#ifdef CONFIG_PAX_KERNEXEC
2630+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2631+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2632+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2633+#endif
2634+#ifdef CONFIG_PAX_MEMORY_UDEREF
2635+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2636+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2637+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2638+#endif
2639+ @ write r1 to current_thread_info()->cpu_domain
2640+ str r1, [r2, #TI_CPU_DOMAIN]
2641+ @ write r1 to DACR
2642+ mcr p15, 0, r1, c3, c0, 0
2643+ @ instruction sync
2644+ instr_sync
2645+ @ restore regs
2646+ ldmia sp!, {r1, r2}
2647+#endif
2648 .endm
2649 #endif
2650
2651-#include "entry-header.S"
2652-
2653-
2654 .align 5
2655 /*
2656 * This is the fast syscall return path. We do as little as
2657@@ -171,6 +199,12 @@ ENTRY(vector_swi)
2658 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2659 #endif
2660
2661+ /*
2662+ * do this here so we don't wrap (and slow down) the code above,
2663+ * which directly dereferences userland to parse the SWI instruction
2664+ */
2665+ pax_enter_kernel_user
2666+
2667 adr tbl, sys_call_table @ load syscall table pointer
2668
2669 #if defined(CONFIG_OABI_COMPAT)
2670diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2671index 1a0045a..9b4f34d 100644
2672--- a/arch/arm/kernel/entry-header.S
2673+++ b/arch/arm/kernel/entry-header.S
2674@@ -196,6 +196,60 @@
2675 msr cpsr_c, \rtemp @ switch back to the SVC mode
2676 .endm
2677
2678+ .macro pax_enter_kernel_user
2679+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2680+ @ save regs
2681+ stmdb sp!, {r0, r1}
2682+ @ read DACR from cpu_domain into r1
2683+ mov r0, sp
2684+ @ assume 8K pages, since we have to split the immediate in two
2685+ bic r0, r0, #(0x1fc0)
2686+ bic r0, r0, #(0x3f)
2687+ ldr r1, [r0, #TI_CPU_DOMAIN]
2688+#ifdef CONFIG_PAX_MEMORY_UDEREF
2689+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2690+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2691+#endif
2692+#ifdef CONFIG_PAX_KERNEXEC
2693+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2694+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2695+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2696+#endif
2697+ @ write r1 to current_thread_info()->cpu_domain
2698+ str r1, [r0, #TI_CPU_DOMAIN]
2699+ @ write r1 to DACR
2700+ mcr p15, 0, r1, c3, c0, 0
2701+ @ instruction sync
2702+ instr_sync
2703+ @ restore regs
2704+ ldmia sp!, {r0, r1}
2705+#endif
2706+ .endm
2707+
2708+ .macro pax_exit_kernel
2709+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2710+ @ save regs
2711+ stmdb sp!, {r0, r1}
2712+ @ read old DACR from stack into r1
2713+ ldr r1, [sp, #(8 + S_SP)]
2714+ sub r1, r1, #8
2715+ ldr r1, [r1]
2716+
2717+ @ write r1 to current_thread_info()->cpu_domain
2718+ mov r0, sp
2719+ @ assume 8K pages, since we have to split the immediate in two
2720+ bic r0, r0, #(0x1fc0)
2721+ bic r0, r0, #(0x3f)
2722+ str r1, [r0, #TI_CPU_DOMAIN]
2723+ @ write r1 to DACR
2724+ mcr p15, 0, r1, c3, c0, 0
2725+ @ instruction sync
2726+ instr_sync
2727+ @ restore regs
2728+ ldmia sp!, {r0, r1}
2729+#endif
2730+ .endm
2731+
2732 #ifndef CONFIG_THUMB2_KERNEL
2733 .macro svc_exit, rpsr, irq = 0
2734 .if \irq != 0
2735@@ -215,6 +269,9 @@
2736 blne trace_hardirqs_off
2737 #endif
2738 .endif
2739+
2740+ pax_exit_kernel
2741+
2742 msr spsr_cxsf, \rpsr
2743 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
2744 @ We must avoid clrex due to Cortex-A15 erratum #830321
2745@@ -291,6 +348,9 @@
2746 blne trace_hardirqs_off
2747 #endif
2748 .endif
2749+
2750+ pax_exit_kernel
2751+
2752 ldr lr, [sp, #S_SP] @ top of the stack
2753 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2754
2755diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2756index 059c3da..8e45cfc 100644
2757--- a/arch/arm/kernel/fiq.c
2758+++ b/arch/arm/kernel/fiq.c
2759@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
2760 void *base = vectors_page;
2761 unsigned offset = FIQ_OFFSET;
2762
2763+ pax_open_kernel();
2764 memcpy(base + offset, start, length);
2765+ pax_close_kernel();
2766+
2767 if (!cache_is_vipt_nonaliasing())
2768 flush_icache_range((unsigned long)base + offset, offset +
2769 length);
2770diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2771index 664eee8..f470938 100644
2772--- a/arch/arm/kernel/head.S
2773+++ b/arch/arm/kernel/head.S
2774@@ -437,7 +437,7 @@ __enable_mmu:
2775 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2776 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2777 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2778- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2779+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2780 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2781 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2782 #endif
2783diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2784index bea7db9..a210d10 100644
2785--- a/arch/arm/kernel/module.c
2786+++ b/arch/arm/kernel/module.c
2787@@ -38,12 +38,39 @@
2788 #endif
2789
2790 #ifdef CONFIG_MMU
2791-void *module_alloc(unsigned long size)
2792+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2793 {
2794+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2795+ return NULL;
2796 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2797- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
2798+ GFP_KERNEL, prot, NUMA_NO_NODE,
2799 __builtin_return_address(0));
2800 }
2801+
2802+void *module_alloc(unsigned long size)
2803+{
2804+
2805+#ifdef CONFIG_PAX_KERNEXEC
2806+ return __module_alloc(size, PAGE_KERNEL);
2807+#else
2808+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2809+#endif
2810+
2811+}
2812+
2813+#ifdef CONFIG_PAX_KERNEXEC
2814+void module_memfree_exec(void *module_region)
2815+{
2816+ module_memfree(module_region);
2817+}
2818+EXPORT_SYMBOL(module_memfree_exec);
2819+
2820+void *module_alloc_exec(unsigned long size)
2821+{
2822+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2823+}
2824+EXPORT_SYMBOL(module_alloc_exec);
2825+#endif
2826 #endif
2827
2828 int
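
With KERNEXEC, module_alloc() now returns non-executable (PAGE_KERNEL) memory and the new module_alloc_exec() supplies the executable mappings, so module data and code land in separate RW and RX regions. A hedged sketch of the split this enables in the module loader (sizes and variable names are hypothetical):

	void *data = module_alloc(data_size);		/* RW, never executable */
	void *text = module_alloc_exec(text_size);	/* for code only */
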
2829diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2830index 5038960..4aa71d8 100644
2831--- a/arch/arm/kernel/patch.c
2832+++ b/arch/arm/kernel/patch.c
2833@@ -67,6 +67,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2834 else
2835 __acquire(&patch_lock);
2836
2837+ pax_open_kernel();
2838 if (thumb2 && __opcode_is_thumb16(insn)) {
2839 *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
2840 size = sizeof(u16);
2841@@ -98,6 +99,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2842 *(u32 *)waddr = insn;
2843 size = sizeof(u32);
2844 }
2845+ pax_close_kernel();
2846
2847 if (waddr != addr) {
2848 flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
2849diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2850index fdfa3a7..5d208b8 100644
2851--- a/arch/arm/kernel/process.c
2852+++ b/arch/arm/kernel/process.c
2853@@ -207,6 +207,7 @@ void machine_power_off(void)
2854
2855 if (pm_power_off)
2856 pm_power_off();
2857+ BUG();
2858 }
2859
2860 /*
2861@@ -220,7 +221,7 @@ void machine_power_off(void)
2862 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2863 * to use. Implementing such co-ordination would be essentially impossible.
2864 */
2865-void machine_restart(char *cmd)
2866+__noreturn void machine_restart(char *cmd)
2867 {
2868 local_irq_disable();
2869 smp_send_stop();
2870@@ -246,8 +247,8 @@ void __show_regs(struct pt_regs *regs)
2871
2872 show_regs_print_info(KERN_DEFAULT);
2873
2874- print_symbol("PC is at %s\n", instruction_pointer(regs));
2875- print_symbol("LR is at %s\n", regs->ARM_lr);
2876+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2877+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2878 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2879 "sp : %08lx ip : %08lx fp : %08lx\n",
2880 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2881@@ -424,12 +425,6 @@ unsigned long get_wchan(struct task_struct *p)
2882 return 0;
2883 }
2884
2885-unsigned long arch_randomize_brk(struct mm_struct *mm)
2886-{
2887- unsigned long range_end = mm->brk + 0x02000000;
2888- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2889-}
2890-
2891 #ifdef CONFIG_MMU
2892 #ifdef CONFIG_KUSER_HELPERS
2893 /*
2894@@ -445,7 +440,7 @@ static struct vm_area_struct gate_vma = {
2895
2896 static int __init gate_vma_init(void)
2897 {
2898- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2899+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2900 return 0;
2901 }
2902 arch_initcall(gate_vma_init);
2903@@ -474,81 +469,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
2904 return is_gate_vma(vma) ? "[vectors]" : NULL;
2905 }
2906
2907-/* If possible, provide a placement hint at a random offset from the
2908- * stack for the signal page.
2909- */
2910-static unsigned long sigpage_addr(const struct mm_struct *mm,
2911- unsigned int npages)
2912-{
2913- unsigned long offset;
2914- unsigned long first;
2915- unsigned long last;
2916- unsigned long addr;
2917- unsigned int slots;
2918-
2919- first = PAGE_ALIGN(mm->start_stack);
2920-
2921- last = TASK_SIZE - (npages << PAGE_SHIFT);
2922-
2923- /* No room after stack? */
2924- if (first > last)
2925- return 0;
2926-
2927- /* Just enough room? */
2928- if (first == last)
2929- return first;
2930-
2931- slots = ((last - first) >> PAGE_SHIFT) + 1;
2932-
2933- offset = get_random_int() % slots;
2934-
2935- addr = first + (offset << PAGE_SHIFT);
2936-
2937- return addr;
2938-}
2939-
2940-static struct page *signal_page;
2941-extern struct page *get_signal_page(void);
2942-
2943-static const struct vm_special_mapping sigpage_mapping = {
2944- .name = "[sigpage]",
2945- .pages = &signal_page,
2946-};
2947-
2948 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2949 {
2950 struct mm_struct *mm = current->mm;
2951- struct vm_area_struct *vma;
2952- unsigned long addr;
2953- unsigned long hint;
2954- int ret = 0;
2955-
2956- if (!signal_page)
2957- signal_page = get_signal_page();
2958- if (!signal_page)
2959- return -ENOMEM;
2960
2961 down_write(&mm->mmap_sem);
2962- hint = sigpage_addr(mm, 1);
2963- addr = get_unmapped_area(NULL, hint, PAGE_SIZE, 0, 0);
2964- if (IS_ERR_VALUE(addr)) {
2965- ret = addr;
2966- goto up_fail;
2967- }
2968-
2969- vma = _install_special_mapping(mm, addr, PAGE_SIZE,
2970- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2971- &sigpage_mapping);
2972-
2973- if (IS_ERR(vma)) {
2974- ret = PTR_ERR(vma);
2975- goto up_fail;
2976- }
2977-
2978- mm->context.sigpage = addr;
2979-
2980- up_fail:
2981+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2982 up_write(&mm->mmap_sem);
2983- return ret;
2984+ return 0;
2985 }
2986 #endif
2987diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2988index f73891b..cf3004e 100644
2989--- a/arch/arm/kernel/psci.c
2990+++ b/arch/arm/kernel/psci.c
2991@@ -28,7 +28,7 @@
2992 #include <asm/psci.h>
2993 #include <asm/system_misc.h>
2994
2995-struct psci_operations psci_ops;
2996+struct psci_operations psci_ops __read_only;
2997
2998 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2999 typedef int (*psci_initcall_t)(const struct device_node *);
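
psci_ops is typical of the ops tables this patch hardens: instances written only around init become __read_only, and the struct type gains __no_const (see the psci.h hunk above) so the constify plugin leaves it writable at the type level. Post-init updates then have to go through the kernel-open window, as the mach-exynos hunk later in this patch shows:

	pax_open_kernel();
	*(void **)&some_ops.handler = fn;	/* names illustrative */
	pax_close_kernel();
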
3000diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3001index ef9119f..31995a3 100644
3002--- a/arch/arm/kernel/ptrace.c
3003+++ b/arch/arm/kernel/ptrace.c
3004@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3005 regs->ARM_ip = ip;
3006 }
3007
3008+#ifdef CONFIG_GRKERNSEC_SETXID
3009+extern void gr_delayed_cred_worker(void);
3010+#endif
3011+
3012 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3013 {
3014 current_thread_info()->syscall = scno;
3015
3016+#ifdef CONFIG_GRKERNSEC_SETXID
3017+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3018+ gr_delayed_cred_worker();
3019+#endif
3020+
3021 /* Do the secure computing check first; failures should be fast. */
3022 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
3023 if (secure_computing() == -1)
3024diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3025index e55408e..14d9998 100644
3026--- a/arch/arm/kernel/setup.c
3027+++ b/arch/arm/kernel/setup.c
3028@@ -105,21 +105,23 @@ EXPORT_SYMBOL(elf_hwcap);
3029 unsigned int elf_hwcap2 __read_mostly;
3030 EXPORT_SYMBOL(elf_hwcap2);
3031
3032+pteval_t __supported_pte_mask __read_only;
3033+pmdval_t __supported_pmd_mask __read_only;
3034
3035 #ifdef MULTI_CPU
3036-struct processor processor __read_mostly;
3037+struct processor processor __read_only;
3038 #endif
3039 #ifdef MULTI_TLB
3040-struct cpu_tlb_fns cpu_tlb __read_mostly;
3041+struct cpu_tlb_fns cpu_tlb __read_only;
3042 #endif
3043 #ifdef MULTI_USER
3044-struct cpu_user_fns cpu_user __read_mostly;
3045+struct cpu_user_fns cpu_user __read_only;
3046 #endif
3047 #ifdef MULTI_CACHE
3048-struct cpu_cache_fns cpu_cache __read_mostly;
3049+struct cpu_cache_fns cpu_cache __read_only;
3050 #endif
3051 #ifdef CONFIG_OUTER_CACHE
3052-struct outer_cache_fns outer_cache __read_mostly;
3053+struct outer_cache_fns outer_cache __read_only;
3054 EXPORT_SYMBOL(outer_cache);
3055 #endif
3056
3057@@ -253,9 +255,13 @@ static int __get_cpu_architecture(void)
3058 asm("mrc p15, 0, %0, c0, c1, 4"
3059 : "=r" (mmfr0));
3060 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3061- (mmfr0 & 0x000000f0) >= 0x00000030)
3062+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3063 cpu_arch = CPU_ARCH_ARMv7;
3064- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3065+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3066+ __supported_pte_mask |= L_PTE_PXN;
3067+ __supported_pmd_mask |= PMD_PXNTABLE;
3068+ }
3069+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3070 (mmfr0 & 0x000000f0) == 0x00000020)
3071 cpu_arch = CPU_ARCH_ARMv6;
3072 else
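
The extra MMFR0 test keys PXN support off ID_MMFR0[3:0] (the VMSA support field): 3 means plain VMSAv7, 4 adds the PXN bit, 5 adds the long-descriptor format. Reading the field directly, as the surrounding code does:

	unsigned int mmfr0;

	asm("mrc p15, 0, %0, c0, c1, 4" : "=r" (mmfr0));
	if ((mmfr0 & 0x0000000f) >= 0x00000004)
		pr_info("VMSAv7 with PXN support\n");
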
3073diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3074index 8aa6f1b..0899e08 100644
3075--- a/arch/arm/kernel/signal.c
3076+++ b/arch/arm/kernel/signal.c
3077@@ -24,8 +24,6 @@
3078
3079 extern const unsigned long sigreturn_codes[7];
3080
3081-static unsigned long signal_return_offset;
3082-
3083 #ifdef CONFIG_CRUNCH
3084 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3085 {
3086@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3087 * except when the MPU has protected the vectors
3088 * page from PL0
3089 */
3090- retcode = mm->context.sigpage + signal_return_offset +
3091- (idx << 2) + thumb;
3092+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3093 } else
3094 #endif
3095 {
3096@@ -603,33 +600,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3097 } while (thread_flags & _TIF_WORK_MASK);
3098 return 0;
3099 }
3100-
3101-struct page *get_signal_page(void)
3102-{
3103- unsigned long ptr;
3104- unsigned offset;
3105- struct page *page;
3106- void *addr;
3107-
3108- page = alloc_pages(GFP_KERNEL, 0);
3109-
3110- if (!page)
3111- return NULL;
3112-
3113- addr = page_address(page);
3114-
3115- /* Give the signal return code some randomness */
3116- offset = 0x200 + (get_random_int() & 0x7fc);
3117- signal_return_offset = offset;
3118-
3119- /*
3120- * Copy signal return handlers into the vector page, and
3121- * set sigreturn to be a pointer to these.
3122- */
3123- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3124-
3125- ptr = (unsigned long)addr + offset;
3126- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3127-
3128- return page;
3129-}
3130diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3131index 86ef244..c518451 100644
3132--- a/arch/arm/kernel/smp.c
3133+++ b/arch/arm/kernel/smp.c
3134@@ -76,7 +76,7 @@ enum ipi_msg_type {
3135
3136 static DECLARE_COMPLETION(cpu_running);
3137
3138-static struct smp_operations smp_ops;
3139+static struct smp_operations smp_ops __read_only;
3140
3141 void __init smp_set_ops(struct smp_operations *ops)
3142 {
3143diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3144index 7a3be1d..b00c7de 100644
3145--- a/arch/arm/kernel/tcm.c
3146+++ b/arch/arm/kernel/tcm.c
3147@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3148 .virtual = ITCM_OFFSET,
3149 .pfn = __phys_to_pfn(ITCM_OFFSET),
3150 .length = 0,
3151- .type = MT_MEMORY_RWX_ITCM,
3152+ .type = MT_MEMORY_RX_ITCM,
3153 }
3154 };
3155
3156@@ -267,7 +267,9 @@ no_dtcm:
3157 start = &__sitcm_text;
3158 end = &__eitcm_text;
3159 ram = &__itcm_start;
3160+ pax_open_kernel();
3161 memcpy(start, ram, itcm_code_sz);
3162+ pax_close_kernel();
3163 pr_debug("CPU ITCM: copied code from %p - %p\n",
3164 start, end);
3165 itcm_present = true;
3166diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3167index 788e23f..6fa06a1 100644
3168--- a/arch/arm/kernel/traps.c
3169+++ b/arch/arm/kernel/traps.c
3170@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3171 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3172 {
3173 #ifdef CONFIG_KALLSYMS
3174- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3175+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3176 #else
3177 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3178 #endif
3179@@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3180 static int die_owner = -1;
3181 static unsigned int die_nest_count;
3182
3183+extern void gr_handle_kernel_exploit(void);
3184+
3185 static unsigned long oops_begin(void)
3186 {
3187 int cpu;
3188@@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3189 panic("Fatal exception in interrupt");
3190 if (panic_on_oops)
3191 panic("Fatal exception");
3192+
3193+ gr_handle_kernel_exploit();
3194+
3195 if (signr)
3196 do_exit(signr);
3197 }
3198@@ -880,7 +885,11 @@ void __init early_trap_init(void *vectors_base)
3199 kuser_init(vectors_base);
3200
3201 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3202- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3203+
3204+#ifndef CONFIG_PAX_MEMORY_UDEREF
3205+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3206+#endif
3207+
3208 #else /* ifndef CONFIG_CPU_V7M */
3209 /*
3210 * on V7-M there is no need to copy the vector table to a dedicated
3211diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3212index b31aa73..cc4b7a1 100644
3213--- a/arch/arm/kernel/vmlinux.lds.S
3214+++ b/arch/arm/kernel/vmlinux.lds.S
3215@@ -37,7 +37,7 @@
3216 #endif
3217
3218 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3219- defined(CONFIG_GENERIC_BUG)
3220+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3221 #define ARM_EXIT_KEEP(x) x
3222 #define ARM_EXIT_DISCARD(x)
3223 #else
3224@@ -123,6 +123,8 @@ SECTIONS
3225 #ifdef CONFIG_DEBUG_RODATA
3226 . = ALIGN(1<<SECTION_SHIFT);
3227 #endif
3228+ _etext = .; /* End of text section */
3229+
3230 RO_DATA(PAGE_SIZE)
3231
3232 . = ALIGN(4);
3233@@ -153,8 +155,6 @@ SECTIONS
3234
3235 NOTES
3236
3237- _etext = .; /* End of text and rodata section */
3238-
3239 #ifndef CONFIG_XIP_KERNEL
3240 # ifdef CONFIG_ARM_KERNMEM_PERMS
3241 . = ALIGN(1<<SECTION_SHIFT);
3242diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3243index 0b0d58a..988cb45 100644
3244--- a/arch/arm/kvm/arm.c
3245+++ b/arch/arm/kvm/arm.c
3246@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3247 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3248
3249 /* The VMID used in the VTTBR */
3250-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3251+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3252 static u8 kvm_next_vmid;
3253 static DEFINE_SPINLOCK(kvm_vmid_lock);
3254
3255@@ -351,7 +351,7 @@ void force_vm_exit(const cpumask_t *mask)
3256 */
3257 static bool need_new_vmid_gen(struct kvm *kvm)
3258 {
3259- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3260+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3261 }
3262
3263 /**
3264@@ -384,7 +384,7 @@ static void update_vttbr(struct kvm *kvm)
3265
3266 /* First user of a new VMID generation? */
3267 if (unlikely(kvm_next_vmid == 0)) {
3268- atomic64_inc(&kvm_vmid_gen);
3269+ atomic64_inc_unchecked(&kvm_vmid_gen);
3270 kvm_next_vmid = 1;
3271
3272 /*
3273@@ -401,7 +401,7 @@ static void update_vttbr(struct kvm *kvm)
3274 kvm_call_hyp(__kvm_flush_vm_context);
3275 }
3276
3277- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3278+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3279 kvm->arch.vmid = kvm_next_vmid;
3280 kvm_next_vmid++;
3281
3282@@ -1038,7 +1038,7 @@ static void check_kvm_target_cpu(void *ret)
3283 /**
3284 * Initialize Hyp-mode and memory mappings on all CPUs.
3285 */
3286-int kvm_arch_init(void *opaque)
3287+int kvm_arch_init(const void *opaque)
3288 {
3289 int err;
3290 int ret, cpu;
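
kvm_vmid_gen is a generation counter that may legitimately wrap, so under PAX_REFCOUNT (which makes the plain atomic ops trap on overflow) it is converted to the *_unchecked variants this patch introduces. The pattern, as a fragment:

	static atomic64_unchecked_t gen = ATOMIC64_INIT(1);

	atomic64_inc_unchecked(&gen);	/* wraparound permitted, no trap */
	if (cached_gen != atomic64_read_unchecked(&gen))
		refresh();		/* hypothetical: pick up the new generation */
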
3291diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3292index 14a0d98..7771a7d 100644
3293--- a/arch/arm/lib/clear_user.S
3294+++ b/arch/arm/lib/clear_user.S
3295@@ -12,14 +12,14 @@
3296
3297 .text
3298
3299-/* Prototype: int __clear_user(void *addr, size_t sz)
3300+/* Prototype: int ___clear_user(void *addr, size_t sz)
3301 * Purpose : clear some user memory
3302 * Params : addr - user memory address to clear
3303 * : sz - number of bytes to clear
3304 * Returns : number of bytes NOT cleared
3305 */
3306 ENTRY(__clear_user_std)
3307-WEAK(__clear_user)
3308+WEAK(___clear_user)
3309 stmfd sp!, {r1, lr}
3310 mov r2, #0
3311 cmp r1, #4
3312@@ -44,7 +44,7 @@ WEAK(__clear_user)
3313 USER( strnebt r2, [r0])
3314 mov r0, #0
3315 ldmfd sp!, {r1, pc}
3316-ENDPROC(__clear_user)
3317+ENDPROC(___clear_user)
3318 ENDPROC(__clear_user_std)
3319
3320 .pushsection .fixup,"ax"
3321diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3322index 7a235b9..73a0556 100644
3323--- a/arch/arm/lib/copy_from_user.S
3324+++ b/arch/arm/lib/copy_from_user.S
3325@@ -17,7 +17,7 @@
3326 /*
3327 * Prototype:
3328 *
3329- * size_t __copy_from_user(void *to, const void *from, size_t n)
3330+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3331 *
3332 * Purpose:
3333 *
3334@@ -89,11 +89,11 @@
3335
3336 .text
3337
3338-ENTRY(__copy_from_user)
3339+ENTRY(___copy_from_user)
3340
3341 #include "copy_template.S"
3342
3343-ENDPROC(__copy_from_user)
3344+ENDPROC(___copy_from_user)
3345
3346 .pushsection .fixup,"ax"
3347 .align 0
3348diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3349index 6ee2f67..d1cce76 100644
3350--- a/arch/arm/lib/copy_page.S
3351+++ b/arch/arm/lib/copy_page.S
3352@@ -10,6 +10,7 @@
3353 * ASM optimised string functions
3354 */
3355 #include <linux/linkage.h>
3356+#include <linux/const.h>
3357 #include <asm/assembler.h>
3358 #include <asm/asm-offsets.h>
3359 #include <asm/cache.h>
3360diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3361index a9d3db1..164b089 100644
3362--- a/arch/arm/lib/copy_to_user.S
3363+++ b/arch/arm/lib/copy_to_user.S
3364@@ -17,7 +17,7 @@
3365 /*
3366 * Prototype:
3367 *
3368- * size_t __copy_to_user(void *to, const void *from, size_t n)
3369+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3370 *
3371 * Purpose:
3372 *
3373@@ -93,11 +93,11 @@
3374 .text
3375
3376 ENTRY(__copy_to_user_std)
3377-WEAK(__copy_to_user)
3378+WEAK(___copy_to_user)
3379
3380 #include "copy_template.S"
3381
3382-ENDPROC(__copy_to_user)
3383+ENDPROC(___copy_to_user)
3384 ENDPROC(__copy_to_user_std)
3385
3386 .pushsection .fixup,"ax"
3387diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3388index 7d08b43..f7ca7ea 100644
3389--- a/arch/arm/lib/csumpartialcopyuser.S
3390+++ b/arch/arm/lib/csumpartialcopyuser.S
3391@@ -57,8 +57,8 @@
3392 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3393 */
3394
3395-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3396-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3397+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3398+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3399
3400 #include "csumpartialcopygeneric.S"
3401
3402diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3403index 312d43e..21d2322 100644
3404--- a/arch/arm/lib/delay.c
3405+++ b/arch/arm/lib/delay.c
3406@@ -29,7 +29,7 @@
3407 /*
3408 * Default to the loop-based delay implementation.
3409 */
3410-struct arm_delay_ops arm_delay_ops = {
3411+struct arm_delay_ops arm_delay_ops __read_only = {
3412 .delay = __loop_delay,
3413 .const_udelay = __loop_const_udelay,
3414 .udelay = __loop_udelay,
3415diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3416index 3e58d71..029817c 100644
3417--- a/arch/arm/lib/uaccess_with_memcpy.c
3418+++ b/arch/arm/lib/uaccess_with_memcpy.c
3419@@ -136,7 +136,7 @@ out:
3420 }
3421
3422 unsigned long
3423-__copy_to_user(void __user *to, const void *from, unsigned long n)
3424+___copy_to_user(void __user *to, const void *from, unsigned long n)
3425 {
3426 /*
3427 * This test is stubbed out of the main function above to keep
3428@@ -190,7 +190,7 @@ out:
3429 return n;
3430 }
3431
3432-unsigned long __clear_user(void __user *addr, unsigned long n)
3433+unsigned long ___clear_user(void __user *addr, unsigned long n)
3434 {
3435 /* See rationale for this in __copy_to_user() above. */
3436 if (n < 64)
3437diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
3438index ce25e85..3dd7850 100644
3439--- a/arch/arm/mach-at91/setup.c
3440+++ b/arch/arm/mach-at91/setup.c
3441@@ -57,7 +57,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length)
3442
3443 desc->pfn = __phys_to_pfn(base);
3444 desc->length = length;
3445- desc->type = MT_MEMORY_RWX_NONCACHED;
3446+ desc->type = MT_MEMORY_RW_NONCACHED;
3447
3448 pr_info("sram at 0x%lx of 0x%x mapped at 0x%lx\n",
3449 base, length, desc->virtual);
3450diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
3451index f8e7dcd..17ee921 100644
3452--- a/arch/arm/mach-exynos/suspend.c
3453+++ b/arch/arm/mach-exynos/suspend.c
3454@@ -18,6 +18,7 @@
3455 #include <linux/syscore_ops.h>
3456 #include <linux/cpu_pm.h>
3457 #include <linux/io.h>
3458+#include <linux/irq.h>
3459 #include <linux/irqchip/arm-gic.h>
3460 #include <linux/err.h>
3461 #include <linux/regulator/machine.h>
3462@@ -558,8 +559,10 @@ void __init exynos_pm_init(void)
3463 tmp |= pm_data->wake_disable_mask;
3464 pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
3465
3466- exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3467- exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3468+ pax_open_kernel();
3469+ *(void **)&exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3470+ *(void **)&exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3471+ pax_close_kernel();
3472
3473 register_syscore_ops(&exynos_pm_syscore_ops);
3474 suspend_set_ops(&exynos_suspend_ops);
3475diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
3476index 7f352de..6dc0929 100644
3477--- a/arch/arm/mach-keystone/keystone.c
3478+++ b/arch/arm/mach-keystone/keystone.c
3479@@ -27,7 +27,7 @@
3480
3481 #include "keystone.h"
3482
3483-static struct notifier_block platform_nb;
3484+static notifier_block_no_const platform_nb;
3485 static unsigned long keystone_dma_pfn_offset __read_mostly;
3486
3487 static int keystone_platform_notifier(struct notifier_block *nb,
3488diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
3489index ccef880..5dfad80 100644
3490--- a/arch/arm/mach-mvebu/coherency.c
3491+++ b/arch/arm/mach-mvebu/coherency.c
3492@@ -164,7 +164,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
3493
3494 /*
3495 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
3496- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
3497+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
3498 * is needed as a workaround for a deadlock issue between the PCIe
3499 * interface and the cache controller.
3500 */
3501@@ -177,7 +177,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
3502 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
3503
3504 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
3505- mtype = MT_UNCACHED;
3506+ mtype = MT_UNCACHED_RW;
3507
3508 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
3509 }
3510diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3511index b6443a4..20a0b74 100644
3512--- a/arch/arm/mach-omap2/board-n8x0.c
3513+++ b/arch/arm/mach-omap2/board-n8x0.c
3514@@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3515 }
3516 #endif
3517
3518-struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3519+struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3520 .late_init = n8x0_menelaus_late_init,
3521 };
3522
3523diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3524index 79f49d9..70bf184 100644
3525--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3526+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3527@@ -86,7 +86,7 @@ struct cpu_pm_ops {
3528 void (*resume)(void);
3529 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3530 void (*hotplug_restart)(void);
3531-};
3532+} __no_const;
3533
3534 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3535 static struct powerdomain *mpuss_pd;
3536@@ -105,7 +105,7 @@ static void dummy_cpu_resume(void)
3537 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3538 {}
3539
3540-struct cpu_pm_ops omap_pm_ops = {
3541+static struct cpu_pm_ops omap_pm_ops __read_only = {
3542 .finish_suspend = default_finish_suspend,
3543 .resume = dummy_cpu_resume,
3544 .scu_prepare = dummy_scu_prepare,
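
__no_const marks structs whose function pointers legitimately change at runtime, telling the PaX constify GCC plugin not to force them const the way it does other ops-like structs. A sketch of how such an annotation is typically defined; the attribute name under the plugin is an assumption here, and the empty fallback is what plain builds see:

/* with the constify plugin active: opt this struct out of const-ification;
   the attribute name is plugin-defined (an assumption in this sketch) */
#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))
#else
#define __no_const
#endif

struct cpu_pm_ops {
        int  (*finish_suspend)(unsigned long cpu_state);
        void (*resume)(void);
} __no_const;                  /* stays writable; cf. omap_pm_ops above */
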
3545diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
3546index 5305ec7..6d74045 100644
3547--- a/arch/arm/mach-omap2/omap-smp.c
3548+++ b/arch/arm/mach-omap2/omap-smp.c
3549@@ -19,6 +19,7 @@
3550 #include <linux/device.h>
3551 #include <linux/smp.h>
3552 #include <linux/io.h>
3553+#include <linux/irq.h>
3554 #include <linux/irqchip/arm-gic.h>
3555
3556 #include <asm/smp_scu.h>
3557diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3558index f961c46..4a453dc 100644
3559--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3560+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3561@@ -344,7 +344,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3562 return NOTIFY_OK;
3563 }
3564
3565-static struct notifier_block __refdata irq_hotplug_notifier = {
3566+static struct notifier_block irq_hotplug_notifier = {
3567 .notifier_call = irq_cpu_hotplug_notify,
3568 };
3569
3570diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3571index be9541e..821805f 100644
3572--- a/arch/arm/mach-omap2/omap_device.c
3573+++ b/arch/arm/mach-omap2/omap_device.c
3574@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
3575 struct platform_device __init *omap_device_build(const char *pdev_name,
3576 int pdev_id,
3577 struct omap_hwmod *oh,
3578- void *pdata, int pdata_len)
3579+ const void *pdata, int pdata_len)
3580 {
3581 struct omap_hwmod *ohs[] = { oh };
3582
3583@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3584 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3585 int pdev_id,
3586 struct omap_hwmod **ohs,
3587- int oh_cnt, void *pdata,
3588+ int oh_cnt, const void *pdata,
3589 int pdata_len)
3590 {
3591 int ret = -ENOMEM;
3592diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3593index 78c02b3..c94109a 100644
3594--- a/arch/arm/mach-omap2/omap_device.h
3595+++ b/arch/arm/mach-omap2/omap_device.h
3596@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3597 /* Core code interface */
3598
3599 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3600- struct omap_hwmod *oh, void *pdata,
3601+ struct omap_hwmod *oh, const void *pdata,
3602 int pdata_len);
3603
3604 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3605 struct omap_hwmod **oh, int oh_cnt,
3606- void *pdata, int pdata_len);
3607+ const void *pdata, int pdata_len);
3608
3609 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3610 struct omap_hwmod **ohs, int oh_cnt);
3611diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3612index 9025fff..3555702 100644
3613--- a/arch/arm/mach-omap2/omap_hwmod.c
3614+++ b/arch/arm/mach-omap2/omap_hwmod.c
3615@@ -193,10 +193,10 @@ struct omap_hwmod_soc_ops {
3616 int (*init_clkdm)(struct omap_hwmod *oh);
3617 void (*update_context_lost)(struct omap_hwmod *oh);
3618 int (*get_context_lost)(struct omap_hwmod *oh);
3619-};
3620+} __no_const;
3621
3622 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3623-static struct omap_hwmod_soc_ops soc_ops;
3624+static struct omap_hwmod_soc_ops soc_ops __read_only;
3625
3626 /* omap_hwmod_list contains all registered struct omap_hwmods */
3627 static LIST_HEAD(omap_hwmod_list);
3628diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3629index 95fee54..cfa9cf1 100644
3630--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3631+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3632@@ -10,6 +10,7 @@
3633
3634 #include <linux/kernel.h>
3635 #include <linux/init.h>
3636+#include <asm/pgtable.h>
3637
3638 #include "powerdomain.h"
3639
3640@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3641
3642 void __init am43xx_powerdomains_init(void)
3643 {
3644- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3645+ pax_open_kernel();
3646+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3647+ pax_close_kernel();
3648 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3649 pwrdm_register_pwrdms(powerdomains_am43xx);
3650 pwrdm_complete_init();
3651diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3652index ff0a68c..b312aa0 100644
3653--- a/arch/arm/mach-omap2/wd_timer.c
3654+++ b/arch/arm/mach-omap2/wd_timer.c
3655@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3656 struct omap_hwmod *oh;
3657 char *oh_name = "wd_timer2";
3658 char *dev_name = "omap_wdt";
3659- struct omap_wd_timer_platform_data pdata;
3660+ static struct omap_wd_timer_platform_data pdata = {
3661+ .read_reset_sources = prm_read_reset_sources
3662+ };
3663
3664 if (!cpu_class_is_omap2() || of_have_populated_dt())
3665 return 0;
3666@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3667 return -EINVAL;
3668 }
3669
3670- pdata.read_reset_sources = prm_read_reset_sources;
3671-
3672 pdev = omap_device_build(dev_name, id, oh, &pdata,
3673 sizeof(struct omap_wd_timer_platform_data));
3674 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3675diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3676index 4f25a7c..a81be85 100644
3677--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3678+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3679@@ -179,7 +179,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3680 bool entered_lp2 = false;
3681
3682 if (tegra_pending_sgi())
3683- ACCESS_ONCE(abort_flag) = true;
3684+ ACCESS_ONCE_RW(abort_flag) = true;
3685
3686 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3687
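
grsecurity redefines ACCESS_ONCE() with a const-qualified cast so that accidental stores through it break the build, and adds ACCESS_ONCE_RW() for stores that are intended, like this abort_flag update. A minimal pair of definitions with that behaviour (a sketch, not the patch's exact macro text):

/* read-only view: fine for loads, rejected by the compiler for stores */
#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
/* read-write view: keeps the tear-free volatile access, allows stores */
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

static _Bool abort_flag;

void signal_abort(void)
{
        ACCESS_ONCE_RW(abort_flag) = 1;   /* the tegra20 idle path's store */
}
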
3688diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
3689index ab95f53..4b977a7 100644
3690--- a/arch/arm/mach-tegra/irq.c
3691+++ b/arch/arm/mach-tegra/irq.c
3692@@ -20,6 +20,7 @@
3693 #include <linux/cpu_pm.h>
3694 #include <linux/interrupt.h>
3695 #include <linux/io.h>
3696+#include <linux/irq.h>
3697 #include <linux/irqchip/arm-gic.h>
3698 #include <linux/irq.h>
3699 #include <linux/kernel.h>
3700diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
3701index 2cb587b..6ddfebf 100644
3702--- a/arch/arm/mach-ux500/pm.c
3703+++ b/arch/arm/mach-ux500/pm.c
3704@@ -10,6 +10,7 @@
3705 */
3706
3707 #include <linux/kernel.h>
3708+#include <linux/irq.h>
3709 #include <linux/irqchip/arm-gic.h>
3710 #include <linux/delay.h>
3711 #include <linux/io.h>
3712diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3713index 2dea8b5..6499da2 100644
3714--- a/arch/arm/mach-ux500/setup.h
3715+++ b/arch/arm/mach-ux500/setup.h
3716@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
3717 .type = MT_DEVICE, \
3718 }
3719
3720-#define __MEM_DEV_DESC(x, sz) { \
3721- .virtual = IO_ADDRESS(x), \
3722- .pfn = __phys_to_pfn(x), \
3723- .length = sz, \
3724- .type = MT_MEMORY_RWX, \
3725-}
3726-
3727 extern struct smp_operations ux500_smp_ops;
3728 extern void ux500_cpu_die(unsigned int cpu);
3729
3730diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
3731index 52d768f..5f93180 100644
3732--- a/arch/arm/mach-zynq/platsmp.c
3733+++ b/arch/arm/mach-zynq/platsmp.c
3734@@ -24,6 +24,7 @@
3735 #include <linux/io.h>
3736 #include <asm/cacheflush.h>
3737 #include <asm/smp_scu.h>
3738+#include <linux/irq.h>
3739 #include <linux/irqchip/arm-gic.h>
3740 #include "common.h"
3741
3742diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3743index c43c714..4f8f7b9 100644
3744--- a/arch/arm/mm/Kconfig
3745+++ b/arch/arm/mm/Kconfig
3746@@ -446,6 +446,7 @@ config CPU_32v5
3747
3748 config CPU_32v6
3749 bool
3750+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3751 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3752
3753 config CPU_32v6K
3754@@ -600,6 +601,7 @@ config CPU_CP15_MPU
3755
3756 config CPU_USE_DOMAINS
3757 bool
3758+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3759 help
3760 This option enables or disables the use of domain switching
3761 via the set_fs() function.
3762@@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS
3763
3764 config KUSER_HELPERS
3765 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3766- depends on MMU
3767+ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
3768 default y
3769 help
3770 Warning: disabling this option may break user programs.
3771@@ -812,7 +814,7 @@ config KUSER_HELPERS
3772 See Documentation/arm/kernel_user_helpers.txt for details.
3773
3774 However, the fixed address nature of these helpers can be used
3775- by ROP (return orientated programming) authors when creating
3776+ by ROP (Return Oriented Programming) authors when creating
3777 exploits.
3778
3779 If all of the binaries and libraries which run on your platform
3780diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3781index 2c0c541..4585df9 100644
3782--- a/arch/arm/mm/alignment.c
3783+++ b/arch/arm/mm/alignment.c
3784@@ -216,10 +216,12 @@ union offset_union {
3785 #define __get16_unaligned_check(ins,val,addr) \
3786 do { \
3787 unsigned int err = 0, v, a = addr; \
3788+ pax_open_userland(); \
3789 __get8_unaligned_check(ins,v,a,err); \
3790 val = v << ((BE) ? 8 : 0); \
3791 __get8_unaligned_check(ins,v,a,err); \
3792 val |= v << ((BE) ? 0 : 8); \
3793+ pax_close_userland(); \
3794 if (err) \
3795 goto fault; \
3796 } while (0)
3797@@ -233,6 +235,7 @@ union offset_union {
3798 #define __get32_unaligned_check(ins,val,addr) \
3799 do { \
3800 unsigned int err = 0, v, a = addr; \
3801+ pax_open_userland(); \
3802 __get8_unaligned_check(ins,v,a,err); \
3803 val = v << ((BE) ? 24 : 0); \
3804 __get8_unaligned_check(ins,v,a,err); \
3805@@ -241,6 +244,7 @@ union offset_union {
3806 val |= v << ((BE) ? 8 : 16); \
3807 __get8_unaligned_check(ins,v,a,err); \
3808 val |= v << ((BE) ? 0 : 24); \
3809+ pax_close_userland(); \
3810 if (err) \
3811 goto fault; \
3812 } while (0)
3813@@ -254,6 +258,7 @@ union offset_union {
3814 #define __put16_unaligned_check(ins,val,addr) \
3815 do { \
3816 unsigned int err = 0, v = val, a = addr; \
3817+ pax_open_userland(); \
3818 __asm__( FIRST_BYTE_16 \
3819 ARM( "1: "ins" %1, [%2], #1\n" ) \
3820 THUMB( "1: "ins" %1, [%2]\n" ) \
3821@@ -273,6 +278,7 @@ union offset_union {
3822 " .popsection\n" \
3823 : "=r" (err), "=&r" (v), "=&r" (a) \
3824 : "0" (err), "1" (v), "2" (a)); \
3825+ pax_close_userland(); \
3826 if (err) \
3827 goto fault; \
3828 } while (0)
3829@@ -286,6 +292,7 @@ union offset_union {
3830 #define __put32_unaligned_check(ins,val,addr) \
3831 do { \
3832 unsigned int err = 0, v = val, a = addr; \
3833+ pax_open_userland(); \
3834 __asm__( FIRST_BYTE_32 \
3835 ARM( "1: "ins" %1, [%2], #1\n" ) \
3836 THUMB( "1: "ins" %1, [%2]\n" ) \
3837@@ -315,6 +322,7 @@ union offset_union {
3838 " .popsection\n" \
3839 : "=r" (err), "=&r" (v), "=&r" (a) \
3840 : "0" (err), "1" (v), "2" (a)); \
3841+ pax_close_userland(); \
3842 if (err) \
3843 goto fault; \
3844 } while (0)
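
The alignment fixup runs in kernel mode but walks the faulting user address byte by byte, so under UDEREF each multi-byte accessor must hold the userland access window open across its component loads or stores. The same structure in plain C, with the inline-assembly byte accessor and the pax_*_userland() bodies stubbed:

static void pax_open_userland(void)  { /* grant kernel access to user pages */ }
static void pax_close_userland(void) { /* revoke it again */ }

/* stands in for the __get8_unaligned_check inline assembly */
static int get8(const unsigned char *a, unsigned int *v)
{
        *v = *a;
        return 0;               /* 0 = no fault */
}

/* assemble a 16-bit value from two byte loads, window held open throughout */
int get16_unaligned(const unsigned char *addr, unsigned int *val)
{
        unsigned int v, err = 0;

        pax_open_userland();
        err |= get8(addr, &v);     *val  = v;
        err |= get8(addr + 1, &v); *val |= v << 8;
        pax_close_userland();

        return err ? -1 : 0;    /* the macro version jumps to its fault label */
}
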
3845diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3846index 5e65ca8..879e7b3 100644
3847--- a/arch/arm/mm/cache-l2x0.c
3848+++ b/arch/arm/mm/cache-l2x0.c
3849@@ -42,7 +42,7 @@ struct l2c_init_data {
3850 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
3851 void (*save)(void __iomem *);
3852 struct outer_cache_fns outer_cache;
3853-};
3854+} __do_const;
3855
3856 #define CACHE_LINE_SIZE 32
3857
3858diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3859index 845769e..4278fd7 100644
3860--- a/arch/arm/mm/context.c
3861+++ b/arch/arm/mm/context.c
3862@@ -43,7 +43,7 @@
3863 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3864
3865 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3866-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3867+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3868 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3869
3870 static DEFINE_PER_CPU(atomic64_t, active_asids);
3871@@ -178,7 +178,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3872 {
3873 static u32 cur_idx = 1;
3874 u64 asid = atomic64_read(&mm->context.id);
3875- u64 generation = atomic64_read(&asid_generation);
3876+ u64 generation = atomic64_read_unchecked(&asid_generation);
3877
3878 if (asid != 0) {
3879 /*
3880@@ -208,7 +208,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3881 */
3882 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
3883 if (asid == NUM_USER_ASIDS) {
3884- generation = atomic64_add_return(ASID_FIRST_VERSION,
3885+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3886 &asid_generation);
3887 flush_context(cpu);
3888 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3889@@ -240,14 +240,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3890 cpu_set_reserved_ttbr0();
3891
3892 asid = atomic64_read(&mm->context.id);
3893- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3894+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3895 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3896 goto switch_mm_fastpath;
3897
3898 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3899 /* Check that our ASID belongs to the current generation. */
3900 asid = atomic64_read(&mm->context.id);
3901- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3902+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3903 asid = new_context(mm, cpu);
3904 atomic64_set(&mm->context.id, asid);
3905 }
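
With PAX_REFCOUNT enabled, the ordinary atomic64 ops trap on signed overflow to stop reference-count wraps; the ASID generation counter wraps by design, so it moves to atomic64_unchecked_t and the *_unchecked variants that skip the check. The distinction, modelled with compiler builtins (the kernel versions are per-arch assembly, and this sketch is not actually atomic):

#include <stdint.h>
#include <stdlib.h>

typedef struct { int64_t counter; } atomic64_t;

/* checked flavour: a signed wrap is treated as an attack */
static int64_t atomic64_add_return(int64_t i, atomic64_t *v)
{
        int64_t new_val;

        if (__builtin_add_overflow(v->counter, i, &new_val))
                abort();   /* stand-in for pax_report_refcount_overflow() */
        return v->counter = new_val;
}

/* unchecked flavour: wrap-around is expected (e.g. ASID generations) */
static int64_t atomic64_add_return_unchecked(int64_t i, atomic64_t *v)
{
        v->counter = (int64_t)((uint64_t)v->counter + (uint64_t)i);
        return v->counter;   /* wraps modulo 2^64, no trap */
}

int main(void)
{
        atomic64_t gen = { INT64_MAX };
        atomic64_add_return_unchecked(1, &gen);   /* wraps, by design */
        return (int)atomic64_add_return(0, &gen);
}
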
3906diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3907index a982dc3..2d9f5f7 100644
3908--- a/arch/arm/mm/fault.c
3909+++ b/arch/arm/mm/fault.c
3910@@ -25,6 +25,7 @@
3911 #include <asm/system_misc.h>
3912 #include <asm/system_info.h>
3913 #include <asm/tlbflush.h>
3914+#include <asm/sections.h>
3915
3916 #include "fault.h"
3917
3918@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3919 if (fixup_exception(regs))
3920 return;
3921
3922+#ifdef CONFIG_PAX_MEMORY_UDEREF
3923+ if (addr < TASK_SIZE) {
3924+ if (current->signal->curr_ip)
3925+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3926+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3927+ else
3928+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3929+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3930+ }
3931+#endif
3932+
3933+#ifdef CONFIG_PAX_KERNEXEC
3934+ if ((fsr & FSR_WRITE) &&
3935+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3936+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3937+ {
3938+ if (current->signal->curr_ip)
3939+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3940+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3941+ else
3942+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3943+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3944+ }
3945+#endif
3946+
3947 /*
3948 * No handler, we'll have to terminate things with extreme prejudice.
3949 */
3950@@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3951 }
3952 #endif
3953
3954+#ifdef CONFIG_PAX_PAGEEXEC
3955+ if (fsr & FSR_LNX_PF) {
3956+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3957+ do_group_exit(SIGKILL);
3958+ }
3959+#endif
3960+
3961 tsk->thread.address = addr;
3962 tsk->thread.error_code = fsr;
3963 tsk->thread.trap_no = 14;
3964@@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3965 }
3966 #endif /* CONFIG_MMU */
3967
3968+#ifdef CONFIG_PAX_PAGEEXEC
3969+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3970+{
3971+ long i;
3972+
3973+ printk(KERN_ERR "PAX: bytes at PC: ");
3974+ for (i = 0; i < 20; i++) {
3975+ unsigned char c;
3976+ if (get_user(c, (__force unsigned char __user *)pc+i))
3977+ printk(KERN_CONT "?? ");
3978+ else
3979+ printk(KERN_CONT "%02x ", c);
3980+ }
3981+ printk("\n");
3982+
3983+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3984+ for (i = -1; i < 20; i++) {
3985+ unsigned long c;
3986+ if (get_user(c, (__force unsigned long __user *)sp+i))
3987+ printk(KERN_CONT "???????? ");
3988+ else
3989+ printk(KERN_CONT "%08lx ", c);
3990+ }
3991+ printk("\n");
3992+}
3993+#endif
3994+
3995 /*
3996 * First Level Translation Fault Handler
3997 *
3998@@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3999 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
4000 struct siginfo info;
4001
4002+#ifdef CONFIG_PAX_MEMORY_UDEREF
4003+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
4004+ if (current->signal->curr_ip)
4005+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4006+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4007+ else
4008+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4009+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4010+ goto die;
4011+ }
4012+#endif
4013+
4014 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4015 return;
4016
4017+die:
4018 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4019 inf->name, fsr, addr);
4020
4021@@ -573,15 +646,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4022 ifsr_info[nr].name = name;
4023 }
4024
4025+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4026+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4027+
4028 asmlinkage void __exception
4029 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4030 {
4031 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4032 struct siginfo info;
4033+ unsigned long pc = instruction_pointer(regs);
4034+
4035+ if (user_mode(regs)) {
4036+ unsigned long sigpage = current->mm->context.sigpage;
4037+
4038+ if (sigpage <= pc && pc < sigpage + 7*4) {
4039+ if (pc < sigpage + 3*4)
4040+ sys_sigreturn(regs);
4041+ else
4042+ sys_rt_sigreturn(regs);
4043+ return;
4044+ }
4045+ if (pc == 0xffff0f60UL) {
4046+ /*
4047+ * PaX: __kuser_cmpxchg64 emulation
4048+ */
4049+ // TODO
4050+ //regs->ARM_pc = regs->ARM_lr;
4051+ //return;
4052+ }
4053+ if (pc == 0xffff0fa0UL) {
4054+ /*
4055+ * PaX: __kuser_memory_barrier emulation
4056+ */
4057+ // dmb(); implied by the exception
4058+ regs->ARM_pc = regs->ARM_lr;
4059+ return;
4060+ }
4061+ if (pc == 0xffff0fc0UL) {
4062+ /*
4063+ * PaX: __kuser_cmpxchg emulation
4064+ */
4065+ // TODO
4066+ //long new;
4067+ //int op;
4068+
4069+ //op = FUTEX_OP_SET << 28;
4070+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4071+ //regs->ARM_r0 = old != new;
4072+ //regs->ARM_pc = regs->ARM_lr;
4073+ //return;
4074+ }
4075+ if (pc == 0xffff0fe0UL) {
4076+ /*
4077+ * PaX: __kuser_get_tls emulation
4078+ */
4079+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4080+ regs->ARM_pc = regs->ARM_lr;
4081+ return;
4082+ }
4083+ }
4084+
4085+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4086+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4087+ if (current->signal->curr_ip)
4088+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4089+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4090+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4091+ else
4092+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4093+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4094+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4095+ goto die;
4096+ }
4097+#endif
4098+
4099+#ifdef CONFIG_PAX_REFCOUNT
4100+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4101+#ifdef CONFIG_THUMB2_KERNEL
4102+ unsigned short bkpt;
4103+
4104+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
4105+#else
4106+ unsigned int bkpt;
4107+
4108+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4109+#endif
4110+ current->thread.error_code = ifsr;
4111+ current->thread.trap_no = 0;
4112+ pax_report_refcount_overflow(regs);
4113+ fixup_exception(regs);
4114+ return;
4115+ }
4116+ }
4117+#endif
4118
4119 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4120 return;
4121
4122+die:
4123 pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4124 inf->name, ifsr, addr);
4125
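
pax_report_insns() above leaves a forensic trail when PAGEEXEC kills a task: 20 bytes at the faulting PC and a window of stack words, with placeholders wherever get_user() faults. A user-space analogue of the PC byte dump:

#include <stdio.h>

static void report_insns(const unsigned char *pc)
{
        fputs("bytes at PC: ", stdout);
        for (int i = 0; i < 20; i++)
                printf("%02x ", pc[i]);   /* kernel uses get_user() and
                                             prints "?? " on fault */
        putchar('\n');
}

int main(void)
{
        report_insns((const unsigned char *)main);  /* dump our own code */
        return 0;
}
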
4126diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4127index cf08bdf..772656c 100644
4128--- a/arch/arm/mm/fault.h
4129+++ b/arch/arm/mm/fault.h
4130@@ -3,6 +3,7 @@
4131
4132 /*
4133 * Fault status register encodings. We steal bit 31 for our own purposes.
4134+ * Set when the FSR value is from an instruction fault.
4135 */
4136 #define FSR_LNX_PF (1 << 31)
4137 #define FSR_WRITE (1 << 11)
4138@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4139 }
4140 #endif
4141
4142+/* valid for LPAE and !LPAE */
4143+static inline int is_xn_fault(unsigned int fsr)
4144+{
4145+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4146+}
4147+
4148+static inline int is_domain_fault(unsigned int fsr)
4149+{
4150+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4151+}
4152+
4153 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4154 unsigned long search_exception_table(unsigned long addr);
4155
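
fsr_fs() extracts the 5-bit fault-status field; the two new predicates then pattern-match it so the abort handlers can tell KERNEXEC/UDEREF violations from ordinary faults. A self-checking sketch against the short-descriptor FS encodings (the specific codes asserted, 9/11 for domain faults and 13/15 for permission faults, are my reading of the ARM encoding tables):

#include <assert.h>

/* !LPAE: FS is bits [3:0] plus bit 10 of the FSR */
static int fsr_fs(unsigned int fsr)
{
        return (fsr & 0xf) | ((fsr >> 6) & 0x10);
}

static int is_xn_fault(unsigned int fsr)
{
        return (fsr_fs(fsr) & 0x3c) == 0xc;   /* FS = 0b011xx */
}

static int is_domain_fault(unsigned int fsr)
{
        return (fsr_fs(fsr) & 0xD) == 0x9;    /* FS = 0b01x01 */
}

int main(void)
{
        assert(is_domain_fault(0x9));    /* domain fault, section */
        assert(is_domain_fault(0xb));    /* domain fault, page */
        assert(is_xn_fault(0xd));        /* permission fault, section */
        assert(is_xn_fault(0xf));        /* permission fault, page */
        assert(!is_xn_fault(0x5));       /* translation fault: neither */
        return 0;
}
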
4156diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4157index 2495c8c..415b7fc 100644
4158--- a/arch/arm/mm/init.c
4159+++ b/arch/arm/mm/init.c
4160@@ -758,7 +758,46 @@ void free_tcmmem(void)
4161 {
4162 #ifdef CONFIG_HAVE_TCM
4163 extern char __tcm_start, __tcm_end;
4164+#endif
4165
4166+#ifdef CONFIG_PAX_KERNEXEC
4167+ unsigned long addr;
4168+ pgd_t *pgd;
4169+ pud_t *pud;
4170+ pmd_t *pmd;
4171+ int cpu_arch = cpu_architecture();
4172+ unsigned int cr = get_cr();
4173+
4174+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4175+ /* make page tables, etc. before .text NX */
4176+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4177+ pgd = pgd_offset_k(addr);
4178+ pud = pud_offset(pgd, addr);
4179+ pmd = pmd_offset(pud, addr);
4180+ __section_update(pmd, addr, PMD_SECT_XN);
4181+ }
4182+ /* make init NX */
4183+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4184+ pgd = pgd_offset_k(addr);
4185+ pud = pud_offset(pgd, addr);
4186+ pmd = pmd_offset(pud, addr);
4187+ __section_update(pmd, addr, PMD_SECT_XN);
4188+ }
4189+ /* make kernel code/rodata RX */
4190+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4191+ pgd = pgd_offset_k(addr);
4192+ pud = pud_offset(pgd, addr);
4193+ pmd = pmd_offset(pud, addr);
4194+#ifdef CONFIG_ARM_LPAE
4195+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4196+#else
4197+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4198+#endif
4199+ }
4200+ }
4201+#endif
4202+
4203+#ifdef CONFIG_HAVE_TCM
4204 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4205 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4206 #endif
4207diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4208index d1e5ad7..84dcbf2 100644
4209--- a/arch/arm/mm/ioremap.c
4210+++ b/arch/arm/mm/ioremap.c
4211@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4212 unsigned int mtype;
4213
4214 if (cached)
4215- mtype = MT_MEMORY_RWX;
4216+ mtype = MT_MEMORY_RX;
4217 else
4218- mtype = MT_MEMORY_RWX_NONCACHED;
4219+ mtype = MT_MEMORY_RX_NONCACHED;
4220
4221 return __arm_ioremap_caller(phys_addr, size, mtype,
4222 __builtin_return_address(0));
4223diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4224index 5e85ed3..b10a7ed 100644
4225--- a/arch/arm/mm/mmap.c
4226+++ b/arch/arm/mm/mmap.c
4227@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4228 struct vm_area_struct *vma;
4229 int do_align = 0;
4230 int aliasing = cache_is_vipt_aliasing();
4231+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4232 struct vm_unmapped_area_info info;
4233
4234 /*
4235@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4236 if (len > TASK_SIZE)
4237 return -ENOMEM;
4238
4239+#ifdef CONFIG_PAX_RANDMMAP
4240+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4241+#endif
4242+
4243 if (addr) {
4244 if (do_align)
4245 addr = COLOUR_ALIGN(addr, pgoff);
4246@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4247 addr = PAGE_ALIGN(addr);
4248
4249 vma = find_vma(mm, addr);
4250- if (TASK_SIZE - len >= addr &&
4251- (!vma || addr + len <= vma->vm_start))
4252+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4253 return addr;
4254 }
4255
4256@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4257 info.high_limit = TASK_SIZE;
4258 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4259 info.align_offset = pgoff << PAGE_SHIFT;
4260+ info.threadstack_offset = offset;
4261 return vm_unmapped_area(&info);
4262 }
4263
4264@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4265 unsigned long addr = addr0;
4266 int do_align = 0;
4267 int aliasing = cache_is_vipt_aliasing();
4268+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4269 struct vm_unmapped_area_info info;
4270
4271 /*
4272@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4273 return addr;
4274 }
4275
4276+#ifdef CONFIG_PAX_RANDMMAP
4277+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4278+#endif
4279+
4280 /* requesting a specific address */
4281 if (addr) {
4282 if (do_align)
4283@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4284 else
4285 addr = PAGE_ALIGN(addr);
4286 vma = find_vma(mm, addr);
4287- if (TASK_SIZE - len >= addr &&
4288- (!vma || addr + len <= vma->vm_start))
4289+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4290 return addr;
4291 }
4292
4293@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4294 info.high_limit = mm->mmap_base;
4295 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4296 info.align_offset = pgoff << PAGE_SHIFT;
4297+ info.threadstack_offset = offset;
4298 addr = vm_unmapped_area(&info);
4299
4300 /*
4301@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4302 {
4303 unsigned long random_factor = 0UL;
4304
4305+#ifdef CONFIG_PAX_RANDMMAP
4306+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4307+#endif
4308+
4309 /* 8 bits of randomness in 20 address space bits */
4310 if ((current->flags & PF_RANDOMIZE) &&
4311 !(current->personality & ADDR_NO_RANDOMIZE))
4312@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4313
4314 if (mmap_is_legacy()) {
4315 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4316+
4317+#ifdef CONFIG_PAX_RANDMMAP
4318+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4319+ mm->mmap_base += mm->delta_mmap;
4320+#endif
4321+
4322 mm->get_unmapped_area = arch_get_unmapped_area;
4323 } else {
4324 mm->mmap_base = mmap_base(random_factor);
4325+
4326+#ifdef CONFIG_PAX_RANDMMAP
4327+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4328+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4329+#endif
4330+
4331 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4332 }
4333 }
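
Under PAX_RANDMMAP the caller-supplied hint is skipped (the guarded `if (addr)` fast paths above) and the mmap base itself is displaced by delta_mmap/delta_stack on top of the stock random_factor. Roughly how the legacy-layout base is assembled; the constants and bit widths here are illustrative, and the deltas are generated elsewhere in the patch:

#include <stdio.h>
#include <stdlib.h>

#define TASK_UNMAPPED_BASE 0x40000000UL   /* typical 32-bit value */
#define PAGE_SHIFT 12

int main(void)
{
        /* 8 bits of mmap randomness from the stock allocator ... */
        unsigned long random_factor = ((unsigned long)rand() % (1 << 8))
                                      << PAGE_SHIFT;
        /* ... plus a PaX delta (bit count per PAX_DELTA_MMAP_LEN; 16 here
           is illustrative) */
        unsigned long delta_mmap = ((unsigned long)rand() % (1 << 16))
                                   << PAGE_SHIFT;
        unsigned long mmap_base = TASK_UNMAPPED_BASE + random_factor
                                  + delta_mmap;

        printf("mmap_base = %#lx\n", mmap_base);
        return 0;
}
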
4334diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4335index 4e6ef89..21c27f2 100644
4336--- a/arch/arm/mm/mmu.c
4337+++ b/arch/arm/mm/mmu.c
4338@@ -41,6 +41,22 @@
4339 #include "mm.h"
4340 #include "tcm.h"
4341
4342+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4343+void modify_domain(unsigned int dom, unsigned int type)
4344+{
4345+ struct thread_info *thread = current_thread_info();
4346+ unsigned int domain = thread->cpu_domain;
4347+ /*
4348+ * DOMAIN_MANAGER might be defined to some other value,
4349+ * use the arch-defined constant
4350+ */
4351+ domain &= ~domain_val(dom, 3);
4352+ thread->cpu_domain = domain | domain_val(dom, type);
4353+ set_domain(thread->cpu_domain);
4354+}
4355+EXPORT_SYMBOL(modify_domain);
4356+#endif
4357+
4358 /*
4359 * empty_zero_page is a special page that is used for
4360 * zero-initialized data and COW.
4361@@ -242,7 +258,15 @@ __setup("noalign", noalign_setup);
4362 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4363 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4364
4365-static struct mem_type mem_types[] = {
4366+#ifdef CONFIG_PAX_KERNEXEC
4367+#define L_PTE_KERNEXEC L_PTE_RDONLY
4368+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4369+#else
4370+#define L_PTE_KERNEXEC L_PTE_DIRTY
4371+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4372+#endif
4373+
4374+static struct mem_type mem_types[] __read_only = {
4375 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4376 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4377 L_PTE_SHARED,
4378@@ -271,19 +295,19 @@ static struct mem_type mem_types[] = {
4379 .prot_sect = PROT_SECT_DEVICE,
4380 .domain = DOMAIN_IO,
4381 },
4382- [MT_UNCACHED] = {
4383+ [MT_UNCACHED_RW] = {
4384 .prot_pte = PROT_PTE_DEVICE,
4385 .prot_l1 = PMD_TYPE_TABLE,
4386 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4387 .domain = DOMAIN_IO,
4388 },
4389- [MT_CACHECLEAN] = {
4390- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4391+ [MT_CACHECLEAN_RO] = {
4392+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4393 .domain = DOMAIN_KERNEL,
4394 },
4395 #ifndef CONFIG_ARM_LPAE
4396- [MT_MINICLEAN] = {
4397- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4398+ [MT_MINICLEAN_RO] = {
4399+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4400 .domain = DOMAIN_KERNEL,
4401 },
4402 #endif
4403@@ -291,15 +315,15 @@ static struct mem_type mem_types[] = {
4404 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4405 L_PTE_RDONLY,
4406 .prot_l1 = PMD_TYPE_TABLE,
4407- .domain = DOMAIN_USER,
4408+ .domain = DOMAIN_VECTORS,
4409 },
4410 [MT_HIGH_VECTORS] = {
4411 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4412 L_PTE_USER | L_PTE_RDONLY,
4413 .prot_l1 = PMD_TYPE_TABLE,
4414- .domain = DOMAIN_USER,
4415+ .domain = DOMAIN_VECTORS,
4416 },
4417- [MT_MEMORY_RWX] = {
4418+ [__MT_MEMORY_RWX] = {
4419 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4420 .prot_l1 = PMD_TYPE_TABLE,
4421 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4422@@ -312,17 +336,30 @@ static struct mem_type mem_types[] = {
4423 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4424 .domain = DOMAIN_KERNEL,
4425 },
4426- [MT_ROM] = {
4427- .prot_sect = PMD_TYPE_SECT,
4428+ [MT_MEMORY_RX] = {
4429+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4430+ .prot_l1 = PMD_TYPE_TABLE,
4431+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4432+ .domain = DOMAIN_KERNEL,
4433+ },
4434+ [MT_ROM_RX] = {
4435+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4436 .domain = DOMAIN_KERNEL,
4437 },
4438- [MT_MEMORY_RWX_NONCACHED] = {
4439+ [MT_MEMORY_RW_NONCACHED] = {
4440 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4441 L_PTE_MT_BUFFERABLE,
4442 .prot_l1 = PMD_TYPE_TABLE,
4443 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4444 .domain = DOMAIN_KERNEL,
4445 },
4446+ [MT_MEMORY_RX_NONCACHED] = {
4447+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4448+ L_PTE_MT_BUFFERABLE,
4449+ .prot_l1 = PMD_TYPE_TABLE,
4450+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4451+ .domain = DOMAIN_KERNEL,
4452+ },
4453 [MT_MEMORY_RW_DTCM] = {
4454 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4455 L_PTE_XN,
4456@@ -330,9 +367,10 @@ static struct mem_type mem_types[] = {
4457 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4458 .domain = DOMAIN_KERNEL,
4459 },
4460- [MT_MEMORY_RWX_ITCM] = {
4461- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4462+ [MT_MEMORY_RX_ITCM] = {
4463+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4464 .prot_l1 = PMD_TYPE_TABLE,
4465+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4466 .domain = DOMAIN_KERNEL,
4467 },
4468 [MT_MEMORY_RW_SO] = {
4469@@ -544,9 +582,14 @@ static void __init build_mem_type_table(void)
4470 * Mark cache clean areas and XIP ROM read only
4471 * from SVC mode and no access from userspace.
4472 */
4473- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4474- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4475- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4476+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4477+#ifdef CONFIG_PAX_KERNEXEC
4478+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4479+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4480+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4481+#endif
4482+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4483+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4484 #endif
4485
4486 /*
4487@@ -563,13 +606,17 @@ static void __init build_mem_type_table(void)
4488 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4489 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4490 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4491- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4492- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4493+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4494+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4495 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4496 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4497+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4498+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4499 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4500- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4501- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4502+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4503+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4504+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4505+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4506 }
4507 }
4508
4509@@ -580,15 +627,20 @@ static void __init build_mem_type_table(void)
4510 if (cpu_arch >= CPU_ARCH_ARMv6) {
4511 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4512 /* Non-cacheable Normal is XCB = 001 */
4513- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4514+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4515+ PMD_SECT_BUFFERED;
4516+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4517 PMD_SECT_BUFFERED;
4518 } else {
4519 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4520- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4521+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4522+ PMD_SECT_TEX(1);
4523+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4524 PMD_SECT_TEX(1);
4525 }
4526 } else {
4527- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4528+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4529+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4530 }
4531
4532 #ifdef CONFIG_ARM_LPAE
4533@@ -609,6 +661,8 @@ static void __init build_mem_type_table(void)
4534 user_pgprot |= PTE_EXT_PXN;
4535 #endif
4536
4537+ user_pgprot |= __supported_pte_mask;
4538+
4539 for (i = 0; i < 16; i++) {
4540 pteval_t v = pgprot_val(protection_map[i]);
4541 protection_map[i] = __pgprot(v | user_pgprot);
4542@@ -626,21 +680,24 @@ static void __init build_mem_type_table(void)
4543
4544 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4545 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4546- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4547- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4548+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4549+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4550 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4551 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4552+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4553+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4554 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4555- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4556- mem_types[MT_ROM].prot_sect |= cp->pmd;
4557+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4558+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4559+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4560
4561 switch (cp->pmd) {
4562 case PMD_SECT_WT:
4563- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4564+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4565 break;
4566 case PMD_SECT_WB:
4567 case PMD_SECT_WBWA:
4568- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4569+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4570 break;
4571 }
4572 pr_info("Memory policy: %sData cache %s\n",
4573@@ -854,7 +911,7 @@ static void __init create_mapping(struct map_desc *md)
4574 return;
4575 }
4576
4577- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4578+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4579 md->virtual >= PAGE_OFFSET &&
4580 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4581 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
4582@@ -1218,18 +1275,15 @@ void __init arm_mm_memblock_reserve(void)
4583 * called function. This means you can't use any function or debugging
4584 * method which may touch any device, otherwise the kernel _will_ crash.
4585 */
4586+
4587+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4588+
4589 static void __init devicemaps_init(const struct machine_desc *mdesc)
4590 {
4591 struct map_desc map;
4592 unsigned long addr;
4593- void *vectors;
4594
4595- /*
4596- * Allocate the vector page early.
4597- */
4598- vectors = early_alloc(PAGE_SIZE * 2);
4599-
4600- early_trap_init(vectors);
4601+ early_trap_init(&vectors);
4602
4603 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4604 pmd_clear(pmd_off_k(addr));
4605@@ -1242,7 +1296,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4606 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4607 map.virtual = MODULES_VADDR;
4608 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4609- map.type = MT_ROM;
4610+ map.type = MT_ROM_RX;
4611 create_mapping(&map);
4612 #endif
4613
4614@@ -1253,14 +1307,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4615 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4616 map.virtual = FLUSH_BASE;
4617 map.length = SZ_1M;
4618- map.type = MT_CACHECLEAN;
4619+ map.type = MT_CACHECLEAN_RO;
4620 create_mapping(&map);
4621 #endif
4622 #ifdef FLUSH_BASE_MINICACHE
4623 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4624 map.virtual = FLUSH_BASE_MINICACHE;
4625 map.length = SZ_1M;
4626- map.type = MT_MINICLEAN;
4627+ map.type = MT_MINICLEAN_RO;
4628 create_mapping(&map);
4629 #endif
4630
4631@@ -1269,7 +1323,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4632 * location (0xffff0000). If we aren't using high-vectors, also
4633 * create a mapping at the low-vectors virtual address.
4634 */
4635- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4636+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4637 map.virtual = 0xffff0000;
4638 map.length = PAGE_SIZE;
4639 #ifdef CONFIG_KUSER_HELPERS
4640@@ -1329,8 +1383,10 @@ static void __init kmap_init(void)
4641 static void __init map_lowmem(void)
4642 {
4643 struct memblock_region *reg;
4644+#ifndef CONFIG_PAX_KERNEXEC
4645 phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4646 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4647+#endif
4648
4649 /* Map all the lowmem memory banks. */
4650 for_each_memblock(memory, reg) {
4651@@ -1343,11 +1399,48 @@ static void __init map_lowmem(void)
4652 if (start >= end)
4653 break;
4654
4655+#ifdef CONFIG_PAX_KERNEXEC
4656+ map.pfn = __phys_to_pfn(start);
4657+ map.virtual = __phys_to_virt(start);
4658+ map.length = end - start;
4659+
4660+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4661+ struct map_desc kernel;
4662+ struct map_desc initmap;
4663+
4664+ /* when freeing initmem we will make this RW */
4665+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4666+ initmap.virtual = (unsigned long)__init_begin;
4667+ initmap.length = _sdata - __init_begin;
4668+ initmap.type = __MT_MEMORY_RWX;
4669+ create_mapping(&initmap);
4670+
4671+ /* when freeing initmem we will make this RX */
4672+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4673+ kernel.virtual = (unsigned long)_stext;
4674+ kernel.length = __init_begin - _stext;
4675+ kernel.type = __MT_MEMORY_RWX;
4676+ create_mapping(&kernel);
4677+
4678+ if (map.virtual < (unsigned long)_stext) {
4679+ map.length = (unsigned long)_stext - map.virtual;
4680+ map.type = __MT_MEMORY_RWX;
4681+ create_mapping(&map);
4682+ }
4683+
4684+ map.pfn = __phys_to_pfn(__pa(_sdata));
4685+ map.virtual = (unsigned long)_sdata;
4686+ map.length = end - __pa(_sdata);
4687+ }
4688+
4689+ map.type = MT_MEMORY_RW;
4690+ create_mapping(&map);
4691+#else
4692 if (end < kernel_x_start) {
4693 map.pfn = __phys_to_pfn(start);
4694 map.virtual = __phys_to_virt(start);
4695 map.length = end - start;
4696- map.type = MT_MEMORY_RWX;
4697+ map.type = __MT_MEMORY_RWX;
4698
4699 create_mapping(&map);
4700 } else if (start >= kernel_x_end) {
4701@@ -1371,7 +1464,7 @@ static void __init map_lowmem(void)
4702 map.pfn = __phys_to_pfn(kernel_x_start);
4703 map.virtual = __phys_to_virt(kernel_x_start);
4704 map.length = kernel_x_end - kernel_x_start;
4705- map.type = MT_MEMORY_RWX;
4706+ map.type = __MT_MEMORY_RWX;
4707
4708 create_mapping(&map);
4709
4710@@ -1384,6 +1477,7 @@ static void __init map_lowmem(void)
4711 create_mapping(&map);
4712 }
4713 }
4714+#endif
4715 }
4716 }
4717
4718diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
4719index e1268f9..a9755a7 100644
4720--- a/arch/arm/net/bpf_jit_32.c
4721+++ b/arch/arm/net/bpf_jit_32.c
4722@@ -20,6 +20,7 @@
4723 #include <asm/cacheflush.h>
4724 #include <asm/hwcap.h>
4725 #include <asm/opcodes.h>
4726+#include <asm/pgtable.h>
4727
4728 #include "bpf_jit_32.h"
4729
4730@@ -71,7 +72,11 @@ struct jit_ctx {
4731 #endif
4732 };
4733
4734+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
4735+int bpf_jit_enable __read_only;
4736+#else
4737 int bpf_jit_enable __read_mostly;
4738+#endif
4739
4740 static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
4741 {
4742@@ -178,8 +183,10 @@ static void jit_fill_hole(void *area, unsigned int size)
4743 {
4744 u32 *ptr;
4745 /* We are guaranteed to have aligned memory. */
4746+ pax_open_kernel();
4747 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
4748 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
4749+ pax_close_kernel();
4750 }
4751
4752 static void build_prologue(struct jit_ctx *ctx)
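
jit_fill_hole() poisons unused JIT space with a permanently-undefined instruction, and because KERNEXEC keeps the JIT buffer read-only it must bracket the writes with pax_open_kernel()/pax_close_kernel(); bpf_jit_enable likewise becomes __read_only under GRKERNSEC_BPF_HARDEN so it cannot be flipped after boot. The fill loop's logic in portable C (the UDF encoding shown is one valid choice, not necessarily the kernel's constant):

#include <stdint.h>
#include <stddef.h>

#define ARM_INST_UDF 0xe7f000f0u   /* udf #0: permanently undefined */

static void pax_open_kernel(void)  { /* lift write protection */ }
static void pax_close_kernel(void) { /* restore it */ }

/* poison unused JIT space so stray jumps trap instead of executing junk */
void jit_fill_hole(void *area, size_t size)
{
        uint32_t *ptr;

        pax_open_kernel();
        for (ptr = area; size >= sizeof(uint32_t); size -= sizeof(uint32_t))
                *ptr++ = ARM_INST_UDF;
        pax_close_kernel();
}
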
4753diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4754index 5b217f4..c23f40e 100644
4755--- a/arch/arm/plat-iop/setup.c
4756+++ b/arch/arm/plat-iop/setup.c
4757@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4758 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4759 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4760 .length = IOP3XX_PERIPHERAL_SIZE,
4761- .type = MT_UNCACHED,
4762+ .type = MT_UNCACHED_RW,
4763 },
4764 };
4765
4766diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4767index a5bc92d..0bb4730 100644
4768--- a/arch/arm/plat-omap/sram.c
4769+++ b/arch/arm/plat-omap/sram.c
4770@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4771 * Looks like we need to preserve some bootloader code at the
4772 * beginning of SRAM for jumping to flash for reboot to work...
4773 */
4774+ pax_open_kernel();
4775 memset_io(omap_sram_base + omap_sram_skip, 0,
4776 omap_sram_size - omap_sram_skip);
4777+ pax_close_kernel();
4778 }
4779diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4780index ce6d763..cfea917 100644
4781--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4782+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4783@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4784 int (*started)(unsigned ch);
4785 int (*flush)(unsigned ch);
4786 int (*stop)(unsigned ch);
4787-};
4788+} __no_const;
4789
4790 extern void *samsung_dmadev_get_ops(void);
4791 extern void *s3c_dma_get_ops(void);
4792diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
4793index 7047051..44e8675 100644
4794--- a/arch/arm64/include/asm/atomic.h
4795+++ b/arch/arm64/include/asm/atomic.h
4796@@ -252,5 +252,15 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
4797 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
4798 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
4799
4800+#define atomic64_read_unchecked(v) atomic64_read(v)
4801+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4802+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4803+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4804+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4805+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4806+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4807+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4808+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4809+
4810 #endif
4811 #endif
4812diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
4813index a5abb00..9cbca9a 100644
4814--- a/arch/arm64/include/asm/barrier.h
4815+++ b/arch/arm64/include/asm/barrier.h
4816@@ -44,7 +44,7 @@
4817 do { \
4818 compiletime_assert_atomic_type(*p); \
4819 barrier(); \
4820- ACCESS_ONCE(*p) = (v); \
4821+ ACCESS_ONCE_RW(*p) = (v); \
4822 } while (0)
4823
4824 #define smp_load_acquire(p) \
4825diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
4826index 4fde8c1..441f84f 100644
4827--- a/arch/arm64/include/asm/percpu.h
4828+++ b/arch/arm64/include/asm/percpu.h
4829@@ -135,16 +135,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
4830 {
4831 switch (size) {
4832 case 1:
4833- ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
4834+ ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
4835 break;
4836 case 2:
4837- ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
4838+ ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
4839 break;
4840 case 4:
4841- ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
4842+ ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
4843 break;
4844 case 8:
4845- ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
4846+ ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
4847 break;
4848 default:
4849 BUILD_BUG();
4850diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
4851index e20df38..027ede3 100644
4852--- a/arch/arm64/include/asm/pgalloc.h
4853+++ b/arch/arm64/include/asm/pgalloc.h
4854@@ -46,6 +46,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4855 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
4856 }
4857
4858+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
4859+{
4860+ pud_populate(mm, pud, pmd);
4861+}
4862+
4863 #endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
4864
4865 #if CONFIG_ARM64_PGTABLE_LEVELS > 3
4866diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4867index 3bf8f4e..5dd5491 100644
4868--- a/arch/arm64/include/asm/uaccess.h
4869+++ b/arch/arm64/include/asm/uaccess.h
4870@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4871 flag; \
4872 })
4873
4874+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4875 #define access_ok(type, addr, size) __range_ok(addr, size)
4876 #define user_addr_max get_fs
4877
4878diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
4879index df34a70..5727a75 100644
4880--- a/arch/arm64/mm/dma-mapping.c
4881+++ b/arch/arm64/mm/dma-mapping.c
4882@@ -137,7 +137,7 @@ static void __dma_free_coherent(struct device *dev, size_t size,
4883 phys_to_page(paddr),
4884 size >> PAGE_SHIFT);
4885 if (!freed)
4886- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
4887+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
4888 }
4889
4890 static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
4891diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4892index c3a58a1..78fbf54 100644
4893--- a/arch/avr32/include/asm/cache.h
4894+++ b/arch/avr32/include/asm/cache.h
4895@@ -1,8 +1,10 @@
4896 #ifndef __ASM_AVR32_CACHE_H
4897 #define __ASM_AVR32_CACHE_H
4898
4899+#include <linux/const.h>
4900+
4901 #define L1_CACHE_SHIFT 5
4902-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4903+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4904
4905 /*
4906 * Memory returned by kmalloc() may be used for DMA, so we must make
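
The recurring cache.h change swaps a literal for `_AC(1,UL) << L1_CACHE_SHIFT`: _AC() from <linux/const.h> pastes a UL suffix on the C side but expands to the bare number under __ASSEMBLY__, so one definition serves both .c and .S files and the constant is unsigned long in address arithmetic. Condensed from the real header:

/* condensed from include/uapi/linux/const.h */
#ifdef __ASSEMBLY__
#define _AC(X, Y)  X              /* assemblers know no UL suffix */
#else
#define __AC(X, Y) (X##Y)
#define _AC(X, Y)  __AC(X, Y)     /* C side: paste the type suffix */
#endif

#define L1_CACHE_SHIFT 5
#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)  /* 32, unsigned long */
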
4907diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4908index d232888..87c8df1 100644
4909--- a/arch/avr32/include/asm/elf.h
4910+++ b/arch/avr32/include/asm/elf.h
4911@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4912 the loader. We need to make sure that it is out of the way of the program
4913 that it will "exec", and that there is sufficient room for the brk. */
4914
4915-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4916+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4917
4918+#ifdef CONFIG_PAX_ASLR
4919+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4920+
4921+#define PAX_DELTA_MMAP_LEN 15
4922+#define PAX_DELTA_STACK_LEN 15
4923+#endif
4924
4925 /* This yields a mask that user programs can use to figure out what
4926 instruction set this CPU supports. This could be done in user space,
4927diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4928index 479330b..53717a8 100644
4929--- a/arch/avr32/include/asm/kmap_types.h
4930+++ b/arch/avr32/include/asm/kmap_types.h
4931@@ -2,9 +2,9 @@
4932 #define __ASM_AVR32_KMAP_TYPES_H
4933
4934 #ifdef CONFIG_DEBUG_HIGHMEM
4935-# define KM_TYPE_NR 29
4936+# define KM_TYPE_NR 30
4937 #else
4938-# define KM_TYPE_NR 14
4939+# define KM_TYPE_NR 15
4940 #endif
4941
4942 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4943diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4944index d223a8b..69c5210 100644
4945--- a/arch/avr32/mm/fault.c
4946+++ b/arch/avr32/mm/fault.c
4947@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4948
4949 int exception_trace = 1;
4950
4951+#ifdef CONFIG_PAX_PAGEEXEC
4952+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4953+{
4954+ unsigned long i;
4955+
4956+ printk(KERN_ERR "PAX: bytes at PC: ");
4957+ for (i = 0; i < 20; i++) {
4958+ unsigned char c;
4959+ if (get_user(c, (unsigned char *)pc+i))
4960+ printk(KERN_CONT "???????? ");
4961+ else
4962+ printk(KERN_CONT "%02x ", c);
4963+ }
4964+ printk("\n");
4965+}
4966+#endif
4967+
4968 /*
4969 * This routine handles page faults. It determines the address and the
4970 * problem, and then passes it off to one of the appropriate routines.
4971@@ -178,6 +195,16 @@ bad_area:
4972 up_read(&mm->mmap_sem);
4973
4974 if (user_mode(regs)) {
4975+
4976+#ifdef CONFIG_PAX_PAGEEXEC
4977+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4978+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4979+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4980+ do_group_exit(SIGKILL);
4981+ }
4982+ }
4983+#endif
4984+
4985 if (exception_trace && printk_ratelimit())
4986 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4987 "sp %08lx ecr %lu\n",
4988diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4989index 568885a..f8008df 100644
4990--- a/arch/blackfin/include/asm/cache.h
4991+++ b/arch/blackfin/include/asm/cache.h
4992@@ -7,6 +7,7 @@
4993 #ifndef __ARCH_BLACKFIN_CACHE_H
4994 #define __ARCH_BLACKFIN_CACHE_H
4995
4996+#include <linux/const.h>
4997 #include <linux/linkage.h> /* for asmlinkage */
4998
4999 /*
5000@@ -14,7 +15,7 @@
5001 * Blackfin loads 32 bytes for cache
5002 */
5003 #define L1_CACHE_SHIFT 5
5004-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5005+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5006 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5007
5008 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5009diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
5010index aea2718..3639a60 100644
5011--- a/arch/cris/include/arch-v10/arch/cache.h
5012+++ b/arch/cris/include/arch-v10/arch/cache.h
5013@@ -1,8 +1,9 @@
5014 #ifndef _ASM_ARCH_CACHE_H
5015 #define _ASM_ARCH_CACHE_H
5016
5017+#include <linux/const.h>
5018 /* Etrax 100LX have 32-byte cache-lines. */
5019-#define L1_CACHE_BYTES 32
5020 #define L1_CACHE_SHIFT 5
5021+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5022
5023 #endif /* _ASM_ARCH_CACHE_H */
5024diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
5025index 7caf25d..ee65ac5 100644
5026--- a/arch/cris/include/arch-v32/arch/cache.h
5027+++ b/arch/cris/include/arch-v32/arch/cache.h
5028@@ -1,11 +1,12 @@
5029 #ifndef _ASM_CRIS_ARCH_CACHE_H
5030 #define _ASM_CRIS_ARCH_CACHE_H
5031
5032+#include <linux/const.h>
5033 #include <arch/hwregs/dma.h>
5034
5035 /* A cache-line is 32 bytes. */
5036-#define L1_CACHE_BYTES 32
5037 #define L1_CACHE_SHIFT 5
5038+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5039
5040 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5041
5042diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
5043index 102190a..5334cea 100644
5044--- a/arch/frv/include/asm/atomic.h
5045+++ b/arch/frv/include/asm/atomic.h
5046@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
5047 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
5048 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
5049
5050+#define atomic64_read_unchecked(v) atomic64_read(v)
5051+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5052+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5053+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5054+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5055+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5056+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5057+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5058+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5059+
5060 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5061 {
5062 int c, old;
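The _unchecked aliases exist because PAX_REFCOUNT, on architectures that implement it, makes plain atomic operations trap on overflow; counters that may legitimately wrap must use the _unchecked API. On frv the feature is absent, so the aliases simply keep shared code compiling. An illustrative use, assuming (as the aliases above imply) that without PAX_REFCOUNT the unchecked type falls back to plain atomic64_t; the counter name is hypothetical:

#include <linux/atomic.h>

static atomic64_unchecked_t tx_bytes = ATOMIC64_INIT(0);

static void account_tx(unsigned long len)
{
	atomic64_add_unchecked(len, &tx_bytes);	/* may wrap, by design */
}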
5063diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5064index 2797163..c2a401df9 100644
5065--- a/arch/frv/include/asm/cache.h
5066+++ b/arch/frv/include/asm/cache.h
5067@@ -12,10 +12,11 @@
5068 #ifndef __ASM_CACHE_H
5069 #define __ASM_CACHE_H
5070
5071+#include <linux/const.h>
5072
5073 /* bytes per L1 cache line */
5074 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5075-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5076+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5077
5078 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5079 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5080diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5081index 43901f2..0d8b865 100644
5082--- a/arch/frv/include/asm/kmap_types.h
5083+++ b/arch/frv/include/asm/kmap_types.h
5084@@ -2,6 +2,6 @@
5085 #ifndef _ASM_KMAP_TYPES_H
5086 #define _ASM_KMAP_TYPES_H
5087
5088-#define KM_TYPE_NR 17
5089+#define KM_TYPE_NR 18
5090
5091 #endif
5092diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5093index 836f147..4cf23f5 100644
5094--- a/arch/frv/mm/elf-fdpic.c
5095+++ b/arch/frv/mm/elf-fdpic.c
5096@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5097 {
5098 struct vm_area_struct *vma;
5099 struct vm_unmapped_area_info info;
5100+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5101
5102 if (len > TASK_SIZE)
5103 return -ENOMEM;
5104@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5105 if (addr) {
5106 addr = PAGE_ALIGN(addr);
5107 vma = find_vma(current->mm, addr);
5108- if (TASK_SIZE - len >= addr &&
5109- (!vma || addr + len <= vma->vm_start))
5110+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5111 goto success;
5112 }
5113
5114@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5115 info.high_limit = (current->mm->start_stack - 0x00200000);
5116 info.align_mask = 0;
5117 info.align_offset = 0;
5118+ info.threadstack_offset = offset;
5119 addr = vm_unmapped_area(&info);
5120 if (!(addr & ~PAGE_MASK))
5121 goto success;
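check_heap_stack_gap() and the threadstack_offset field implement the same idea everywhere this patch touches arch_get_unmapped_area: a candidate range must not merely avoid overlapping the next VMA, it must also leave a partly randomized gap below stacks. A deliberately simplified sketch of such a test (gap_ok is a hypothetical name; the real helper also handles grows-up stacks, a configurable gap size, and overflow of the additions):

#include <linux/mm.h>

static inline bool gap_ok(const struct vm_area_struct *vma,
			  unsigned long addr, unsigned long len,
			  unsigned long offset)
{
	if (!vma)				/* no mapping above: fits */
		return true;
	if (vma->vm_flags & VM_GROWSDOWN)	/* keep headroom below stacks */
		return addr + len + offset <= vma->vm_start;
	return addr + len <= vma->vm_start;
}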
5122diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5123index 69952c1..4fa2908 100644
5124--- a/arch/hexagon/include/asm/cache.h
5125+++ b/arch/hexagon/include/asm/cache.h
5126@@ -21,9 +21,11 @@
5127 #ifndef __ASM_CACHE_H
5128 #define __ASM_CACHE_H
5129
5130+#include <linux/const.h>
5131+
5132 /* Bytes per L1 cache line */
5133-#define L1_CACHE_SHIFT (5)
5134-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5135+#define L1_CACHE_SHIFT 5
5136+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5137
5138 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5139
5140diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5141index 074e52b..76afdac 100644
5142--- a/arch/ia64/Kconfig
5143+++ b/arch/ia64/Kconfig
5144@@ -548,6 +548,7 @@ source "drivers/sn/Kconfig"
5145 config KEXEC
5146 bool "kexec system call"
5147 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5148+ depends on !GRKERNSEC_KMEM
5149 help
5150 kexec is a system call that implements the ability to shutdown your
5151 current kernel, and to start another kernel. It is like a reboot
5152diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5153index 970d0bd..e750b9b 100644
5154--- a/arch/ia64/Makefile
5155+++ b/arch/ia64/Makefile
5156@@ -98,5 +98,6 @@ endef
5157 archprepare: make_nr_irqs_h FORCE
5158 PHONY += make_nr_irqs_h FORCE
5159
5160+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5161 make_nr_irqs_h: FORCE
5162 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5163diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5164index 0bf0350..2ad1957 100644
5165--- a/arch/ia64/include/asm/atomic.h
5166+++ b/arch/ia64/include/asm/atomic.h
5167@@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5168 #define atomic64_inc(v) atomic64_add(1, (v))
5169 #define atomic64_dec(v) atomic64_sub(1, (v))
5170
5171+#define atomic64_read_unchecked(v) atomic64_read(v)
5172+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5173+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5174+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5175+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5176+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5177+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5178+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5179+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5180+
5181 #endif /* _ASM_IA64_ATOMIC_H */
5182diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5183index f6769eb..1cdb590 100644
5184--- a/arch/ia64/include/asm/barrier.h
5185+++ b/arch/ia64/include/asm/barrier.h
5186@@ -66,7 +66,7 @@
5187 do { \
5188 compiletime_assert_atomic_type(*p); \
5189 barrier(); \
5190- ACCESS_ONCE(*p) = (v); \
5191+ ACCESS_ONCE_RW(*p) = (v); \
5192 } while (0)
5193
5194 #define smp_load_acquire(p) \
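The ACCESS_ONCE to ACCESS_ONCE_RW substitutions that recur in the barrier.h and spinlock.h hunks follow from grsecurity constifying the plain macro; paraphrased from the patch's include/linux/compiler.h changes (not part of this excerpt):

/* Paraphrased: the plain form becomes read-only so stray writes
 * through it fail to compile; intentional writers, such as the
 * smp_store_release() above, must opt in with the _RW form. */
#define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))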
5195diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5196index 988254a..e1ee885 100644
5197--- a/arch/ia64/include/asm/cache.h
5198+++ b/arch/ia64/include/asm/cache.h
5199@@ -1,6 +1,7 @@
5200 #ifndef _ASM_IA64_CACHE_H
5201 #define _ASM_IA64_CACHE_H
5202
5203+#include <linux/const.h>
5204
5205 /*
5206 * Copyright (C) 1998-2000 Hewlett-Packard Co
5207@@ -9,7 +10,7 @@
5208
5209 /* Bytes per L1 (data) cache line. */
5210 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5211-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5212+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5213
5214 #ifdef CONFIG_SMP
5215 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5216diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5217index 5a83c5c..4d7f553 100644
5218--- a/arch/ia64/include/asm/elf.h
5219+++ b/arch/ia64/include/asm/elf.h
5220@@ -42,6 +42,13 @@
5221 */
5222 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5223
5224+#ifdef CONFIG_PAX_ASLR
5225+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5226+
5227+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5228+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5229+#endif
5230+
5231 #define PT_IA_64_UNWIND 0x70000001
5232
5233 /* IA-64 relocations: */
5234diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5235index 5767cdf..7462574 100644
5236--- a/arch/ia64/include/asm/pgalloc.h
5237+++ b/arch/ia64/include/asm/pgalloc.h
5238@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5239 pgd_val(*pgd_entry) = __pa(pud);
5240 }
5241
5242+static inline void
5243+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5244+{
5245+ pgd_populate(mm, pgd_entry, pud);
5246+}
5247+
5248 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5249 {
5250 return quicklist_alloc(0, GFP_KERNEL, NULL);
5251@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5252 pud_val(*pud_entry) = __pa(pmd);
5253 }
5254
5255+static inline void
5256+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5257+{
5258+ pud_populate(mm, pud_entry, pmd);
5259+}
5260+
5261 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5262 {
5263 return quicklist_alloc(0, GFP_KERNEL, NULL);
5264diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5265index 7935115..c0eca6a 100644
5266--- a/arch/ia64/include/asm/pgtable.h
5267+++ b/arch/ia64/include/asm/pgtable.h
5268@@ -12,7 +12,7 @@
5269 * David Mosberger-Tang <davidm@hpl.hp.com>
5270 */
5271
5272-
5273+#include <linux/const.h>
5274 #include <asm/mman.h>
5275 #include <asm/page.h>
5276 #include <asm/processor.h>
5277@@ -142,6 +142,17 @@
5278 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5279 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5280 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5281+
5282+#ifdef CONFIG_PAX_PAGEEXEC
5283+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5284+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5285+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5286+#else
5287+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5288+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5289+# define PAGE_COPY_NOEXEC PAGE_COPY
5290+#endif
5291+
5292 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5293 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5294 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
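The _NOEXEC protection names are defined on every PAGEEXEC architecture so common code can select them unconditionally; without the feature they collapse to the classic protections. A hypothetical selection helper showing the intended use:

#include <asm/mman.h>
#include <asm/pgtable.h>

static pgprot_t shared_prot_for(unsigned long prot)
{
	/* request the non-executable variant unless PROT_EXEC was asked for */
	return (prot & PROT_EXEC) ? PAGE_SHARED : PAGE_SHARED_NOEXEC;
}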
5295diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5296index 45698cd..e8e2dbc 100644
5297--- a/arch/ia64/include/asm/spinlock.h
5298+++ b/arch/ia64/include/asm/spinlock.h
5299@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5300 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5301
5302 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5303- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5304+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5305 }
5306
5307 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5308diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5309index 103bedc..0210597 100644
5310--- a/arch/ia64/include/asm/uaccess.h
5311+++ b/arch/ia64/include/asm/uaccess.h
5312@@ -70,6 +70,7 @@
5313 && ((segment).seg == KERNEL_DS.seg \
5314 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5315 })
5316+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5317 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5318
5319 /*
5320@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5321 static inline unsigned long
5322 __copy_to_user (void __user *to, const void *from, unsigned long count)
5323 {
5324+ if (count > INT_MAX)
5325+ return count;
5326+
5327+ if (!__builtin_constant_p(count))
5328+ check_object_size(from, count, true);
5329+
5330 return __copy_user(to, (__force void __user *) from, count);
5331 }
5332
5333 static inline unsigned long
5334 __copy_from_user (void *to, const void __user *from, unsigned long count)
5335 {
5336+ if (count > INT_MAX)
5337+ return count;
5338+
5339+ if (!__builtin_constant_p(count))
5340+ check_object_size(to, count, false);
5341+
5342 return __copy_user((__force void __user *) to, from, count);
5343 }
5344
5345@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5346 ({ \
5347 void __user *__cu_to = (to); \
5348 const void *__cu_from = (from); \
5349- long __cu_len = (n); \
5350+ unsigned long __cu_len = (n); \
5351 \
5352- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5353+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5354+ if (!__builtin_constant_p(n)) \
5355+ check_object_size(__cu_from, __cu_len, true); \
5356 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5357+ } \
5358 __cu_len; \
5359 })
5360
5361@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5362 ({ \
5363 void *__cu_to = (to); \
5364 const void __user *__cu_from = (from); \
5365- long __cu_len = (n); \
5366+ unsigned long __cu_len = (n); \
5367 \
5368 __chk_user_ptr(__cu_from); \
5369- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5370+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5371+ if (!__builtin_constant_p(n)) \
5372+ check_object_size(__cu_to, __cu_len, false); \
5373 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5374+ } \
5375 __cu_len; \
5376 })
5377
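The check_object_size() calls added above are the PAX_USERCOPY hook: whenever the length is not a compile-time constant, the copy is validated against the bounds of the kernel object backing the buffer. A hypothetical driver snippet showing the class of bug this stops:

#include <linux/uaccess.h>

struct reply {
	char buf[64];
};

/* If a miscomputed, user-influenced len exceeds 64, the runtime
 * check added above catches the oversized copy instead of leaking
 * the slab memory that follows r->buf. */
static long reply_read(struct reply *r, char __user *dst, size_t len)
{
	if (copy_to_user(dst, r->buf, len))
		return -EFAULT;
	return len;
}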
5378diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5379index 29754aa..06d2838 100644
5380--- a/arch/ia64/kernel/module.c
5381+++ b/arch/ia64/kernel/module.c
5382@@ -492,15 +492,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5383 }
5384
5385 static inline int
5386+in_init_rx (const struct module *mod, uint64_t addr)
5387+{
5388+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5389+}
5390+
5391+static inline int
5392+in_init_rw (const struct module *mod, uint64_t addr)
5393+{
5394+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5395+}
5396+
5397+static inline int
5398 in_init (const struct module *mod, uint64_t addr)
5399 {
5400- return addr - (uint64_t) mod->module_init < mod->init_size;
5401+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5402+}
5403+
5404+static inline int
5405+in_core_rx (const struct module *mod, uint64_t addr)
5406+{
5407+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5408+}
5409+
5410+static inline int
5411+in_core_rw (const struct module *mod, uint64_t addr)
5412+{
5413+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5414 }
5415
5416 static inline int
5417 in_core (const struct module *mod, uint64_t addr)
5418 {
5419- return addr - (uint64_t) mod->module_core < mod->core_size;
5420+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5421 }
5422
5423 static inline int
5424@@ -683,7 +707,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5425 break;
5426
5427 case RV_BDREL:
5428- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5429+ if (in_init_rx(mod, val))
5430+ val -= (uint64_t) mod->module_init_rx;
5431+ else if (in_init_rw(mod, val))
5432+ val -= (uint64_t) mod->module_init_rw;
5433+ else if (in_core_rx(mod, val))
5434+ val -= (uint64_t) mod->module_core_rx;
5435+ else if (in_core_rw(mod, val))
5436+ val -= (uint64_t) mod->module_core_rw;
5437 break;
5438
5439 case RV_LTV:
5440@@ -818,15 +849,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5441 * addresses have been selected...
5442 */
5443 uint64_t gp;
5444- if (mod->core_size > MAX_LTOFF)
5445+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5446 /*
5447 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5448 * at the end of the module.
5449 */
5450- gp = mod->core_size - MAX_LTOFF / 2;
5451+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5452 else
5453- gp = mod->core_size / 2;
5454- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5455+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5456+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5457 mod->arch.gp = gp;
5458 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5459 }
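All of the in_init_rx/in_core_rw style range tests above rely on a single unsigned comparison; the subtraction wraps for addresses below the base, so one compare covers both bounds:

#include <stdint.h>

/* If addr < base, (addr - base) wraps to a huge value and the
 * comparison fails, so no explicit lower-bound test is needed. */
static inline int in_range(uint64_t addr, uint64_t base, uint64_t size)
{
	return addr - base < size;
}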
5460diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5461index c39c3cd..3c77738 100644
5462--- a/arch/ia64/kernel/palinfo.c
5463+++ b/arch/ia64/kernel/palinfo.c
5464@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5465 return NOTIFY_OK;
5466 }
5467
5468-static struct notifier_block __refdata palinfo_cpu_notifier =
5469+static struct notifier_block palinfo_cpu_notifier =
5470 {
5471 .notifier_call = palinfo_cpu_callback,
5472 .priority = 0,
5473diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5474index 41e33f8..65180b2a 100644
5475--- a/arch/ia64/kernel/sys_ia64.c
5476+++ b/arch/ia64/kernel/sys_ia64.c
5477@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5478 unsigned long align_mask = 0;
5479 struct mm_struct *mm = current->mm;
5480 struct vm_unmapped_area_info info;
5481+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5482
5483 if (len > RGN_MAP_LIMIT)
5484 return -ENOMEM;
5485@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5486 if (REGION_NUMBER(addr) == RGN_HPAGE)
5487 addr = 0;
5488 #endif
5489+
5490+#ifdef CONFIG_PAX_RANDMMAP
5491+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5492+ addr = mm->free_area_cache;
5493+ else
5494+#endif
5495+
5496 if (!addr)
5497 addr = TASK_UNMAPPED_BASE;
5498
5499@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5500 info.high_limit = TASK_SIZE;
5501 info.align_mask = align_mask;
5502 info.align_offset = 0;
5503+ info.threadstack_offset = offset;
5504 return vm_unmapped_area(&info);
5505 }
5506
5507diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5508index 84f8a52..7c76178 100644
5509--- a/arch/ia64/kernel/vmlinux.lds.S
5510+++ b/arch/ia64/kernel/vmlinux.lds.S
5511@@ -192,7 +192,7 @@ SECTIONS {
5512 /* Per-cpu data: */
5513 . = ALIGN(PERCPU_PAGE_SIZE);
5514 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5515- __phys_per_cpu_start = __per_cpu_load;
5516+ __phys_per_cpu_start = per_cpu_load;
5517 /*
5518 * ensure percpu data fits
5519 * into percpu page size
5520diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5521index ba5ba7a..36e9d3a 100644
5522--- a/arch/ia64/mm/fault.c
5523+++ b/arch/ia64/mm/fault.c
5524@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5525 return pte_present(pte);
5526 }
5527
5528+#ifdef CONFIG_PAX_PAGEEXEC
5529+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5530+{
5531+ unsigned long i;
5532+
5533+ printk(KERN_ERR "PAX: bytes at PC: ");
5534+ for (i = 0; i < 8; i++) {
5535+ unsigned int c;
5536+ if (get_user(c, (unsigned int *)pc+i))
5537+ printk(KERN_CONT "???????? ");
5538+ else
5539+ printk(KERN_CONT "%08x ", c);
5540+ }
5541+ printk("\n");
5542+}
5543+#endif
5544+
5545 # define VM_READ_BIT 0
5546 # define VM_WRITE_BIT 1
5547 # define VM_EXEC_BIT 2
5548@@ -151,8 +168,21 @@ retry:
5549 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5550 goto bad_area;
5551
5552- if ((vma->vm_flags & mask) != mask)
5553+ if ((vma->vm_flags & mask) != mask) {
5554+
5555+#ifdef CONFIG_PAX_PAGEEXEC
5556+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5557+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5558+ goto bad_area;
5559+
5560+ up_read(&mm->mmap_sem);
5561+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5562+ do_group_exit(SIGKILL);
5563+ }
5564+#endif
5565+
5566 goto bad_area;
5567+ }
5568
5569 /*
5570 * If for any reason at all we couldn't handle the fault, make
5571diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5572index 76069c1..c2aa816 100644
5573--- a/arch/ia64/mm/hugetlbpage.c
5574+++ b/arch/ia64/mm/hugetlbpage.c
5575@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5576 unsigned long pgoff, unsigned long flags)
5577 {
5578 struct vm_unmapped_area_info info;
5579+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5580
5581 if (len > RGN_MAP_LIMIT)
5582 return -ENOMEM;
5583@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5584 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5585 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5586 info.align_offset = 0;
5587+ info.threadstack_offset = offset;
5588 return vm_unmapped_area(&info);
5589 }
5590
5591diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5592index 6b33457..88b5124 100644
5593--- a/arch/ia64/mm/init.c
5594+++ b/arch/ia64/mm/init.c
5595@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5596 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5597 vma->vm_end = vma->vm_start + PAGE_SIZE;
5598 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5599+
5600+#ifdef CONFIG_PAX_PAGEEXEC
5601+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5602+ vma->vm_flags &= ~VM_EXEC;
5603+
5604+#ifdef CONFIG_PAX_MPROTECT
5605+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5606+ vma->vm_flags &= ~VM_MAYEXEC;
5607+#endif
5608+
5609+ }
5610+#endif
5611+
5612 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5613 down_write(&current->mm->mmap_sem);
5614 if (insert_vm_struct(current->mm, vma)) {
5615@@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
5616 gate_vma.vm_start = FIXADDR_USER_START;
5617 gate_vma.vm_end = FIXADDR_USER_END;
5618 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
5619- gate_vma.vm_page_prot = __P101;
5620+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
5621
5622 return 0;
5623 }
5624diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5625index 40b3ee98..8c2c112 100644
5626--- a/arch/m32r/include/asm/cache.h
5627+++ b/arch/m32r/include/asm/cache.h
5628@@ -1,8 +1,10 @@
5629 #ifndef _ASM_M32R_CACHE_H
5630 #define _ASM_M32R_CACHE_H
5631
5632+#include <linux/const.h>
5633+
5634 /* L1 cache line size */
5635 #define L1_CACHE_SHIFT 4
5636-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5637+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5638
5639 #endif /* _ASM_M32R_CACHE_H */
5640diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5641index 82abd15..d95ae5d 100644
5642--- a/arch/m32r/lib/usercopy.c
5643+++ b/arch/m32r/lib/usercopy.c
5644@@ -14,6 +14,9 @@
5645 unsigned long
5646 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5647 {
5648+ if ((long)n < 0)
5649+ return n;
5650+
5651 prefetch(from);
5652 if (access_ok(VERIFY_WRITE, to, n))
5653 __copy_user(to,from,n);
5654@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5655 unsigned long
5656 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5657 {
5658+ if ((long)n < 0)
5659+ return n;
5660+
5661 prefetchw(to);
5662 if (access_ok(VERIFY_READ, from, n))
5663 __copy_user_zeroing(to,from,n);
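The (long)n < 0 guards reject sizes with the top bit set, which typically come from unsigned underflow in a caller. A user-space illustration of the failure mode:

#include <stdio.h>

int main(void)
{
	size_t len = 4, hdr = 8;
	size_t n = len - hdr;	/* underflows to 0xfffffffc on 32-bit */

	if ((long)n < 0)
		printf("copy refused; %zu returned as bytes-not-copied\n", n);
	return 0;
}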
5664diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5665index 0395c51..5f26031 100644
5666--- a/arch/m68k/include/asm/cache.h
5667+++ b/arch/m68k/include/asm/cache.h
5668@@ -4,9 +4,11 @@
5669 #ifndef __ARCH_M68K_CACHE_H
5670 #define __ARCH_M68K_CACHE_H
5671
5672+#include <linux/const.h>
5673+
5674 /* bytes per L1 cache line */
5675 #define L1_CACHE_SHIFT 4
5676-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5677+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5678
5679 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5680
5681diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5682index d703d8e..a8e2d70 100644
5683--- a/arch/metag/include/asm/barrier.h
5684+++ b/arch/metag/include/asm/barrier.h
5685@@ -90,7 +90,7 @@ static inline void fence(void)
5686 do { \
5687 compiletime_assert_atomic_type(*p); \
5688 smp_mb(); \
5689- ACCESS_ONCE(*p) = (v); \
5690+ ACCESS_ONCE_RW(*p) = (v); \
5691 } while (0)
5692
5693 #define smp_load_acquire(p) \
5694diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5695index 3c32075..ae0ae75 100644
5696--- a/arch/metag/mm/hugetlbpage.c
5697+++ b/arch/metag/mm/hugetlbpage.c
5698@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5699 info.high_limit = TASK_SIZE;
5700 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5701 info.align_offset = 0;
5702+ info.threadstack_offset = 0;
5703 return vm_unmapped_area(&info);
5704 }
5705
5706diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5707index 4efe96a..60e8699 100644
5708--- a/arch/microblaze/include/asm/cache.h
5709+++ b/arch/microblaze/include/asm/cache.h
5710@@ -13,11 +13,12 @@
5711 #ifndef _ASM_MICROBLAZE_CACHE_H
5712 #define _ASM_MICROBLAZE_CACHE_H
5713
5714+#include <linux/const.h>
5715 #include <asm/registers.h>
5716
5717 #define L1_CACHE_SHIFT 5
5718 /* word-granular cache in microblaze */
5719-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5720+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5721
5722 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5723
5724diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5725index 843713c..b6a87b9 100644
5726--- a/arch/mips/Kconfig
5727+++ b/arch/mips/Kconfig
5728@@ -2439,6 +2439,7 @@ source "kernel/Kconfig.preempt"
5729
5730 config KEXEC
5731 bool "Kexec system call"
5732+ depends on !GRKERNSEC_KMEM
5733 help
5734 kexec is a system call that implements the ability to shutdown your
5735 current kernel, and to start another kernel. It is like a reboot
5736diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5737index 3778655..1dff0a9 100644
5738--- a/arch/mips/cavium-octeon/dma-octeon.c
5739+++ b/arch/mips/cavium-octeon/dma-octeon.c
5740@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5741 if (dma_release_from_coherent(dev, order, vaddr))
5742 return;
5743
5744- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5745+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5746 }
5747
5748 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5749diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5750index 857da84..3f4458b 100644
5751--- a/arch/mips/include/asm/atomic.h
5752+++ b/arch/mips/include/asm/atomic.h
5753@@ -22,15 +22,39 @@
5754 #include <asm/cmpxchg.h>
5755 #include <asm/war.h>
5756
5757+#ifdef CONFIG_GENERIC_ATOMIC64
5758+#include <asm-generic/atomic64.h>
5759+#endif
5760+
5761 #define ATOMIC_INIT(i) { (i) }
5762
5763+#ifdef CONFIG_64BIT
5764+#define _ASM_EXTABLE(from, to) \
5765+" .section __ex_table,\"a\"\n" \
5766+" .dword " #from ", " #to"\n" \
5767+" .previous\n"
5768+#else
5769+#define _ASM_EXTABLE(from, to) \
5770+" .section __ex_table,\"a\"\n" \
5771+" .word " #from ", " #to"\n" \
5772+" .previous\n"
5773+#endif
5774+
5775 /*
5776 * atomic_read - read atomic variable
5777 * @v: pointer of type atomic_t
5778 *
5779 * Atomically reads the value of @v.
5780 */
5781-#define atomic_read(v) ACCESS_ONCE((v)->counter)
5782+static inline int atomic_read(const atomic_t *v)
5783+{
5784+ return ACCESS_ONCE(v->counter);
5785+}
5786+
5787+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5788+{
5789+ return ACCESS_ONCE(v->counter);
5790+}
5791
5792 /*
5793 * atomic_set - set atomic variable
5794@@ -39,47 +63,77 @@
5795 *
5796 * Atomically sets the value of @v to @i.
5797 */
5798-#define atomic_set(v, i) ((v)->counter = (i))
5799+static inline void atomic_set(atomic_t *v, int i)
5800+{
5801+ v->counter = i;
5802+}
5803
5804-#define ATOMIC_OP(op, c_op, asm_op) \
5805-static __inline__ void atomic_##op(int i, atomic_t * v) \
5806+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5807+{
5808+ v->counter = i;
5809+}
5810+
5811+#ifdef CONFIG_PAX_REFCOUNT
5812+#define __OVERFLOW_POST \
5813+ " b 4f \n" \
5814+ " .set noreorder \n" \
5815+ "3: b 5f \n" \
5816+ " move %0, %1 \n" \
5817+ " .set reorder \n"
5818+#define __OVERFLOW_EXTABLE \
5819+ "3:\n" \
5820+ _ASM_EXTABLE(2b, 3b)
5821+#else
5822+#define __OVERFLOW_POST
5823+#define __OVERFLOW_EXTABLE
5824+#endif
5825+
5826+#define __ATOMIC_OP(op, suffix, asm_op, extable) \
5827+static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \
5828 { \
5829 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
5830 int temp; \
5831 \
5832 __asm__ __volatile__( \
5833- " .set arch=r4000 \n" \
5834- "1: ll %0, %1 # atomic_" #op " \n" \
5835- " " #asm_op " %0, %2 \n" \
5836+ " .set mips3 \n" \
5837+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5838+ "2: " #asm_op " %0, %2 \n" \
5839 " sc %0, %1 \n" \
5840 " beqzl %0, 1b \n" \
5841+ extable \
5842 " .set mips0 \n" \
5843 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5844 : "Ir" (i)); \
5845 } else if (kernel_uses_llsc) { \
5846 int temp; \
5847 \
5848- do { \
5849- __asm__ __volatile__( \
5850- " .set arch=r4000 \n" \
5851- " ll %0, %1 # atomic_" #op "\n" \
5852- " " #asm_op " %0, %2 \n" \
5853- " sc %0, %1 \n" \
5854- " .set mips0 \n" \
5855- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5856- : "Ir" (i)); \
5857- } while (unlikely(!temp)); \
5858+ __asm__ __volatile__( \
5859+ " .set mips3 \n" \
5860+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
5861+ "2: " #asm_op " %0, %2 \n" \
5862+ " sc %0, %1 \n" \
5863+ " beqz %0, 1b \n" \
5864+ extable \
5865+ " .set mips0 \n" \
5866+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
5867+ : "Ir" (i)); \
5868 } else { \
5869 unsigned long flags; \
5870 \
5871 raw_local_irq_save(flags); \
5872- v->counter c_op i; \
5873+ __asm__ __volatile__( \
5874+ "2: " #asm_op " %0, %1 \n" \
5875+ extable \
5876+ : "+r" (v->counter) : "Ir" (i)); \
5877 raw_local_irq_restore(flags); \
5878 } \
5879 }
5880
5881-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
5882-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5883+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE) \
5884+ __ATOMIC_OP(op, _unchecked, asm_op##u, )
5885+
5886+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \
5887+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t * v) \
5888 { \
5889 int result; \
5890 \
5891@@ -89,12 +143,15 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5892 int temp; \
5893 \
5894 __asm__ __volatile__( \
5895- " .set arch=r4000 \n" \
5896- "1: ll %1, %2 # atomic_" #op "_return \n" \
5897- " " #asm_op " %0, %1, %3 \n" \
5898+ " .set mips3 \n" \
5899+ "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \
5900+ "2: " #asm_op " %0, %1, %3 \n" \
5901 " sc %0, %2 \n" \
5902 " beqzl %0, 1b \n" \
5903- " " #asm_op " %0, %1, %3 \n" \
5904+ post_op \
5905+ extable \
5906+ "4: " #asm_op " %0, %1, %3 \n" \
5907+ "5: \n" \
5908 " .set mips0 \n" \
5909 : "=&r" (result), "=&r" (temp), \
5910 "+" GCC_OFF12_ASM() (v->counter) \
5911@@ -102,26 +159,33 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5912 } else if (kernel_uses_llsc) { \
5913 int temp; \
5914 \
5915- do { \
5916- __asm__ __volatile__( \
5917- " .set arch=r4000 \n" \
5918- " ll %1, %2 # atomic_" #op "_return \n" \
5919- " " #asm_op " %0, %1, %3 \n" \
5920- " sc %0, %2 \n" \
5921- " .set mips0 \n" \
5922- : "=&r" (result), "=&r" (temp), \
5923- "+" GCC_OFF12_ASM() (v->counter) \
5924- : "Ir" (i)); \
5925- } while (unlikely(!result)); \
5926+ __asm__ __volatile__( \
5927+ " .set mips3 \n" \
5928+ "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \
5929+ "2: " #asm_op " %0, %1, %3 \n" \
5930+ " sc %0, %2 \n" \
5931+ post_op \
5932+ extable \
5933+ "4: " #asm_op " %0, %1, %3 \n" \
5934+ "5: \n" \
5935+ " .set mips0 \n" \
5936+ : "=&r" (result), "=&r" (temp), \
5937+ "+" GCC_OFF12_ASM() (v->counter) \
5938+ : "Ir" (i)); \
5939 \
5940- result = temp; result c_op i; \
5941 } else { \
5942 unsigned long flags; \
5943 \
5944 raw_local_irq_save(flags); \
5945- result = v->counter; \
5946- result c_op i; \
5947- v->counter = result; \
5948+ __asm__ __volatile__( \
5949+ " lw %0, %1 \n" \
5950+ "2: " #asm_op " %0, %1, %2 \n" \
5951+ " sw %0, %1 \n" \
5952+ "3: \n" \
5953+ extable \
5954+ : "=&r" (result), "+" GCC_OFF12_ASM() (v->counter) \
5955+ : "Ir" (i)); \
5956 raw_local_irq_restore(flags); \
5957 } \
5958 \
5959@@ -130,16 +194,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
5960 return result; \
5961 }
5962
5963-#define ATOMIC_OPS(op, c_op, asm_op) \
5964- ATOMIC_OP(op, c_op, asm_op) \
5965- ATOMIC_OP_RETURN(op, c_op, asm_op)
5966+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE) \
5967+ __ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , )
5968
5969-ATOMIC_OPS(add, +=, addu)
5970-ATOMIC_OPS(sub, -=, subu)
5971+#define ATOMIC_OPS(op, asm_op) \
5972+ ATOMIC_OP(op, asm_op) \
5973+ ATOMIC_OP_RETURN(op, asm_op)
5974+
5975+ATOMIC_OPS(add, add)
5976+ATOMIC_OPS(sub, sub)
5977
5978 #undef ATOMIC_OPS
5979 #undef ATOMIC_OP_RETURN
5980+#undef __ATOMIC_OP_RETURN
5981 #undef ATOMIC_OP
5982+#undef __ATOMIC_OP
5983
5984 /*
5985 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
5986@@ -149,7 +218,7 @@ ATOMIC_OPS(sub, -=, subu)
5987 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5988 * The function returns the old value of @v minus @i.
5989 */
5990-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5991+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5992 {
5993 int result;
5994
5995@@ -208,8 +277,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5996 return result;
5997 }
5998
5999-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
6000-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
6001+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6002+{
6003+ return cmpxchg(&v->counter, old, new);
6004+}
6005+
6006+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
6007+ int new)
6008+{
6009+ return cmpxchg(&(v->counter), old, new);
6010+}
6011+
6012+static inline int atomic_xchg(atomic_t *v, int new)
6013+{
6014+ return xchg(&v->counter, new);
6015+}
6016+
6017+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6018+{
6019+ return xchg(&(v->counter), new);
6020+}
6021
6022 /**
6023 * __atomic_add_unless - add unless the number is a given value
6024@@ -237,6 +324,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6025
6026 #define atomic_dec_return(v) atomic_sub_return(1, (v))
6027 #define atomic_inc_return(v) atomic_add_return(1, (v))
6028+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6029+{
6030+ return atomic_add_return_unchecked(1, v);
6031+}
6032
6033 /*
6034 * atomic_sub_and_test - subtract value from variable and test result
6035@@ -258,6 +349,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6036 * other cases.
6037 */
6038 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6039+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6040+{
6041+ return atomic_add_return_unchecked(1, v) == 0;
6042+}
6043
6044 /*
6045 * atomic_dec_and_test - decrement by 1 and test
6046@@ -282,6 +377,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6047 * Atomically increments @v by 1.
6048 */
6049 #define atomic_inc(v) atomic_add(1, (v))
6050+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6051+{
6052+ atomic_add_unchecked(1, v);
6053+}
6054
6055 /*
6056 * atomic_dec - decrement and test
6057@@ -290,6 +389,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6058 * Atomically decrements @v by 1.
6059 */
6060 #define atomic_dec(v) atomic_sub(1, (v))
6061+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6062+{
6063+ atomic_sub_unchecked(1, v);
6064+}
6065
6066 /*
6067 * atomic_add_negative - add and test if negative
6068@@ -311,54 +414,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6069 * @v: pointer of type atomic64_t
6070 *
6071 */
6072-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
6073+static inline long atomic64_read(const atomic64_t *v)
6074+{
6075+ return ACCESS_ONCE(v->counter);
6076+}
6077+
6078+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6079+{
6080+ return ACCESS_ONCE(v->counter);
6081+}
6082
6083 /*
6084 * atomic64_set - set atomic variable
6085 * @v: pointer of type atomic64_t
6086 * @i: required value
6087 */
6088-#define atomic64_set(v, i) ((v)->counter = (i))
6089+static inline void atomic64_set(atomic64_t *v, long i)
6090+{
6091+ v->counter = i;
6092+}
6093
6094-#define ATOMIC64_OP(op, c_op, asm_op) \
6095-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
6096+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6097+{
6098+ v->counter = i;
6099+}
6100+
6101+#define __ATOMIC64_OP(op, suffix, asm_op, extable) \
6102+static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \
6103 { \
6104 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
6105 long temp; \
6106 \
6107 __asm__ __volatile__( \
6108- " .set arch=r4000 \n" \
6109- "1: lld %0, %1 # atomic64_" #op " \n" \
6110- " " #asm_op " %0, %2 \n" \
6111+ " .set mips3 \n" \
6112+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6113+ "2: " #asm_op " %0, %2 \n" \
6114 " scd %0, %1 \n" \
6115 " beqzl %0, 1b \n" \
6116+ extable \
6117 " .set mips0 \n" \
6118 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6119 : "Ir" (i)); \
6120 } else if (kernel_uses_llsc) { \
6121 long temp; \
6122 \
6123- do { \
6124- __asm__ __volatile__( \
6125- " .set arch=r4000 \n" \
6126- " lld %0, %1 # atomic64_" #op "\n" \
6127- " " #asm_op " %0, %2 \n" \
6128- " scd %0, %1 \n" \
6129- " .set mips0 \n" \
6130- : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6131- : "Ir" (i)); \
6132- } while (unlikely(!temp)); \
6133+ __asm__ __volatile__( \
6134+ " .set mips3 \n" \
6135+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6136+ "2: " #asm_op " %0, %2 \n" \
6137+ " scd %0, %1 \n" \
6138+ " beqz %0, 1b \n" \
6139+ extable \
6140+ " .set mips0 \n" \
6141+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
6142+ : "Ir" (i)); \
6143 } else { \
6144 unsigned long flags; \
6145 \
6146 raw_local_irq_save(flags); \
6147- v->counter c_op i; \
6148+ __asm__ __volatile__( \
6149+ "2: " #asm_op " %0, %1 \n" \
6150+ extable \
6151+ : "+" GCC_OFF12_ASM() (v->counter) : "Ir" (i)); \
6152 raw_local_irq_restore(flags); \
6153 } \
6154 }
6155
6156-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
6157-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6158+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE) \
6159+ __ATOMIC64_OP(op, _unchecked, asm_op##u, )
6160+
6161+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \
6162+static inline long atomic64_##op##_return##suffix(long i, atomic64##suffix##_t * v)\
6163 { \
6164 long result; \
6165 \
6166@@ -368,12 +494,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6167 long temp; \
6168 \
6169 __asm__ __volatile__( \
6170- " .set arch=r4000 \n" \
6171+ " .set mips3 \n" \
6172 "1: lld %1, %2 # atomic64_" #op "_return\n" \
6173- " " #asm_op " %0, %1, %3 \n" \
6174+ "2: " #asm_op " %0, %1, %3 \n" \
6175 " scd %0, %2 \n" \
6176 " beqzl %0, 1b \n" \
6177- " " #asm_op " %0, %1, %3 \n" \
6178+ post_op \
6179+ extable \
6180+ "4: " #asm_op " %0, %1, %3 \n" \
6181+ "5: \n" \
6182 " .set mips0 \n" \
6183 : "=&r" (result), "=&r" (temp), \
6184 "+" GCC_OFF12_ASM() (v->counter) \
6185@@ -381,27 +510,35 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6186 } else if (kernel_uses_llsc) { \
6187 long temp; \
6188 \
6189- do { \
6190- __asm__ __volatile__( \
6191- " .set arch=r4000 \n" \
6192- " lld %1, %2 # atomic64_" #op "_return\n" \
6193- " " #asm_op " %0, %1, %3 \n" \
6194- " scd %0, %2 \n" \
6195- " .set mips0 \n" \
6196- : "=&r" (result), "=&r" (temp), \
6197- "=" GCC_OFF12_ASM() (v->counter) \
6198- : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
6199- : "memory"); \
6200- } while (unlikely(!result)); \
6201+ __asm__ __volatile__( \
6202+ " .set mips3 \n" \
6203+ "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\
6204+ "2: " #asm_op " %0, %1, %3 \n" \
6205+ " scd %0, %2 \n" \
6206+ " beqz %0, 1b \n" \
6207+ post_op \
6208+ extable \
6209+ "4: " #asm_op " %0, %1, %3 \n" \
6210+ "5: \n" \
6211+ " .set mips0 \n" \
6212+ : "=&r" (result), "=&r" (temp), \
6213+ "=" GCC_OFF12_ASM() (v->counter) \
6214+ : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
6215+ : "memory"); \
6216 \
6217- result = temp; result c_op i; \
6218 } else { \
6219 unsigned long flags; \
6220 \
6221 raw_local_irq_save(flags); \
6222- result = v->counter; \
6223- result c_op i; \
6224- v->counter = result; \
6225+ __asm__ __volatile__( \
6226+ " ld %0, %1 \n" \
6227+ "2: " #asm_op " %0, %1, %2 \n" \
6228+ " sd %0, %1 \n" \
6229+ "3: \n" \
6230+ extable \
6231+ : "=&r" (result), "+" GCC_OFF12_ASM() (v->counter) \
6232+ : "Ir" (i)); \
6233 raw_local_irq_restore(flags); \
6234 } \
6235 \
6236@@ -410,16 +547,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6237 return result; \
6238 }
6239
6240-#define ATOMIC64_OPS(op, c_op, asm_op) \
6241- ATOMIC64_OP(op, c_op, asm_op) \
6242- ATOMIC64_OP_RETURN(op, c_op, asm_op)
6243+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE) \
6244+ __ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , )
6245
6246-ATOMIC64_OPS(add, +=, daddu)
6247-ATOMIC64_OPS(sub, -=, dsubu)
6248+#define ATOMIC64_OPS(op, asm_op) \
6249+ ATOMIC64_OP(op, asm_op) \
6250+ ATOMIC64_OP_RETURN(op, asm_op)
6251+
6252+ATOMIC64_OPS(add, dadd)
6253+ATOMIC64_OPS(sub, dsub)
6254
6255 #undef ATOMIC64_OPS
6256 #undef ATOMIC64_OP_RETURN
6257+#undef __ATOMIC64_OP_RETURN
6258 #undef ATOMIC64_OP
6259+#undef __ATOMIC64_OP
6260+#undef __OVERFLOW_EXTABLE
6261+#undef __OVERFLOW_POST
6262
6263 /*
6264 * atomic64_sub_if_positive - conditionally subtract integer from atomic
6265@@ -430,7 +574,7 @@ ATOMIC64_OPS(sub, -=, dsubu)
6266 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6267 * The function returns the old value of @v minus @i.
6268 */
6269-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6270+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6271 {
6272 long result;
6273
6274@@ -489,9 +633,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6275 return result;
6276 }
6277
6278-#define atomic64_cmpxchg(v, o, n) \
6279- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6280-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6281+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6282+{
6283+ return cmpxchg(&v->counter, old, new);
6284+}
6285+
6286+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6287+ long new)
6288+{
6289+ return cmpxchg(&(v->counter), old, new);
6290+}
6291+
6292+static inline long atomic64_xchg(atomic64_t *v, long new)
6293+{
6294+ return xchg(&v->counter, new);
6295+}
6296+
6297+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6298+{
6299+ return xchg(&(v->counter), new);
6300+}
6301
6302 /**
6303 * atomic64_add_unless - add unless the number is a given value
6304@@ -521,6 +682,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6305
6306 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6307 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6308+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6309
6310 /*
6311 * atomic64_sub_and_test - subtract value from variable and test result
6312@@ -542,6 +704,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6313 * other cases.
6314 */
6315 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6316+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6317
6318 /*
6319 * atomic64_dec_and_test - decrement by 1 and test
6320@@ -566,6 +729,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6321 * Atomically increments @v by 1.
6322 */
6323 #define atomic64_inc(v) atomic64_add(1, (v))
6324+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6325
6326 /*
6327 * atomic64_dec - decrement and test
6328@@ -574,6 +738,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6329 * Atomically decrements @v by 1.
6330 */
6331 #define atomic64_dec(v) atomic64_sub(1, (v))
6332+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6333
6334 /*
6335 * atomic64_add_negative - add and test if negative
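Taken together, the MIPS atomic rewrites above implement PAX_REFCOUNT by switching the checked variants from addu/daddu (which never trap) to add/dadd, whose signed overflow raises an exception, and by registering the trapping instruction in __ex_table so the handler can resume past the store. A condensed walkthrough of the checked ll/sc path, as assumed from the macros above:

/*
 * 1:  ll    temp, v->counter    # load-linked
 * 2:  add   temp, temp, i       # signed add: traps on overflow
 *     sc    temp, v->counter    # store-conditional
 *     beqz  temp, 1b            # retry if sc lost the race
 * 3:                            # overflow path: reached via the
 *                               # __ex_table entry (2b -> 3b); the
 *                               # sc never runs, the counter keeps
 *                               # its old value (saturates), and the
 *                               # exception handler can report the
 *                               # offending task.
 */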
6336diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6337index 2b8bbbc..4556df6 100644
6338--- a/arch/mips/include/asm/barrier.h
6339+++ b/arch/mips/include/asm/barrier.h
6340@@ -133,7 +133,7 @@
6341 do { \
6342 compiletime_assert_atomic_type(*p); \
6343 smp_mb(); \
6344- ACCESS_ONCE(*p) = (v); \
6345+ ACCESS_ONCE_RW(*p) = (v); \
6346 } while (0)
6347
6348 #define smp_load_acquire(p) \
6349diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6350index b4db69f..8f3b093 100644
6351--- a/arch/mips/include/asm/cache.h
6352+++ b/arch/mips/include/asm/cache.h
6353@@ -9,10 +9,11 @@
6354 #ifndef _ASM_CACHE_H
6355 #define _ASM_CACHE_H
6356
6357+#include <linux/const.h>
6358 #include <kmalloc.h>
6359
6360 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6361-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6362+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6363
6364 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6365 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6366diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6367index eb4d95d..f2f7f93 100644
6368--- a/arch/mips/include/asm/elf.h
6369+++ b/arch/mips/include/asm/elf.h
6370@@ -405,15 +405,18 @@ extern const char *__elf_platform;
6371 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6372 #endif
6373
6374+#ifdef CONFIG_PAX_ASLR
6375+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6376+
6377+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6378+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6379+#endif
6380+
6381 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6382 struct linux_binprm;
6383 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6384 int uses_interp);
6385
6386-struct mm_struct;
6387-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6388-#define arch_randomize_brk arch_randomize_brk
6389-
6390 struct arch_elf_state {
6391 int fp_abi;
6392 int interp_fp_abi;
6393diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6394index c1f6afa..38cc6e9 100644
6395--- a/arch/mips/include/asm/exec.h
6396+++ b/arch/mips/include/asm/exec.h
6397@@ -12,6 +12,6 @@
6398 #ifndef _ASM_EXEC_H
6399 #define _ASM_EXEC_H
6400
6401-extern unsigned long arch_align_stack(unsigned long sp);
6402+#define arch_align_stack(x) ((x) & ~0xfUL)
6403
6404 #endif /* _ASM_EXEC_H */
6405diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6406index 9e8ef59..1139d6b 100644
6407--- a/arch/mips/include/asm/hw_irq.h
6408+++ b/arch/mips/include/asm/hw_irq.h
6409@@ -10,7 +10,7 @@
6410
6411 #include <linux/atomic.h>
6412
6413-extern atomic_t irq_err_count;
6414+extern atomic_unchecked_t irq_err_count;
6415
6416 /*
6417 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6418diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6419index 46dfc3c..a16b13a 100644
6420--- a/arch/mips/include/asm/local.h
6421+++ b/arch/mips/include/asm/local.h
6422@@ -12,15 +12,25 @@ typedef struct
6423 atomic_long_t a;
6424 } local_t;
6425
6426+typedef struct {
6427+ atomic_long_unchecked_t a;
6428+} local_unchecked_t;
6429+
6430 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6431
6432 #define local_read(l) atomic_long_read(&(l)->a)
6433+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6434 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6435+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6436
6437 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6438+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6439 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6440+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6441 #define local_inc(l) atomic_long_inc(&(l)->a)
6442+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6443 #define local_dec(l) atomic_long_dec(&(l)->a)
6444+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6445
6446 /*
6447 * Same as above, but return the result value
6448@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6449 return result;
6450 }
6451
6452+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6453+{
6454+ unsigned long result;
6455+
6456+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6457+ unsigned long temp;
6458+
6459+ __asm__ __volatile__(
6460+ " .set mips3 \n"
6461+ "1:" __LL "%1, %2 # local_add_return \n"
6462+ " addu %0, %1, %3 \n"
6463+ __SC "%0, %2 \n"
6464+ " beqzl %0, 1b \n"
6465+ " addu %0, %1, %3 \n"
6466+ " .set mips0 \n"
6467+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6468+ : "Ir" (i), "m" (l->a.counter)
6469+ : "memory");
6470+ } else if (kernel_uses_llsc) {
6471+ unsigned long temp;
6472+
6473+ __asm__ __volatile__(
6474+ " .set mips3 \n"
6475+ "1:" __LL "%1, %2 # local_add_return \n"
6476+ " addu %0, %1, %3 \n"
6477+ __SC "%0, %2 \n"
6478+ " beqz %0, 1b \n"
6479+ " addu %0, %1, %3 \n"
6480+ " .set mips0 \n"
6481+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6482+ : "Ir" (i), "m" (l->a.counter)
6483+ : "memory");
6484+ } else {
6485+ unsigned long flags;
6486+
6487+ local_irq_save(flags);
6488+ result = l->a.counter;
6489+ result += i;
6490+ l->a.counter = result;
6491+ local_irq_restore(flags);
6492+ }
6493+
6494+ return result;
6495+}
6496+
6497 static __inline__ long local_sub_return(long i, local_t * l)
6498 {
6499 unsigned long result;
6500@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6501
6502 #define local_cmpxchg(l, o, n) \
6503 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6504+#define local_cmpxchg_unchecked(l, o, n) \
6505+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6506 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6507
6508 /**
6509diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6510index 154b70a..426ae3d 100644
6511--- a/arch/mips/include/asm/page.h
6512+++ b/arch/mips/include/asm/page.h
6513@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6514 #ifdef CONFIG_CPU_MIPS32
6515 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6516 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6517- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6518+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6519 #else
6520 typedef struct { unsigned long long pte; } pte_t;
6521 #define pte_val(x) ((x).pte)
6522diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6523index b336037..5b874cc 100644
6524--- a/arch/mips/include/asm/pgalloc.h
6525+++ b/arch/mips/include/asm/pgalloc.h
6526@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6527 {
6528 set_pud(pud, __pud((unsigned long)pmd));
6529 }
6530+
6531+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6532+{
6533+ pud_populate(mm, pud, pmd);
6534+}
6535 #endif
6536
6537 /*
6538diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6539index 845016d..3303268 100644
6540--- a/arch/mips/include/asm/pgtable.h
6541+++ b/arch/mips/include/asm/pgtable.h
6542@@ -20,6 +20,9 @@
6543 #include <asm/io.h>
6544 #include <asm/pgtable-bits.h>
6545
6546+#define ktla_ktva(addr) (addr)
6547+#define ktva_ktla(addr) (addr)
6548+
6549 struct mm_struct;
6550 struct vm_area_struct;
6551
6552diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6553index e4440f9..8fb0005 100644
6554--- a/arch/mips/include/asm/thread_info.h
6555+++ b/arch/mips/include/asm/thread_info.h
6556@@ -106,6 +106,9 @@ static inline struct thread_info *current_thread_info(void)
6557 #define TIF_SECCOMP 4 /* secure computing */
6558 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
6559 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
6560+/* li takes a 32bit immediate */
6561+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
6562+
6563 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
6564 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6565 #define TIF_NOHZ 19 /* in adaptive nohz mode */
6566@@ -141,14 +144,16 @@ static inline struct thread_info *current_thread_info(void)
6567 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
6568 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
6569 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6570+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6571
6572 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6573 _TIF_SYSCALL_AUDIT | \
6574- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6575+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
6576+ _TIF_GRSEC_SETXID)
6577
6578 /* work to do in syscall_trace_leave() */
6579 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6580- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6581+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6582
6583 /* work to do on interrupt/exception return */
6584 #define _TIF_WORK_MASK \
6585@@ -156,7 +161,7 @@ static inline struct thread_info *current_thread_info(void)
6586 /* work to do on any return to u-space */
6587 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6588 _TIF_WORK_SYSCALL_EXIT | \
6589- _TIF_SYSCALL_TRACEPOINT)
6590+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6591
6592 /*
6593 * We stash processor id into a COP0 register to retrieve it fast
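
TIF_GRSEC_SETXID flags a task whose credentials must be refreshed at the next syscall boundary; folding its mask into the entry, exit, and all-work masks above forces the slow path to run, and the ptrace.c hunk further down does the actual work. A standalone sketch of the flag-to-mask pattern (the bit number matches the hunk, the rest is demo scaffolding):

#include <stdio.h>

#define TIF_GRSEC_SETXID        10
#define _TIF_GRSEC_SETXID       (1 << TIF_GRSEC_SETXID)

int main(void)
{
        unsigned long flags = 0;

        flags |= _TIF_GRSEC_SETXID;     /* set when a cred update is deferred */
        if (flags & _TIF_GRSEC_SETXID)  /* tested via _TIF_WORK_SYSCALL_ENTRY */
                printf("credential update pending\n");
        return 0;
}
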
6594diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6595index bf8b324..cec5705 100644
6596--- a/arch/mips/include/asm/uaccess.h
6597+++ b/arch/mips/include/asm/uaccess.h
6598@@ -130,6 +130,7 @@ extern u64 __ua_limit;
6599 __ok == 0; \
6600 })
6601
6602+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6603 #define access_ok(type, addr, size) \
6604 likely(__access_ok((addr), (size), __access_mask))
6605
6606diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6607index 1188e00..41cf144 100644
6608--- a/arch/mips/kernel/binfmt_elfn32.c
6609+++ b/arch/mips/kernel/binfmt_elfn32.c
6610@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6611 #undef ELF_ET_DYN_BASE
6612 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6613
6614+#ifdef CONFIG_PAX_ASLR
6615+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6616+
6617+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6618+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6619+#endif
6620+
6621 #include <asm/processor.h>
6622 #include <linux/module.h>
6623 #include <linux/elfcore.h>
6624diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6625index 9287678..f870e47 100644
6626--- a/arch/mips/kernel/binfmt_elfo32.c
6627+++ b/arch/mips/kernel/binfmt_elfo32.c
6628@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6629 #undef ELF_ET_DYN_BASE
6630 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6631
6632+#ifdef CONFIG_PAX_ASLR
6633+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6634+
6635+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6636+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6637+#endif
6638+
6639 #include <asm/processor.h>
6640
6641 #include <linux/module.h>
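
The PAX_DELTA_*_LEN values express randomization entropy in page-granularity bits: 27-PAGE_SHIFT for 32-bit tasks and 36-PAGE_SHIFT for 64-bit ones. With the common 4 KiB page size that is 15 bits for a 32-bit task, i.e. a 128 MiB window for the mmap and stack bases. A quick check of that arithmetic (the page size is an assumption of the demo):

#include <stdio.h>

int main(void)
{
        int page_shift = 12;                    /* 4 KiB pages (assumed) */
        int len = 27 - page_shift;              /* PAX_DELTA_MMAP_LEN, 32-bit task */
        unsigned long window = (1UL << len) << page_shift;

        printf("%d bits of entropy, %lu MiB window\n", len, window >> 20);
        return 0;
}
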
6642diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
6643index a74ec3a..4f06f18 100644
6644--- a/arch/mips/kernel/i8259.c
6645+++ b/arch/mips/kernel/i8259.c
6646@@ -202,7 +202,7 @@ spurious_8259A_irq:
6647 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
6648 spurious_irq_mask |= irqmask;
6649 }
6650- atomic_inc(&irq_err_count);
6651+ atomic_inc_unchecked(&irq_err_count);
6652 /*
6653 * Theoretically we do not have to handle this IRQ,
6654 * but in Linux this does not cause problems and is
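
irq_err_count is a pure statistic, so letting it wrap is harmless; under PAX_REFCOUNT the ordinary atomic_t operations trap on overflow, which is why such counters are migrated to atomic_unchecked_t with non-trapping operations. A sketch of the opt-out type's assumed shape (the real definition sits in this patch's common-header changes, outside this excerpt):

/* Assumed shape of the opt-out wrapper: same storage as atomic_t,
 * but a distinct type so checked and unchecked uses cannot mix. */
typedef struct {
        int counter;
} atomic_unchecked_t;

#define ATOMIC_INIT(i)  { (i) }
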
6655diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
6656index 44a1f79..2bd6aa3 100644
6657--- a/arch/mips/kernel/irq-gt641xx.c
6658+++ b/arch/mips/kernel/irq-gt641xx.c
6659@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
6660 }
6661 }
6662
6663- atomic_inc(&irq_err_count);
6664+ atomic_inc_unchecked(&irq_err_count);
6665 }
6666
6667 void __init gt641xx_irq_init(void)
6668diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6669index d2bfbc2..a8eacd2 100644
6670--- a/arch/mips/kernel/irq.c
6671+++ b/arch/mips/kernel/irq.c
6672@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
6673 printk("unexpected IRQ # %d\n", irq);
6674 }
6675
6676-atomic_t irq_err_count;
6677+atomic_unchecked_t irq_err_count;
6678
6679 int arch_show_interrupts(struct seq_file *p, int prec)
6680 {
6681- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6682+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6683 return 0;
6684 }
6685
6686 asmlinkage void spurious_interrupt(void)
6687 {
6688- atomic_inc(&irq_err_count);
6689+ atomic_inc_unchecked(&irq_err_count);
6690 }
6691
6692 void __init init_IRQ(void)
6693@@ -109,7 +109,10 @@ void __init init_IRQ(void)
6694 #endif
6695 }
6696
6697+
6698 #ifdef DEBUG_STACKOVERFLOW
6699+extern void gr_handle_kernel_exploit(void);
6700+
6701 static inline void check_stack_overflow(void)
6702 {
6703 unsigned long sp;
6704@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
6705 printk("do_IRQ: stack overflow: %ld\n",
6706 sp - sizeof(struct thread_info));
6707 dump_stack();
6708+ gr_handle_kernel_exploit();
6709 }
6710 }
6711 #else
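
gr_handle_kernel_exploit() is grsecurity's response hook for detected attack conditions; appending it to the stack-overflow report lets the policy defined elsewhere in the patch react instead of merely logging. A simplified model of the detection it is attached to:

/* The kernel stack's low end holds struct thread_info, so a stack
 * pointer landing inside that region means the stack has effectively
 * overflowed (simplified demo of the check above). */
static inline int demo_stack_overflowing(unsigned long sp,
                                         unsigned long stack_base,
                                         unsigned long thread_info_size)
{
        return sp - stack_base < thread_info_size;
}
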
6712diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
6713index 0614717..002fa43 100644
6714--- a/arch/mips/kernel/pm-cps.c
6715+++ b/arch/mips/kernel/pm-cps.c
6716@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
6717 nc_core_ready_count = nc_addr;
6718
6719 /* Ensure ready_count is zero-initialised before the assembly runs */
6720- ACCESS_ONCE(*nc_core_ready_count) = 0;
6721+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
6722 coupled_barrier(&per_cpu(pm_barrier, core), online);
6723
6724 /* Run the generated entry code */
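
ACCESS_ONCE_RW exists because this patch constifies ACCESS_ONCE: reads go through a const-qualified volatile access, and the write sites that remain legitimate are converted as above. The assumed pair of definitions, paraphrased from this patch's compiler.h changes (verify against the full diff):

#define ACCESS_ONCE(x)          (*(volatile const typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x)       (*(volatile typeof(x) *)&(x))
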
6725diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6726index 85bff5d..39bc202 100644
6727--- a/arch/mips/kernel/process.c
6728+++ b/arch/mips/kernel/process.c
6729@@ -534,18 +534,6 @@ out:
6730 return pc;
6731 }
6732
6733-/*
6734- * Don't forget that the stack pointer must be aligned on a 8 bytes
6735- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6736- */
6737-unsigned long arch_align_stack(unsigned long sp)
6738-{
6739- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6740- sp -= get_random_int() & ~PAGE_MASK;
6741-
6742- return sp & ALMASK;
6743-}
6744-
6745 static void arch_dump_stack(void *info)
6746 {
6747 struct pt_regs *regs;
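
Removing the randomizing arch_align_stack() pairs with a constant-mask replacement elsewhere in the patch (the powerpc exec.h hunk below shows it); PaX's own stack randomization already covers placement, so the per-exec jitter is redundant. The replacement pattern:

/* Pure alignment, no jitter (as in this patch's exec.h hunks). */
#define arch_align_stack(x)     ((x) & ~0xfUL)
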
6748diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6749index 5104528..950bbdc 100644
6750--- a/arch/mips/kernel/ptrace.c
6751+++ b/arch/mips/kernel/ptrace.c
6752@@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request,
6753 return ret;
6754 }
6755
6756+#ifdef CONFIG_GRKERNSEC_SETXID
6757+extern void gr_delayed_cred_worker(void);
6758+#endif
6759+
6760 /*
6761 * Notification of system call entry/exit
6762 * - triggered by current->work.syscall_trace
6763@@ -779,6 +783,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
6764 tracehook_report_syscall_entry(regs))
6765 ret = -1;
6766
6767+#ifdef CONFIG_GRKERNSEC_SETXID
6768+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6769+ gr_delayed_cred_worker();
6770+#endif
6771+
6772 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6773 trace_sys_enter(regs, regs->regs[2]);
6774
6775diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
6776index 07fc524..b9d7f28 100644
6777--- a/arch/mips/kernel/reset.c
6778+++ b/arch/mips/kernel/reset.c
6779@@ -13,6 +13,7 @@
6780 #include <linux/reboot.h>
6781
6782 #include <asm/reboot.h>
6783+#include <asm/bug.h>
6784
6785 /*
6786 * Urgs ... Too many MIPS machines to handle this in a generic way.
6787@@ -29,16 +30,19 @@ void machine_restart(char *command)
6788 {
6789 if (_machine_restart)
6790 _machine_restart(command);
6791+ BUG();
6792 }
6793
6794 void machine_halt(void)
6795 {
6796 if (_machine_halt)
6797 _machine_halt();
6798+ BUG();
6799 }
6800
6801 void machine_power_off(void)
6802 {
6803 if (pm_power_off)
6804 pm_power_off();
6805+ BUG();
6806 }
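
_machine_restart and friends may be absent or may return; the added BUG() turns that silent fallthrough into a hard stop rather than returning to callers that assume these functions never come back. The shape of the hardened fallthrough, as a sketch:

/* Demo: stop hard when the board-specific handler is missing or
 * returns, instead of running off the end of a noreturn path. */
void demo_machine_halt(void (*handler)(void))
{
        if (handler)
                handler();
        for (;;)        /* stands in for BUG() in this sketch */
                ;
}
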
6807diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
6808index 2242bdd..b284048 100644
6809--- a/arch/mips/kernel/sync-r4k.c
6810+++ b/arch/mips/kernel/sync-r4k.c
6811@@ -18,8 +18,8 @@
6812 #include <asm/mipsregs.h>
6813
6814 static atomic_t count_start_flag = ATOMIC_INIT(0);
6815-static atomic_t count_count_start = ATOMIC_INIT(0);
6816-static atomic_t count_count_stop = ATOMIC_INIT(0);
6817+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
6818+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
6819 static atomic_t count_reference = ATOMIC_INIT(0);
6820
6821 #define COUNTON 100
6822@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
6823
6824 for (i = 0; i < NR_LOOPS; i++) {
6825 /* slaves loop on '!= 2' */
6826- while (atomic_read(&count_count_start) != 1)
6827+ while (atomic_read_unchecked(&count_count_start) != 1)
6828 mb();
6829- atomic_set(&count_count_stop, 0);
6830+ atomic_set_unchecked(&count_count_stop, 0);
6831 smp_wmb();
6832
6833 /* this lets the slaves write their count register */
6834- atomic_inc(&count_count_start);
6835+ atomic_inc_unchecked(&count_count_start);
6836
6837 /*
6838 * Everyone initialises count in the last loop:
6839@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
6840 /*
6841 * Wait for all slaves to leave the synchronization point:
6842 */
6843- while (atomic_read(&count_count_stop) != 1)
6844+ while (atomic_read_unchecked(&count_count_stop) != 1)
6845 mb();
6846- atomic_set(&count_count_start, 0);
6847+ atomic_set_unchecked(&count_count_start, 0);
6848 smp_wmb();
6849- atomic_inc(&count_count_stop);
6850+ atomic_inc_unchecked(&count_count_stop);
6851 }
6852 /* Arrange for an interrupt in a short while */
6853 write_c0_compare(read_c0_count() + COUNTON);
6854@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
6855 initcount = atomic_read(&count_reference);
6856
6857 for (i = 0; i < NR_LOOPS; i++) {
6858- atomic_inc(&count_count_start);
6859- while (atomic_read(&count_count_start) != 2)
6860+ atomic_inc_unchecked(&count_count_start);
6861+ while (atomic_read_unchecked(&count_count_start) != 2)
6862 mb();
6863
6864 /*
6865@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
6866 if (i == NR_LOOPS-1)
6867 write_c0_count(initcount);
6868
6869- atomic_inc(&count_count_stop);
6870- while (atomic_read(&count_count_stop) != 2)
6871+ atomic_inc_unchecked(&count_count_stop);
6872+ while (atomic_read_unchecked(&count_count_stop) != 2)
6873 mb();
6874 }
6875 /* Arrange for an interrupt in a short while */
6876diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6877index c3b41e2..46c32e9 100644
6878--- a/arch/mips/kernel/traps.c
6879+++ b/arch/mips/kernel/traps.c
6880@@ -688,7 +688,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
6881 siginfo_t info;
6882
6883 prev_state = exception_enter();
6884- die_if_kernel("Integer overflow", regs);
6885+ if (unlikely(!user_mode(regs))) {
6886+
6887+#ifdef CONFIG_PAX_REFCOUNT
6888+ if (fixup_exception(regs)) {
6889+ pax_report_refcount_overflow(regs);
6890+ exception_exit(prev_state);
6891+ return;
6892+ }
6893+#endif
6894+
6895+ die("Integer overflow", regs);
6896+ }
6897
6898 info.si_code = FPE_INTOVF;
6899 info.si_signo = SIGFPE;
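
This is the MIPS half of PAX_REFCOUNT: the atomic operations (patched elsewhere) use trapping arithmetic, the resulting integer-overflow exception arrives in do_ov(), and when an exception-table fixup exists for the faulting instruction the overflow is reported and execution resumes past it rather than dying. A userspace illustration of the condition being trapped:

#include <stdio.h>
#include <limits.h>

/* A signed 32-bit counter crossing INT_MAX is exactly what REFCOUNT
 * refuses to let wrap (demo: detect it with wider arithmetic). */
int main(void)
{
        int counter = INT_MAX;
        long long wide = (long long)counter + 1;

        if (wide > INT_MAX)
                printf("overflow: would trap under PAX_REFCOUNT\n");
        return 0;
}
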
6900diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
6901index 270bbd4..c01932a 100644
6902--- a/arch/mips/kvm/mips.c
6903+++ b/arch/mips/kvm/mips.c
6904@@ -815,7 +815,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
6905 return r;
6906 }
6907
6908-int kvm_arch_init(void *opaque)
6909+int kvm_arch_init(const void *opaque)
6910 {
6911 if (kvm_mips_callbacks) {
6912 kvm_err("kvm: module already exists\n");
6913diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6914index 70ab5d6..62940fe 100644
6915--- a/arch/mips/mm/fault.c
6916+++ b/arch/mips/mm/fault.c
6917@@ -28,6 +28,23 @@
6918 #include <asm/highmem.h> /* For VMALLOC_END */
6919 #include <linux/kdebug.h>
6920
6921+#ifdef CONFIG_PAX_PAGEEXEC
6922+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6923+{
6924+ unsigned long i;
6925+
6926+ printk(KERN_ERR "PAX: bytes at PC: ");
6927+ for (i = 0; i < 5; i++) {
6928+ unsigned int c;
6929+ if (get_user(c, (unsigned int *)pc+i))
6930+ printk(KERN_CONT "???????? ");
6931+ else
6932+ printk(KERN_CONT "%08x ", c);
6933+ }
6934+ printk("\n");
6935+}
6936+#endif
6937+
6938 /*
6939 * This routine handles page faults. It determines the address,
6940 * and the problem, and then passes it off to one of the appropriate
6941@@ -201,6 +218,14 @@ bad_area:
6942 bad_area_nosemaphore:
6943 /* User mode accesses just cause a SIGSEGV */
6944 if (user_mode(regs)) {
6945+
6946+#ifdef CONFIG_PAX_PAGEEXEC
6947+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
6948+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
6949+ do_group_exit(SIGKILL);
6950+ }
6951+#endif
6952+
6953 tsk->thread.cp0_badvaddr = address;
6954 tsk->thread.error_code = write;
6955 #if 0
6956diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
6957index f1baadd..5472dca 100644
6958--- a/arch/mips/mm/mmap.c
6959+++ b/arch/mips/mm/mmap.c
6960@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6961 struct vm_area_struct *vma;
6962 unsigned long addr = addr0;
6963 int do_color_align;
6964+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6965 struct vm_unmapped_area_info info;
6966
6967 if (unlikely(len > TASK_SIZE))
6968@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6969 do_color_align = 1;
6970
6971 /* requesting a specific address */
6972+
6973+#ifdef CONFIG_PAX_RANDMMAP
6974+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
6975+#endif
6976+
6977 if (addr) {
6978 if (do_color_align)
6979 addr = COLOUR_ALIGN(addr, pgoff);
6980@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6981 addr = PAGE_ALIGN(addr);
6982
6983 vma = find_vma(mm, addr);
6984- if (TASK_SIZE - len >= addr &&
6985- (!vma || addr + len <= vma->vm_start))
6986+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
6987 return addr;
6988 }
6989
6990 info.length = len;
6991 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
6992 info.align_offset = pgoff << PAGE_SHIFT;
6993+ info.threadstack_offset = offset;
6994
6995 if (dir == DOWN) {
6996 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
6997@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6998 {
6999 unsigned long random_factor = 0UL;
7000
7001+#ifdef CONFIG_PAX_RANDMMAP
7002+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7003+#endif
7004+
7005 if (current->flags & PF_RANDOMIZE) {
7006 random_factor = get_random_int();
7007 random_factor = random_factor << PAGE_SHIFT;
7008@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7009
7010 if (mmap_is_legacy()) {
7011 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7012+
7013+#ifdef CONFIG_PAX_RANDMMAP
7014+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7015+ mm->mmap_base += mm->delta_mmap;
7016+#endif
7017+
7018 mm->get_unmapped_area = arch_get_unmapped_area;
7019 } else {
7020 mm->mmap_base = mmap_base(random_factor);
7021+
7022+#ifdef CONFIG_PAX_RANDMMAP
7023+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7024+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7025+#endif
7026+
7027 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7028 }
7029 }
7030
7031-static inline unsigned long brk_rnd(void)
7032-{
7033- unsigned long rnd = get_random_int();
7034-
7035- rnd = rnd << PAGE_SHIFT;
7036- /* 8MB for 32bit, 256MB for 64bit */
7037- if (TASK_IS_32BIT_ADDR)
7038- rnd = rnd & 0x7ffffful;
7039- else
7040- rnd = rnd & 0xffffffful;
7041-
7042- return rnd;
7043-}
7044-
7045-unsigned long arch_randomize_brk(struct mm_struct *mm)
7046-{
7047- unsigned long base = mm->brk;
7048- unsigned long ret;
7049-
7050- ret = PAGE_ALIGN(base + brk_rnd());
7051-
7052- if (ret < mm->brk)
7053- return mm->brk;
7054-
7055- return ret;
7056-}
7057-
7058 int __virt_addr_valid(const volatile void *kaddr)
7059 {
7060 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
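
Under PAX_RANDMMAP the specific-address hint is skipped (honouring it would defeat the randomized layout), the bare vma-overlap test becomes a heap/stack gap check, and the mmap base is biased per exec: up by delta_mmap for the legacy bottom-up layout, down by delta_mmap + delta_stack for top-down. The base arithmetic, with placeholder values:

#include <stdio.h>

int main(void)
{
        unsigned long base = 0x77fff000UL;              /* hypothetical mmap_base */
        unsigned long delta_mmap = 0x01000000UL;        /* placeholder deltas */
        unsigned long delta_stack = 0x00200000UL;

        printf("legacy:   %#lx\n", base + delta_mmap);
        printf("top-down: %#lx\n", base - delta_mmap - delta_stack);
        return 0;
}
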
7061diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
7062index d07e041..bedb72b 100644
7063--- a/arch/mips/pci/pci-octeon.c
7064+++ b/arch/mips/pci/pci-octeon.c
7065@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
7066
7067
7068 static struct pci_ops octeon_pci_ops = {
7069- octeon_read_config,
7070- octeon_write_config,
7071+ .read = octeon_read_config,
7072+ .write = octeon_write_config,
7073 };
7074
7075 static struct resource octeon_pci_mem_resource = {
7076diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
7077index 5e36c33..eb4a17b 100644
7078--- a/arch/mips/pci/pcie-octeon.c
7079+++ b/arch/mips/pci/pcie-octeon.c
7080@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
7081 }
7082
7083 static struct pci_ops octeon_pcie0_ops = {
7084- octeon_pcie0_read_config,
7085- octeon_pcie0_write_config,
7086+ .read = octeon_pcie0_read_config,
7087+ .write = octeon_pcie0_write_config,
7088 };
7089
7090 static struct resource octeon_pcie0_mem_resource = {
7091@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
7092 };
7093
7094 static struct pci_ops octeon_pcie1_ops = {
7095- octeon_pcie1_read_config,
7096- octeon_pcie1_write_config,
7097+ .read = octeon_pcie1_read_config,
7098+ .write = octeon_pcie1_write_config,
7099 };
7100
7101 static struct resource octeon_pcie1_mem_resource = {
7102@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
7103 };
7104
7105 static struct pci_ops octeon_dummy_ops = {
7106- octeon_dummy_read_config,
7107- octeon_dummy_write_config,
7108+ .read = octeon_dummy_read_config,
7109+ .write = octeon_dummy_write_config,
7110 };
7111
7112 static struct resource octeon_dummy_mem_resource = {
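
Switching the pci_ops initializers from positional to designated form is defensive: positional initialization binds callbacks to the wrong slots if the structure's fields are ever reordered, which this patch's structure-layout randomization can legitimately do. A toy example of the robust form:

struct demo_ops {
        int (*read)(void);
        int (*write)(void);
};

static int demo_read(void)  { return 1; }
static int demo_write(void) { return 2; }

/* Survives any reordering of demo_ops' fields: */
static struct demo_ops demo = {
        .read   = demo_read,
        .write  = demo_write,
};

int main(void)
{
        return demo.read() + demo.write() == 3 ? 0 : 1;
}
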
7113diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7114index a2358b4..7cead4f 100644
7115--- a/arch/mips/sgi-ip27/ip27-nmi.c
7116+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7117@@ -187,9 +187,9 @@ void
7118 cont_nmi_dump(void)
7119 {
7120 #ifndef REAL_NMI_SIGNAL
7121- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7122+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7123
7124- atomic_inc(&nmied_cpus);
7125+ atomic_inc_unchecked(&nmied_cpus);
7126 #endif
7127 /*
7128 * Only allow 1 cpu to proceed
7129@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7130 udelay(10000);
7131 }
7132 #else
7133- while (atomic_read(&nmied_cpus) != num_online_cpus());
7134+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7135 #endif
7136
7137 /*
7138diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7139index a046b30..6799527 100644
7140--- a/arch/mips/sni/rm200.c
7141+++ b/arch/mips/sni/rm200.c
7142@@ -270,7 +270,7 @@ spurious_8259A_irq:
7143 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7144 spurious_irq_mask |= irqmask;
7145 }
7146- atomic_inc(&irq_err_count);
7147+ atomic_inc_unchecked(&irq_err_count);
7148 /*
7149 * Theoretically we do not have to handle this IRQ,
7150 * but in Linux this does not cause problems and is
7151diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7152index 41e873b..34d33a7 100644
7153--- a/arch/mips/vr41xx/common/icu.c
7154+++ b/arch/mips/vr41xx/common/icu.c
7155@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7156
7157 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7158
7159- atomic_inc(&irq_err_count);
7160+ atomic_inc_unchecked(&irq_err_count);
7161
7162 return -1;
7163 }
7164diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7165index ae0e4ee..e8f0692 100644
7166--- a/arch/mips/vr41xx/common/irq.c
7167+++ b/arch/mips/vr41xx/common/irq.c
7168@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7169 irq_cascade_t *cascade;
7170
7171 if (irq >= NR_IRQS) {
7172- atomic_inc(&irq_err_count);
7173+ atomic_inc_unchecked(&irq_err_count);
7174 return;
7175 }
7176
7177@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7178 ret = cascade->get_irq(irq);
7179 irq = ret;
7180 if (ret < 0)
7181- atomic_inc(&irq_err_count);
7182+ atomic_inc_unchecked(&irq_err_count);
7183 else
7184 irq_dispatch(irq);
7185 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7186diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7187index 967d144..db12197 100644
7188--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7189+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7190@@ -11,12 +11,14 @@
7191 #ifndef _ASM_PROC_CACHE_H
7192 #define _ASM_PROC_CACHE_H
7193
7194+#include <linux/const.h>
7195+
7196 /* L1 cache */
7197
7198 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7199 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7200-#define L1_CACHE_BYTES 16 /* bytes per entry */
7201 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7202+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7203 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7204
7205 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7206diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7207index bcb5df2..84fabd2 100644
7208--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7209+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7210@@ -16,13 +16,15 @@
7211 #ifndef _ASM_PROC_CACHE_H
7212 #define _ASM_PROC_CACHE_H
7213
7214+#include <linux/const.h>
7215+
7216 /*
7217 * L1 cache
7218 */
7219 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7220 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7221-#define L1_CACHE_BYTES 32 /* bytes per entry */
7222 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7223+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7224 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7225
7226 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7227diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7228index 4ce7a01..449202a 100644
7229--- a/arch/openrisc/include/asm/cache.h
7230+++ b/arch/openrisc/include/asm/cache.h
7231@@ -19,11 +19,13 @@
7232 #ifndef __ASM_OPENRISC_CACHE_H
7233 #define __ASM_OPENRISC_CACHE_H
7234
7235+#include <linux/const.h>
7236+
7237 /* FIXME: How can we replace these with values from the CPU...
7238 * they shouldn't be hard-coded!
7239 */
7240
7241-#define L1_CACHE_BYTES 16
7242 #define L1_CACHE_SHIFT 4
7243+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7244
7245 #endif /* __ASM_OPENRISC_CACHE_H */
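
Deriving L1_CACHE_BYTES from L1_CACHE_SHIFT keeps the two constants consistent by construction, and _AC() makes the literal usable from both C and assembly: the UL suffix is pasted on in C and dropped in .S files. The macro as defined in include/uapi/linux/const.h (quoted from memory, so verify):

#ifdef __ASSEMBLY__
#define _AC(X, Y)       X
#else
#define __AC(X, Y)      (X##Y)
#define _AC(X, Y)       __AC(X, Y)
#endif
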
7246diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7247index 226f8ca..9d9b87d 100644
7248--- a/arch/parisc/include/asm/atomic.h
7249+++ b/arch/parisc/include/asm/atomic.h
7250@@ -273,6 +273,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7251 return dec;
7252 }
7253
7254+#define atomic64_read_unchecked(v) atomic64_read(v)
7255+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7256+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7257+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7258+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7259+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7260+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7261+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7262+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7263+
7264 #endif /* !CONFIG_64BIT */
7265
7266
7267diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7268index 47f11c7..3420df2 100644
7269--- a/arch/parisc/include/asm/cache.h
7270+++ b/arch/parisc/include/asm/cache.h
7271@@ -5,6 +5,7 @@
7272 #ifndef __ARCH_PARISC_CACHE_H
7273 #define __ARCH_PARISC_CACHE_H
7274
7275+#include <linux/const.h>
7276
7277 /*
7278 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7279@@ -15,13 +16,13 @@
7280 * just ruin performance.
7281 */
7282 #ifdef CONFIG_PA20
7283-#define L1_CACHE_BYTES 64
7284 #define L1_CACHE_SHIFT 6
7285 #else
7286-#define L1_CACHE_BYTES 32
7287 #define L1_CACHE_SHIFT 5
7288 #endif
7289
7290+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7291+
7292 #ifndef __ASSEMBLY__
7293
7294 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7295diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7296index 3391d06..c23a2cc 100644
7297--- a/arch/parisc/include/asm/elf.h
7298+++ b/arch/parisc/include/asm/elf.h
7299@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7300
7301 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7302
7303+#ifdef CONFIG_PAX_ASLR
7304+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7305+
7306+#define PAX_DELTA_MMAP_LEN 16
7307+#define PAX_DELTA_STACK_LEN 16
7308+#endif
7309+
7310 /* This yields a mask that user programs can use to figure out what
7311 instruction set this CPU supports. This could be done in user space,
7312 but it's not easy, and we've already done it here. */
7313diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7314index f213f5b..0af3e8e 100644
7315--- a/arch/parisc/include/asm/pgalloc.h
7316+++ b/arch/parisc/include/asm/pgalloc.h
7317@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7318 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7319 }
7320
7321+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7322+{
7323+ pgd_populate(mm, pgd, pmd);
7324+}
7325+
7326 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7327 {
7328 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7329@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7330 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7331 #define pmd_free(mm, x) do { } while (0)
7332 #define pgd_populate(mm, pmd, pte) BUG()
7333+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7334
7335 #endif
7336
7337diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7338index 22b89d1..ce34230 100644
7339--- a/arch/parisc/include/asm/pgtable.h
7340+++ b/arch/parisc/include/asm/pgtable.h
7341@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7342 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7343 #define PAGE_COPY PAGE_EXECREAD
7344 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7345+
7346+#ifdef CONFIG_PAX_PAGEEXEC
7347+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7348+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7349+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7350+#else
7351+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7352+# define PAGE_COPY_NOEXEC PAGE_COPY
7353+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7354+#endif
7355+
7356 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7357 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7358 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7359diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7360index a5cb070..8604ddc 100644
7361--- a/arch/parisc/include/asm/uaccess.h
7362+++ b/arch/parisc/include/asm/uaccess.h
7363@@ -243,10 +243,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7364 const void __user *from,
7365 unsigned long n)
7366 {
7367- int sz = __compiletime_object_size(to);
7368+ size_t sz = __compiletime_object_size(to);
7369 int ret = -EFAULT;
7370
7371- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7372+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7373 ret = __copy_from_user(to, from, n);
7374 else
7375 copy_from_user_overflow();
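
__compiletime_object_size() reports (size_t)-1 when the object's size cannot be determined; keeping sz as a signed int truncates that sentinel on 64-bit and makes sz >= n a mixed signed/unsigned comparison, so the hunk widens everything to size_t and tests the sentinel explicitly. The patched condition in isolation:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
        size_t sz = (size_t)-1;         /* "size unknown" sentinel */
        size_t n = 16;                  /* requested copy length */

        if (sz == (size_t)-1 || sz >= n)
                printf("copy allowed: size unknown or object large enough\n");
        return 0;
}
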
7376diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7377index 5822e8e..bc5e638 100644
7378--- a/arch/parisc/kernel/module.c
7379+++ b/arch/parisc/kernel/module.c
7380@@ -98,16 +98,38 @@
7381
7382 /* three functions to determine where in the module core
7383 * or init pieces the location is */
7384+static inline int in_init_rx(struct module *me, void *loc)
7385+{
7386+ return (loc >= me->module_init_rx &&
7387+ loc < (me->module_init_rx + me->init_size_rx));
7388+}
7389+
7390+static inline int in_init_rw(struct module *me, void *loc)
7391+{
7392+ return (loc >= me->module_init_rw &&
7393+ loc < (me->module_init_rw + me->init_size_rw));
7394+}
7395+
7396 static inline int in_init(struct module *me, void *loc)
7397 {
7398- return (loc >= me->module_init &&
7399- loc <= (me->module_init + me->init_size));
7400+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7401+}
7402+
7403+static inline int in_core_rx(struct module *me, void *loc)
7404+{
7405+ return (loc >= me->module_core_rx &&
7406+ loc < (me->module_core_rx + me->core_size_rx));
7407+}
7408+
7409+static inline int in_core_rw(struct module *me, void *loc)
7410+{
7411+ return (loc >= me->module_core_rw &&
7412+ loc < (me->module_core_rw + me->core_size_rw));
7413 }
7414
7415 static inline int in_core(struct module *me, void *loc)
7416 {
7417- return (loc >= me->module_core &&
7418- loc <= (me->module_core + me->core_size));
7419+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7420 }
7421
7422 static inline int in_local(struct module *me, void *loc)
7423@@ -367,13 +389,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7424 }
7425
7426 /* align things a bit */
7427- me->core_size = ALIGN(me->core_size, 16);
7428- me->arch.got_offset = me->core_size;
7429- me->core_size += gots * sizeof(struct got_entry);
7430+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7431+ me->arch.got_offset = me->core_size_rw;
7432+ me->core_size_rw += gots * sizeof(struct got_entry);
7433
7434- me->core_size = ALIGN(me->core_size, 16);
7435- me->arch.fdesc_offset = me->core_size;
7436- me->core_size += fdescs * sizeof(Elf_Fdesc);
7437+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7438+ me->arch.fdesc_offset = me->core_size_rw;
7439+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7440
7441 me->arch.got_max = gots;
7442 me->arch.fdesc_max = fdescs;
7443@@ -391,7 +413,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7444
7445 BUG_ON(value == 0);
7446
7447- got = me->module_core + me->arch.got_offset;
7448+ got = me->module_core_rw + me->arch.got_offset;
7449 for (i = 0; got[i].addr; i++)
7450 if (got[i].addr == value)
7451 goto out;
7452@@ -409,7 +431,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7453 #ifdef CONFIG_64BIT
7454 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7455 {
7456- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7457+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7458
7459 if (!value) {
7460 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7461@@ -427,7 +449,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7462
7463 /* Create new one */
7464 fdesc->addr = value;
7465- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7466+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7467 return (Elf_Addr)fdesc;
7468 }
7469 #endif /* CONFIG_64BIT */
7470@@ -839,7 +861,7 @@ register_unwind_table(struct module *me,
7471
7472 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7473 end = table + sechdrs[me->arch.unwind_section].sh_size;
7474- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7475+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7476
7477 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7478 me->arch.unwind_section, table, end, gp);
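
PaX loads module code (RX) and module data (RW) as two separate allocations so W^X can hold for modules; every range check that assumed one contiguous [module_core, module_core + core_size) region must then test both halves, and writable tables (GOT, function descriptors) are accounted against the RW half. The membership test in miniature (field names follow the hunk, the struct is trimmed):

struct demo_module {
        char *core_rx; unsigned long size_rx;   /* executable, read-only */
        char *core_rw; unsigned long size_rw;   /* writable, non-exec */
};

static int demo_in_core(const struct demo_module *m, const char *loc)
{
        return (loc >= m->core_rx && loc < m->core_rx + m->size_rx) ||
               (loc >= m->core_rw && loc < m->core_rw + m->size_rw);
}
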
7479diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7480index e1ffea2..46ed66e 100644
7481--- a/arch/parisc/kernel/sys_parisc.c
7482+++ b/arch/parisc/kernel/sys_parisc.c
7483@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7484 unsigned long task_size = TASK_SIZE;
7485 int do_color_align, last_mmap;
7486 struct vm_unmapped_area_info info;
7487+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7488
7489 if (len > task_size)
7490 return -ENOMEM;
7491@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7492 goto found_addr;
7493 }
7494
7495+#ifdef CONFIG_PAX_RANDMMAP
7496+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7497+#endif
7498+
7499 if (addr) {
7500 if (do_color_align && last_mmap)
7501 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7502@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7503 info.high_limit = mmap_upper_limit();
7504 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7505 info.align_offset = shared_align_offset(last_mmap, pgoff);
7506+ info.threadstack_offset = offset;
7507 addr = vm_unmapped_area(&info);
7508
7509 found_addr:
7510@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7511 unsigned long addr = addr0;
7512 int do_color_align, last_mmap;
7513 struct vm_unmapped_area_info info;
7514+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7515
7516 #ifdef CONFIG_64BIT
7517 /* This should only ever run for 32-bit processes. */
7518@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7519 }
7520
7521 /* requesting a specific address */
7522+#ifdef CONFIG_PAX_RANDMMAP
7523+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7524+#endif
7525+
7526 if (addr) {
7527 if (do_color_align && last_mmap)
7528 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7529@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7530 info.high_limit = mm->mmap_base;
7531 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7532 info.align_offset = shared_align_offset(last_mmap, pgoff);
7533+ info.threadstack_offset = offset;
7534 addr = vm_unmapped_area(&info);
7535 if (!(addr & ~PAGE_MASK))
7536 goto found_addr;
7537@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7538 mm->mmap_legacy_base = mmap_legacy_base();
7539 mm->mmap_base = mmap_upper_limit();
7540
7541+#ifdef CONFIG_PAX_RANDMMAP
7542+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
7543+ mm->mmap_legacy_base += mm->delta_mmap;
7544+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7545+ }
7546+#endif
7547+
7548 if (mmap_is_legacy()) {
7549 mm->mmap_base = mm->mmap_legacy_base;
7550 mm->get_unmapped_area = arch_get_unmapped_area;
7551diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7552index 47ee620..1107387 100644
7553--- a/arch/parisc/kernel/traps.c
7554+++ b/arch/parisc/kernel/traps.c
7555@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7556
7557 down_read(&current->mm->mmap_sem);
7558 vma = find_vma(current->mm,regs->iaoq[0]);
7559- if (vma && (regs->iaoq[0] >= vma->vm_start)
7560- && (vma->vm_flags & VM_EXEC)) {
7561-
7562+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7563 fault_address = regs->iaoq[0];
7564 fault_space = regs->iasq[0];
7565
7566diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7567index e5120e6..8ddb5cc 100644
7568--- a/arch/parisc/mm/fault.c
7569+++ b/arch/parisc/mm/fault.c
7570@@ -15,6 +15,7 @@
7571 #include <linux/sched.h>
7572 #include <linux/interrupt.h>
7573 #include <linux/module.h>
7574+#include <linux/unistd.h>
7575
7576 #include <asm/uaccess.h>
7577 #include <asm/traps.h>
7578@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
7579 static unsigned long
7580 parisc_acctyp(unsigned long code, unsigned int inst)
7581 {
7582- if (code == 6 || code == 16)
7583+ if (code == 6 || code == 7 || code == 16)
7584 return VM_EXEC;
7585
7586 switch (inst & 0xf0000000) {
7587@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7588 }
7589 #endif
7590
7591+#ifdef CONFIG_PAX_PAGEEXEC
7592+/*
7593+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7594+ *
7595+ * returns 1 when task should be killed
7596+ * 2 when rt_sigreturn trampoline was detected
7597+ * 3 when unpatched PLT trampoline was detected
7598+ */
7599+static int pax_handle_fetch_fault(struct pt_regs *regs)
7600+{
7601+
7602+#ifdef CONFIG_PAX_EMUPLT
7603+ int err;
7604+
7605+ do { /* PaX: unpatched PLT emulation */
7606+ unsigned int bl, depwi;
7607+
7608+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7609+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7610+
7611+ if (err)
7612+ break;
7613+
7614+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7615+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7616+
7617+ err = get_user(ldw, (unsigned int *)addr);
7618+ err |= get_user(bv, (unsigned int *)(addr+4));
7619+ err |= get_user(ldw2, (unsigned int *)(addr+8));
7620+
7621+ if (err)
7622+ break;
7623+
7624+ if (ldw == 0x0E801096U &&
7625+ bv == 0xEAC0C000U &&
7626+ ldw2 == 0x0E881095U)
7627+ {
7628+ unsigned int resolver, map;
7629+
7630+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7631+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7632+ if (err)
7633+ break;
7634+
7635+ regs->gr[20] = instruction_pointer(regs)+8;
7636+ regs->gr[21] = map;
7637+ regs->gr[22] = resolver;
7638+ regs->iaoq[0] = resolver | 3UL;
7639+ regs->iaoq[1] = regs->iaoq[0] + 4;
7640+ return 3;
7641+ }
7642+ }
7643+ } while (0);
7644+#endif
7645+
7646+#ifdef CONFIG_PAX_EMUTRAMP
7647+
7648+#ifndef CONFIG_PAX_EMUSIGRT
7649+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7650+ return 1;
7651+#endif
7652+
7653+ do { /* PaX: rt_sigreturn emulation */
7654+ unsigned int ldi1, ldi2, bel, nop;
7655+
7656+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7657+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7658+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7659+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7660+
7661+ if (err)
7662+ break;
7663+
7664+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7665+ ldi2 == 0x3414015AU &&
7666+ bel == 0xE4008200U &&
7667+ nop == 0x08000240U)
7668+ {
7669+ regs->gr[25] = (ldi1 & 2) >> 1;
7670+ regs->gr[20] = __NR_rt_sigreturn;
7671+ regs->gr[31] = regs->iaoq[1] + 16;
7672+ regs->sr[0] = regs->iasq[1];
7673+ regs->iaoq[0] = 0x100UL;
7674+ regs->iaoq[1] = regs->iaoq[0] + 4;
7675+ regs->iasq[0] = regs->sr[2];
7676+ regs->iasq[1] = regs->sr[2];
7677+ return 2;
7678+ }
7679+ } while (0);
7680+#endif
7681+
7682+ return 1;
7683+}
7684+
7685+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7686+{
7687+ unsigned long i;
7688+
7689+ printk(KERN_ERR "PAX: bytes at PC: ");
7690+ for (i = 0; i < 5; i++) {
7691+ unsigned int c;
7692+ if (get_user(c, (unsigned int *)pc+i))
7693+ printk(KERN_CONT "???????? ");
7694+ else
7695+ printk(KERN_CONT "%08x ", c);
7696+ }
7697+ printk("\n");
7698+}
7699+#endif
7700+
7701 int fixup_exception(struct pt_regs *regs)
7702 {
7703 const struct exception_table_entry *fix;
7704@@ -234,8 +345,33 @@ retry:
7705
7706 good_area:
7707
7708- if ((vma->vm_flags & acc_type) != acc_type)
7709+ if ((vma->vm_flags & acc_type) != acc_type) {
7710+
7711+#ifdef CONFIG_PAX_PAGEEXEC
7712+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7713+ (address & ~3UL) == instruction_pointer(regs))
7714+ {
7715+ up_read(&mm->mmap_sem);
7716+ switch (pax_handle_fetch_fault(regs)) {
7717+
7718+#ifdef CONFIG_PAX_EMUPLT
7719+ case 3:
7720+ return;
7721+#endif
7722+
7723+#ifdef CONFIG_PAX_EMUTRAMP
7724+ case 2:
7725+ return;
7726+#endif
7727+
7728+ }
7729+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7730+ do_group_exit(SIGKILL);
7731+ }
7732+#endif
7733+
7734 goto bad_area;
7735+ }
7736
7737 /*
7738 * If for any reason at all we couldn't handle the fault, make
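
With PAGEEXEC enforcing non-executable user pages, two legitimate code-on-the-fly idioms would break: the unpatched PLT stub and the rt_sigreturn trampoline. pax_handle_fetch_fault() therefore fetches the instruction words at the faulting PC, matches them against the known templates, and emulates their effect in registers. The matching step reduced to a predicate (opcode constants copied from the hunk, names per its ldi1/ldi2/bel/nop locals):

static int demo_is_sigreturn_tramp(const unsigned int w[4])
{
        return (w[0] == 0x34190000U || w[0] == 0x34190002U) &&  /* ldi1 */
                w[1] == 0x3414015AU &&                          /* ldi2 */
                w[2] == 0xE4008200U &&                          /* bel  */
                w[3] == 0x08000240U;                            /* nop  */
}
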
7739diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7740index a2a168e..e484682 100644
7741--- a/arch/powerpc/Kconfig
7742+++ b/arch/powerpc/Kconfig
7743@@ -408,6 +408,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
7744 config KEXEC
7745 bool "kexec system call"
7746 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7747+ depends on !GRKERNSEC_KMEM
7748 help
7749 kexec is a system call that implements the ability to shutdown your
7750 current kernel, and to start another kernel. It is like a reboot
7751diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7752index 512d278..d31fadd 100644
7753--- a/arch/powerpc/include/asm/atomic.h
7754+++ b/arch/powerpc/include/asm/atomic.h
7755@@ -12,6 +12,11 @@
7756
7757 #define ATOMIC_INIT(i) { (i) }
7758
7759+#define _ASM_EXTABLE(from, to) \
7760+" .section __ex_table,\"a\"\n" \
7761+ PPC_LONG" " #from ", " #to"\n" \
7762+" .previous\n"
7763+
7764 static __inline__ int atomic_read(const atomic_t *v)
7765 {
7766 int t;
7767@@ -21,39 +26,80 @@ static __inline__ int atomic_read(const atomic_t *v)
7768 return t;
7769 }
7770
7771+static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
7772+{
7773+ int t;
7774+
7775+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
7776+
7777+ return t;
7778+}
7779+
7780 static __inline__ void atomic_set(atomic_t *v, int i)
7781 {
7782 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7783 }
7784
7785-#define ATOMIC_OP(op, asm_op) \
7786-static __inline__ void atomic_##op(int a, atomic_t *v) \
7787+static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7788+{
7789+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
7790+}
7791+
7792+#ifdef CONFIG_PAX_REFCOUNT
7793+#define __REFCOUNT_OP(op) op##o.
7794+#define __OVERFLOW_PRE \
7795+ " mcrxr cr0\n"
7796+#define __OVERFLOW_POST \
7797+ " bf 4*cr0+so, 3f\n" \
7798+ "2: .long 0x00c00b00\n" \
7799+ "3:\n"
7800+#define __OVERFLOW_EXTABLE \
7801+ "\n4:\n"
7802+ _ASM_EXTABLE(2b, 4b)
7803+#else
7804+#define __REFCOUNT_OP(op) op
7805+#define __OVERFLOW_PRE
7806+#define __OVERFLOW_POST
7807+#define __OVERFLOW_EXTABLE
7808+#endif
7809+
7810+#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable) \
7811+static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v) \
7812 { \
7813 int t; \
7814 \
7815 __asm__ __volatile__( \
7816-"1: lwarx %0,0,%3 # atomic_" #op "\n" \
7817+"1: lwarx %0,0,%3 # atomic_" #op #suffix "\n" \
7818+ pre_op \
7819 #asm_op " %0,%2,%0\n" \
7820+ post_op \
7821 PPC405_ERR77(0,%3) \
7822 " stwcx. %0,0,%3 \n" \
7823 " bne- 1b\n" \
7824+ extable \
7825 : "=&r" (t), "+m" (v->counter) \
7826 : "r" (a), "r" (&v->counter) \
7827 : "cc"); \
7828 } \
7829
7830-#define ATOMIC_OP_RETURN(op, asm_op) \
7831-static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7832+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , ) \
7833+ __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7834+
7835+#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
7836+static inline int atomic_##op##_return##suffix(int a, atomic##suffix##_t *v)\
7837 { \
7838 int t; \
7839 \
7840 __asm__ __volatile__( \
7841 PPC_ATOMIC_ENTRY_BARRIER \
7842-"1: lwarx %0,0,%2 # atomic_" #op "_return\n" \
7843+"1: lwarx %0,0,%2 # atomic_" #op "_return" #suffix "\n" \
7844+ pre_op \
7845 #asm_op " %0,%1,%0\n" \
7846+ post_op \
7847 PPC405_ERR77(0,%2) \
7848 " stwcx. %0,0,%2 \n" \
7849 " bne- 1b\n" \
7850+ extable \
7851 PPC_ATOMIC_EXIT_BARRIER \
7852 : "=&r" (t) \
7853 : "r" (a), "r" (&v->counter) \
7854@@ -62,6 +108,9 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
7855 return t; \
7856 }
7857
7858+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\
7859+ __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
7860+
7861 #define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
7862
7863 ATOMIC_OPS(add, add)
7864@@ -69,42 +118,29 @@ ATOMIC_OPS(sub, subf)
7865
7866 #undef ATOMIC_OPS
7867 #undef ATOMIC_OP_RETURN
7868+#undef __ATOMIC_OP_RETURN
7869 #undef ATOMIC_OP
7870+#undef __ATOMIC_OP
7871
7872 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
7873
7874-static __inline__ void atomic_inc(atomic_t *v)
7875-{
7876- int t;
7877+/*
7878+ * atomic_inc - increment atomic variable
7879+ * @v: pointer of type atomic_t
7880+ *
7881+ * Automatically increments @v by 1
7882+ */
7883+#define atomic_inc(v) atomic_add(1, (v))
7884+#define atomic_inc_return(v) atomic_add_return(1, (v))
7885
7886- __asm__ __volatile__(
7887-"1: lwarx %0,0,%2 # atomic_inc\n\
7888- addic %0,%0,1\n"
7889- PPC405_ERR77(0,%2)
7890-" stwcx. %0,0,%2 \n\
7891- bne- 1b"
7892- : "=&r" (t), "+m" (v->counter)
7893- : "r" (&v->counter)
7894- : "cc", "xer");
7895+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7896+{
7897+ atomic_add_unchecked(1, v);
7898 }
7899
7900-static __inline__ int atomic_inc_return(atomic_t *v)
7901+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7902 {
7903- int t;
7904-
7905- __asm__ __volatile__(
7906- PPC_ATOMIC_ENTRY_BARRIER
7907-"1: lwarx %0,0,%1 # atomic_inc_return\n\
7908- addic %0,%0,1\n"
7909- PPC405_ERR77(0,%1)
7910-" stwcx. %0,0,%1 \n\
7911- bne- 1b"
7912- PPC_ATOMIC_EXIT_BARRIER
7913- : "=&r" (t)
7914- : "r" (&v->counter)
7915- : "cc", "xer", "memory");
7916-
7917- return t;
7918+ return atomic_add_return_unchecked(1, v);
7919 }
7920
7921 /*
7922@@ -117,43 +153,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
7923 */
7924 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7925
7926-static __inline__ void atomic_dec(atomic_t *v)
7927+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7928 {
7929- int t;
7930-
7931- __asm__ __volatile__(
7932-"1: lwarx %0,0,%2 # atomic_dec\n\
7933- addic %0,%0,-1\n"
7934- PPC405_ERR77(0,%2)\
7935-" stwcx. %0,0,%2\n\
7936- bne- 1b"
7937- : "=&r" (t), "+m" (v->counter)
7938- : "r" (&v->counter)
7939- : "cc", "xer");
7940+ return atomic_add_return_unchecked(1, v) == 0;
7941 }
7942
7943-static __inline__ int atomic_dec_return(atomic_t *v)
7944+/*
7945+ * atomic_dec - decrement atomic variable
7946+ * @v: pointer of type atomic_t
7947+ *
7948+ * Atomically decrements @v by 1
7949+ */
7950+#define atomic_dec(v) atomic_sub(1, (v))
7951+#define atomic_dec_return(v) atomic_sub_return(1, (v))
7952+
7953+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
7954 {
7955- int t;
7956-
7957- __asm__ __volatile__(
7958- PPC_ATOMIC_ENTRY_BARRIER
7959-"1: lwarx %0,0,%1 # atomic_dec_return\n\
7960- addic %0,%0,-1\n"
7961- PPC405_ERR77(0,%1)
7962-" stwcx. %0,0,%1\n\
7963- bne- 1b"
7964- PPC_ATOMIC_EXIT_BARRIER
7965- : "=&r" (t)
7966- : "r" (&v->counter)
7967- : "cc", "xer", "memory");
7968-
7969- return t;
7970+ atomic_sub_unchecked(1, v);
7971 }
7972
7973 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7974 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7975
7976+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7977+{
7978+ return cmpxchg(&(v->counter), old, new);
7979+}
7980+
7981+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7982+{
7983+ return xchg(&(v->counter), new);
7984+}
7985+
7986 /**
7987 * __atomic_add_unless - add unless the number is a given value
7988 * @v: pointer of type atomic_t
7989@@ -171,11 +202,27 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
7990 PPC_ATOMIC_ENTRY_BARRIER
7991 "1: lwarx %0,0,%1 # __atomic_add_unless\n\
7992 cmpw 0,%0,%3 \n\
7993- beq- 2f \n\
7994- add %0,%2,%0 \n"
7995+ beq- 2f \n"
7996+
7997+#ifdef CONFIG_PAX_REFCOUNT
7998+" mcrxr cr0\n"
7999+" addo. %0,%2,%0\n"
8000+" bf 4*cr0+so, 4f\n"
8001+"3:.long " "0x00c00b00""\n"
8002+"4:\n"
8003+#else
8004+ "add %0,%2,%0 \n"
8005+#endif
8006+
8007 PPC405_ERR77(0,%2)
8008 " stwcx. %0,0,%1 \n\
8009 bne- 1b \n"
8010+"5:"
8011+
8012+#ifdef CONFIG_PAX_REFCOUNT
8013+ _ASM_EXTABLE(3b, 5b)
8014+#endif
8015+
8016 PPC_ATOMIC_EXIT_BARRIER
8017 " subf %0,%2,%0 \n\
8018 2:"
8019@@ -248,6 +295,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
8020 }
8021 #define atomic_dec_if_positive atomic_dec_if_positive
8022
8023+#define smp_mb__before_atomic_dec() smp_mb()
8024+#define smp_mb__after_atomic_dec() smp_mb()
8025+#define smp_mb__before_atomic_inc() smp_mb()
8026+#define smp_mb__after_atomic_inc() smp_mb()
8027+
8028 #ifdef __powerpc64__
8029
8030 #define ATOMIC64_INIT(i) { (i) }
8031@@ -261,37 +313,60 @@ static __inline__ long atomic64_read(const atomic64_t *v)
8032 return t;
8033 }
8034
8035+static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8036+{
8037+ long t;
8038+
8039+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
8040+
8041+ return t;
8042+}
8043+
8044 static __inline__ void atomic64_set(atomic64_t *v, long i)
8045 {
8046 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8047 }
8048
8049-#define ATOMIC64_OP(op, asm_op) \
8050-static __inline__ void atomic64_##op(long a, atomic64_t *v) \
8051+static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8052+{
8053+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8054+}
8055+
8056+#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable) \
8057+static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\
8058 { \
8059 long t; \
8060 \
8061 __asm__ __volatile__( \
8062 "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
8063+ pre_op \
8064 #asm_op " %0,%2,%0\n" \
8065+ post_op \
8066 " stdcx. %0,0,%3 \n" \
8067 " bne- 1b\n" \
8068+ extable \
8069 : "=&r" (t), "+m" (v->counter) \
8070 : "r" (a), "r" (&v->counter) \
8071 : "cc"); \
8072 }
8073
8074-#define ATOMIC64_OP_RETURN(op, asm_op) \
8075-static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8076+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , ) \
8077+ __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8078+
8079+#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
8080+static inline long atomic64_##op##_return##suffix(long a, atomic64##suffix##_t *v)\
8081 { \
8082 long t; \
8083 \
8084 __asm__ __volatile__( \
8085 PPC_ATOMIC_ENTRY_BARRIER \
8086 "1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \
8087+ pre_op \
8088 #asm_op " %0,%1,%0\n" \
8089+ post_op \
8090 " stdcx. %0,0,%2 \n" \
8091 " bne- 1b\n" \
8092+ extable \
8093 PPC_ATOMIC_EXIT_BARRIER \
8094 : "=&r" (t) \
8095 : "r" (a), "r" (&v->counter) \
8096@@ -300,6 +375,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
8097 return t; \
8098 }
8099
8100+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\
8101+ __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
8102+
8103 #define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
8104
8105 ATOMIC64_OPS(add, add)
8106@@ -307,40 +385,33 @@ ATOMIC64_OPS(sub, subf)
8107
8108 #undef ATOMIC64_OPS
8109 #undef ATOMIC64_OP_RETURN
8110+#undef __ATOMIC64_OP_RETURN
8111 #undef ATOMIC64_OP
8112+#undef __ATOMIC64_OP
8113+#undef __OVERFLOW_EXTABLE
8114+#undef __OVERFLOW_POST
8115+#undef __OVERFLOW_PRE
8116+#undef __REFCOUNT_OP
8117
8118 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
8119
8120-static __inline__ void atomic64_inc(atomic64_t *v)
8121-{
8122- long t;
8123+/*
8124+ * atomic64_inc - increment atomic variable
8125+ * @v: pointer of type atomic64_t
8126+ *
8127+ * Automatically increments @v by 1
8128+ */
8129+#define atomic64_inc(v) atomic64_add(1, (v))
8130+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
8131
8132- __asm__ __volatile__(
8133-"1: ldarx %0,0,%2 # atomic64_inc\n\
8134- addic %0,%0,1\n\
8135- stdcx. %0,0,%2 \n\
8136- bne- 1b"
8137- : "=&r" (t), "+m" (v->counter)
8138- : "r" (&v->counter)
8139- : "cc", "xer");
8140+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8141+{
8142+ atomic64_add_unchecked(1, v);
8143 }
8144
8145-static __inline__ long atomic64_inc_return(atomic64_t *v)
8146+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8147 {
8148- long t;
8149-
8150- __asm__ __volatile__(
8151- PPC_ATOMIC_ENTRY_BARRIER
8152-"1: ldarx %0,0,%1 # atomic64_inc_return\n\
8153- addic %0,%0,1\n\
8154- stdcx. %0,0,%1 \n\
8155- bne- 1b"
8156- PPC_ATOMIC_EXIT_BARRIER
8157- : "=&r" (t)
8158- : "r" (&v->counter)
8159- : "cc", "xer", "memory");
8160-
8161- return t;
8162+ return atomic64_add_return_unchecked(1, v);
8163 }
8164
8165 /*
8166@@ -353,36 +424,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
8167 */
8168 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8169
8170-static __inline__ void atomic64_dec(atomic64_t *v)
8171+/*
8172+ * atomic64_dec - decrement atomic variable
8173+ * @v: pointer of type atomic64_t
8174+ *
8175+ * Atomically decrements @v by 1
8176+ */
8177+#define atomic64_dec(v) atomic64_sub(1, (v))
8178+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
8179+
8180+static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8181 {
8182- long t;
8183-
8184- __asm__ __volatile__(
8185-"1: ldarx %0,0,%2 # atomic64_dec\n\
8186- addic %0,%0,-1\n\
8187- stdcx. %0,0,%2\n\
8188- bne- 1b"
8189- : "=&r" (t), "+m" (v->counter)
8190- : "r" (&v->counter)
8191- : "cc", "xer");
8192-}
8193-
8194-static __inline__ long atomic64_dec_return(atomic64_t *v)
8195-{
8196- long t;
8197-
8198- __asm__ __volatile__(
8199- PPC_ATOMIC_ENTRY_BARRIER
8200-"1: ldarx %0,0,%1 # atomic64_dec_return\n\
8201- addic %0,%0,-1\n\
8202- stdcx. %0,0,%1\n\
8203- bne- 1b"
8204- PPC_ATOMIC_EXIT_BARRIER
8205- : "=&r" (t)
8206- : "r" (&v->counter)
8207- : "cc", "xer", "memory");
8208-
8209- return t;
8210+ atomic64_sub_unchecked(1, v);
8211 }
8212
8213 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
8214@@ -415,6 +468,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
8215 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8216 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8217
8218+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8219+{
8220+ return cmpxchg(&(v->counter), old, new);
8221+}
8222+
8223+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8224+{
8225+ return xchg(&(v->counter), new);
8226+}
8227+
8228 /**
8229 * atomic64_add_unless - add unless the number is a given value
8230 * @v: pointer of type atomic64_t
8231@@ -430,13 +493,29 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
8232
8233 __asm__ __volatile__ (
8234 PPC_ATOMIC_ENTRY_BARRIER
8235-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
8236+"1: ldarx %0,0,%1 # atomic64_add_unless\n\
8237 cmpd 0,%0,%3 \n\
8238- beq- 2f \n\
8239- add %0,%2,%0 \n"
8240+ beq- 2f \n"
8241+
8242+#ifdef CONFIG_PAX_REFCOUNT
8243+" mcrxr cr0\n"
8244+" addo. %0,%2,%0\n"
8245+" bf 4*cr0+so, 4f\n"
8246+"3:.long " "0x00c00b00""\n"
8247+"4:\n"
8248+#else
8249+ "add %0,%2,%0 \n"
8250+#endif
8251+
8252 " stdcx. %0,0,%1 \n\
8253 bne- 1b \n"
8254 PPC_ATOMIC_EXIT_BARRIER
8255+"5:"
8256+
8257+#ifdef CONFIG_PAX_REFCOUNT
8258+ _ASM_EXTABLE(3b, 5b)
8259+#endif
8260+
8261 " subf %0,%2,%0 \n\
8262 2:"
8263 : "=&r" (t)
8264diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8265index a3bf5be..e03ba81 100644
8266--- a/arch/powerpc/include/asm/barrier.h
8267+++ b/arch/powerpc/include/asm/barrier.h
8268@@ -76,7 +76,7 @@
8269 do { \
8270 compiletime_assert_atomic_type(*p); \
8271 smp_lwsync(); \
8272- ACCESS_ONCE(*p) = (v); \
8273+ ACCESS_ONCE_RW(*p) = (v); \
8274 } while (0)
8275
8276 #define smp_load_acquire(p) \
8277diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8278index 34a05a1..a1f2c67 100644
8279--- a/arch/powerpc/include/asm/cache.h
8280+++ b/arch/powerpc/include/asm/cache.h
8281@@ -4,6 +4,7 @@
8282 #ifdef __KERNEL__
8283
8284 #include <asm/reg.h>
8285+#include <linux/const.h>
8286
8287 /* bytes per L1 cache line */
8288 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8289@@ -23,7 +24,7 @@
8290 #define L1_CACHE_SHIFT 7
8291 #endif
8292
8293-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8294+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8295
8296 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8297
8298diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8299index 57d289a..b36c98c 100644
8300--- a/arch/powerpc/include/asm/elf.h
8301+++ b/arch/powerpc/include/asm/elf.h
8302@@ -30,6 +30,18 @@
8303
8304 #define ELF_ET_DYN_BASE 0x20000000
8305
8306+#ifdef CONFIG_PAX_ASLR
8307+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8308+
8309+#ifdef __powerpc64__
8310+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8311+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8312+#else
8313+#define PAX_DELTA_MMAP_LEN 15
8314+#define PAX_DELTA_STACK_LEN 15
8315+#endif
8316+#endif
8317+
8318 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8319
8320 /*
8321@@ -128,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8322 (0x7ff >> (PAGE_SHIFT - 12)) : \
8323 (0x3ffff >> (PAGE_SHIFT - 12)))
8324
8325-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8326-#define arch_randomize_brk arch_randomize_brk
8327-
8328-
8329 #ifdef CONFIG_SPU_BASE
8330 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8331 #define NT_SPU 1
8332diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8333index 8196e9c..d83a9f3 100644
8334--- a/arch/powerpc/include/asm/exec.h
8335+++ b/arch/powerpc/include/asm/exec.h
8336@@ -4,6 +4,6 @@
8337 #ifndef _ASM_POWERPC_EXEC_H
8338 #define _ASM_POWERPC_EXEC_H
8339
8340-extern unsigned long arch_align_stack(unsigned long sp);
8341+#define arch_align_stack(x) ((x) & ~0xfUL)
8342
8343 #endif /* _ASM_POWERPC_EXEC_H */
8344diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8345index 5acabbd..7ea14fa 100644
8346--- a/arch/powerpc/include/asm/kmap_types.h
8347+++ b/arch/powerpc/include/asm/kmap_types.h
8348@@ -10,7 +10,7 @@
8349 * 2 of the License, or (at your option) any later version.
8350 */
8351
8352-#define KM_TYPE_NR 16
8353+#define KM_TYPE_NR 17
8354
8355 #endif /* __KERNEL__ */
8356 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8357diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8358index b8da913..c02b593 100644
8359--- a/arch/powerpc/include/asm/local.h
8360+++ b/arch/powerpc/include/asm/local.h
8361@@ -9,21 +9,65 @@ typedef struct
8362 atomic_long_t a;
8363 } local_t;
8364
8365+typedef struct
8366+{
8367+ atomic_long_unchecked_t a;
8368+} local_unchecked_t;
8369+
8370 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8371
8372 #define local_read(l) atomic_long_read(&(l)->a)
8373+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8374 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8375+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8376
8377 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8378+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8379 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8380+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8381 #define local_inc(l) atomic_long_inc(&(l)->a)
8382+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8383 #define local_dec(l) atomic_long_dec(&(l)->a)
8384+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8385
8386 static __inline__ long local_add_return(long a, local_t *l)
8387 {
8388 long t;
8389
8390 __asm__ __volatile__(
8391+"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
8392+
8393+#ifdef CONFIG_PAX_REFCOUNT
8394+" mcrxr cr0\n"
8395+" addo. %0,%1,%0\n"
8396+" bf 4*cr0+so, 3f\n"
8397+"2:.long " "0x00c00b00""\n"
8398+#else
8399+" add %0,%1,%0\n"
8400+#endif
8401+
8402+"3:\n"
8403+ PPC405_ERR77(0,%2)
8404+ PPC_STLCX "%0,0,%2 \n\
8405+ bne- 1b"
8406+
8407+#ifdef CONFIG_PAX_REFCOUNT
8408+"\n4:\n"
8409+ _ASM_EXTABLE(2b, 4b)
8410+#endif
8411+
8412+ : "=&r" (t)
8413+ : "r" (a), "r" (&(l->a.counter))
8414+ : "cc", "memory");
8415+
8416+ return t;
8417+}
8418+
8419+static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
8420+{
8421+ long t;
8422+
8423+ __asm__ __volatile__(
8424 "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
8425 add %0,%1,%0\n"
8426 PPC405_ERR77(0,%2)
8427@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
8428
8429 #define local_cmpxchg(l, o, n) \
8430 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8431+#define local_cmpxchg_unchecked(l, o, n) \
8432+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8433 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8434
8435 /**
8436diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8437index 8565c25..2865190 100644
8438--- a/arch/powerpc/include/asm/mman.h
8439+++ b/arch/powerpc/include/asm/mman.h
8440@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8441 }
8442 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8443
8444-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8445+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8446 {
8447 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
8448 }
8449diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
8450index 69c0598..2c56964 100644
8451--- a/arch/powerpc/include/asm/page.h
8452+++ b/arch/powerpc/include/asm/page.h
8453@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
8454 * and needs to be executable. This means the whole heap ends
8455 * up being executable.
8456 */
8457-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8458- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8459+#define VM_DATA_DEFAULT_FLAGS32 \
8460+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8461+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8462
8463 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8464 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8465@@ -256,6 +257,9 @@ extern long long virt_phys_offset;
8466 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
8467 #endif
8468
8469+#define ktla_ktva(addr) (addr)
8470+#define ktva_ktla(addr) (addr)
8471+
8472 #ifndef CONFIG_PPC_BOOK3S_64
8473 /*
8474 * Use the top bit of the higher-level page table entries to indicate whether
8475diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
8476index d908a46..3753f71 100644
8477--- a/arch/powerpc/include/asm/page_64.h
8478+++ b/arch/powerpc/include/asm/page_64.h
8479@@ -172,15 +172,18 @@ do { \
8480 * stack by default, so in the absence of a PT_GNU_STACK program header
8481 * we turn execute permission off.
8482 */
8483-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
8484- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8485+#define VM_STACK_DEFAULT_FLAGS32 \
8486+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
8487+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8488
8489 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
8490 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
8491
8492+#ifndef CONFIG_PAX_PAGEEXEC
8493 #define VM_STACK_DEFAULT_FLAGS \
8494 (is_32bit_task() ? \
8495 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
8496+#endif
8497
8498 #include <asm-generic/getorder.h>
8499
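
Both VM_DATA_DEFAULT_FLAGS32 and VM_STACK_DEFAULT_FLAGS32 now grant VM_EXEC only when the personality asks for it. A simplified model of the reworked flag computation (constants copied from the generic headers; a sketch, not the patch itself):

#include <stdio.h>

#define VM_READ             0x00000001UL
#define VM_WRITE            0x00000002UL
#define VM_EXEC             0x00000004UL
#define VM_MAYREAD          0x00000010UL
#define VM_MAYWRITE         0x00000020UL
#define VM_MAYEXEC          0x00000040UL
#define READ_IMPLIES_EXEC   0x0400000   /* from linux/personality.h */

/* VM_EXEC is now opt-in via the personality bit instead of default. */
static unsigned long vm_data_default_flags32(unsigned long personality)
{
    unsigned long f = VM_READ | VM_WRITE |
                      VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

    if (personality & READ_IMPLIES_EXEC)
        f |= VM_EXEC;
    return f;
}

int main(void)
{
    printf("%#lx\n", vm_data_default_flags32(0));
    printf("%#lx\n", vm_data_default_flags32(READ_IMPLIES_EXEC));
    return 0;
}
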
8500diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
8501index 4b0be20..c15a27d 100644
8502--- a/arch/powerpc/include/asm/pgalloc-64.h
8503+++ b/arch/powerpc/include/asm/pgalloc-64.h
8504@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
8505 #ifndef CONFIG_PPC_64K_PAGES
8506
8507 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
8508+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
8509
8510 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
8511 {
8512@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8513 pud_set(pud, (unsigned long)pmd);
8514 }
8515
8516+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
8517+{
8518+ pud_populate(mm, pud, pmd);
8519+}
8520+
8521 #define pmd_populate(mm, pmd, pte_page) \
8522 pmd_populate_kernel(mm, pmd, page_address(pte_page))
8523 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
8524@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
8525 #endif
8526
8527 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
8528+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
8529
8530 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
8531 pte_t *pte)
8532diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
8533index a8805fe..6d69617 100644
8534--- a/arch/powerpc/include/asm/pgtable.h
8535+++ b/arch/powerpc/include/asm/pgtable.h
8536@@ -2,6 +2,7 @@
8537 #define _ASM_POWERPC_PGTABLE_H
8538 #ifdef __KERNEL__
8539
8540+#include <linux/const.h>
8541 #ifndef __ASSEMBLY__
8542 #include <linux/mmdebug.h>
8543 #include <linux/mmzone.h>
8544diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
8545index 4aad413..85d86bf 100644
8546--- a/arch/powerpc/include/asm/pte-hash32.h
8547+++ b/arch/powerpc/include/asm/pte-hash32.h
8548@@ -21,6 +21,7 @@
8549 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
8550 #define _PAGE_USER 0x004 /* usermode access allowed */
8551 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
8552+#define _PAGE_EXEC _PAGE_GUARDED
8553 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
8554 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
8555 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
8556diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
8557index 1c874fb..e8480a4 100644
8558--- a/arch/powerpc/include/asm/reg.h
8559+++ b/arch/powerpc/include/asm/reg.h
8560@@ -253,6 +253,7 @@
8561 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
8562 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
8563 #define DSISR_NOHPTE 0x40000000 /* no translation found */
8564+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
8565 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
8566 #define DSISR_ISSTORE 0x02000000 /* access was a store */
8567 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
8568diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
8569index 5a6614a..d89995d1 100644
8570--- a/arch/powerpc/include/asm/smp.h
8571+++ b/arch/powerpc/include/asm/smp.h
8572@@ -51,7 +51,7 @@ struct smp_ops_t {
8573 int (*cpu_disable)(void);
8574 void (*cpu_die)(unsigned int nr);
8575 int (*cpu_bootable)(unsigned int nr);
8576-};
8577+} __no_const;
8578
8579 extern void smp_send_debugger_break(void);
8580 extern void start_secondary_resume(void);
8581diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
8582index 4dbe072..b803275 100644
8583--- a/arch/powerpc/include/asm/spinlock.h
8584+++ b/arch/powerpc/include/asm/spinlock.h
8585@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
8586 __asm__ __volatile__(
8587 "1: " PPC_LWARX(%0,0,%1,1) "\n"
8588 __DO_SIGN_EXTEND
8589-" addic. %0,%0,1\n\
8590- ble- 2f\n"
8591+
8592+#ifdef CONFIG_PAX_REFCOUNT
8593+" mcrxr cr0\n"
8594+" addico. %0,%0,1\n"
8595+" bf 4*cr0+so, 3f\n"
8596+"2:.long " "0x00c00b00""\n"
8597+#else
8598+" addic. %0,%0,1\n"
8599+#endif
8600+
8601+"3:\n"
8602+ "ble- 4f\n"
8603 PPC405_ERR77(0,%1)
8604 " stwcx. %0,0,%1\n\
8605 bne- 1b\n"
8606 PPC_ACQUIRE_BARRIER
8607-"2:" : "=&r" (tmp)
8608+"4:"
8609+
8610+#ifdef CONFIG_PAX_REFCOUNT
8611+ _ASM_EXTABLE(2b,4b)
8612+#endif
8613+
8614+ : "=&r" (tmp)
8615 : "r" (&rw->lock)
8616 : "cr0", "xer", "memory");
8617
8618@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
8619 __asm__ __volatile__(
8620 "# read_unlock\n\t"
8621 PPC_RELEASE_BARRIER
8622-"1: lwarx %0,0,%1\n\
8623- addic %0,%0,-1\n"
8624+"1: lwarx %0,0,%1\n"
8625+
8626+#ifdef CONFIG_PAX_REFCOUNT
8627+" mcrxr cr0\n"
8628+" addico. %0,%0,-1\n"
8629+" bf 4*cr0+so, 3f\n"
8630+"2:.long " "0x00c00b00""\n"
8631+#else
8632+" addic. %0,%0,-1\n"
8633+#endif
8634+
8635+"3:\n"
8636 PPC405_ERR77(0,%1)
8637 " stwcx. %0,0,%1\n\
8638 bne- 1b"
8639+
8640+#ifdef CONFIG_PAX_REFCOUNT
8641+"\n4:\n"
8642+ _ASM_EXTABLE(2b, 4b)
8643+#endif
8644+
8645 : "=&r"(tmp)
8646 : "r"(&rw->lock)
8647 : "cr0", "xer", "memory");
8648diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
8649index 0be6c68..9c3c6ee 100644
8650--- a/arch/powerpc/include/asm/thread_info.h
8651+++ b/arch/powerpc/include/asm/thread_info.h
8652@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void)
8653 #if defined(CONFIG_PPC64)
8654 #define TIF_ELF2ABI 18 /* function descriptors must die! */
8655 #endif
8656+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
8657+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
8658
8659 /* as above, but as bit values */
8660 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
8661@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void)
8662 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8663 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
8664 #define _TIF_NOHZ (1<<TIF_NOHZ)
8665+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8666 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
8667 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
8668- _TIF_NOHZ)
8669+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
8670
8671 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
8672 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
8673diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
8674index a0c071d..49cdc7f 100644
8675--- a/arch/powerpc/include/asm/uaccess.h
8676+++ b/arch/powerpc/include/asm/uaccess.h
8677@@ -58,6 +58,7 @@
8678
8679 #endif
8680
8681+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
8682 #define access_ok(type, addr, size) \
8683 (__chk_user_ptr(addr), \
8684 __access_ok((__force unsigned long)(addr), (size), get_fs()))
8685@@ -318,52 +319,6 @@ do { \
8686 extern unsigned long __copy_tofrom_user(void __user *to,
8687 const void __user *from, unsigned long size);
8688
8689-#ifndef __powerpc64__
8690-
8691-static inline unsigned long copy_from_user(void *to,
8692- const void __user *from, unsigned long n)
8693-{
8694- unsigned long over;
8695-
8696- if (access_ok(VERIFY_READ, from, n))
8697- return __copy_tofrom_user((__force void __user *)to, from, n);
8698- if ((unsigned long)from < TASK_SIZE) {
8699- over = (unsigned long)from + n - TASK_SIZE;
8700- return __copy_tofrom_user((__force void __user *)to, from,
8701- n - over) + over;
8702- }
8703- return n;
8704-}
8705-
8706-static inline unsigned long copy_to_user(void __user *to,
8707- const void *from, unsigned long n)
8708-{
8709- unsigned long over;
8710-
8711- if (access_ok(VERIFY_WRITE, to, n))
8712- return __copy_tofrom_user(to, (__force void __user *)from, n);
8713- if ((unsigned long)to < TASK_SIZE) {
8714- over = (unsigned long)to + n - TASK_SIZE;
8715- return __copy_tofrom_user(to, (__force void __user *)from,
8716- n - over) + over;
8717- }
8718- return n;
8719-}
8720-
8721-#else /* __powerpc64__ */
8722-
8723-#define __copy_in_user(to, from, size) \
8724- __copy_tofrom_user((to), (from), (size))
8725-
8726-extern unsigned long copy_from_user(void *to, const void __user *from,
8727- unsigned long n);
8728-extern unsigned long copy_to_user(void __user *to, const void *from,
8729- unsigned long n);
8730-extern unsigned long copy_in_user(void __user *to, const void __user *from,
8731- unsigned long n);
8732-
8733-#endif /* __powerpc64__ */
8734-
8735 static inline unsigned long __copy_from_user_inatomic(void *to,
8736 const void __user *from, unsigned long n)
8737 {
8738@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
8739 if (ret == 0)
8740 return 0;
8741 }
8742+
8743+ if (!__builtin_constant_p(n))
8744+ check_object_size(to, n, false);
8745+
8746 return __copy_tofrom_user((__force void __user *)to, from, n);
8747 }
8748
8749@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
8750 if (ret == 0)
8751 return 0;
8752 }
8753+
8754+ if (!__builtin_constant_p(n))
8755+ check_object_size(from, n, true);
8756+
8757 return __copy_tofrom_user(to, (__force const void __user *)from, n);
8758 }
8759
8760@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
8761 return __copy_to_user_inatomic(to, from, size);
8762 }
8763
8764+#ifndef __powerpc64__
8765+
8766+static inline unsigned long __must_check copy_from_user(void *to,
8767+ const void __user *from, unsigned long n)
8768+{
8769+ unsigned long over;
8770+
8771+ if ((long)n < 0)
8772+ return n;
8773+
8774+ if (access_ok(VERIFY_READ, from, n)) {
8775+ if (!__builtin_constant_p(n))
8776+ check_object_size(to, n, false);
8777+ return __copy_tofrom_user((__force void __user *)to, from, n);
8778+ }
8779+ if ((unsigned long)from < TASK_SIZE) {
8780+ over = (unsigned long)from + n - TASK_SIZE;
8781+ if (!__builtin_constant_p(n - over))
8782+ check_object_size(to, n - over, false);
8783+ return __copy_tofrom_user((__force void __user *)to, from,
8784+ n - over) + over;
8785+ }
8786+ return n;
8787+}
8788+
8789+static inline unsigned long __must_check copy_to_user(void __user *to,
8790+ const void *from, unsigned long n)
8791+{
8792+ unsigned long over;
8793+
8794+ if ((long)n < 0)
8795+ return n;
8796+
8797+ if (access_ok(VERIFY_WRITE, to, n)) {
8798+ if (!__builtin_constant_p(n))
8799+ check_object_size(from, n, true);
8800+ return __copy_tofrom_user(to, (__force void __user *)from, n);
8801+ }
8802+ if ((unsigned long)to < TASK_SIZE) {
8803+ over = (unsigned long)to + n - TASK_SIZE;
8804+ if (!__builtin_constant_p(n - over))
8805+ check_object_size(from, n - over, true);
8806+ return __copy_tofrom_user(to, (__force void __user *)from,
8807+ n - over) + over;
8808+ }
8809+ return n;
8810+}
8811+
8812+#else /* __powerpc64__ */
8813+
8814+#define __copy_in_user(to, from, size) \
8815+ __copy_tofrom_user((to), (from), (size))
8816+
8817+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
8818+{
8819+ if ((long)n < 0 || n > INT_MAX)
8820+ return n;
8821+
8822+ if (!__builtin_constant_p(n))
8823+ check_object_size(to, n, false);
8824+
8825+ if (likely(access_ok(VERIFY_READ, from, n)))
8826+ n = __copy_from_user(to, from, n);
8827+ else
8828+ memset(to, 0, n);
8829+ return n;
8830+}
8831+
8832+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
8833+{
8834+ if ((long)n < 0 || n > INT_MAX)
8835+ return n;
8836+
8837+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
8838+ if (!__builtin_constant_p(n))
8839+ check_object_size(from, n, true);
8840+ n = __copy_to_user(to, from, n);
8841+ }
8842+ return n;
8843+}
8844+
8845+extern unsigned long copy_in_user(void __user *to, const void __user *from,
8846+ unsigned long n);
8847+
8848+#endif /* __powerpc64__ */
8849+
8850 extern unsigned long __clear_user(void __user *addr, unsigned long size);
8851
8852 static inline unsigned long clear_user(void __user *addr, unsigned long size)
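
The reshuffled copy_from_user/copy_to_user above put two layers in front of __copy_tofrom_user: a sign check that rejects sizes which went negative somewhere upstream, and a check_object_size() call (the PAX_USERCOPY hook) for sizes the compiler cannot prove at build time. A self-contained userspace model of the copy_to_user flow (the stub helpers are hypothetical, not the kernel API):

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the kernel hooks used above. */
static int access_ok_stub(const void *p, size_t n) { (void)p; (void)n; return 1; }
static void check_object_size(const void *p, size_t n, int to_user)
{
    (void)p; (void)to_user;
    printf("bounds-checking %zu-byte source object\n", n);
}

static size_t hardened_copy_to_user(void *to, const void *from, size_t n)
{
    if ((long)n < 0)                /* negative size cast to unsigned: reject */
        return n;
    if (!access_ok_stub(to, n))     /* destination must be a user range */
        return n;
    if (!__builtin_constant_p(n))   /* runtime-sized copies get the */
        check_object_size(from, n, 1); /* slab/stack bounds check */
    memcpy(to, from, n);            /* models __copy_tofrom_user() */
    return 0;                       /* 0 = everything was copied */
}

int main(void)
{
    char src[8] = "payload", dst[8];

    printf("uncopied: %zu, dst: %s\n",
           hardened_copy_to_user(dst, src, sizeof(src)), dst);
    return 0;
}
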
8853diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
8854index 502cf69..53936a1 100644
8855--- a/arch/powerpc/kernel/Makefile
8856+++ b/arch/powerpc/kernel/Makefile
8857@@ -15,6 +15,11 @@ CFLAGS_prom_init.o += -fPIC
8858 CFLAGS_btext.o += -fPIC
8859 endif
8860
8861+CFLAGS_REMOVE_cputable.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8862+CFLAGS_REMOVE_prom_init.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8863+CFLAGS_REMOVE_btext.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8864+CFLAGS_REMOVE_prom.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8865+
8866 ifdef CONFIG_FUNCTION_TRACER
8867 # Do not trace early boot code
8868 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
8869@@ -27,6 +32,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
8870 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
8871 endif
8872
8873+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
8874+
8875 obj-y := cputable.o ptrace.o syscalls.o \
8876 irq.o align.o signal_32.o pmc.o vdso.o \
8877 process.o systbl.o idle.o \
8878diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
8879index 3e68d1c..72a5ee6 100644
8880--- a/arch/powerpc/kernel/exceptions-64e.S
8881+++ b/arch/powerpc/kernel/exceptions-64e.S
8882@@ -1010,6 +1010,7 @@ storage_fault_common:
8883 std r14,_DAR(r1)
8884 std r15,_DSISR(r1)
8885 addi r3,r1,STACK_FRAME_OVERHEAD
8886+ bl save_nvgprs
8887 mr r4,r14
8888 mr r5,r15
8889 ld r14,PACA_EXGEN+EX_R14(r13)
8890@@ -1018,8 +1019,7 @@ storage_fault_common:
8891 cmpdi r3,0
8892 bne- 1f
8893 b ret_from_except_lite
8894-1: bl save_nvgprs
8895- mr r5,r3
8896+1: mr r5,r3
8897 addi r3,r1,STACK_FRAME_OVERHEAD
8898 ld r4,_DAR(r1)
8899 bl bad_page_fault
8900diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8901index 9519e6b..13f6c38 100644
8902--- a/arch/powerpc/kernel/exceptions-64s.S
8903+++ b/arch/powerpc/kernel/exceptions-64s.S
8904@@ -1599,10 +1599,10 @@ handle_page_fault:
8905 11: ld r4,_DAR(r1)
8906 ld r5,_DSISR(r1)
8907 addi r3,r1,STACK_FRAME_OVERHEAD
8908+ bl save_nvgprs
8909 bl do_page_fault
8910 cmpdi r3,0
8911 beq+ 12f
8912- bl save_nvgprs
8913 mr r5,r3
8914 addi r3,r1,STACK_FRAME_OVERHEAD
8915 lwz r4,_DAR(r1)
8916diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
8917index 4509603..cdb491f 100644
8918--- a/arch/powerpc/kernel/irq.c
8919+++ b/arch/powerpc/kernel/irq.c
8920@@ -460,6 +460,8 @@ void migrate_irqs(void)
8921 }
8922 #endif
8923
8924+extern void gr_handle_kernel_exploit(void);
8925+
8926 static inline void check_stack_overflow(void)
8927 {
8928 #ifdef CONFIG_DEBUG_STACKOVERFLOW
8929@@ -472,6 +474,7 @@ static inline void check_stack_overflow(void)
8930 pr_err("do_IRQ: stack overflow: %ld\n",
8931 sp - sizeof(struct thread_info));
8932 dump_stack();
8933+ gr_handle_kernel_exploit();
8934 }
8935 #endif
8936 }
8937diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8938index c94d2e0..992a9ce 100644
8939--- a/arch/powerpc/kernel/module_32.c
8940+++ b/arch/powerpc/kernel/module_32.c
8941@@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8942 me->arch.core_plt_section = i;
8943 }
8944 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8945- pr_err("Module doesn't contain .plt or .init.plt sections.\n");
8946+ pr_err("Module $s doesn't contain .plt or .init.plt sections.\n", me->name);
8947 return -ENOEXEC;
8948 }
8949
8950@@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location,
8951
8952 pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8953 /* Init, or core PLT? */
8954- if (location >= mod->module_core
8955- && location < mod->module_core + mod->core_size)
8956+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8957+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8958 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8959- else
8960+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8961+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8962 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8963+ else {
8964+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8965+ return ~0UL;
8966+ }
8967
8968 /* Find this entry, or if that fails, the next avail. entry */
8969 while (entry->jump[0]) {
8970@@ -296,7 +301,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
8971 }
8972 #ifdef CONFIG_DYNAMIC_FTRACE
8973 module->arch.tramp =
8974- do_plt_call(module->module_core,
8975+ do_plt_call(module->module_core_rx,
8976 (unsigned long)ftrace_caller,
8977 sechdrs, module);
8978 #endif
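
Under grsecurity the module loader splits each module into RX (code) and RW (data) regions, so do_plt_call() above must classify location against four ranges instead of two. A condensed model of that classification (hypothetical struct; the field names mirror the patch):

struct mod_layout {
    char *rx;               /* module_{core,init}_rx */
    unsigned long rx_size;
    char *rw;               /* module_{core,init}_rw */
    unsigned long rw_size;
};

/* 1 if p falls in either half of the layout, mirroring the range
 * tests added to do_plt_call() above. */
static int in_layout(const struct mod_layout *l, const char *p)
{
    return (p >= l->rx && p < l->rx + l->rx_size) ||
           (p >= l->rw && p < l->rw + l->rw_size);
}

int main(void)
{
    static char core[64];
    struct mod_layout l = { core, 32, core + 32, 32 };

    return !in_layout(&l, core + 8); /* 0 = found in RX half, as expected */
}
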
8979diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8980index b4cc7be..1fe8bb3 100644
8981--- a/arch/powerpc/kernel/process.c
8982+++ b/arch/powerpc/kernel/process.c
8983@@ -1036,8 +1036,8 @@ void show_regs(struct pt_regs * regs)
8984 * Lookup NIP late so we have the best chance of getting the
8985 * above info out without failing
8986 */
8987- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
8988- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
8989+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
8990+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
8991 #endif
8992 show_stack(current, (unsigned long *) regs->gpr[1]);
8993 if (!user_mode(regs))
8994@@ -1549,10 +1549,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8995 newsp = stack[0];
8996 ip = stack[STACK_FRAME_LR_SAVE];
8997 if (!firstframe || ip != lr) {
8998- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
8999+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
9000 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
9001 if ((ip == rth) && curr_frame >= 0) {
9002- printk(" (%pS)",
9003+ printk(" (%pA)",
9004 (void *)current->ret_stack[curr_frame].ret);
9005 curr_frame--;
9006 }
9007@@ -1572,7 +1572,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
9008 struct pt_regs *regs = (struct pt_regs *)
9009 (sp + STACK_FRAME_OVERHEAD);
9010 lr = regs->link;
9011- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
9012+ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
9013 regs->trap, (void *)regs->nip, (void *)lr);
9014 firstframe = 1;
9015 }
9016@@ -1608,49 +1608,3 @@ void notrace __ppc64_runlatch_off(void)
9017 mtspr(SPRN_CTRLT, ctrl);
9018 }
9019 #endif /* CONFIG_PPC64 */
9020-
9021-unsigned long arch_align_stack(unsigned long sp)
9022-{
9023- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9024- sp -= get_random_int() & ~PAGE_MASK;
9025- return sp & ~0xf;
9026-}
9027-
9028-static inline unsigned long brk_rnd(void)
9029-{
9030- unsigned long rnd = 0;
9031-
9032- /* 8MB for 32bit, 1GB for 64bit */
9033- if (is_32bit_task())
9034- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
9035- else
9036- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
9037-
9038- return rnd << PAGE_SHIFT;
9039-}
9040-
9041-unsigned long arch_randomize_brk(struct mm_struct *mm)
9042-{
9043- unsigned long base = mm->brk;
9044- unsigned long ret;
9045-
9046-#ifdef CONFIG_PPC_STD_MMU_64
9047- /*
9048- * If we are using 1TB segments and we are allowed to randomise
9049- * the heap, we can put it above 1TB so it is backed by a 1TB
9050- * segment. Otherwise the heap will be in the bottom 1TB
9051- * which always uses 256MB segments and this may result in a
9052- * performance penalty.
9053- */
9054- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
9055- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
9056-#endif
9057-
9058- ret = PAGE_ALIGN(base + brk_rnd());
9059-
9060- if (ret < mm->brk)
9061- return mm->brk;
9062-
9063- return ret;
9064-}
9065-
9066diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9067index f21897b..28c0428 100644
9068--- a/arch/powerpc/kernel/ptrace.c
9069+++ b/arch/powerpc/kernel/ptrace.c
9070@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9071 return ret;
9072 }
9073
9074+#ifdef CONFIG_GRKERNSEC_SETXID
9075+extern void gr_delayed_cred_worker(void);
9076+#endif
9077+
9078 /*
9079 * We must return the syscall number to actually look up in the table.
9080 * This can be -1L to skip running any syscall at all.
9081@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9082
9083 secure_computing_strict(regs->gpr[0]);
9084
9085+#ifdef CONFIG_GRKERNSEC_SETXID
9086+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9087+ gr_delayed_cred_worker();
9088+#endif
9089+
9090 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9091 tracehook_report_syscall_entry(regs))
9092 /*
9093@@ -1805,6 +1814,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9094 {
9095 int step;
9096
9097+#ifdef CONFIG_GRKERNSEC_SETXID
9098+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9099+ gr_delayed_cred_worker();
9100+#endif
9101+
9102 audit_syscall_exit(regs);
9103
9104 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
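
Both the entry and exit paths now consume TIF_GRSEC_SETXID with test_and_clear_thread_flag(), so the deferred credential update runs exactly once per wakeup no matter which side observes the flag first. A simplified model of that consume-once pattern using C11 atomics (helper names hypothetical):

#include <stdatomic.h>
#include <stdio.h>

#define TIF_GRSEC_SETXID (1 << 6)

static atomic_int tif_flags;

static void apply_deferred_creds(void) /* models gr_delayed_cred_worker() */
{
    puts("applying deferred uid/gid change");
}

/* Called from both syscall entry and exit; only the first caller
 * that sees the bit set actually does the work. */
static void maybe_run_setxid_work(void)
{
    int old = atomic_fetch_and(&tif_flags, ~TIF_GRSEC_SETXID);

    if (old & TIF_GRSEC_SETXID)
        apply_deferred_creds();
}

int main(void)
{
    atomic_fetch_or(&tif_flags, TIF_GRSEC_SETXID);
    maybe_run_setxid_work(); /* runs the worker */
    maybe_run_setxid_work(); /* no-op: bit already cleared */
    return 0;
}
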
9105diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9106index b171001..4ac7ac5 100644
9107--- a/arch/powerpc/kernel/signal_32.c
9108+++ b/arch/powerpc/kernel/signal_32.c
9109@@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
9110 /* Save user registers on the stack */
9111 frame = &rt_sf->uc.uc_mcontext;
9112 addr = frame;
9113- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9114+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9115 sigret = 0;
9116 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9117 } else {
9118diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9119index 2cb0c94..c0c0bc9 100644
9120--- a/arch/powerpc/kernel/signal_64.c
9121+++ b/arch/powerpc/kernel/signal_64.c
9122@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
9123 current->thread.fp_state.fpscr = 0;
9124
9125 /* Set up to return from userspace. */
9126- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9127+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9128 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9129 } else {
9130 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9131diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9132index e6595b7..24bde6e 100644
9133--- a/arch/powerpc/kernel/traps.c
9134+++ b/arch/powerpc/kernel/traps.c
9135@@ -36,6 +36,7 @@
9136 #include <linux/debugfs.h>
9137 #include <linux/ratelimit.h>
9138 #include <linux/context_tracking.h>
9139+#include <linux/uaccess.h>
9140
9141 #include <asm/emulated_ops.h>
9142 #include <asm/pgtable.h>
9143@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9144 return flags;
9145 }
9146
9147+extern void gr_handle_kernel_exploit(void);
9148+
9149 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9150 int signr)
9151 {
9152@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9153 panic("Fatal exception in interrupt");
9154 if (panic_on_oops)
9155 panic("Fatal exception");
9156+
9157+ gr_handle_kernel_exploit();
9158+
9159 do_exit(signr);
9160 }
9161
9162@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
9163 enum ctx_state prev_state = exception_enter();
9164 unsigned int reason = get_reason(regs);
9165
9166+#ifdef CONFIG_PAX_REFCOUNT
9167+ unsigned int bkpt;
9168+ const struct exception_table_entry *entry;
9169+
9170+ if (reason & REASON_ILLEGAL) {
9171+ /* Check if PaX bad instruction */
9172+ if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
9173+ current->thread.trap_nr = 0;
9174+ pax_report_refcount_overflow(regs);
9175+ /* fixup_exception() for PowerPC does not exist, simulate its job */
9176+ if ((entry = search_exception_tables(regs->nip)) != NULL) {
9177+ regs->nip = entry->fixup;
9178+ return;
9179+ }
9180+ /* fixup_exception() could not handle */
9181+ goto bail;
9182+ }
9183+ }
9184+#endif
9185+
9186 /* We can now get here via a FP Unavailable exception if the core
9187 * has no FPU, in that case the reason flags will be 0 */
9188
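
program_check_exception() above recognizes the 0x00c00b00 marker with probe_kernel_address() and then, because powerpc has no fixup_exception(), walks the exception tables by hand to redirect NIP. A simplified model of that lookup (hypothetical types; the real table is sorted and binary-searched):

struct extable_entry {
    unsigned long insn;  /* address of the trapping instruction */
    unsigned long fixup; /* address to resume execution at */
};

/* Linear version of search_exception_tables() plus the NIP rewrite. */
static int fixup_nip(const struct extable_entry *tab, unsigned int n,
                     unsigned long *nip)
{
    unsigned int i;

    for (i = 0; i < n; i++) {
        if (tab[i].insn == *nip) {
            *nip = tab[i].fixup;
            return 1;  /* handled: resume at fixup */
        }
    }
    return 0;          /* unhandled: fall through to the oops path */
}

int main(void)
{
    struct extable_entry tab[] = { { 0x1000, 0x1010 } };
    unsigned long nip = 0x1000;

    return fixup_nip(tab, 1, &nip) && nip == 0x1010 ? 0 : 1;
}
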
9189diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9190index 305eb0d..accc5b40 100644
9191--- a/arch/powerpc/kernel/vdso.c
9192+++ b/arch/powerpc/kernel/vdso.c
9193@@ -34,6 +34,7 @@
9194 #include <asm/vdso.h>
9195 #include <asm/vdso_datapage.h>
9196 #include <asm/setup.h>
9197+#include <asm/mman.h>
9198
9199 #undef DEBUG
9200
9201@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9202 vdso_base = VDSO32_MBASE;
9203 #endif
9204
9205- current->mm->context.vdso_base = 0;
9206+ current->mm->context.vdso_base = ~0UL;
9207
9208 /* vDSO has a problem and was disabled, just don't "enable" it for the
9209 * process
9210@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9211 vdso_base = get_unmapped_area(NULL, vdso_base,
9212 (vdso_pages << PAGE_SHIFT) +
9213 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9214- 0, 0);
9215+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9216 if (IS_ERR_VALUE(vdso_base)) {
9217 rc = vdso_base;
9218 goto fail_mmapsem;
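
With vdso_base initialized to ~0UL instead of 0 (and the signal paths above changed to match), address 0 stays available as a legitimate mapping target under ASLR while "no vDSO" remains unambiguous. The sentinel convention in isolation:

#define VDSO_UNMAPPED (~0UL) /* 0 can be a valid mmap address under ASLR */

static inline int vdso_mapped(unsigned long vdso_base)
{
    return vdso_base != VDSO_UNMAPPED;
}
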
9219diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9220index c45eaab..5f41b57 100644
9221--- a/arch/powerpc/kvm/powerpc.c
9222+++ b/arch/powerpc/kvm/powerpc.c
9223@@ -1403,7 +1403,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9224 }
9225 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9226
9227-int kvm_arch_init(void *opaque)
9228+int kvm_arch_init(const void *opaque)
9229 {
9230 return 0;
9231 }
9232diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9233index 5eea6f3..5d10396 100644
9234--- a/arch/powerpc/lib/usercopy_64.c
9235+++ b/arch/powerpc/lib/usercopy_64.c
9236@@ -9,22 +9,6 @@
9237 #include <linux/module.h>
9238 #include <asm/uaccess.h>
9239
9240-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9241-{
9242- if (likely(access_ok(VERIFY_READ, from, n)))
9243- n = __copy_from_user(to, from, n);
9244- else
9245- memset(to, 0, n);
9246- return n;
9247-}
9248-
9249-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9250-{
9251- if (likely(access_ok(VERIFY_WRITE, to, n)))
9252- n = __copy_to_user(to, from, n);
9253- return n;
9254-}
9255-
9256 unsigned long copy_in_user(void __user *to, const void __user *from,
9257 unsigned long n)
9258 {
9259@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9260 return n;
9261 }
9262
9263-EXPORT_SYMBOL(copy_from_user);
9264-EXPORT_SYMBOL(copy_to_user);
9265 EXPORT_SYMBOL(copy_in_user);
9266
9267diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9268index 6154b0a..4de2b19 100644
9269--- a/arch/powerpc/mm/fault.c
9270+++ b/arch/powerpc/mm/fault.c
9271@@ -33,6 +33,10 @@
9272 #include <linux/ratelimit.h>
9273 #include <linux/context_tracking.h>
9274 #include <linux/hugetlb.h>
9275+#include <linux/slab.h>
9276+#include <linux/pagemap.h>
9277+#include <linux/compiler.h>
9278+#include <linux/unistd.h>
9279
9280 #include <asm/firmware.h>
9281 #include <asm/page.h>
9282@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9283 }
9284 #endif
9285
9286+#ifdef CONFIG_PAX_PAGEEXEC
9287+/*
9288+ * PaX: decide what to do with offenders (regs->nip = fault address)
9289+ *
9290+ * returns 1 when task should be killed
9291+ */
9292+static int pax_handle_fetch_fault(struct pt_regs *regs)
9293+{
9294+ return 1;
9295+}
9296+
9297+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9298+{
9299+ unsigned long i;
9300+
9301+ printk(KERN_ERR "PAX: bytes at PC: ");
9302+ for (i = 0; i < 5; i++) {
9303+ unsigned int c;
9304+ if (get_user(c, (unsigned int __user *)pc+i))
9305+ printk(KERN_CONT "???????? ");
9306+ else
9307+ printk(KERN_CONT "%08x ", c);
9308+ }
9309+ printk("\n");
9310+}
9311+#endif
9312+
9313 /*
9314 * Check whether the instruction at regs->nip is a store using
9315 * an update addressing form which will update r1.
9316@@ -227,7 +258,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9317 * indicate errors in DSISR but can validly be set in SRR1.
9318 */
9319 if (trap == 0x400)
9320- error_code &= 0x48200000;
9321+ error_code &= 0x58200000;
9322 else
9323 is_write = error_code & DSISR_ISSTORE;
9324 #else
9325@@ -383,7 +414,7 @@ good_area:
9326 * "undefined". Of those that can be set, this is the only
9327 * one which seems bad.
9328 */
9329- if (error_code & 0x10000000)
9330+ if (error_code & DSISR_GUARDED)
9331 /* Guarded storage error. */
9332 goto bad_area;
9333 #endif /* CONFIG_8xx */
9334@@ -398,7 +429,7 @@ good_area:
9335 * processors use the same I/D cache coherency mechanism
9336 * as embedded.
9337 */
9338- if (error_code & DSISR_PROTFAULT)
9339+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
9340 goto bad_area;
9341 #endif /* CONFIG_PPC_STD_MMU */
9342
9343@@ -490,6 +521,23 @@ bad_area:
9344 bad_area_nosemaphore:
9345 /* User mode accesses cause a SIGSEGV */
9346 if (user_mode(regs)) {
9347+
9348+#ifdef CONFIG_PAX_PAGEEXEC
9349+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9350+#ifdef CONFIG_PPC_STD_MMU
9351+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9352+#else
9353+ if (is_exec && regs->nip == address) {
9354+#endif
9355+ switch (pax_handle_fetch_fault(regs)) {
9356+ }
9357+
9358+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9359+ do_group_exit(SIGKILL);
9360+ }
9361+ }
9362+#endif
9363+
9364 _exception(SIGSEGV, regs, code, address);
9365 goto bail;
9366 }
9367diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9368index cb8bdbe..cde4bc7 100644
9369--- a/arch/powerpc/mm/mmap.c
9370+++ b/arch/powerpc/mm/mmap.c
9371@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9372 return sysctl_legacy_va_layout;
9373 }
9374
9375-static unsigned long mmap_rnd(void)
9376+static unsigned long mmap_rnd(struct mm_struct *mm)
9377 {
9378 unsigned long rnd = 0;
9379
9380+#ifdef CONFIG_PAX_RANDMMAP
9381+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9382+#endif
9383+
9384 if (current->flags & PF_RANDOMIZE) {
9385 /* 8MB for 32bit, 1GB for 64bit */
9386 if (is_32bit_task())
9387@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9388 return rnd << PAGE_SHIFT;
9389 }
9390
9391-static inline unsigned long mmap_base(void)
9392+static inline unsigned long mmap_base(struct mm_struct *mm)
9393 {
9394 unsigned long gap = rlimit(RLIMIT_STACK);
9395
9396@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9397 else if (gap > MAX_GAP)
9398 gap = MAX_GAP;
9399
9400- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9401+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9402 }
9403
9404 /*
9405@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9406 */
9407 if (mmap_is_legacy()) {
9408 mm->mmap_base = TASK_UNMAPPED_BASE;
9409+
9410+#ifdef CONFIG_PAX_RANDMMAP
9411+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9412+ mm->mmap_base += mm->delta_mmap;
9413+#endif
9414+
9415 mm->get_unmapped_area = arch_get_unmapped_area;
9416 } else {
9417- mm->mmap_base = mmap_base();
9418+ mm->mmap_base = mmap_base(mm);
9419+
9420+#ifdef CONFIG_PAX_RANDMMAP
9421+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9422+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9423+#endif
9424+
9425 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9426 }
9427 }
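
The reworked arch_pick_mmap_layout() threads mm through so PAX_RANDMMAP can shift the bottom-up base upward by delta_mmap and pull the top-down base further down by delta_mmap + delta_stack. A simplified model of the resulting base selection (the constants and deltas are made up for illustration):

#include <stdio.h>

#define TASK_UNMAPPED_BASE 0x10000000UL
#define TASK_SIZE          0x80000000UL
#define PAGE_ALIGN(x)      (((x) + 0xfffUL) & ~0xfffUL)

/* Simplified model of arch_pick_mmap_layout() above; delta_mmap and
 * delta_stack stand in for mm->delta_mmap / mm->delta_stack. */
static unsigned long pick_base(int legacy, unsigned long gap,
                               unsigned long rnd,
                               unsigned long delta_mmap,
                               unsigned long delta_stack)
{
    if (legacy)
        return TASK_UNMAPPED_BASE + delta_mmap;              /* bottom-up */
    return PAGE_ALIGN(TASK_SIZE - gap - rnd)
           - delta_mmap - delta_stack;                       /* top-down */
}

int main(void)
{
    printf("%#lx\n", pick_base(1, 0, 0, 0x100000UL, 0));
    printf("%#lx\n", pick_base(0, 0x800000UL, 0x200000UL, 0x100000UL, 0x40000UL));
    return 0;
}
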
9428diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9429index ded0ea1..f213a9b 100644
9430--- a/arch/powerpc/mm/slice.c
9431+++ b/arch/powerpc/mm/slice.c
9432@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9433 if ((mm->task_size - len) < addr)
9434 return 0;
9435 vma = find_vma(mm, addr);
9436- return (!vma || (addr + len) <= vma->vm_start);
9437+ return check_heap_stack_gap(vma, addr, len, 0);
9438 }
9439
9440 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9441@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9442 info.align_offset = 0;
9443
9444 addr = TASK_UNMAPPED_BASE;
9445+
9446+#ifdef CONFIG_PAX_RANDMMAP
9447+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9448+ addr += mm->delta_mmap;
9449+#endif
9450+
9451 while (addr < TASK_SIZE) {
9452 info.low_limit = addr;
9453 if (!slice_scan_available(addr, available, 1, &addr))
9454@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
9455 if (fixed && addr > (mm->task_size - len))
9456 return -ENOMEM;
9457
9458+#ifdef CONFIG_PAX_RANDMMAP
9459+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
9460+ addr = 0;
9461+#endif
9462+
9463 /* If hint, make sure it matches our alignment restrictions */
9464 if (!fixed && addr) {
9465 addr = _ALIGN_UP(addr, 1ul << pshift);
9466diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9467index f223875..94170e4 100644
9468--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9469+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
9470@@ -399,8 +399,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
9471 }
9472
9473 static struct pci_ops scc_pciex_pci_ops = {
9474- scc_pciex_read_config,
9475- scc_pciex_write_config,
9476+ .read = scc_pciex_read_config,
9477+ .write = scc_pciex_write_config,
9478 };
9479
9480 static void pciex_clear_intr_all(unsigned int __iomem *base)
9481diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
9482index d966bbe..372124a 100644
9483--- a/arch/powerpc/platforms/cell/spufs/file.c
9484+++ b/arch/powerpc/platforms/cell/spufs/file.c
9485@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9486 return VM_FAULT_NOPAGE;
9487 }
9488
9489-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
9490+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
9491 unsigned long address,
9492- void *buf, int len, int write)
9493+ void *buf, size_t len, int write)
9494 {
9495 struct spu_context *ctx = vma->vm_file->private_data;
9496 unsigned long offset = address - vma->vm_start;
9497diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
9498index fa934fe..c296056 100644
9499--- a/arch/s390/include/asm/atomic.h
9500+++ b/arch/s390/include/asm/atomic.h
9501@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
9502 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
9503 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9504
9505+#define atomic64_read_unchecked(v) atomic64_read(v)
9506+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9507+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9508+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9509+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9510+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9511+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9512+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9513+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9514+
9515 #endif /* __ARCH_S390_ATOMIC__ */
9516diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
9517index 8d72471..5322500 100644
9518--- a/arch/s390/include/asm/barrier.h
9519+++ b/arch/s390/include/asm/barrier.h
9520@@ -42,7 +42,7 @@
9521 do { \
9522 compiletime_assert_atomic_type(*p); \
9523 barrier(); \
9524- ACCESS_ONCE(*p) = (v); \
9525+ ACCESS_ONCE_RW(*p) = (v); \
9526 } while (0)
9527
9528 #define smp_load_acquire(p) \
9529diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
9530index 4d7ccac..d03d0ad 100644
9531--- a/arch/s390/include/asm/cache.h
9532+++ b/arch/s390/include/asm/cache.h
9533@@ -9,8 +9,10 @@
9534 #ifndef __ARCH_S390_CACHE_H
9535 #define __ARCH_S390_CACHE_H
9536
9537-#define L1_CACHE_BYTES 256
9538+#include <linux/const.h>
9539+
9540 #define L1_CACHE_SHIFT 8
9541+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9542 #define NET_SKB_PAD 32
9543
9544 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9545diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
9546index f6e43d3..5f57681 100644
9547--- a/arch/s390/include/asm/elf.h
9548+++ b/arch/s390/include/asm/elf.h
9549@@ -163,8 +163,14 @@ extern unsigned int vdso_enabled;
9550 the loader. We need to make sure that it is out of the way of the program
9551 that it will "exec", and that there is sufficient room for the brk. */
9552
9553-extern unsigned long randomize_et_dyn(unsigned long base);
9554-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
9555+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
9556+
9557+#ifdef CONFIG_PAX_ASLR
9558+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
9559+
9560+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9561+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
9562+#endif
9563
9564 /* This yields a mask that user programs can use to figure out what
9565 instruction set this CPU supports. */
9566@@ -223,9 +229,6 @@ struct linux_binprm;
9567 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
9568 int arch_setup_additional_pages(struct linux_binprm *, int);
9569
9570-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
9571-#define arch_randomize_brk arch_randomize_brk
9572-
9573 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vxrs);
9574
9575 #endif
9576diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
9577index c4a93d6..4d2a9b4 100644
9578--- a/arch/s390/include/asm/exec.h
9579+++ b/arch/s390/include/asm/exec.h
9580@@ -7,6 +7,6 @@
9581 #ifndef __ASM_EXEC_H
9582 #define __ASM_EXEC_H
9583
9584-extern unsigned long arch_align_stack(unsigned long sp);
9585+#define arch_align_stack(x) ((x) & ~0xfUL)
9586
9587 #endif /* __ASM_EXEC_H */
9588diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
9589index cd4c68e..6764641 100644
9590--- a/arch/s390/include/asm/uaccess.h
9591+++ b/arch/s390/include/asm/uaccess.h
9592@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
9593 __range_ok((unsigned long)(addr), (size)); \
9594 })
9595
9596+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9597 #define access_ok(type, addr, size) __access_ok(addr, size)
9598
9599 /*
9600@@ -275,6 +276,10 @@ static inline unsigned long __must_check
9601 copy_to_user(void __user *to, const void *from, unsigned long n)
9602 {
9603 might_fault();
9604+
9605+ if ((long)n < 0)
9606+ return n;
9607+
9608 return __copy_to_user(to, from, n);
9609 }
9610
9611@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
9612 static inline unsigned long __must_check
9613 copy_from_user(void *to, const void __user *from, unsigned long n)
9614 {
9615- unsigned int sz = __compiletime_object_size(to);
9616+ size_t sz = __compiletime_object_size(to);
9617
9618 might_fault();
9619- if (unlikely(sz != -1 && sz < n)) {
9620+
9621+ if ((long)n < 0)
9622+ return n;
9623+
9624+ if (unlikely(sz != (size_t)-1 && sz < n)) {
9625 copy_from_user_overflow();
9626 return n;
9627 }
9628diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
9629index 409d152..d90d368 100644
9630--- a/arch/s390/kernel/module.c
9631+++ b/arch/s390/kernel/module.c
9632@@ -165,11 +165,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
9633
9634 /* Increase core size by size of got & plt and set start
9635 offsets for got and plt. */
9636- me->core_size = ALIGN(me->core_size, 4);
9637- me->arch.got_offset = me->core_size;
9638- me->core_size += me->arch.got_size;
9639- me->arch.plt_offset = me->core_size;
9640- me->core_size += me->arch.plt_size;
9641+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
9642+ me->arch.got_offset = me->core_size_rw;
9643+ me->core_size_rw += me->arch.got_size;
9644+ me->arch.plt_offset = me->core_size_rx;
9645+ me->core_size_rx += me->arch.plt_size;
9646 return 0;
9647 }
9648
9649@@ -285,7 +285,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9650 if (info->got_initialized == 0) {
9651 Elf_Addr *gotent;
9652
9653- gotent = me->module_core + me->arch.got_offset +
9654+ gotent = me->module_core_rw + me->arch.got_offset +
9655 info->got_offset;
9656 *gotent = val;
9657 info->got_initialized = 1;
9658@@ -308,7 +308,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9659 rc = apply_rela_bits(loc, val, 0, 64, 0);
9660 else if (r_type == R_390_GOTENT ||
9661 r_type == R_390_GOTPLTENT) {
9662- val += (Elf_Addr) me->module_core - loc;
9663+ val += (Elf_Addr) me->module_core_rw - loc;
9664 rc = apply_rela_bits(loc, val, 1, 32, 1);
9665 }
9666 break;
9667@@ -321,7 +321,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9668 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
9669 if (info->plt_initialized == 0) {
9670 unsigned int *ip;
9671- ip = me->module_core + me->arch.plt_offset +
9672+ ip = me->module_core_rx + me->arch.plt_offset +
9673 info->plt_offset;
9674 #ifndef CONFIG_64BIT
9675 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
9676@@ -346,7 +346,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9677 val - loc + 0xffffUL < 0x1ffffeUL) ||
9678 (r_type == R_390_PLT32DBL &&
9679 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
9680- val = (Elf_Addr) me->module_core +
9681+ val = (Elf_Addr) me->module_core_rx +
9682 me->arch.plt_offset +
9683 info->plt_offset;
9684 val += rela->r_addend - loc;
9685@@ -368,7 +368,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9686 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
9687 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
9688 val = val + rela->r_addend -
9689- ((Elf_Addr) me->module_core + me->arch.got_offset);
9690+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
9691 if (r_type == R_390_GOTOFF16)
9692 rc = apply_rela_bits(loc, val, 0, 16, 0);
9693 else if (r_type == R_390_GOTOFF32)
9694@@ -378,7 +378,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
9695 break;
9696 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
9697 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
9698- val = (Elf_Addr) me->module_core + me->arch.got_offset +
9699+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
9700 rela->r_addend - loc;
9701 if (r_type == R_390_GOTPC)
9702 rc = apply_rela_bits(loc, val, 1, 32, 0);
9703diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
9704index aa7a839..6c2a916 100644
9705--- a/arch/s390/kernel/process.c
9706+++ b/arch/s390/kernel/process.c
9707@@ -219,37 +219,3 @@ unsigned long get_wchan(struct task_struct *p)
9708 }
9709 return 0;
9710 }
9711-
9712-unsigned long arch_align_stack(unsigned long sp)
9713-{
9714- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9715- sp -= get_random_int() & ~PAGE_MASK;
9716- return sp & ~0xf;
9717-}
9718-
9719-static inline unsigned long brk_rnd(void)
9720-{
9721- /* 8MB for 32bit, 1GB for 64bit */
9722- if (is_32bit_task())
9723- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
9724- else
9725- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
9726-}
9727-
9728-unsigned long arch_randomize_brk(struct mm_struct *mm)
9729-{
9730- unsigned long ret;
9731-
9732- ret = PAGE_ALIGN(mm->brk + brk_rnd());
9733- return (ret > mm->brk) ? ret : mm->brk;
9734-}
9735-
9736-unsigned long randomize_et_dyn(unsigned long base)
9737-{
9738- unsigned long ret;
9739-
9740- if (!(current->flags & PF_RANDOMIZE))
9741- return base;
9742- ret = PAGE_ALIGN(base + brk_rnd());
9743- return (ret > base) ? ret : base;
9744-}
9745diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
9746index 9b436c2..54fbf0a 100644
9747--- a/arch/s390/mm/mmap.c
9748+++ b/arch/s390/mm/mmap.c
9749@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9750 */
9751 if (mmap_is_legacy()) {
9752 mm->mmap_base = mmap_base_legacy();
9753+
9754+#ifdef CONFIG_PAX_RANDMMAP
9755+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9756+ mm->mmap_base += mm->delta_mmap;
9757+#endif
9758+
9759 mm->get_unmapped_area = arch_get_unmapped_area;
9760 } else {
9761 mm->mmap_base = mmap_base();
9762+
9763+#ifdef CONFIG_PAX_RANDMMAP
9764+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9765+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9766+#endif
9767+
9768 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9769 }
9770 }
9771@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9772 */
9773 if (mmap_is_legacy()) {
9774 mm->mmap_base = mmap_base_legacy();
9775+
9776+#ifdef CONFIG_PAX_RANDMMAP
9777+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9778+ mm->mmap_base += mm->delta_mmap;
9779+#endif
9780+
9781 mm->get_unmapped_area = s390_get_unmapped_area;
9782 } else {
9783 mm->mmap_base = mmap_base();
9784+
9785+#ifdef CONFIG_PAX_RANDMMAP
9786+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9787+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9788+#endif
9789+
9790 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
9791 }
9792 }
9793diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
9794index ae3d59f..f65f075 100644
9795--- a/arch/score/include/asm/cache.h
9796+++ b/arch/score/include/asm/cache.h
9797@@ -1,7 +1,9 @@
9798 #ifndef _ASM_SCORE_CACHE_H
9799 #define _ASM_SCORE_CACHE_H
9800
9801+#include <linux/const.h>
9802+
9803 #define L1_CACHE_SHIFT 4
9804-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9805+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9806
9807 #endif /* _ASM_SCORE_CACHE_H */
9808diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
9809index f9f3cd5..58ff438 100644
9810--- a/arch/score/include/asm/exec.h
9811+++ b/arch/score/include/asm/exec.h
9812@@ -1,6 +1,6 @@
9813 #ifndef _ASM_SCORE_EXEC_H
9814 #define _ASM_SCORE_EXEC_H
9815
9816-extern unsigned long arch_align_stack(unsigned long sp);
9817+#define arch_align_stack(x) (x)
9818
9819 #endif /* _ASM_SCORE_EXEC_H */
9820diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
9821index a1519ad3..e8ac1ff 100644
9822--- a/arch/score/kernel/process.c
9823+++ b/arch/score/kernel/process.c
9824@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
9825
9826 return task_pt_regs(task)->cp0_epc;
9827 }
9828-
9829-unsigned long arch_align_stack(unsigned long sp)
9830-{
9831- return sp;
9832-}
9833diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
9834index ef9e555..331bd29 100644
9835--- a/arch/sh/include/asm/cache.h
9836+++ b/arch/sh/include/asm/cache.h
9837@@ -9,10 +9,11 @@
9838 #define __ASM_SH_CACHE_H
9839 #ifdef __KERNEL__
9840
9841+#include <linux/const.h>
9842 #include <linux/init.h>
9843 #include <cpu/cache.h>
9844
9845-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9846+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9847
9848 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9849
9850diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
9851index 6777177..cb5e44f 100644
9852--- a/arch/sh/mm/mmap.c
9853+++ b/arch/sh/mm/mmap.c
9854@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9855 struct mm_struct *mm = current->mm;
9856 struct vm_area_struct *vma;
9857 int do_colour_align;
9858+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9859 struct vm_unmapped_area_info info;
9860
9861 if (flags & MAP_FIXED) {
9862@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9863 if (filp || (flags & MAP_SHARED))
9864 do_colour_align = 1;
9865
9866+#ifdef CONFIG_PAX_RANDMMAP
9867+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9868+#endif
9869+
9870 if (addr) {
9871 if (do_colour_align)
9872 addr = COLOUR_ALIGN(addr, pgoff);
9873@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
9874 addr = PAGE_ALIGN(addr);
9875
9876 vma = find_vma(mm, addr);
9877- if (TASK_SIZE - len >= addr &&
9878- (!vma || addr + len <= vma->vm_start))
9879+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9880 return addr;
9881 }
9882
9883 info.flags = 0;
9884 info.length = len;
9885- info.low_limit = TASK_UNMAPPED_BASE;
9886+ info.low_limit = mm->mmap_base;
9887 info.high_limit = TASK_SIZE;
9888 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
9889 info.align_offset = pgoff << PAGE_SHIFT;
9890@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9891 struct mm_struct *mm = current->mm;
9892 unsigned long addr = addr0;
9893 int do_colour_align;
9894+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9895 struct vm_unmapped_area_info info;
9896
9897 if (flags & MAP_FIXED) {
9898@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9899 if (filp || (flags & MAP_SHARED))
9900 do_colour_align = 1;
9901
9902+#ifdef CONFIG_PAX_RANDMMAP
9903+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9904+#endif
9905+
9906 /* requesting a specific address */
9907 if (addr) {
9908 if (do_colour_align)
9909@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9910 addr = PAGE_ALIGN(addr);
9911
9912 vma = find_vma(mm, addr);
9913- if (TASK_SIZE - len >= addr &&
9914- (!vma || addr + len <= vma->vm_start))
9915+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9916 return addr;
9917 }
9918
9919@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9920 VM_BUG_ON(addr != -ENOMEM);
9921 info.flags = 0;
9922 info.low_limit = TASK_UNMAPPED_BASE;
9923+
9924+#ifdef CONFIG_PAX_RANDMMAP
9925+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9926+ info.low_limit += mm->delta_mmap;
9927+#endif
9928+
9929 info.high_limit = TASK_SIZE;
9930 addr = vm_unmapped_area(&info);
9931 }
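
The sh hunks above apply the pattern PaX uses for every arch_get_unmapped_area(): skip the caller's address hint when MF_PAX_RANDMMAP is set (a fixed hint must not be allowed to pin a mapping), start the search at the randomized mm->mmap_base rather than the fixed TASK_UNMAPPED_BASE, and replace the bare overlap test with check_heap_stack_gap(). A simplified, userspace-compilable sketch of what that gap check adds over the old `addr + len <= vma->vm_start` test; field names are abbreviated and the real helper also honors a sysctl-tunable heap/stack gap:

    #include <stdbool.h>

    struct vma { unsigned long vm_start, vm_end, vm_flags; };
    #define VM_GROWSDOWN 0x0100UL
    static unsigned long heap_stack_gap = 64UL << 12;   /* illustrative size */

    /* true if [addr, addr+len) is usable given the next vma above it */
    static bool gap_ok(const struct vma *next, unsigned long addr,
                       unsigned long len, unsigned long rand_offset)
    {
        if (!next)
            return true;                     /* nothing above the request */
        if (next->vm_flags & VM_GROWSDOWN)   /* keep clear of a stack vma */
            return addr + len + heap_stack_gap + rand_offset <= next->vm_start;
        return addr + len <= next->vm_start; /* the old bare overlap test */
    }
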
9932diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
9933index 4082749..fd97781 100644
9934--- a/arch/sparc/include/asm/atomic_64.h
9935+++ b/arch/sparc/include/asm/atomic_64.h
9936@@ -15,18 +15,38 @@
9937 #define ATOMIC64_INIT(i) { (i) }
9938
9939 #define atomic_read(v) ACCESS_ONCE((v)->counter)
9940+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9941+{
9942+ return ACCESS_ONCE(v->counter);
9943+}
9944 #define atomic64_read(v) ACCESS_ONCE((v)->counter)
9945+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9946+{
9947+ return ACCESS_ONCE(v->counter);
9948+}
9949
9950 #define atomic_set(v, i) (((v)->counter) = i)
9951+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9952+{
9953+ v->counter = i;
9954+}
9955 #define atomic64_set(v, i) (((v)->counter) = i)
9956+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9957+{
9958+ v->counter = i;
9959+}
9960
9961-#define ATOMIC_OP(op) \
9962-void atomic_##op(int, atomic_t *); \
9963-void atomic64_##op(long, atomic64_t *);
9964+#define __ATOMIC_OP(op, suffix) \
9965+void atomic_##op##suffix(int, atomic##suffix##_t *); \
9966+void atomic64_##op##suffix(long, atomic64##suffix##_t *);
9967
9968-#define ATOMIC_OP_RETURN(op) \
9969-int atomic_##op##_return(int, atomic_t *); \
9970-long atomic64_##op##_return(long, atomic64_t *);
9971+#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)
9972+
9973+#define __ATOMIC_OP_RETURN(op, suffix) \
9974+int atomic_##op##_return##suffix(int, atomic##suffix##_t *); \
9975+long atomic64_##op##_return##suffix(long, atomic64##suffix##_t *);
9976+
9977+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked)
9978
9979 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
9980
9981@@ -35,13 +55,23 @@ ATOMIC_OPS(sub)
9982
9983 #undef ATOMIC_OPS
9984 #undef ATOMIC_OP_RETURN
9985+#undef __ATOMIC_OP_RETURN
9986 #undef ATOMIC_OP
9987+#undef __ATOMIC_OP
9988
9989 #define atomic_dec_return(v) atomic_sub_return(1, v)
9990 #define atomic64_dec_return(v) atomic64_sub_return(1, v)
9991
9992 #define atomic_inc_return(v) atomic_add_return(1, v)
9993+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9994+{
9995+ return atomic_add_return_unchecked(1, v);
9996+}
9997 #define atomic64_inc_return(v) atomic64_add_return(1, v)
9998+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9999+{
10000+ return atomic64_add_return_unchecked(1, v);
10001+}
10002
10003 /*
10004 * atomic_inc_and_test - increment and test
10005@@ -52,6 +82,10 @@ ATOMIC_OPS(sub)
10006 * other cases.
10007 */
10008 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
10009+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
10010+{
10011+ return atomic_inc_return_unchecked(v) == 0;
10012+}
10013 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
10014
10015 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
10016@@ -61,25 +95,60 @@ ATOMIC_OPS(sub)
10017 #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
10018
10019 #define atomic_inc(v) atomic_add(1, v)
10020+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
10021+{
10022+ atomic_add_unchecked(1, v);
10023+}
10024 #define atomic64_inc(v) atomic64_add(1, v)
10025+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
10026+{
10027+ atomic64_add_unchecked(1, v);
10028+}
10029
10030 #define atomic_dec(v) atomic_sub(1, v)
10031+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
10032+{
10033+ atomic_sub_unchecked(1, v);
10034+}
10035 #define atomic64_dec(v) atomic64_sub(1, v)
10036+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
10037+{
10038+ atomic64_sub_unchecked(1, v);
10039+}
10040
10041 #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
10042 #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
10043
10044 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
10045+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
10046+{
10047+ return cmpxchg(&v->counter, old, new);
10048+}
10049 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
10050+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10051+{
10052+ return xchg(&v->counter, new);
10053+}
10054
10055 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10056 {
10057- int c, old;
10058+ int c, old, new;
10059 c = atomic_read(v);
10060 for (;;) {
10061- if (unlikely(c == (u)))
10062+ if (unlikely(c == u))
10063 break;
10064- old = atomic_cmpxchg((v), c, c + (a));
10065+
10066+ asm volatile("addcc %2, %0, %0\n"
10067+
10068+#ifdef CONFIG_PAX_REFCOUNT
10069+ "tvs %%icc, 6\n"
10070+#endif
10071+
10072+ : "=r" (new)
10073+ : "0" (c), "ir" (a)
10074+ : "cc");
10075+
10076+ old = atomic_cmpxchg(v, c, new);
10077 if (likely(old == c))
10078 break;
10079 c = old;
10080@@ -90,20 +159,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10081 #define atomic64_cmpxchg(v, o, n) \
10082 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
10083 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
10084+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10085+{
10086+ return xchg(&v->counter, new);
10087+}
10088
10089 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10090 {
10091- long c, old;
10092+ long c, old, new;
10093 c = atomic64_read(v);
10094 for (;;) {
10095- if (unlikely(c == (u)))
10096+ if (unlikely(c == u))
10097 break;
10098- old = atomic64_cmpxchg((v), c, c + (a));
10099+
10100+ asm volatile("addcc %2, %0, %0\n"
10101+
10102+#ifdef CONFIG_PAX_REFCOUNT
10103+ "tvs %%xcc, 6\n"
10104+#endif
10105+
10106+ : "=r" (new)
10107+ : "0" (c), "ir" (a)
10108+ : "cc");
10109+
10110+ old = atomic64_cmpxchg(v, c, new);
10111 if (likely(old == c))
10112 break;
10113 c = old;
10114 }
10115- return c != (u);
10116+ return c != u;
10117 }
10118
10119 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
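
Everything PAX_REFCOUNT adds in this header hinges on one sparc64 idiom: replace `add` with `addcc`, which updates the integer condition codes, then issue `tvs %icc, 6` ("trap on overflow set"), raising software trap 6 if the addition overflowed a signed 32-bit value (`%xcc` selects the 64-bit codes for the atomic64 variants). Because the trap fires before the cas/cmpxchg commits the result, an overflowed counter value never reaches memory. A minimal sketch of the checked add, assuming a sparc target; `checked_add32` is an illustrative name:

    static inline int checked_add32(int a, int b)
    {
        int sum;

        __asm__ __volatile__(
            "addcc %1, %2, %0\n\t"  /* add, setting condition codes  */
            "tvs   %%icc, 6\n\t"    /* sw trap 6 on signed overflow  */
            : "=r" (sum)
            : "r" (a), "r" (b)
            : "cc");
        return sum;
    }
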
10120diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10121index 7664894..45a974b 100644
10122--- a/arch/sparc/include/asm/barrier_64.h
10123+++ b/arch/sparc/include/asm/barrier_64.h
10124@@ -60,7 +60,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10125 do { \
10126 compiletime_assert_atomic_type(*p); \
10127 barrier(); \
10128- ACCESS_ONCE(*p) = (v); \
10129+ ACCESS_ONCE_RW(*p) = (v); \
10130 } while (0)
10131
10132 #define smp_load_acquire(p) \
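
The smp_store_release() hunk exists because grsecurity redefines ACCESS_ONCE() with a const-qualified cast, so an accidental write through it becomes a compile error; deliberate writes must use the _RW variant. Roughly, per the patch's compiler.h changes (not shown in this excerpt):

    #define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))  /* read-only */
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))        /* writable  */
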
10133diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10134index 5bb6991..5c2132e 100644
10135--- a/arch/sparc/include/asm/cache.h
10136+++ b/arch/sparc/include/asm/cache.h
10137@@ -7,10 +7,12 @@
10138 #ifndef _SPARC_CACHE_H
10139 #define _SPARC_CACHE_H
10140
10141+#include <linux/const.h>
10142+
10143 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10144
10145 #define L1_CACHE_SHIFT 5
10146-#define L1_CACHE_BYTES 32
10147+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10148
10149 #ifdef CONFIG_SPARC32
10150 #define SMP_CACHE_BYTES_SHIFT 5
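
Both cache.h hunks (sh above, sparc here) make the same two fixes: derive L1_CACHE_BYTES from L1_CACHE_SHIFT instead of keeping an independent literal, and wrap the constant in _AC() from <linux/const.h> so the header works in both C and assembly. The unsigned-long type matters for mask arithmetic on 64-bit builds. Slightly simplified:

    /* _AC() from include/uapi/linux/const.h: */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)  X        /* assemblers reject C suffixes like UL   */
    #else
    #define _AC(X, Y)  (X##Y)   /* C sees 1UL, so masks stay 64 bits wide */
    #endif

    #define L1_CACHE_SHIFT 5
    #define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)   /* 32UL in C */
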
10151diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10152index a24e41f..47677ff 100644
10153--- a/arch/sparc/include/asm/elf_32.h
10154+++ b/arch/sparc/include/asm/elf_32.h
10155@@ -114,6 +114,13 @@ typedef struct {
10156
10157 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10158
10159+#ifdef CONFIG_PAX_ASLR
10160+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10161+
10162+#define PAX_DELTA_MMAP_LEN 16
10163+#define PAX_DELTA_STACK_LEN 16
10164+#endif
10165+
10166 /* This yields a mask that user programs can use to figure out what
10167 instruction set this cpu supports. This can NOT be done in userspace
10168 on Sparc. */
10169diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10170index 370ca1e..d4f4a98 100644
10171--- a/arch/sparc/include/asm/elf_64.h
10172+++ b/arch/sparc/include/asm/elf_64.h
10173@@ -189,6 +189,13 @@ typedef struct {
10174 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10175 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10176
10177+#ifdef CONFIG_PAX_ASLR
10178+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10179+
10180+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10181+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10182+#endif
10183+
10184 extern unsigned long sparc64_elf_hwcap;
10185 #define ELF_HWCAP sparc64_elf_hwcap
10186
10187diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10188index a3890da..f6a408e 100644
10189--- a/arch/sparc/include/asm/pgalloc_32.h
10190+++ b/arch/sparc/include/asm/pgalloc_32.h
10191@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10192 }
10193
10194 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10195+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10196
10197 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10198 unsigned long address)
10199diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10200index 5e31871..13469c6 100644
10201--- a/arch/sparc/include/asm/pgalloc_64.h
10202+++ b/arch/sparc/include/asm/pgalloc_64.h
10203@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
10204 }
10205
10206 #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
10207+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10208
10209 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
10210 {
10211@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
10212 }
10213
10214 #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
10215+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10216
10217 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
10218 {
10219diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10220index 59ba6f6..4518128 100644
10221--- a/arch/sparc/include/asm/pgtable.h
10222+++ b/arch/sparc/include/asm/pgtable.h
10223@@ -5,4 +5,8 @@
10224 #else
10225 #include <asm/pgtable_32.h>
10226 #endif
10227+
10228+#define ktla_ktva(addr) (addr)
10229+#define ktva_ktla(addr) (addr)
10230+
10231 #endif
10232diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10233index b9b91ae..950b91e 100644
10234--- a/arch/sparc/include/asm/pgtable_32.h
10235+++ b/arch/sparc/include/asm/pgtable_32.h
10236@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10237 #define PAGE_SHARED SRMMU_PAGE_SHARED
10238 #define PAGE_COPY SRMMU_PAGE_COPY
10239 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10240+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10241+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10242+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10243 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10244
10245 /* Top-level page directory - dummy used by init-mm.
10246@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10247
10248 /* xwr */
10249 #define __P000 PAGE_NONE
10250-#define __P001 PAGE_READONLY
10251-#define __P010 PAGE_COPY
10252-#define __P011 PAGE_COPY
10253+#define __P001 PAGE_READONLY_NOEXEC
10254+#define __P010 PAGE_COPY_NOEXEC
10255+#define __P011 PAGE_COPY_NOEXEC
10256 #define __P100 PAGE_READONLY
10257 #define __P101 PAGE_READONLY
10258 #define __P110 PAGE_COPY
10259 #define __P111 PAGE_COPY
10260
10261 #define __S000 PAGE_NONE
10262-#define __S001 PAGE_READONLY
10263-#define __S010 PAGE_SHARED
10264-#define __S011 PAGE_SHARED
10265+#define __S001 PAGE_READONLY_NOEXEC
10266+#define __S010 PAGE_SHARED_NOEXEC
10267+#define __S011 PAGE_SHARED_NOEXEC
10268 #define __S100 PAGE_READONLY
10269 #define __S101 PAGE_READONLY
10270 #define __S110 PAGE_SHARED
10271diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10272index 79da178..c2eede8 100644
10273--- a/arch/sparc/include/asm/pgtsrmmu.h
10274+++ b/arch/sparc/include/asm/pgtsrmmu.h
10275@@ -115,6 +115,11 @@
10276 SRMMU_EXEC | SRMMU_REF)
10277 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10278 SRMMU_EXEC | SRMMU_REF)
10279+
10280+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10281+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10282+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10283+
10284 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10285 SRMMU_DIRTY | SRMMU_REF)
10286
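
The pgtable_32.h and pgtsrmmu.h hunks together implement PAGEEXEC for srmmu: every protection-map slot whose permission bits lack execute now resolves to a pgprot without SRMMU_EXEC. The __Pxxx/__Sxxx names encode the x/w/r bits, and core mm consumes them roughly like this (simplified from mm/mmap.c, which this excerpt does not show):

    /* VM_READ=1, VM_WRITE=2, VM_EXEC=4, VM_SHARED=8 index the table:
     * slots 0-7 are __P000..__P111 (private/COW), 8-15 __S000..__S111
     * (shared).  After the hunks above, e.g. slot 3 (read+write,
     * private, no exec) maps to PAGE_COPY_NOEXEC. */
    pgprot_t protection_map[16] = {
        __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
        __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
    };

    pgprot_t vm_get_page_prot(unsigned long vm_flags)
    {
        return protection_map[vm_flags & 0x0f];
    }
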
10287diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10288index 29d64b1..4272fe8 100644
10289--- a/arch/sparc/include/asm/setup.h
10290+++ b/arch/sparc/include/asm/setup.h
10291@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10292 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10293
10294 /* init_64.c */
10295-extern atomic_t dcpage_flushes;
10296-extern atomic_t dcpage_flushes_xcall;
10297+extern atomic_unchecked_t dcpage_flushes;
10298+extern atomic_unchecked_t dcpage_flushes_xcall;
10299
10300 extern int sysctl_tsb_ratio;
10301 #endif
10302diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10303index 9689176..63c18ea 100644
10304--- a/arch/sparc/include/asm/spinlock_64.h
10305+++ b/arch/sparc/include/asm/spinlock_64.h
10306@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10307
10308 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10309
10310-static void inline arch_read_lock(arch_rwlock_t *lock)
10311+static inline void arch_read_lock(arch_rwlock_t *lock)
10312 {
10313 unsigned long tmp1, tmp2;
10314
10315 __asm__ __volatile__ (
10316 "1: ldsw [%2], %0\n"
10317 " brlz,pn %0, 2f\n"
10318-"4: add %0, 1, %1\n"
10319+"4: addcc %0, 1, %1\n"
10320+
10321+#ifdef CONFIG_PAX_REFCOUNT
10322+" tvs %%icc, 6\n"
10323+#endif
10324+
10325 " cas [%2], %0, %1\n"
10326 " cmp %0, %1\n"
10327 " bne,pn %%icc, 1b\n"
10328@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10329 " .previous"
10330 : "=&r" (tmp1), "=&r" (tmp2)
10331 : "r" (lock)
10332- : "memory");
10333+ : "memory", "cc");
10334 }
10335
10336-static int inline arch_read_trylock(arch_rwlock_t *lock)
10337+static inline int arch_read_trylock(arch_rwlock_t *lock)
10338 {
10339 int tmp1, tmp2;
10340
10341@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10342 "1: ldsw [%2], %0\n"
10343 " brlz,a,pn %0, 2f\n"
10344 " mov 0, %0\n"
10345-" add %0, 1, %1\n"
10346+" addcc %0, 1, %1\n"
10347+
10348+#ifdef CONFIG_PAX_REFCOUNT
10349+" tvs %%icc, 6\n"
10350+#endif
10351+
10352 " cas [%2], %0, %1\n"
10353 " cmp %0, %1\n"
10354 " bne,pn %%icc, 1b\n"
10355@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10356 return tmp1;
10357 }
10358
10359-static void inline arch_read_unlock(arch_rwlock_t *lock)
10360+static inline void arch_read_unlock(arch_rwlock_t *lock)
10361 {
10362 unsigned long tmp1, tmp2;
10363
10364 __asm__ __volatile__(
10365 "1: lduw [%2], %0\n"
10366-" sub %0, 1, %1\n"
10367+" subcc %0, 1, %1\n"
10368+
10369+#ifdef CONFIG_PAX_REFCOUNT
10370+" tvs %%icc, 6\n"
10371+#endif
10372+
10373 " cas [%2], %0, %1\n"
10374 " cmp %0, %1\n"
10375 " bne,pn %%xcc, 1b\n"
10376@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10377 : "memory");
10378 }
10379
10380-static void inline arch_write_lock(arch_rwlock_t *lock)
10381+static inline void arch_write_lock(arch_rwlock_t *lock)
10382 {
10383 unsigned long mask, tmp1, tmp2;
10384
10385@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10386 : "memory");
10387 }
10388
10389-static void inline arch_write_unlock(arch_rwlock_t *lock)
10390+static inline void arch_write_unlock(arch_rwlock_t *lock)
10391 {
10392 __asm__ __volatile__(
10393 " stw %%g0, [%0]"
10394@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10395 : "memory");
10396 }
10397
10398-static int inline arch_write_trylock(arch_rwlock_t *lock)
10399+static inline int arch_write_trylock(arch_rwlock_t *lock)
10400 {
10401 unsigned long mask, tmp1, tmp2, result;
10402
10403diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10404index 025c984..a216504 100644
10405--- a/arch/sparc/include/asm/thread_info_32.h
10406+++ b/arch/sparc/include/asm/thread_info_32.h
10407@@ -49,6 +49,8 @@ struct thread_info {
10408 unsigned long w_saved;
10409
10410 struct restart_block restart_block;
10411+
10412+ unsigned long lowest_stack;
10413 };
10414
10415 /*
10416diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10417index 798f027..b009941 100644
10418--- a/arch/sparc/include/asm/thread_info_64.h
10419+++ b/arch/sparc/include/asm/thread_info_64.h
10420@@ -63,6 +63,8 @@ struct thread_info {
10421 struct pt_regs *kern_una_regs;
10422 unsigned int kern_una_insn;
10423
10424+ unsigned long lowest_stack;
10425+
10426 unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
10427 __attribute__ ((aligned(64)));
10428 };
10429@@ -190,12 +192,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10430 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10431 /* flag bit 4 is available */
10432 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
10433-/* flag bit 6 is available */
10434+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
10435 #define TIF_32BIT 7 /* 32-bit binary */
10436 #define TIF_NOHZ 8 /* in adaptive nohz mode */
10437 #define TIF_SECCOMP 9 /* secure computing */
10438 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
10439 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
10440+
10441 /* NOTE: Thread flags >= 12 should be ones we have no interest
10442 * in using in assembly, else we can't use the mask as
10443 * an immediate value in instructions such as andcc.
10444@@ -215,12 +218,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
10445 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
10446 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
10447 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
10448+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
10449
10450 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
10451 _TIF_DO_NOTIFY_RESUME_MASK | \
10452 _TIF_NEED_RESCHED)
10453 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
10454
10455+#define _TIF_WORK_SYSCALL \
10456+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
10457+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
10458+
10459 #define is_32bit_task() (test_thread_flag(TIF_32BIT))
10460
10461 /*
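
TIF_GRSEC_SETXID takes over spare flag bit 6, and _TIF_WORK_SYSCALL folds every "take the slow syscall path" condition into a single mask. The file's own comment states the constraint that makes this work: all member flags sit below bit 12, so the mask fits the 13-bit signed immediate of sparc instructions like andcc, letting the syscalls.S hunks further down test it in one instruction. The equivalent C-side test, as a sketch (the real check happens in assembly at syscall entry/exit):

    /* slow path iff tracing, seccomp, audit, nohz or setxid work pends */
    if (current_thread_info()->flags & _TIF_WORK_SYSCALL)
        syscall_trace_enter(regs);  /* runs gr_delayed_cred_worker() too */
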
10462diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
10463index bd56c28..4b63d83 100644
10464--- a/arch/sparc/include/asm/uaccess.h
10465+++ b/arch/sparc/include/asm/uaccess.h
10466@@ -1,5 +1,6 @@
10467 #ifndef ___ASM_SPARC_UACCESS_H
10468 #define ___ASM_SPARC_UACCESS_H
10469+
10470 #if defined(__sparc__) && defined(__arch64__)
10471 #include <asm/uaccess_64.h>
10472 #else
10473diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
10474index 9634d08..020b2dc 100644
10475--- a/arch/sparc/include/asm/uaccess_32.h
10476+++ b/arch/sparc/include/asm/uaccess_32.h
10477@@ -47,6 +47,7 @@
10478 #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
10479 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
10480 #define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
10481+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
10482 #define access_ok(type, addr, size) \
10483 ({ (void)(type); __access_ok((unsigned long)(addr), size); })
10484
10485@@ -250,27 +251,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
10486
10487 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
10488 {
10489- if (n && __access_ok((unsigned long) to, n))
10490+ if ((long)n < 0)
10491+ return n;
10492+
10493+ if (n && __access_ok((unsigned long) to, n)) {
10494+ if (!__builtin_constant_p(n))
10495+ check_object_size(from, n, true);
10496 return __copy_user(to, (__force void __user *) from, n);
10497- else
10498+ } else
10499 return n;
10500 }
10501
10502 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
10503 {
10504+ if ((long)n < 0)
10505+ return n;
10506+
10507+ if (!__builtin_constant_p(n))
10508+ check_object_size(from, n, true);
10509+
10510 return __copy_user(to, (__force void __user *) from, n);
10511 }
10512
10513 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
10514 {
10515- if (n && __access_ok((unsigned long) from, n))
10516+ if ((long)n < 0)
10517+ return n;
10518+
10519+ if (n && __access_ok((unsigned long) from, n)) {
10520+ if (!__builtin_constant_p(n))
10521+ check_object_size(to, n, false);
10522 return __copy_user((__force void __user *) to, from, n);
10523- else
10524+ } else
10525 return n;
10526 }
10527
10528 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
10529 {
10530+ if ((long)n < 0)
10531+ return n;
10532+
10533 return __copy_user((__force void __user *) to, from, n);
10534 }
10535
10536diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
10537index c990a5e..7384856 100644
10538--- a/arch/sparc/include/asm/uaccess_64.h
10539+++ b/arch/sparc/include/asm/uaccess_64.h
10540@@ -10,6 +10,7 @@
10541 #include <linux/compiler.h>
10542 #include <linux/string.h>
10543 #include <linux/thread_info.h>
10544+#include <linux/kernel.h>
10545 #include <asm/asi.h>
10546 #include <asm/spitfire.h>
10547 #include <asm-generic/uaccess-unaligned.h>
10548@@ -54,6 +55,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size)
10549 return 1;
10550 }
10551
10552+static inline int access_ok_noprefault(int type, const void __user * addr, unsigned long size)
10553+{
10554+ return 1;
10555+}
10556+
10557 static inline int access_ok(int type, const void __user * addr, unsigned long size)
10558 {
10559 return 1;
10560@@ -214,8 +220,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
10561 static inline unsigned long __must_check
10562 copy_from_user(void *to, const void __user *from, unsigned long size)
10563 {
10564- unsigned long ret = ___copy_from_user(to, from, size);
10565+ unsigned long ret;
10566
10567+ if ((long)size < 0 || size > INT_MAX)
10568+ return size;
10569+
10570+ if (!__builtin_constant_p(size))
10571+ check_object_size(to, size, false);
10572+
10573+ ret = ___copy_from_user(to, from, size);
10574 if (unlikely(ret))
10575 ret = copy_from_user_fixup(to, from, size);
10576
10577@@ -231,8 +244,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
10578 static inline unsigned long __must_check
10579 copy_to_user(void __user *to, const void *from, unsigned long size)
10580 {
10581- unsigned long ret = ___copy_to_user(to, from, size);
10582+ unsigned long ret;
10583
10584+ if ((long)size < 0 || size > INT_MAX)
10585+ return size;
10586+
10587+ if (!__builtin_constant_p(size))
10588+ check_object_size(from, size, true);
10589+
10590+ ret = ___copy_to_user(to, from, size);
10591 if (unlikely(ret))
10592 ret = copy_to_user_fixup(to, from, size);
10593 return ret;
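
Both uaccess headers gain the same two defenses. First, a length that underflowed (say, `len - sizeof(hdr)` with a short buffer) shows up as a huge unsigned value; casting to signed and rejecting negatives stops it before any bytes move. Second, for sizes not known at compile time, check_object_size() -- the PAX_USERCOPY entry point -- verifies that the kernel buffer really spans n bytes of one slab object or stack frame. A self-contained userspace sketch of the first check; `bounded_copy` is an illustrative stand-in for the patched copy_from_user():

    #include <limits.h>
    #include <string.h>

    /* Returns 0 on success, or the number of bytes NOT copied (the
     * kernel's copy_*_user convention) when the length is implausible. */
    static unsigned long bounded_copy(void *to, const void *from,
                                      unsigned long n)
    {
        if ((long)n < 0 || n > INT_MAX)  /* underflowed length computation */
            return n;
        memcpy(to, from, n);
        return 0;
    }
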
10594diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
10595index 7cf9c6e..6206648 100644
10596--- a/arch/sparc/kernel/Makefile
10597+++ b/arch/sparc/kernel/Makefile
10598@@ -4,7 +4,7 @@
10599 #
10600
10601 asflags-y := -ansi
10602-ccflags-y := -Werror
10603+#ccflags-y := -Werror
10604
10605 extra-y := head_$(BITS).o
10606
10607diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
10608index 50e7b62..79fae35 100644
10609--- a/arch/sparc/kernel/process_32.c
10610+++ b/arch/sparc/kernel/process_32.c
10611@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
10612
10613 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
10614 r->psr, r->pc, r->npc, r->y, print_tainted());
10615- printk("PC: <%pS>\n", (void *) r->pc);
10616+ printk("PC: <%pA>\n", (void *) r->pc);
10617 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10618 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
10619 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
10620 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10621 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
10622 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
10623- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
10624+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
10625
10626 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
10627 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
10628@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10629 rw = (struct reg_window32 *) fp;
10630 pc = rw->ins[7];
10631 printk("[%08lx : ", pc);
10632- printk("%pS ] ", (void *) pc);
10633+ printk("%pA ] ", (void *) pc);
10634 fp = rw->ins[6];
10635 } while (++count < 16);
10636 printk("\n");
10637diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
10638index 46a5964..a35c62c 100644
10639--- a/arch/sparc/kernel/process_64.c
10640+++ b/arch/sparc/kernel/process_64.c
10641@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
10642 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
10643 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
10644 if (regs->tstate & TSTATE_PRIV)
10645- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
10646+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
10647 }
10648
10649 void show_regs(struct pt_regs *regs)
10650@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
10651
10652 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
10653 regs->tpc, regs->tnpc, regs->y, print_tainted());
10654- printk("TPC: <%pS>\n", (void *) regs->tpc);
10655+ printk("TPC: <%pA>\n", (void *) regs->tpc);
10656 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
10657 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
10658 regs->u_regs[3]);
10659@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
10660 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
10661 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
10662 regs->u_regs[15]);
10663- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
10664+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
10665 show_regwindow(regs);
10666 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
10667 }
10668@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
10669 ((tp && tp->task) ? tp->task->pid : -1));
10670
10671 if (gp->tstate & TSTATE_PRIV) {
10672- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
10673+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
10674 (void *) gp->tpc,
10675 (void *) gp->o7,
10676 (void *) gp->i7,
10677diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
10678index 79cc0d1..ec62734 100644
10679--- a/arch/sparc/kernel/prom_common.c
10680+++ b/arch/sparc/kernel/prom_common.c
10681@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
10682
10683 unsigned int prom_early_allocated __initdata;
10684
10685-static struct of_pdt_ops prom_sparc_ops __initdata = {
10686+static struct of_pdt_ops prom_sparc_ops __initconst = {
10687 .nextprop = prom_common_nextprop,
10688 .getproplen = prom_getproplen,
10689 .getproperty = prom_getproperty,
10690diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
10691index 9ddc492..27a5619 100644
10692--- a/arch/sparc/kernel/ptrace_64.c
10693+++ b/arch/sparc/kernel/ptrace_64.c
10694@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
10695 return ret;
10696 }
10697
10698+#ifdef CONFIG_GRKERNSEC_SETXID
10699+extern void gr_delayed_cred_worker(void);
10700+#endif
10701+
10702 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10703 {
10704 int ret = 0;
10705@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
10706 if (test_thread_flag(TIF_NOHZ))
10707 user_exit();
10708
10709+#ifdef CONFIG_GRKERNSEC_SETXID
10710+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10711+ gr_delayed_cred_worker();
10712+#endif
10713+
10714 if (test_thread_flag(TIF_SYSCALL_TRACE))
10715 ret = tracehook_report_syscall_entry(regs);
10716
10717@@ -1088,6 +1097,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
10718 if (test_thread_flag(TIF_NOHZ))
10719 user_exit();
10720
10721+#ifdef CONFIG_GRKERNSEC_SETXID
10722+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
10723+ gr_delayed_cred_worker();
10724+#endif
10725+
10726 audit_syscall_exit(regs);
10727
10728 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
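
These GRKERNSEC_SETXID hooks pair with the TIF_GRSEC_SETXID flag added in thread_info_64.h: when one thread changes credentials, grsecurity marks its siblings, and each sibling commits the pending change itself at its next syscall boundary, the one place it can safely rewrite its own creds. Hooking both entry and exit means a thread parked inside a long syscall still picks the change up on the way out. An illustrative sketch of the broadcast side, which this excerpt does not show; the flag-setting loop is an assumption about the gr_* implementation elsewhere in the patch:

    struct task_struct *t;

    /* mark every thread in the group for a delayed credential update */
    for_each_thread(current, t)
        set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
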
10729diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
10730index da6f1a7..e5dea8f 100644
10731--- a/arch/sparc/kernel/smp_64.c
10732+++ b/arch/sparc/kernel/smp_64.c
10733@@ -887,7 +887,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10734 return;
10735
10736 #ifdef CONFIG_DEBUG_DCFLUSH
10737- atomic_inc(&dcpage_flushes);
10738+ atomic_inc_unchecked(&dcpage_flushes);
10739 #endif
10740
10741 this_cpu = get_cpu();
10742@@ -911,7 +911,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
10743 xcall_deliver(data0, __pa(pg_addr),
10744 (u64) pg_addr, cpumask_of(cpu));
10745 #ifdef CONFIG_DEBUG_DCFLUSH
10746- atomic_inc(&dcpage_flushes_xcall);
10747+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10748 #endif
10749 }
10750 }
10751@@ -930,7 +930,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10752 preempt_disable();
10753
10754 #ifdef CONFIG_DEBUG_DCFLUSH
10755- atomic_inc(&dcpage_flushes);
10756+ atomic_inc_unchecked(&dcpage_flushes);
10757 #endif
10758 data0 = 0;
10759 pg_addr = page_address(page);
10760@@ -947,7 +947,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
10761 xcall_deliver(data0, __pa(pg_addr),
10762 (u64) pg_addr, cpu_online_mask);
10763 #ifdef CONFIG_DEBUG_DCFLUSH
10764- atomic_inc(&dcpage_flushes_xcall);
10765+ atomic_inc_unchecked(&dcpage_flushes_xcall);
10766 #endif
10767 }
10768 __local_flush_dcache_page(page);
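
The dcpage_flushes counters show the flip side of PAX_REFCOUNT: statistics whose wraparound is harmless are moved to the _unchecked API so a long-lived debug counter can never fire a false-positive overflow trap. The unchecked types mirror atomic_t/atomic64_t exactly; only the operations differ, per the typedefs the patch adds in linux/types.h:

    typedef struct {
        int counter;
    } atomic_unchecked_t;

    typedef struct {
        long counter;
    } atomic64_unchecked_t;
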
10769diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
10770index 646988d..b88905f 100644
10771--- a/arch/sparc/kernel/sys_sparc_32.c
10772+++ b/arch/sparc/kernel/sys_sparc_32.c
10773@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10774 if (len > TASK_SIZE - PAGE_SIZE)
10775 return -ENOMEM;
10776 if (!addr)
10777- addr = TASK_UNMAPPED_BASE;
10778+ addr = current->mm->mmap_base;
10779
10780 info.flags = 0;
10781 info.length = len;
10782diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
10783index 30e7ddb..266a3b0 100644
10784--- a/arch/sparc/kernel/sys_sparc_64.c
10785+++ b/arch/sparc/kernel/sys_sparc_64.c
10786@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10787 struct vm_area_struct * vma;
10788 unsigned long task_size = TASK_SIZE;
10789 int do_color_align;
10790+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10791 struct vm_unmapped_area_info info;
10792
10793 if (flags & MAP_FIXED) {
10794 /* We do not accept a shared mapping if it would violate
10795 * cache aliasing constraints.
10796 */
10797- if ((flags & MAP_SHARED) &&
10798+ if ((filp || (flags & MAP_SHARED)) &&
10799 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10800 return -EINVAL;
10801 return addr;
10802@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10803 if (filp || (flags & MAP_SHARED))
10804 do_color_align = 1;
10805
10806+#ifdef CONFIG_PAX_RANDMMAP
10807+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10808+#endif
10809+
10810 if (addr) {
10811 if (do_color_align)
10812 addr = COLOR_ALIGN(addr, pgoff);
10813@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
10814 addr = PAGE_ALIGN(addr);
10815
10816 vma = find_vma(mm, addr);
10817- if (task_size - len >= addr &&
10818- (!vma || addr + len <= vma->vm_start))
10819+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10820 return addr;
10821 }
10822
10823 info.flags = 0;
10824 info.length = len;
10825- info.low_limit = TASK_UNMAPPED_BASE;
10826+ info.low_limit = mm->mmap_base;
10827 info.high_limit = min(task_size, VA_EXCLUDE_START);
10828 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10829 info.align_offset = pgoff << PAGE_SHIFT;
10830+ info.threadstack_offset = offset;
10831 addr = vm_unmapped_area(&info);
10832
10833 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
10834 VM_BUG_ON(addr != -ENOMEM);
10835 info.low_limit = VA_EXCLUDE_END;
10836+
10837+#ifdef CONFIG_PAX_RANDMMAP
10838+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10839+ info.low_limit += mm->delta_mmap;
10840+#endif
10841+
10842 info.high_limit = task_size;
10843 addr = vm_unmapped_area(&info);
10844 }
10845@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10846 unsigned long task_size = STACK_TOP32;
10847 unsigned long addr = addr0;
10848 int do_color_align;
10849+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10850 struct vm_unmapped_area_info info;
10851
10852 /* This should only ever run for 32-bit processes. */
10853@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10854 /* We do not accept a shared mapping if it would violate
10855 * cache aliasing constraints.
10856 */
10857- if ((flags & MAP_SHARED) &&
10858+ if ((filp || (flags & MAP_SHARED)) &&
10859 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
10860 return -EINVAL;
10861 return addr;
10862@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10863 if (filp || (flags & MAP_SHARED))
10864 do_color_align = 1;
10865
10866+#ifdef CONFIG_PAX_RANDMMAP
10867+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10868+#endif
10869+
10870 /* requesting a specific address */
10871 if (addr) {
10872 if (do_color_align)
10873@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10874 addr = PAGE_ALIGN(addr);
10875
10876 vma = find_vma(mm, addr);
10877- if (task_size - len >= addr &&
10878- (!vma || addr + len <= vma->vm_start))
10879+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10880 return addr;
10881 }
10882
10883@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10884 info.high_limit = mm->mmap_base;
10885 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
10886 info.align_offset = pgoff << PAGE_SHIFT;
10887+ info.threadstack_offset = offset;
10888 addr = vm_unmapped_area(&info);
10889
10890 /*
10891@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10892 VM_BUG_ON(addr != -ENOMEM);
10893 info.flags = 0;
10894 info.low_limit = TASK_UNMAPPED_BASE;
10895+
10896+#ifdef CONFIG_PAX_RANDMMAP
10897+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10898+ info.low_limit += mm->delta_mmap;
10899+#endif
10900+
10901 info.high_limit = STACK_TOP32;
10902 addr = vm_unmapped_area(&info);
10903 }
10904@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
10905 EXPORT_SYMBOL(get_fb_unmapped_area);
10906
10907 /* Essentially the same as PowerPC. */
10908-static unsigned long mmap_rnd(void)
10909+static unsigned long mmap_rnd(struct mm_struct *mm)
10910 {
10911 unsigned long rnd = 0UL;
10912
10913+#ifdef CONFIG_PAX_RANDMMAP
10914+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10915+#endif
10916+
10917 if (current->flags & PF_RANDOMIZE) {
10918 unsigned long val = get_random_int();
10919 if (test_thread_flag(TIF_32BIT))
10920@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
10921
10922 void arch_pick_mmap_layout(struct mm_struct *mm)
10923 {
10924- unsigned long random_factor = mmap_rnd();
10925+ unsigned long random_factor = mmap_rnd(mm);
10926 unsigned long gap;
10927
10928 /*
10929@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10930 gap == RLIM_INFINITY ||
10931 sysctl_legacy_va_layout) {
10932 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
10933+
10934+#ifdef CONFIG_PAX_RANDMMAP
10935+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10936+ mm->mmap_base += mm->delta_mmap;
10937+#endif
10938+
10939 mm->get_unmapped_area = arch_get_unmapped_area;
10940 } else {
10941 /* We know it's 32-bit */
10942@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10943 gap = (task_size / 6 * 5);
10944
10945 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
10946+
10947+#ifdef CONFIG_PAX_RANDMMAP
10948+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10949+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10950+#endif
10951+
10952 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10953 }
10954 }
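
arch_pick_mmap_layout() ties the sparc ASLR pieces together: mmap_rnd() now yields 0 for tasks carrying MF_PAX_RANDMMAP, because PaX substitutes its own per-process delta_mmap, which per elf_64.h above carries 28 bits of entropy for 64-bit tasks (14 for compat). A sketch of how such a delta is derived at exec time; `pax_delta` and its randomness input are illustrative, and sparc64's PAGE_SHIFT is 13:

    /* delta = low len_bits of randomness, scaled to whole pages */
    unsigned long pax_delta(unsigned int len_bits, unsigned int page_shift,
                            unsigned long random)
    {
        return (random & ((1UL << len_bits) - 1)) << page_shift;
    }
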
10955diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
10956index bb00089..e0ea580 100644
10957--- a/arch/sparc/kernel/syscalls.S
10958+++ b/arch/sparc/kernel/syscalls.S
10959@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
10960 #endif
10961 .align 32
10962 1: ldx [%g6 + TI_FLAGS], %l5
10963- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10964+ andcc %l5, _TIF_WORK_SYSCALL, %g0
10965 be,pt %icc, rtrap
10966 nop
10967 call syscall_trace_leave
10968@@ -194,7 +194,7 @@ linux_sparc_syscall32:
10969
10970 srl %i3, 0, %o3 ! IEU0
10971 srl %i2, 0, %o2 ! IEU0 Group
10972- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10973+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10974 bne,pn %icc, linux_syscall_trace32 ! CTI
10975 mov %i0, %l5 ! IEU1
10976 5: call %l7 ! CTI Group brk forced
10977@@ -218,7 +218,7 @@ linux_sparc_syscall:
10978
10979 mov %i3, %o3 ! IEU1
10980 mov %i4, %o4 ! IEU0 Group
10981- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10982+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10983 bne,pn %icc, linux_syscall_trace ! CTI Group
10984 mov %i0, %l5 ! IEU0
10985 2: call %l7 ! CTI Group brk forced
10986@@ -233,7 +233,7 @@ ret_sys_call:
10987
10988 cmp %o0, -ERESTART_RESTARTBLOCK
10989 bgeu,pn %xcc, 1f
10990- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
10991+ andcc %l0, _TIF_WORK_SYSCALL, %g0
10992 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
10993
10994 2:
10995diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
10996index 6fd386c5..6907d81 100644
10997--- a/arch/sparc/kernel/traps_32.c
10998+++ b/arch/sparc/kernel/traps_32.c
10999@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
11000 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
11001 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
11002
11003+extern void gr_handle_kernel_exploit(void);
11004+
11005 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11006 {
11007 static int die_counter;
11008@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11009 count++ < 30 &&
11010 (((unsigned long) rw) >= PAGE_OFFSET) &&
11011 !(((unsigned long) rw) & 0x7)) {
11012- printk("Caller[%08lx]: %pS\n", rw->ins[7],
11013+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
11014 (void *) rw->ins[7]);
11015 rw = (struct reg_window32 *)rw->ins[6];
11016 }
11017 }
11018 printk("Instruction DUMP:");
11019 instruction_dump ((unsigned long *) regs->pc);
11020- if(regs->psr & PSR_PS)
11021+ if(regs->psr & PSR_PS) {
11022+ gr_handle_kernel_exploit();
11023 do_exit(SIGKILL);
11024+ }
11025 do_exit(SIGSEGV);
11026 }
11027
11028diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
11029index 981a769..d906eda 100644
11030--- a/arch/sparc/kernel/traps_64.c
11031+++ b/arch/sparc/kernel/traps_64.c
11032@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
11033 i + 1,
11034 p->trapstack[i].tstate, p->trapstack[i].tpc,
11035 p->trapstack[i].tnpc, p->trapstack[i].tt);
11036- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
11037+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
11038 }
11039 }
11040
11041@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
11042
11043 lvl -= 0x100;
11044 if (regs->tstate & TSTATE_PRIV) {
11045+
11046+#ifdef CONFIG_PAX_REFCOUNT
11047+ if (lvl == 6)
11048+ pax_report_refcount_overflow(regs);
11049+#endif
11050+
11051 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
11052 die_if_kernel(buffer, regs);
11053 }
11054@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
11055 void bad_trap_tl1(struct pt_regs *regs, long lvl)
11056 {
11057 char buffer[32];
11058-
11059+
11060 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
11061 0, lvl, SIGTRAP) == NOTIFY_STOP)
11062 return;
11063
11064+#ifdef CONFIG_PAX_REFCOUNT
11065+ if (lvl == 6)
11066+ pax_report_refcount_overflow(regs);
11067+#endif
11068+
11069 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
11070
11071 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
11072@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
11073 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
11074 printk("%s" "ERROR(%d): ",
11075 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
11076- printk("TPC<%pS>\n", (void *) regs->tpc);
11077+ printk("TPC<%pA>\n", (void *) regs->tpc);
11078 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
11079 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
11080 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
11081@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11082 smp_processor_id(),
11083 (type & 0x1) ? 'I' : 'D',
11084 regs->tpc);
11085- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11086+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11087 panic("Irrecoverable Cheetah+ parity error.");
11088 }
11089
11090@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11091 smp_processor_id(),
11092 (type & 0x1) ? 'I' : 'D',
11093 regs->tpc);
11094- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11095+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11096 }
11097
11098 struct sun4v_error_entry {
11099@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11100 /*0x38*/u64 reserved_5;
11101 };
11102
11103-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11104-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11105+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11106+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11107
11108 static const char *sun4v_err_type_to_str(u8 type)
11109 {
11110@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11111 }
11112
11113 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11114- int cpu, const char *pfx, atomic_t *ocnt)
11115+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11116 {
11117 u64 *raw_ptr = (u64 *) ent;
11118 u32 attrs;
11119@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11120
11121 show_regs(regs);
11122
11123- if ((cnt = atomic_read(ocnt)) != 0) {
11124- atomic_set(ocnt, 0);
11125+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11126+ atomic_set_unchecked(ocnt, 0);
11127 wmb();
11128 printk("%s: Queue overflowed %d times.\n",
11129 pfx, cnt);
11130@@ -2048,7 +2059,7 @@ out:
11131 */
11132 void sun4v_resum_overflow(struct pt_regs *regs)
11133 {
11134- atomic_inc(&sun4v_resum_oflow_cnt);
11135+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11136 }
11137
11138 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11139@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11140 /* XXX Actually even this can make not that much sense. Perhaps
11141 * XXX we should just pull the plug and panic directly from here?
11142 */
11143- atomic_inc(&sun4v_nonresum_oflow_cnt);
11144+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11145 }
11146
11147 static void sun4v_tlb_error(struct pt_regs *regs)
11148@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11149
11150 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11151 regs->tpc, tl);
11152- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11153+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11154 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11155- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11156+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11157 (void *) regs->u_regs[UREG_I7]);
11158 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11159 "pte[%lx] error[%lx]\n",
11160@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11161
11162 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11163 regs->tpc, tl);
11164- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11165+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11166 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11167- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11168+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11169 (void *) regs->u_regs[UREG_I7]);
11170 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11171 "pte[%lx] error[%lx]\n",
11172@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11173 fp = (unsigned long)sf->fp + STACK_BIAS;
11174 }
11175
11176- printk(" [%016lx] %pS\n", pc, (void *) pc);
11177+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11178 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11179 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11180 int index = tsk->curr_ret_stack;
11181 if (tsk->ret_stack && index >= graph) {
11182 pc = tsk->ret_stack[index - graph].ret;
11183- printk(" [%016lx] %pS\n", pc, (void *) pc);
11184+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11185 graph++;
11186 }
11187 }
11188@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11189 return (struct reg_window *) (fp + STACK_BIAS);
11190 }
11191
11192+extern void gr_handle_kernel_exploit(void);
11193+
11194 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11195 {
11196 static int die_counter;
11197@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11198 while (rw &&
11199 count++ < 30 &&
11200 kstack_valid(tp, (unsigned long) rw)) {
11201- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11202+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11203 (void *) rw->ins[7]);
11204
11205 rw = kernel_stack_up(rw);
11206@@ -2427,8 +2440,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11207 }
11208 user_instruction_dump ((unsigned int __user *) regs->tpc);
11209 }
11210- if (regs->tstate & TSTATE_PRIV)
11211+ if (regs->tstate & TSTATE_PRIV) {
11212+ gr_handle_kernel_exploit();
11213 do_exit(SIGKILL);
11214+ }
11215 do_exit(SIGSEGV);
11216 }
11217 EXPORT_SYMBOL(die_if_kernel);
11218diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11219index 62098a8..547ab2c 100644
11220--- a/arch/sparc/kernel/unaligned_64.c
11221+++ b/arch/sparc/kernel/unaligned_64.c
11222@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11223 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11224
11225 if (__ratelimit(&ratelimit)) {
11226- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11227+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11228 regs->tpc, (void *) regs->tpc);
11229 }
11230 }
11231diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11232index 3269b02..64f5231 100644
11233--- a/arch/sparc/lib/Makefile
11234+++ b/arch/sparc/lib/Makefile
11235@@ -2,7 +2,7 @@
11236 #
11237
11238 asflags-y := -ansi -DST_DIV0=0x02
11239-ccflags-y := -Werror
11240+#ccflags-y := -Werror
11241
11242 lib-$(CONFIG_SPARC32) += ashrdi3.o
11243 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11244diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11245index 05dac43..76f8ed4 100644
11246--- a/arch/sparc/lib/atomic_64.S
11247+++ b/arch/sparc/lib/atomic_64.S
11248@@ -15,11 +15,22 @@
11249 * a value and does the barriers.
11250 */
11251
11252-#define ATOMIC_OP(op) \
11253-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11254+#ifdef CONFIG_PAX_REFCOUNT
11255+#define __REFCOUNT_OP(op) op##cc
11256+#define __OVERFLOW_IOP tvs %icc, 6;
11257+#define __OVERFLOW_XOP tvs %xcc, 6;
11258+#else
11259+#define __REFCOUNT_OP(op) op
11260+#define __OVERFLOW_IOP
11261+#define __OVERFLOW_XOP
11262+#endif
11263+
11264+#define __ATOMIC_OP(op, suffix, asm_op, post_op) \
11265+ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11266 BACKOFF_SETUP(%o2); \
11267 1: lduw [%o1], %g1; \
11268- op %g1, %o0, %g7; \
11269+ asm_op %g1, %o0, %g7; \
11270+ post_op \
11271 cas [%o1], %g1, %g7; \
11272 cmp %g1, %g7; \
11273 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11274@@ -29,11 +40,15 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11275 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11276 ENDPROC(atomic_##op); \
11277
11278-#define ATOMIC_OP_RETURN(op) \
11279-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11280+#define ATOMIC_OP(op) __ATOMIC_OP(op, , op, ) \
11281+ __ATOMIC_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11282+
11283+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op) \
11284+ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11285 BACKOFF_SETUP(%o2); \
11286 1: lduw [%o1], %g1; \
11287- op %g1, %o0, %g7; \
11288+ asm_op %g1, %o0, %g7; \
11289+ post_op \
11290 cas [%o1], %g1, %g7; \
11291 cmp %g1, %g7; \
11292 bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
11293@@ -43,6 +58,9 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11294 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11295 ENDPROC(atomic_##op##_return);
11296
11297+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , op, ) \
11298+ __ATOMIC_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
11299+
11300 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11301
11302 ATOMIC_OPS(add)
11303@@ -50,13 +68,16 @@ ATOMIC_OPS(sub)
11304
11305 #undef ATOMIC_OPS
11306 #undef ATOMIC_OP_RETURN
11307+#undef __ATOMIC_OP_RETURN
11308 #undef ATOMIC_OP
11309+#undef __ATOMIC_OP
11310
11311-#define ATOMIC64_OP(op) \
11312-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11313+#define __ATOMIC64_OP(op, suffix, asm_op, post_op) \
11314+ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
11315 BACKOFF_SETUP(%o2); \
11316 1: ldx [%o1], %g1; \
11317- op %g1, %o0, %g7; \
11318+ asm_op %g1, %o0, %g7; \
11319+ post_op \
11320 casx [%o1], %g1, %g7; \
11321 cmp %g1, %g7; \
11322 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11323@@ -66,11 +87,15 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
11324 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11325 ENDPROC(atomic64_##op); \
11326
11327-#define ATOMIC64_OP_RETURN(op) \
11328-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11329+#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , op, ) \
11330+ __ATOMIC64_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11331+
11332+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op) \
11333+ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
11334 BACKOFF_SETUP(%o2); \
11335 1: ldx [%o1], %g1; \
11336- op %g1, %o0, %g7; \
11337+ asm_op %g1, %o0, %g7; \
11338+ post_op \
11339 casx [%o1], %g1, %g7; \
11340 cmp %g1, %g7; \
11341 bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
11342@@ -80,6 +105,9 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
11343 2: BACKOFF_SPIN(%o2, %o3, 1b); \
11344 ENDPROC(atomic64_##op##_return);
11345
11346+#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , op, ) \
11347+ __ATOMIC64_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
11348+
11349 #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
11350
11351 ATOMIC64_OPS(add)
11352@@ -87,7 +115,12 @@ ATOMIC64_OPS(sub)
11353
11354 #undef ATOMIC64_OPS
11355 #undef ATOMIC64_OP_RETURN
11356+#undef __ATOMIC64_OP_RETURN
11357 #undef ATOMIC64_OP
11358+#undef __ATOMIC64_OP
11359+#undef __OVERFLOW_XOP
11360+#undef __OVERFLOW_IOP
11361+#undef __REFCOUNT_OP
11362
11363 ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
11364 BACKOFF_SETUP(%o2)
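
The atomic_64.S rework mirrors the atomic_64.h change earlier in this section: one template macro gains a `suffix` parameter and is instantiated twice, once with an empty suffix (checked: `addcc` plus the overflow trap) and once with `_unchecked` (plain `add`). The token-pasting trick, reduced to a compilable C sketch:

    #include <stdio.h>

    #define __OP(op, suffix) \
        void my_##op##suffix(int v) { printf(#op #suffix ": %d\n", v); }
    #define OP(op) __OP(op, ) __OP(op, _unchecked)

    OP(add)   /* expands to my_add() and my_add_unchecked() */

    int main(void) { my_add(1); my_add_unchecked(2); return 0; }
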
11365diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
11366index 1d649a9..fbc5bfc 100644
11367--- a/arch/sparc/lib/ksyms.c
11368+++ b/arch/sparc/lib/ksyms.c
11369@@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
11370 /* Atomic counter implementation. */
11371 #define ATOMIC_OP(op) \
11372 EXPORT_SYMBOL(atomic_##op); \
11373-EXPORT_SYMBOL(atomic64_##op);
11374+EXPORT_SYMBOL(atomic_##op##_unchecked); \
11375+EXPORT_SYMBOL(atomic64_##op); \
11376+EXPORT_SYMBOL(atomic64_##op##_unchecked);
11377
11378 #define ATOMIC_OP_RETURN(op) \
11379 EXPORT_SYMBOL(atomic_##op##_return); \
11380@@ -110,6 +112,8 @@ EXPORT_SYMBOL(atomic64_##op##_return);
11381 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
11382
11383 ATOMIC_OPS(add)
11384+EXPORT_SYMBOL(atomic_add_return_unchecked);
11385+EXPORT_SYMBOL(atomic64_add_return_unchecked);
11386 ATOMIC_OPS(sub)
11387
11388 #undef ATOMIC_OPS
11389diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
11390index 30c3ecc..736f015 100644
11391--- a/arch/sparc/mm/Makefile
11392+++ b/arch/sparc/mm/Makefile
11393@@ -2,7 +2,7 @@
11394 #
11395
11396 asflags-y := -ansi
11397-ccflags-y := -Werror
11398+#ccflags-y := -Werror
11399
11400 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
11401 obj-y += fault_$(BITS).o
11402diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
11403index 70d8171..274c6c0 100644
11404--- a/arch/sparc/mm/fault_32.c
11405+++ b/arch/sparc/mm/fault_32.c
11406@@ -21,6 +21,9 @@
11407 #include <linux/perf_event.h>
11408 #include <linux/interrupt.h>
11409 #include <linux/kdebug.h>
11410+#include <linux/slab.h>
11411+#include <linux/pagemap.h>
11412+#include <linux/compiler.h>
11413
11414 #include <asm/page.h>
11415 #include <asm/pgtable.h>
11416@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
11417 return safe_compute_effective_address(regs, insn);
11418 }
11419
11420+#ifdef CONFIG_PAX_PAGEEXEC
11421+#ifdef CONFIG_PAX_DLRESOLVE
11422+static void pax_emuplt_close(struct vm_area_struct *vma)
11423+{
11424+ vma->vm_mm->call_dl_resolve = 0UL;
11425+}
11426+
11427+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11428+{
11429+ unsigned int *kaddr;
11430+
11431+ vmf->page = alloc_page(GFP_HIGHUSER);
11432+ if (!vmf->page)
11433+ return VM_FAULT_OOM;
11434+
11435+ kaddr = kmap(vmf->page);
11436+ memset(kaddr, 0, PAGE_SIZE);
11437+ kaddr[0] = 0x9DE3BFA8U; /* save */
11438+ flush_dcache_page(vmf->page);
11439+ kunmap(vmf->page);
11440+ return VM_FAULT_MAJOR;
11441+}
11442+
11443+static const struct vm_operations_struct pax_vm_ops = {
11444+ .close = pax_emuplt_close,
11445+ .fault = pax_emuplt_fault
11446+};
11447+
11448+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11449+{
11450+ int ret;
11451+
11452+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11453+ vma->vm_mm = current->mm;
11454+ vma->vm_start = addr;
11455+ vma->vm_end = addr + PAGE_SIZE;
11456+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11457+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11458+ vma->vm_ops = &pax_vm_ops;
11459+
11460+ ret = insert_vm_struct(current->mm, vma);
11461+ if (ret)
11462+ return ret;
11463+
11464+ ++current->mm->total_vm;
11465+ return 0;
11466+}
11467+#endif
11468+
11469+/*
11470+ * PaX: decide what to do with offenders (regs->pc = fault address)
11471+ *
11472+ * returns 1 when task should be killed
11473+ * 2 when patched PLT trampoline was detected
11474+ * 3 when unpatched PLT trampoline was detected
11475+ */
11476+static int pax_handle_fetch_fault(struct pt_regs *regs)
11477+{
11478+
11479+#ifdef CONFIG_PAX_EMUPLT
11480+ int err;
11481+
11482+ do { /* PaX: patched PLT emulation #1 */
11483+ unsigned int sethi1, sethi2, jmpl;
11484+
11485+ err = get_user(sethi1, (unsigned int *)regs->pc);
11486+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
11487+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
11488+
11489+ if (err)
11490+ break;
11491+
11492+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11493+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11494+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11495+ {
11496+ unsigned int addr;
11497+
11498+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11499+ addr = regs->u_regs[UREG_G1];
11500+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11501+ regs->pc = addr;
11502+ regs->npc = addr+4;
11503+ return 2;
11504+ }
11505+ } while (0);
11506+
11507+ do { /* PaX: patched PLT emulation #2 */
11508+ unsigned int ba;
11509+
11510+ err = get_user(ba, (unsigned int *)regs->pc);
11511+
11512+ if (err)
11513+ break;
11514+
11515+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11516+ unsigned int addr;
11517+
11518+ if ((ba & 0xFFC00000U) == 0x30800000U)
11519+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11520+ else
11521+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11522+ regs->pc = addr;
11523+ regs->npc = addr+4;
11524+ return 2;
11525+ }
11526+ } while (0);
11527+
11528+ do { /* PaX: patched PLT emulation #3 */
11529+ unsigned int sethi, bajmpl, nop;
11530+
11531+ err = get_user(sethi, (unsigned int *)regs->pc);
11532+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
11533+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11534+
11535+ if (err)
11536+ break;
11537+
11538+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11539+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11540+ nop == 0x01000000U)
11541+ {
11542+ unsigned int addr;
11543+
11544+ addr = (sethi & 0x003FFFFFU) << 10;
11545+ regs->u_regs[UREG_G1] = addr;
11546+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11547+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11548+ else
11549+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11550+ regs->pc = addr;
11551+ regs->npc = addr+4;
11552+ return 2;
11553+ }
11554+ } while (0);
11555+
11556+ do { /* PaX: unpatched PLT emulation step 1 */
11557+ unsigned int sethi, ba, nop;
11558+
11559+ err = get_user(sethi, (unsigned int *)regs->pc);
11560+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
11561+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
11562+
11563+ if (err)
11564+ break;
11565+
11566+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11567+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11568+ nop == 0x01000000U)
11569+ {
11570+ unsigned int addr, save, call;
11571+
11572+ if ((ba & 0xFFC00000U) == 0x30800000U)
11573+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
11574+ else
11575+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
11576+
11577+ err = get_user(save, (unsigned int *)addr);
11578+ err |= get_user(call, (unsigned int *)(addr+4));
11579+ err |= get_user(nop, (unsigned int *)(addr+8));
11580+ if (err)
11581+ break;
11582+
11583+#ifdef CONFIG_PAX_DLRESOLVE
11584+ if (save == 0x9DE3BFA8U &&
11585+ (call & 0xC0000000U) == 0x40000000U &&
11586+ nop == 0x01000000U)
11587+ {
11588+ struct vm_area_struct *vma;
11589+ unsigned long call_dl_resolve;
11590+
11591+ down_read(&current->mm->mmap_sem);
11592+ call_dl_resolve = current->mm->call_dl_resolve;
11593+ up_read(&current->mm->mmap_sem);
11594+ if (likely(call_dl_resolve))
11595+ goto emulate;
11596+
11597+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11598+
11599+ down_write(&current->mm->mmap_sem);
11600+ if (current->mm->call_dl_resolve) {
11601+ call_dl_resolve = current->mm->call_dl_resolve;
11602+ up_write(&current->mm->mmap_sem);
11603+ if (vma)
11604+ kmem_cache_free(vm_area_cachep, vma);
11605+ goto emulate;
11606+ }
11607+
11608+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11609+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11610+ up_write(&current->mm->mmap_sem);
11611+ if (vma)
11612+ kmem_cache_free(vm_area_cachep, vma);
11613+ return 1;
11614+ }
11615+
11616+ if (pax_insert_vma(vma, call_dl_resolve)) {
11617+ up_write(&current->mm->mmap_sem);
11618+ kmem_cache_free(vm_area_cachep, vma);
11619+ return 1;
11620+ }
11621+
11622+ current->mm->call_dl_resolve = call_dl_resolve;
11623+ up_write(&current->mm->mmap_sem);
11624+
11625+emulate:
11626+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11627+ regs->pc = call_dl_resolve;
11628+ regs->npc = addr+4;
11629+ return 3;
11630+ }
11631+#endif
11632+
11633+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11634+ if ((save & 0xFFC00000U) == 0x05000000U &&
11635+ (call & 0xFFFFE000U) == 0x85C0A000U &&
11636+ nop == 0x01000000U)
11637+ {
11638+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11639+ regs->u_regs[UREG_G2] = addr + 4;
11640+ addr = (save & 0x003FFFFFU) << 10;
11641+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
11642+ regs->pc = addr;
11643+ regs->npc = addr+4;
11644+ return 3;
11645+ }
11646+ }
11647+ } while (0);
11648+
11649+ do { /* PaX: unpatched PLT emulation step 2 */
11650+ unsigned int save, call, nop;
11651+
11652+ err = get_user(save, (unsigned int *)(regs->pc-4));
11653+ err |= get_user(call, (unsigned int *)regs->pc);
11654+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
11655+ if (err)
11656+ break;
11657+
11658+ if (save == 0x9DE3BFA8U &&
11659+ (call & 0xC0000000U) == 0x40000000U &&
11660+ nop == 0x01000000U)
11661+ {
11662+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
11663+
11664+ regs->u_regs[UREG_RETPC] = regs->pc;
11665+ regs->pc = dl_resolve;
11666+ regs->npc = dl_resolve+4;
11667+ return 3;
11668+ }
11669+ } while (0);
11670+#endif
11671+
11672+ return 1;
11673+}
11674+
11675+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11676+{
11677+ unsigned long i;
11678+
11679+ printk(KERN_ERR "PAX: bytes at PC: ");
11680+ for (i = 0; i < 8; i++) {
11681+ unsigned int c;
11682+ if (get_user(c, (unsigned int *)pc+i))
11683+ printk(KERN_CONT "???????? ");
11684+ else
11685+ printk(KERN_CONT "%08x ", c);
11686+ }
11687+ printk("\n");
11688+}
11689+#endif
11690+
11691 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
11692 int text_fault)
11693 {
11694@@ -226,6 +500,24 @@ good_area:
11695 if (!(vma->vm_flags & VM_WRITE))
11696 goto bad_area;
11697 } else {
11698+
11699+#ifdef CONFIG_PAX_PAGEEXEC
11700+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
11701+ up_read(&mm->mmap_sem);
11702+ switch (pax_handle_fetch_fault(regs)) {
11703+
11704+#ifdef CONFIG_PAX_EMUPLT
11705+ case 2:
11706+ case 3:
11707+ return;
11708+#endif
11709+
11710+ }
11711+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
11712+ do_group_exit(SIGKILL);
11713+ }
11714+#endif
11715+
11716 /* Allow reads even for write-only mappings */
11717 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
11718 goto bad_area;
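
A note on the bit-twiddling the fault_32.c emulation paths repeat: expressions like (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U) are a branch-free sign extension of the 13-bit simm13 immediate in SPARC jmpl instructions (the 0xFFC00000/0xFFF80000 variants do the same for the 22- and 19-bit branch displacements). The helper name below is hypothetical; the loop checks the identity against the obvious shift formulation:

    #include <assert.h>
    #include <stdint.h>

    /* Sign-extend the low 13 bits the way the emulation code does:
     * force bits 13..31 high, then flip and re-add the sign bit
     * (bit 12) so a clear sign bit carries out and rezeroes the top. */
    static uint32_t simm13_sext(uint32_t insn)
    {
        return ((insn | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U;
    }

    int main(void)
    {
        uint32_t v;

        for (v = 0; v < 0x2000; v++)    /* every 13-bit immediate */
            assert(simm13_sext(v) == (uint32_t)((int32_t)(v << 19) >> 19));
        return 0;
    }
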
11719diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
11720index 4798232..f76e3aa 100644
11721--- a/arch/sparc/mm/fault_64.c
11722+++ b/arch/sparc/mm/fault_64.c
11723@@ -22,6 +22,9 @@
11724 #include <linux/kdebug.h>
11725 #include <linux/percpu.h>
11726 #include <linux/context_tracking.h>
11727+#include <linux/slab.h>
11728+#include <linux/pagemap.h>
11729+#include <linux/compiler.h>
11730
11731 #include <asm/page.h>
11732 #include <asm/pgtable.h>
11733@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
11734 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
11735 regs->tpc);
11736 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
11737- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
11738+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
11739 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
11740 dump_stack();
11741 unhandled_fault(regs->tpc, current, regs);
11742@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
11743 show_regs(regs);
11744 }
11745
11746+#ifdef CONFIG_PAX_PAGEEXEC
11747+#ifdef CONFIG_PAX_DLRESOLVE
11748+static void pax_emuplt_close(struct vm_area_struct *vma)
11749+{
11750+ vma->vm_mm->call_dl_resolve = 0UL;
11751+}
11752+
11753+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
11754+{
11755+ unsigned int *kaddr;
11756+
11757+ vmf->page = alloc_page(GFP_HIGHUSER);
11758+ if (!vmf->page)
11759+ return VM_FAULT_OOM;
11760+
11761+ kaddr = kmap(vmf->page);
11762+ memset(kaddr, 0, PAGE_SIZE);
11763+ kaddr[0] = 0x9DE3BFA8U; /* save */
11764+ flush_dcache_page(vmf->page);
11765+ kunmap(vmf->page);
11766+ return VM_FAULT_MAJOR;
11767+}
11768+
11769+static const struct vm_operations_struct pax_vm_ops = {
11770+ .close = pax_emuplt_close,
11771+ .fault = pax_emuplt_fault
11772+};
11773+
11774+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
11775+{
11776+ int ret;
11777+
11778+ INIT_LIST_HEAD(&vma->anon_vma_chain);
11779+ vma->vm_mm = current->mm;
11780+ vma->vm_start = addr;
11781+ vma->vm_end = addr + PAGE_SIZE;
11782+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
11783+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
11784+ vma->vm_ops = &pax_vm_ops;
11785+
11786+ ret = insert_vm_struct(current->mm, vma);
11787+ if (ret)
11788+ return ret;
11789+
11790+ ++current->mm->total_vm;
11791+ return 0;
11792+}
11793+#endif
11794+
11795+/*
11796+ * PaX: decide what to do with offenders (regs->tpc = fault address)
11797+ *
11798+ * returns 1 when task should be killed
11799+ * 2 when patched PLT trampoline was detected
11800+ * 3 when unpatched PLT trampoline was detected
11801+ */
11802+static int pax_handle_fetch_fault(struct pt_regs *regs)
11803+{
11804+
11805+#ifdef CONFIG_PAX_EMUPLT
11806+ int err;
11807+
11808+ do { /* PaX: patched PLT emulation #1 */
11809+ unsigned int sethi1, sethi2, jmpl;
11810+
11811+ err = get_user(sethi1, (unsigned int *)regs->tpc);
11812+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
11813+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
11814+
11815+ if (err)
11816+ break;
11817+
11818+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
11819+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
11820+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
11821+ {
11822+ unsigned long addr;
11823+
11824+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
11825+ addr = regs->u_regs[UREG_G1];
11826+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11827+
11828+ if (test_thread_flag(TIF_32BIT))
11829+ addr &= 0xFFFFFFFFUL;
11830+
11831+ regs->tpc = addr;
11832+ regs->tnpc = addr+4;
11833+ return 2;
11834+ }
11835+ } while (0);
11836+
11837+ do { /* PaX: patched PLT emulation #2 */
11838+ unsigned int ba;
11839+
11840+ err = get_user(ba, (unsigned int *)regs->tpc);
11841+
11842+ if (err)
11843+ break;
11844+
11845+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
11846+ unsigned long addr;
11847+
11848+ if ((ba & 0xFFC00000U) == 0x30800000U)
11849+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11850+ else
11851+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11852+
11853+ if (test_thread_flag(TIF_32BIT))
11854+ addr &= 0xFFFFFFFFUL;
11855+
11856+ regs->tpc = addr;
11857+ regs->tnpc = addr+4;
11858+ return 2;
11859+ }
11860+ } while (0);
11861+
11862+ do { /* PaX: patched PLT emulation #3 */
11863+ unsigned int sethi, bajmpl, nop;
11864+
11865+ err = get_user(sethi, (unsigned int *)regs->tpc);
11866+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
11867+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11868+
11869+ if (err)
11870+ break;
11871+
11872+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11873+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
11874+ nop == 0x01000000U)
11875+ {
11876+ unsigned long addr;
11877+
11878+ addr = (sethi & 0x003FFFFFU) << 10;
11879+ regs->u_regs[UREG_G1] = addr;
11880+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
11881+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11882+ else
11883+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11884+
11885+ if (test_thread_flag(TIF_32BIT))
11886+ addr &= 0xFFFFFFFFUL;
11887+
11888+ regs->tpc = addr;
11889+ regs->tnpc = addr+4;
11890+ return 2;
11891+ }
11892+ } while (0);
11893+
11894+ do { /* PaX: patched PLT emulation #4 */
11895+ unsigned int sethi, mov1, call, mov2;
11896+
11897+ err = get_user(sethi, (unsigned int *)regs->tpc);
11898+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
11899+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
11900+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
11901+
11902+ if (err)
11903+ break;
11904+
11905+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11906+ mov1 == 0x8210000FU &&
11907+ (call & 0xC0000000U) == 0x40000000U &&
11908+ mov2 == 0x9E100001U)
11909+ {
11910+ unsigned long addr;
11911+
11912+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
11913+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11914+
11915+ if (test_thread_flag(TIF_32BIT))
11916+ addr &= 0xFFFFFFFFUL;
11917+
11918+ regs->tpc = addr;
11919+ regs->tnpc = addr+4;
11920+ return 2;
11921+ }
11922+ } while (0);
11923+
11924+ do { /* PaX: patched PLT emulation #5 */
11925+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
11926+
11927+ err = get_user(sethi, (unsigned int *)regs->tpc);
11928+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11929+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11930+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
11931+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
11932+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
11933+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
11934+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
11935+
11936+ if (err)
11937+ break;
11938+
11939+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11940+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11941+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11942+ (or1 & 0xFFFFE000U) == 0x82106000U &&
11943+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
11944+ sllx == 0x83287020U &&
11945+ jmpl == 0x81C04005U &&
11946+ nop == 0x01000000U)
11947+ {
11948+ unsigned long addr;
11949+
11950+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11951+ regs->u_regs[UREG_G1] <<= 32;
11952+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11953+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11954+ regs->tpc = addr;
11955+ regs->tnpc = addr+4;
11956+ return 2;
11957+ }
11958+ } while (0);
11959+
11960+ do { /* PaX: patched PLT emulation #6 */
11961+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
11962+
11963+ err = get_user(sethi, (unsigned int *)regs->tpc);
11964+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
11965+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
11966+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
11967+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
11968+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
11969+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
11970+
11971+ if (err)
11972+ break;
11973+
11974+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
11975+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
11976+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11977+ sllx == 0x83287020U &&
11978+ (or & 0xFFFFE000U) == 0x8A116000U &&
11979+ jmpl == 0x81C04005U &&
11980+ nop == 0x01000000U)
11981+ {
11982+ unsigned long addr;
11983+
11984+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11985+ regs->u_regs[UREG_G1] <<= 32;
11986+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11987+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11988+ regs->tpc = addr;
11989+ regs->tnpc = addr+4;
11990+ return 2;
11991+ }
11992+ } while (0);
11993+
11994+ do { /* PaX: unpatched PLT emulation step 1 */
11995+ unsigned int sethi, ba, nop;
11996+
11997+ err = get_user(sethi, (unsigned int *)regs->tpc);
11998+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11999+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12000+
12001+ if (err)
12002+ break;
12003+
12004+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12005+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
12006+ nop == 0x01000000U)
12007+ {
12008+ unsigned long addr;
12009+ unsigned int save, call;
12010+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
12011+
12012+ if ((ba & 0xFFC00000U) == 0x30800000U)
12013+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
12014+ else
12015+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12016+
12017+ if (test_thread_flag(TIF_32BIT))
12018+ addr &= 0xFFFFFFFFUL;
12019+
12020+ err = get_user(save, (unsigned int *)addr);
12021+ err |= get_user(call, (unsigned int *)(addr+4));
12022+ err |= get_user(nop, (unsigned int *)(addr+8));
12023+ if (err)
12024+ break;
12025+
12026+#ifdef CONFIG_PAX_DLRESOLVE
12027+ if (save == 0x9DE3BFA8U &&
12028+ (call & 0xC0000000U) == 0x40000000U &&
12029+ nop == 0x01000000U)
12030+ {
12031+ struct vm_area_struct *vma;
12032+ unsigned long call_dl_resolve;
12033+
12034+ down_read(&current->mm->mmap_sem);
12035+ call_dl_resolve = current->mm->call_dl_resolve;
12036+ up_read(&current->mm->mmap_sem);
12037+ if (likely(call_dl_resolve))
12038+ goto emulate;
12039+
12040+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
12041+
12042+ down_write(&current->mm->mmap_sem);
12043+ if (current->mm->call_dl_resolve) {
12044+ call_dl_resolve = current->mm->call_dl_resolve;
12045+ up_write(&current->mm->mmap_sem);
12046+ if (vma)
12047+ kmem_cache_free(vm_area_cachep, vma);
12048+ goto emulate;
12049+ }
12050+
12051+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
12052+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
12053+ up_write(&current->mm->mmap_sem);
12054+ if (vma)
12055+ kmem_cache_free(vm_area_cachep, vma);
12056+ return 1;
12057+ }
12058+
12059+ if (pax_insert_vma(vma, call_dl_resolve)) {
12060+ up_write(&current->mm->mmap_sem);
12061+ kmem_cache_free(vm_area_cachep, vma);
12062+ return 1;
12063+ }
12064+
12065+ current->mm->call_dl_resolve = call_dl_resolve;
12066+ up_write(&current->mm->mmap_sem);
12067+
12068+emulate:
12069+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12070+ regs->tpc = call_dl_resolve;
12071+ regs->tnpc = addr+4;
12072+ return 3;
12073+ }
12074+#endif
12075+
12076+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
12077+ if ((save & 0xFFC00000U) == 0x05000000U &&
12078+ (call & 0xFFFFE000U) == 0x85C0A000U &&
12079+ nop == 0x01000000U)
12080+ {
12081+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12082+ regs->u_regs[UREG_G2] = addr + 4;
12083+ addr = (save & 0x003FFFFFU) << 10;
12084+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12085+
12086+ if (test_thread_flag(TIF_32BIT))
12087+ addr &= 0xFFFFFFFFUL;
12088+
12089+ regs->tpc = addr;
12090+ regs->tnpc = addr+4;
12091+ return 3;
12092+ }
12093+
12094+ /* PaX: 64-bit PLT stub */
12095+ err = get_user(sethi1, (unsigned int *)addr);
12096+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12097+ err |= get_user(or1, (unsigned int *)(addr+8));
12098+ err |= get_user(or2, (unsigned int *)(addr+12));
12099+ err |= get_user(sllx, (unsigned int *)(addr+16));
12100+ err |= get_user(add, (unsigned int *)(addr+20));
12101+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12102+ err |= get_user(nop, (unsigned int *)(addr+28));
12103+ if (err)
12104+ break;
12105+
12106+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12107+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12108+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12109+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12110+ sllx == 0x89293020U &&
12111+ add == 0x8A010005U &&
12112+ jmpl == 0x89C14000U &&
12113+ nop == 0x01000000U)
12114+ {
12115+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12116+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12117+ regs->u_regs[UREG_G4] <<= 32;
12118+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12119+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12120+ regs->u_regs[UREG_G4] = addr + 24;
12121+ addr = regs->u_regs[UREG_G5];
12122+ regs->tpc = addr;
12123+ regs->tnpc = addr+4;
12124+ return 3;
12125+ }
12126+ }
12127+ } while (0);
12128+
12129+#ifdef CONFIG_PAX_DLRESOLVE
12130+ do { /* PaX: unpatched PLT emulation step 2 */
12131+ unsigned int save, call, nop;
12132+
12133+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12134+ err |= get_user(call, (unsigned int *)regs->tpc);
12135+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12136+ if (err)
12137+ break;
12138+
12139+ if (save == 0x9DE3BFA8U &&
12140+ (call & 0xC0000000U) == 0x40000000U &&
12141+ nop == 0x01000000U)
12142+ {
12143+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12144+
12145+ if (test_thread_flag(TIF_32BIT))
12146+ dl_resolve &= 0xFFFFFFFFUL;
12147+
12148+ regs->u_regs[UREG_RETPC] = regs->tpc;
12149+ regs->tpc = dl_resolve;
12150+ regs->tnpc = dl_resolve+4;
12151+ return 3;
12152+ }
12153+ } while (0);
12154+#endif
12155+
12156+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12157+ unsigned int sethi, ba, nop;
12158+
12159+ err = get_user(sethi, (unsigned int *)regs->tpc);
12160+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12161+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12162+
12163+ if (err)
12164+ break;
12165+
12166+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12167+ (ba & 0xFFF00000U) == 0x30600000U &&
12168+ nop == 0x01000000U)
12169+ {
12170+ unsigned long addr;
12171+
12172+ addr = (sethi & 0x003FFFFFU) << 10;
12173+ regs->u_regs[UREG_G1] = addr;
12174+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12175+
12176+ if (test_thread_flag(TIF_32BIT))
12177+ addr &= 0xFFFFFFFFUL;
12178+
12179+ regs->tpc = addr;
12180+ regs->tnpc = addr+4;
12181+ return 2;
12182+ }
12183+ } while (0);
12184+
12185+#endif
12186+
12187+ return 1;
12188+}
12189+
12190+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12191+{
12192+ unsigned long i;
12193+
12194+ printk(KERN_ERR "PAX: bytes at PC: ");
12195+ for (i = 0; i < 8; i++) {
12196+ unsigned int c;
12197+ if (get_user(c, (unsigned int *)pc+i))
12198+ printk(KERN_CONT "???????? ");
12199+ else
12200+ printk(KERN_CONT "%08x ", c);
12201+ }
12202+ printk("\n");
12203+}
12204+#endif
12205+
12206 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12207 {
12208 enum ctx_state prev_state = exception_enter();
12209@@ -353,6 +816,29 @@ retry:
12210 if (!vma)
12211 goto bad_area;
12212
12213+#ifdef CONFIG_PAX_PAGEEXEC
12214+ /* PaX: detect ITLB misses on non-exec pages */
12215+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12216+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12217+ {
12218+ if (address != regs->tpc)
12219+ goto good_area;
12220+
12221+ up_read(&mm->mmap_sem);
12222+ switch (pax_handle_fetch_fault(regs)) {
12223+
12224+#ifdef CONFIG_PAX_EMUPLT
12225+ case 2:
12226+ case 3:
12227+ return;
12228+#endif
12229+
12230+ }
12231+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12232+ do_group_exit(SIGKILL);
12233+ }
12234+#endif
12235+
12236 /* Pure DTLB misses do not tell us whether the fault causing
12237 * load/store/atomic was a write or not, it only says that there
12238 * was no match. So in such a case we (carefully) read the
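
fault_64.c repeats the fault_32.c machinery with two mechanical differences: the sign-extension masks widen to 64-bit constants (0xFFFFFFFFFFFFE000UL and friends) so the extension reaches bit 63, and most computed targets are truncated to 32 bits for compat tasks via test_thread_flag(TIF_32BIT). The same decode in 64-bit form, helper name again hypothetical:

    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t simm13_sext64(uint32_t insn, bool compat_task)
    {
        uint64_t addr = ((insn | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL)
                        + 0x00001000UL;

        if (compat_task)        /* TIF_32BIT in the patch */
            addr &= 0xFFFFFFFFUL;
        return addr;
    }
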
12239diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12240index d329537..2c3746a 100644
12241--- a/arch/sparc/mm/hugetlbpage.c
12242+++ b/arch/sparc/mm/hugetlbpage.c
12243@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12244 unsigned long addr,
12245 unsigned long len,
12246 unsigned long pgoff,
12247- unsigned long flags)
12248+ unsigned long flags,
12249+ unsigned long offset)
12250 {
12251+ struct mm_struct *mm = current->mm;
12252 unsigned long task_size = TASK_SIZE;
12253 struct vm_unmapped_area_info info;
12254
12255@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12256
12257 info.flags = 0;
12258 info.length = len;
12259- info.low_limit = TASK_UNMAPPED_BASE;
12260+ info.low_limit = mm->mmap_base;
12261 info.high_limit = min(task_size, VA_EXCLUDE_START);
12262 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12263 info.align_offset = 0;
12264+ info.threadstack_offset = offset;
12265 addr = vm_unmapped_area(&info);
12266
12267 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12268 VM_BUG_ON(addr != -ENOMEM);
12269 info.low_limit = VA_EXCLUDE_END;
12270+
12271+#ifdef CONFIG_PAX_RANDMMAP
12272+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12273+ info.low_limit += mm->delta_mmap;
12274+#endif
12275+
12276 info.high_limit = task_size;
12277 addr = vm_unmapped_area(&info);
12278 }
12279@@ -55,7 +64,8 @@ static unsigned long
12280 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12281 const unsigned long len,
12282 const unsigned long pgoff,
12283- const unsigned long flags)
12284+ const unsigned long flags,
12285+ const unsigned long offset)
12286 {
12287 struct mm_struct *mm = current->mm;
12288 unsigned long addr = addr0;
12289@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12290 info.high_limit = mm->mmap_base;
12291 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12292 info.align_offset = 0;
12293+ info.threadstack_offset = offset;
12294 addr = vm_unmapped_area(&info);
12295
12296 /*
12297@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12298 VM_BUG_ON(addr != -ENOMEM);
12299 info.flags = 0;
12300 info.low_limit = TASK_UNMAPPED_BASE;
12301+
12302+#ifdef CONFIG_PAX_RANDMMAP
12303+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12304+ info.low_limit += mm->delta_mmap;
12305+#endif
12306+
12307 info.high_limit = STACK_TOP32;
12308 addr = vm_unmapped_area(&info);
12309 }
12310@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12311 struct mm_struct *mm = current->mm;
12312 struct vm_area_struct *vma;
12313 unsigned long task_size = TASK_SIZE;
12314+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12315
12316 if (test_thread_flag(TIF_32BIT))
12317 task_size = STACK_TOP32;
12318@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12319 return addr;
12320 }
12321
12322+#ifdef CONFIG_PAX_RANDMMAP
12323+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12324+#endif
12325+
12326 if (addr) {
12327 addr = ALIGN(addr, HPAGE_SIZE);
12328 vma = find_vma(mm, addr);
12329- if (task_size - len >= addr &&
12330- (!vma || addr + len <= vma->vm_start))
12331+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12332 return addr;
12333 }
12334 if (mm->get_unmapped_area == arch_get_unmapped_area)
12335 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12336- pgoff, flags);
12337+ pgoff, flags, offset);
12338 else
12339 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12340- pgoff, flags);
12341+ pgoff, flags, offset);
12342 }
12343
12344 pte_t *huge_pte_alloc(struct mm_struct *mm,
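
The hugetlbpage.c hunks thread two grsecurity helpers through the unmapped-area search: gr_rand_threadstack_offset() supplies a per-mapping randomised gap (carried in info.threadstack_offset), and check_heap_stack_gap() replaces the bare '!vma || addr + len <= vma->vm_start' test. Neither helper's body appears in this excerpt, so the sketch below paraphrases the intended policy -- don't let a new mapping land flush against a downward-growing stack -- rather than copying the real implementation:

    #include <stdbool.h>

    #define VM_GROWSDOWN 0x00000100UL   /* the kernel's flag value */

    struct vm_area_struct {             /* reduced to the fields used */
        unsigned long vm_start, vm_end, vm_flags;
    };

    static bool check_heap_stack_gap(const struct vm_area_struct *vma,
                                     unsigned long addr, unsigned long len,
                                     unsigned long offset)
    {
        if (!vma)
            return true;                /* nothing above the range */
        if (addr + len > vma->vm_start)
            return false;               /* plain overlap */
        if ((vma->vm_flags & VM_GROWSDOWN) &&
            addr + len + offset > vma->vm_start)
            return false;               /* too close to a stack */
        return true;
    }

The RANDMMAP hunks that bump info.low_limit by mm->delta_mmap are the same idea from the other side: keep the randomised base even on the retry path.
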
12345diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12346index 3ea267c..93f0659 100644
12347--- a/arch/sparc/mm/init_64.c
12348+++ b/arch/sparc/mm/init_64.c
12349@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12350 int num_kernel_image_mappings;
12351
12352 #ifdef CONFIG_DEBUG_DCFLUSH
12353-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12354+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12355 #ifdef CONFIG_SMP
12356-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12357+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12358 #endif
12359 #endif
12360
12361@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
12362 {
12363 BUG_ON(tlb_type == hypervisor);
12364 #ifdef CONFIG_DEBUG_DCFLUSH
12365- atomic_inc(&dcpage_flushes);
12366+ atomic_inc_unchecked(&dcpage_flushes);
12367 #endif
12368
12369 #ifdef DCACHE_ALIASING_POSSIBLE
12370@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
12371
12372 #ifdef CONFIG_DEBUG_DCFLUSH
12373 seq_printf(m, "DCPageFlushes\t: %d\n",
12374- atomic_read(&dcpage_flushes));
12375+ atomic_read_unchecked(&dcpage_flushes));
12376 #ifdef CONFIG_SMP
12377 seq_printf(m, "DCPageFlushesXC\t: %d\n",
12378- atomic_read(&dcpage_flushes_xcall));
12379+ atomic_read_unchecked(&dcpage_flushes_xcall));
12380 #endif /* CONFIG_SMP */
12381 #endif /* CONFIG_DEBUG_DCFLUSH */
12382 }
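
dcpage_flushes and dcpage_flushes_xcall are pure debug statistics that only ever feed seq_printf(), so the patch moves them to the unchecked flavour: a counter that is allowed to wrap must not trip the refcount overflow trap. Minimal self-contained sketch (the real types live in the sparc atomic headers patched earlier):

    typedef struct { int counter; } atomic_unchecked_t;

    static atomic_unchecked_t dcpage_flushes;     /* statistic, may wrap */

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
    }

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
        return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
    }
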
12383diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
12384index 7cca418..53fc030 100644
12385--- a/arch/tile/Kconfig
12386+++ b/arch/tile/Kconfig
12387@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
12388
12389 config KEXEC
12390 bool "kexec system call"
12391+ depends on !GRKERNSEC_KMEM
12392 ---help---
12393 kexec is a system call that implements the ability to shutdown your
12394 current kernel, and to start another kernel. It is like a reboot
12395diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
12396index 7b11c5f..755a026 100644
12397--- a/arch/tile/include/asm/atomic_64.h
12398+++ b/arch/tile/include/asm/atomic_64.h
12399@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
12400
12401 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12402
12403+#define atomic64_read_unchecked(v) atomic64_read(v)
12404+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
12405+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
12406+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
12407+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
12408+#define atomic64_inc_unchecked(v) atomic64_inc(v)
12409+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
12410+#define atomic64_dec_unchecked(v) atomic64_dec(v)
12411+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
12412+
12413 /* Define this to indicate that cmpxchg is an efficient operation. */
12414 #define __HAVE_ARCH_CMPXCHG
12415
12416diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
12417index 6160761..00cac88 100644
12418--- a/arch/tile/include/asm/cache.h
12419+++ b/arch/tile/include/asm/cache.h
12420@@ -15,11 +15,12 @@
12421 #ifndef _ASM_TILE_CACHE_H
12422 #define _ASM_TILE_CACHE_H
12423
12424+#include <linux/const.h>
12425 #include <arch/chip.h>
12426
12427 /* bytes per L1 data cache line */
12428 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
12429-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12430+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12431
12432 /* bytes per L2 cache line */
12433 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
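
This cache.h change (repeated below for um and unicore32) swaps a plain (1 << SHIFT) for (_AC(1,UL) << SHIFT), making L1_CACHE_BYTES an unsigned long in C while leaving it a bare literal in assembly, where type suffixes do not parse. _AC comes from the <linux/const.h> include each hunk adds; its definition, reproduced from the kernel's uapi/linux/const.h:

    #ifdef __ASSEMBLY__
    #define _AC(X, Y)   X               /* assembler: no suffix */
    #else
    #define __AC(X, Y)  (X##Y)
    #define _AC(X, Y)   __AC(X, Y)      /* C: 1UL, not int 1 */
    #endif

    #define L1_CACHE_SHIFT  6
    #define L1_CACHE_BYTES  (_AC(1, UL) << L1_CACHE_SHIFT)

The width matters because these constants end up in mask arithmetic against full-width addresses, where int-typed constants invite promotion surprises.
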
12434diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
12435index b6cde32..c0cb736 100644
12436--- a/arch/tile/include/asm/uaccess.h
12437+++ b/arch/tile/include/asm/uaccess.h
12438@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
12439 const void __user *from,
12440 unsigned long n)
12441 {
12442- int sz = __compiletime_object_size(to);
12443+ size_t sz = __compiletime_object_size(to);
12444
12445- if (likely(sz == -1 || sz >= n))
12446+ if (likely(sz == (size_t)-1 || sz >= n))
12447 n = _copy_from_user(to, from, n);
12448 else
12449 copy_from_user_overflow();
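
__compiletime_object_size() reports "unknown" as -1, and n is unsigned long, so keeping the result in a plain int mixes signedness in both comparisons; the patch widens sz to size_t and spells the sentinel as (size_t)-1. What the old mix does, in miniature:

    #include <stdio.h>

    int main(void)
    {
        int sz = -1;            /* the old "unknown size" sentinel */
        unsigned long n = 32;

        /* the usual arithmetic conversions turn sz into ULONG_MAX,
         * so this is true -- the old code was only saved by testing
         * sz == -1 first */
        printf("%d\n", sz >= n);    /* prints 1 */
        return 0;
    }
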
12450diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
12451index 3270e00..a77236e 100644
12452--- a/arch/tile/mm/hugetlbpage.c
12453+++ b/arch/tile/mm/hugetlbpage.c
12454@@ -207,6 +207,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
12455 info.high_limit = TASK_SIZE;
12456 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12457 info.align_offset = 0;
12458+ info.threadstack_offset = 0;
12459 return vm_unmapped_area(&info);
12460 }
12461
12462@@ -224,6 +225,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
12463 info.high_limit = current->mm->mmap_base;
12464 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
12465 info.align_offset = 0;
12466+ info.threadstack_offset = 0;
12467 addr = vm_unmapped_area(&info);
12468
12469 /*
12470diff --git a/arch/um/Makefile b/arch/um/Makefile
12471index e4b1a96..16162f8 100644
12472--- a/arch/um/Makefile
12473+++ b/arch/um/Makefile
12474@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
12475 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
12476 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
12477
12478+ifdef CONSTIFY_PLUGIN
12479+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12480+endif
12481+
12482 #This will adjust *FLAGS accordingly to the platform.
12483 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
12484
12485diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
12486index 19e1bdd..3665b77 100644
12487--- a/arch/um/include/asm/cache.h
12488+++ b/arch/um/include/asm/cache.h
12489@@ -1,6 +1,7 @@
12490 #ifndef __UM_CACHE_H
12491 #define __UM_CACHE_H
12492
12493+#include <linux/const.h>
12494
12495 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
12496 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12497@@ -12,6 +13,6 @@
12498 # define L1_CACHE_SHIFT 5
12499 #endif
12500
12501-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12502+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12503
12504 #endif
12505diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
12506index 2e0a6b1..a64d0f5 100644
12507--- a/arch/um/include/asm/kmap_types.h
12508+++ b/arch/um/include/asm/kmap_types.h
12509@@ -8,6 +8,6 @@
12510
12511 /* No more #include "asm/arch/kmap_types.h" ! */
12512
12513-#define KM_TYPE_NR 14
12514+#define KM_TYPE_NR 15
12515
12516 #endif
12517diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
12518index 71c5d13..4c7b9f1 100644
12519--- a/arch/um/include/asm/page.h
12520+++ b/arch/um/include/asm/page.h
12521@@ -14,6 +14,9 @@
12522 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
12523 #define PAGE_MASK (~(PAGE_SIZE-1))
12524
12525+#define ktla_ktva(addr) (addr)
12526+#define ktva_ktla(addr) (addr)
12527+
12528 #ifndef __ASSEMBLY__
12529
12530 struct page;
12531diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
12532index 0032f92..cd151e0 100644
12533--- a/arch/um/include/asm/pgtable-3level.h
12534+++ b/arch/um/include/asm/pgtable-3level.h
12535@@ -58,6 +58,7 @@
12536 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
12537 #define pud_populate(mm, pud, pmd) \
12538 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
12539+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
12540
12541 #ifdef CONFIG_64BIT
12542 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
12543diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
12544index f17bca8..48adb87 100644
12545--- a/arch/um/kernel/process.c
12546+++ b/arch/um/kernel/process.c
12547@@ -356,22 +356,6 @@ int singlestepping(void * t)
12548 return 2;
12549 }
12550
12551-/*
12552- * Only x86 and x86_64 have an arch_align_stack().
12553- * All other arches have "#define arch_align_stack(x) (x)"
12554- * in their asm/exec.h
12555- * As this is included in UML from asm-um/system-generic.h,
12556- * we can use it to behave as the subarch does.
12557- */
12558-#ifndef arch_align_stack
12559-unsigned long arch_align_stack(unsigned long sp)
12560-{
12561- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
12562- sp -= get_random_int() % 8192;
12563- return sp & ~0xf;
12564-}
12565-#endif
12566-
12567 unsigned long get_wchan(struct task_struct *p)
12568 {
12569 unsigned long stack_page, sp, ip;
12570diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
12571index ad8f795..2c7eec6 100644
12572--- a/arch/unicore32/include/asm/cache.h
12573+++ b/arch/unicore32/include/asm/cache.h
12574@@ -12,8 +12,10 @@
12575 #ifndef __UNICORE_CACHE_H__
12576 #define __UNICORE_CACHE_H__
12577
12578-#define L1_CACHE_SHIFT (5)
12579-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12580+#include <linux/const.h>
12581+
12582+#define L1_CACHE_SHIFT 5
12583+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12584
12585 /*
12586 * Memory returned by kmalloc() may be used for DMA, so we must make
12587diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
12588index 0dc9d01..98df103 100644
12589--- a/arch/x86/Kconfig
12590+++ b/arch/x86/Kconfig
12591@@ -130,7 +130,7 @@ config X86
12592 select RTC_LIB
12593 select HAVE_DEBUG_STACKOVERFLOW
12594 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
12595- select HAVE_CC_STACKPROTECTOR
12596+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
12597 select GENERIC_CPU_AUTOPROBE
12598 select HAVE_ARCH_AUDITSYSCALL
12599 select ARCH_SUPPORTS_ATOMIC_RMW
12600@@ -263,7 +263,7 @@ config X86_HT
12601
12602 config X86_32_LAZY_GS
12603 def_bool y
12604- depends on X86_32 && !CC_STACKPROTECTOR
12605+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
12606
12607 config ARCH_HWEIGHT_CFLAGS
12608 string
12609@@ -601,6 +601,7 @@ config SCHED_OMIT_FRAME_POINTER
12610
12611 menuconfig HYPERVISOR_GUEST
12612 bool "Linux guest support"
12613+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
12614 ---help---
12615 Say Y here to enable options for running Linux under various hyper-
12616 visors. This option enables basic hypervisor detection and platform
12617@@ -978,6 +979,7 @@ config VM86
12618
12619 config X86_16BIT
12620 bool "Enable support for 16-bit segments" if EXPERT
12621+ depends on !GRKERNSEC
12622 default y
12623 ---help---
12624 This option is required by programs like Wine to run 16-bit
12625@@ -1151,6 +1153,7 @@ choice
12626
12627 config NOHIGHMEM
12628 bool "off"
12629+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12630 ---help---
12631 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
12632 However, the address space of 32-bit x86 processors is only 4
12633@@ -1187,6 +1190,7 @@ config NOHIGHMEM
12634
12635 config HIGHMEM4G
12636 bool "4GB"
12637+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
12638 ---help---
12639 Select this if you have a 32-bit processor and between 1 and 4
12640 gigabytes of physical RAM.
12641@@ -1239,7 +1243,7 @@ config PAGE_OFFSET
12642 hex
12643 default 0xB0000000 if VMSPLIT_3G_OPT
12644 default 0x80000000 if VMSPLIT_2G
12645- default 0x78000000 if VMSPLIT_2G_OPT
12646+ default 0x70000000 if VMSPLIT_2G_OPT
12647 default 0x40000000 if VMSPLIT_1G
12648 default 0xC0000000
12649 depends on X86_32
12650@@ -1680,6 +1684,7 @@ source kernel/Kconfig.hz
12651
12652 config KEXEC
12653 bool "kexec system call"
12654+ depends on !GRKERNSEC_KMEM
12655 ---help---
12656 kexec is a system call that implements the ability to shutdown your
12657 current kernel, and to start another kernel. It is like a reboot
12658@@ -1865,7 +1870,9 @@ config X86_NEED_RELOCS
12659
12660 config PHYSICAL_ALIGN
12661 hex "Alignment value to which kernel should be aligned"
12662- default "0x200000"
12663+ default "0x1000000"
12664+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
12665+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
12666 range 0x2000 0x1000000 if X86_32
12667 range 0x200000 0x1000000 if X86_64
12668 ---help---
12669@@ -1948,6 +1955,7 @@ config COMPAT_VDSO
12670 def_bool n
12671 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
12672 depends on X86_32 || IA32_EMULATION
12673+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
12674 ---help---
12675 Certain buggy versions of glibc will crash if they are
12676 presented with a 32-bit vDSO that is not mapped at the address
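
Two of these Kconfig edits deserve a gloss. HAVE_CC_STACKPROTECTOR and X86_32_LAZY_GS both gain !PAX_MEMORY_UDEREF terms because, on i386, the stack-protector canary lives behind %gs and UDEREF claims that segment register for its own user/kernel split. And PHYSICAL_ALIGN's floor rises under KERNEXEC because the kernel image must start on a large-page boundary for its text to be mapped with dedicated huge pages; the floors are exactly one page-directory entry's coverage:

    #include <assert.h>

    int main(void)
    {
        unsigned long pae_pde   = 512UL  * 4096;  /* PAE: 512 PTEs/table */
        unsigned long nopae_pde = 1024UL * 4096;  /* non-PAE: 1024 PTEs  */

        assert(pae_pde   == 0x200000);  /* KERNEXEC floor with X86_PAE */
        assert(nopae_pde == 0x400000);  /* KERNEXEC floor without PAE  */
        assert(0x1000000 % nopae_pde == 0); /* new default fits both   */
        return 0;
    }
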
12677diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
12678index 6983314..54ad7e8 100644
12679--- a/arch/x86/Kconfig.cpu
12680+++ b/arch/x86/Kconfig.cpu
12681@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
12682
12683 config X86_F00F_BUG
12684 def_bool y
12685- depends on M586MMX || M586TSC || M586 || M486
12686+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
12687
12688 config X86_INVD_BUG
12689 def_bool y
12690@@ -327,7 +327,7 @@ config X86_INVD_BUG
12691
12692 config X86_ALIGNMENT_16
12693 def_bool y
12694- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12695+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
12696
12697 config X86_INTEL_USERCOPY
12698 def_bool y
12699@@ -369,7 +369,7 @@ config X86_CMPXCHG64
12700 # generates cmov.
12701 config X86_CMOV
12702 def_bool y
12703- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12704+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
12705
12706 config X86_MINIMUM_CPU_FAMILY
12707 int
12708diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
12709index 61bd2ad..50b625d 100644
12710--- a/arch/x86/Kconfig.debug
12711+++ b/arch/x86/Kconfig.debug
12712@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
12713 config DEBUG_RODATA
12714 bool "Write protect kernel read-only data structures"
12715 default y
12716- depends on DEBUG_KERNEL
12717+ depends on DEBUG_KERNEL && BROKEN
12718 ---help---
12719 Mark the kernel read-only data as write-protected in the pagetables,
12720 in order to catch accidental (and incorrect) writes to such const
12721@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
12722
12723 config DEBUG_SET_MODULE_RONX
12724 bool "Set loadable kernel module data as NX and text as RO"
12725- depends on MODULES
12726+ depends on MODULES && BROKEN
12727 ---help---
12728 This option helps catch unintended modifications to loadable
12729 kernel module's text and read-only data. It also prevents execution
12730diff --git a/arch/x86/Makefile b/arch/x86/Makefile
12731index 920e616..ac3d4df 100644
12732--- a/arch/x86/Makefile
12733+++ b/arch/x86/Makefile
12734@@ -65,9 +65,6 @@ ifeq ($(CONFIG_X86_32),y)
12735 # CPU-specific tuning. Anything which can be shared with UML should go here.
12736 include $(srctree)/arch/x86/Makefile_32.cpu
12737 KBUILD_CFLAGS += $(cflags-y)
12738-
12739- # temporary until string.h is fixed
12740- KBUILD_CFLAGS += -ffreestanding
12741 else
12742 BITS := 64
12743 UTS_MACHINE := x86_64
12744@@ -107,6 +104,9 @@ else
12745 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
12746 endif
12747
12748+# temporary until string.h is fixed
12749+KBUILD_CFLAGS += -ffreestanding
12750+
12751 # Make sure compiler does not have buggy stack-protector support.
12752 ifdef CONFIG_CC_STACKPROTECTOR
12753 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
12754@@ -180,6 +180,7 @@ archheaders:
12755 $(Q)$(MAKE) $(build)=arch/x86/syscalls all
12756
12757 archprepare:
12758+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
12759 ifeq ($(CONFIG_KEXEC_FILE),y)
12760 $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
12761 endif
12762@@ -263,3 +264,9 @@ define archhelp
12763 echo ' FDARGS="..." arguments for the booted kernel'
12764 echo ' FDINITRD=file initrd for the booted kernel'
12765 endef
12766+
12767+define OLD_LD
12768+
12769+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
12770+*** Please upgrade your binutils to 2.18 or newer
12771+endef
12772diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
12773index 3db07f3..9d81d0f 100644
12774--- a/arch/x86/boot/Makefile
12775+++ b/arch/x86/boot/Makefile
12776@@ -56,6 +56,9 @@ clean-files += cpustr.h
12777 # ---------------------------------------------------------------------------
12778
12779 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
12780+ifdef CONSTIFY_PLUGIN
12781+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12782+endif
12783 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12784 GCOV_PROFILE := n
12785
12786diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
12787index 878e4b9..20537ab 100644
12788--- a/arch/x86/boot/bitops.h
12789+++ b/arch/x86/boot/bitops.h
12790@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12791 u8 v;
12792 const u32 *p = (const u32 *)addr;
12793
12794- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12795+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
12796 return v;
12797 }
12798
12799@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
12800
12801 static inline void set_bit(int nr, void *addr)
12802 {
12803- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12804+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
12805 }
12806
12807 #endif /* BOOT_BITOPS_H */
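
The asm statements touched here (and in cpucheck.c further down) gain "volatile" because their effects are invisible to the compiler: GCC may CSE, hoist, or delete a non-volatile asm whose outputs look unused or redundant, which is fatal for MSR writes and bit operations on untracked memory. A standalone illustration of the failure mode on x86:

    #include <stdio.h>

    static inline unsigned long long rdtsc(void)
    {
        unsigned int lo, hi;

        /* without "volatile" the compiler may fold the two calls in
         * main() into one executed rdtsc, making delta always 0 */
        asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
        return ((unsigned long long)hi << 32) | lo;
    }

    int main(void)
    {
        unsigned long long a = rdtsc();
        unsigned long long b = rdtsc();

        printf("delta = %llu cycles\n", b - a);
        return 0;
    }
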
12808diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
12809index bd49ec6..94c7f58 100644
12810--- a/arch/x86/boot/boot.h
12811+++ b/arch/x86/boot/boot.h
12812@@ -84,7 +84,7 @@ static inline void io_delay(void)
12813 static inline u16 ds(void)
12814 {
12815 u16 seg;
12816- asm("movw %%ds,%0" : "=rm" (seg));
12817+ asm volatile("movw %%ds,%0" : "=rm" (seg));
12818 return seg;
12819 }
12820
12821diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
12822index 8bd44e8..6b111e9 100644
12823--- a/arch/x86/boot/compressed/Makefile
12824+++ b/arch/x86/boot/compressed/Makefile
12825@@ -28,6 +28,9 @@ KBUILD_CFLAGS += $(cflags-y)
12826 KBUILD_CFLAGS += -mno-mmx -mno-sse
12827 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
12828 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
12829+ifdef CONSTIFY_PLUGIN
12830+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
12831+endif
12832
12833 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
12834 GCOV_PROFILE := n
12835diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
12836index a53440e..c3dbf1e 100644
12837--- a/arch/x86/boot/compressed/efi_stub_32.S
12838+++ b/arch/x86/boot/compressed/efi_stub_32.S
12839@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
12840 * parameter 2, ..., param n. To make things easy, we save the return
12841 * address of efi_call_phys in a global variable.
12842 */
12843- popl %ecx
12844- movl %ecx, saved_return_addr(%edx)
12845- /* get the function pointer into ECX*/
12846- popl %ecx
12847- movl %ecx, efi_rt_function_ptr(%edx)
12848+ popl saved_return_addr(%edx)
12849+ popl efi_rt_function_ptr(%edx)
12850
12851 /*
12852 * 3. Call the physical function.
12853 */
12854- call *%ecx
12855+ call *efi_rt_function_ptr(%edx)
12856
12857 /*
12858 * 4. Balance the stack. And because EAX contain the return value,
12859@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
12860 1: popl %edx
12861 subl $1b, %edx
12862
12863- movl efi_rt_function_ptr(%edx), %ecx
12864- pushl %ecx
12865+ pushl efi_rt_function_ptr(%edx)
12866
12867 /*
12868 * 10. Push the saved return address onto the stack and return.
12869 */
12870- movl saved_return_addr(%edx), %ecx
12871- pushl %ecx
12872- ret
12873+ jmpl *saved_return_addr(%edx)
12874 ENDPROC(efi_call_phys)
12875 .previous
12876
12877diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
12878index 630384a..278e788 100644
12879--- a/arch/x86/boot/compressed/efi_thunk_64.S
12880+++ b/arch/x86/boot/compressed/efi_thunk_64.S
12881@@ -189,8 +189,8 @@ efi_gdt64:
12882 .long 0 /* Filled out by user */
12883 .word 0
12884 .quad 0x0000000000000000 /* NULL descriptor */
12885- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12886- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12887+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12888+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12889 .quad 0x0080890000000000 /* TS descriptor */
12890 .quad 0x0000000000000000 /* TS continued */
12891 efi_gdt64_end:
12892diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
12893index 1d7fbbc..36ecd58 100644
12894--- a/arch/x86/boot/compressed/head_32.S
12895+++ b/arch/x86/boot/compressed/head_32.S
12896@@ -140,10 +140,10 @@ preferred_addr:
12897 addl %eax, %ebx
12898 notl %eax
12899 andl %eax, %ebx
12900- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12901+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12902 jge 1f
12903 #endif
12904- movl $LOAD_PHYSICAL_ADDR, %ebx
12905+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12906 1:
12907
12908 /* Target address to relocate to for decompression */
12909diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
12910index 6b1766c..ad465c9 100644
12911--- a/arch/x86/boot/compressed/head_64.S
12912+++ b/arch/x86/boot/compressed/head_64.S
12913@@ -94,10 +94,10 @@ ENTRY(startup_32)
12914 addl %eax, %ebx
12915 notl %eax
12916 andl %eax, %ebx
12917- cmpl $LOAD_PHYSICAL_ADDR, %ebx
12918+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
12919 jge 1f
12920 #endif
12921- movl $LOAD_PHYSICAL_ADDR, %ebx
12922+ movl $____LOAD_PHYSICAL_ADDR, %ebx
12923 1:
12924
12925 /* Target address to relocate to for decompression */
12926@@ -322,10 +322,10 @@ preferred_addr:
12927 addq %rax, %rbp
12928 notq %rax
12929 andq %rax, %rbp
12930- cmpq $LOAD_PHYSICAL_ADDR, %rbp
12931+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
12932 jge 1f
12933 #endif
12934- movq $LOAD_PHYSICAL_ADDR, %rbp
12935+ movq $____LOAD_PHYSICAL_ADDR, %rbp
12936 1:
12937
12938 /* Target address to relocate to for decompression */
12939@@ -434,8 +434,8 @@ gdt:
12940 .long gdt
12941 .word 0
12942 .quad 0x0000000000000000 /* NULL descriptor */
12943- .quad 0x00af9a000000ffff /* __KERNEL_CS */
12944- .quad 0x00cf92000000ffff /* __KERNEL_DS */
12945+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
12946+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
12947 .quad 0x0080890000000000 /* TS descriptor */
12948 .quad 0x0000000000000000 /* TS continued */
12949 gdt_end:
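
The GDT edits in both boot stubs (0x00af9a -> 0x00af9b, 0x00cf92 -> 0x00cf93) set bit 40 of each descriptor, the "accessed" flag. The CPU otherwise sets that bit itself with a locked write the first time the selector is loaded; pre-setting it lets the GDT later sit on a read-only page, as KERNEXEC wants, without that hardware write faulting. Decoding the constants:

    #include <stdint.h>
    #include <stdio.h>

    /* access byte = descriptor bits 40..47; bit 40 is "accessed" */
    static unsigned access_byte(uint64_t desc)
    {
        return (desc >> 40) & 0xFF;
    }

    int main(void)
    {
        printf("%02x\n", access_byte(0x00af9a000000ffffULL)); /* 9a: A=0 */
        printf("%02x\n", access_byte(0x00af9b000000ffffULL)); /* 9b: A=1 */
        return 0;
    }
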
12950diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
12951index a950864..c710239 100644
12952--- a/arch/x86/boot/compressed/misc.c
12953+++ b/arch/x86/boot/compressed/misc.c
12954@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
12955 * Calculate the delta between where vmlinux was linked to load
12956 * and where it was actually loaded.
12957 */
12958- delta = min_addr - LOAD_PHYSICAL_ADDR;
12959+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
12960 if (!delta) {
12961 debug_putstr("No relocation needed... ");
12962 return;
12963@@ -324,7 +324,7 @@ static void parse_elf(void *output)
12964 Elf32_Ehdr ehdr;
12965 Elf32_Phdr *phdrs, *phdr;
12966 #endif
12967- void *dest;
12968+ void *dest, *prev;
12969 int i;
12970
12971 memcpy(&ehdr, output, sizeof(ehdr));
12972@@ -351,13 +351,16 @@ static void parse_elf(void *output)
12973 case PT_LOAD:
12974 #ifdef CONFIG_RELOCATABLE
12975 dest = output;
12976- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
12977+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
12978 #else
12979 dest = (void *)(phdr->p_paddr);
12980 #endif
12981 memcpy(dest,
12982 output + phdr->p_offset,
12983 phdr->p_filesz);
12984+ if (i)
12985+ memset(prev, 0xff, dest - prev);
12986+ prev = dest + phdr->p_filesz;
12987 break;
12988 default: /* Ignore other PT_* */ break;
12989 }
12990@@ -416,7 +419,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
12991 error("Destination address too large");
12992 #endif
12993 #ifndef CONFIG_RELOCATABLE
12994- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
12995+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
12996 error("Wrong destination address");
12997 #endif
12998
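
Besides switching to the raw ____LOAD_PHYSICAL_ADDR spelling the patch introduces elsewhere, parse_elf() now remembers where the previous PT_LOAD copy ended ("prev") and memsets the slack before the next segment to 0xff, so stale decompression-buffer bytes never survive between segments. The shape of that loop with the ELF plumbing stubbed out (segments assumed ascending, as they are in the kernel image):

    #include <string.h>

    struct seg { unsigned long paddr, off, filesz; };

    static void copy_segments(unsigned char *output, unsigned long load_addr,
                              const unsigned char *image,
                              const struct seg *s, int nseg)
    {
        unsigned char *dest, *prev = NULL;
        int i;

        for (i = 0; i < nseg; i++) {
            dest = output + (s[i].paddr - load_addr);
            memcpy(dest, image + s[i].off, s[i].filesz);
            if (prev)
                memset(prev, 0xff, dest - prev);  /* poison the gap */
            prev = dest + s[i].filesz;
        }
    }
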
12999diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
13000index 1fd7d57..0f7d096 100644
13001--- a/arch/x86/boot/cpucheck.c
13002+++ b/arch/x86/boot/cpucheck.c
13003@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13004 u32 ecx = MSR_K7_HWCR;
13005 u32 eax, edx;
13006
13007- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13008+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13009 eax &= ~(1 << 15);
13010- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13011+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13012
13013 get_cpuflags(); /* Make sure it really did something */
13014 err = check_cpuflags();
13015@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13016 u32 ecx = MSR_VIA_FCR;
13017 u32 eax, edx;
13018
13019- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13020+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13021 eax |= (1<<1)|(1<<7);
13022- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13023+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13024
13025 set_bit(X86_FEATURE_CX8, cpu.flags);
13026 err = check_cpuflags();
13027@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13028 u32 eax, edx;
13029 u32 level = 1;
13030
13031- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13032- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13033- asm("cpuid"
13034+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13035+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13036+ asm volatile("cpuid"
13037 : "+a" (level), "=d" (cpu.flags[0])
13038 : : "ecx", "ebx");
13039- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13040+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13041
13042 err = check_cpuflags();
13043 } else if (err == 0x01 &&
13044diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
13045index 16ef025..91e033b 100644
13046--- a/arch/x86/boot/header.S
13047+++ b/arch/x86/boot/header.S
13048@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
13049 # single linked list of
13050 # struct setup_data
13051
13052-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
13053+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
13054
13055 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
13056+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13057+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
13058+#else
13059 #define VO_INIT_SIZE (VO__end - VO__text)
13060+#endif
13061 #if ZO_INIT_SIZE > VO_INIT_SIZE
13062 #define INIT_SIZE ZO_INIT_SIZE
13063 #else
13064diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
13065index db75d07..8e6d0af 100644
13066--- a/arch/x86/boot/memory.c
13067+++ b/arch/x86/boot/memory.c
13068@@ -19,7 +19,7 @@
13069
13070 static int detect_memory_e820(void)
13071 {
13072- int count = 0;
13073+ unsigned int count = 0;
13074 struct biosregs ireg, oreg;
13075 struct e820entry *desc = boot_params.e820_map;
13076 static struct e820entry buf; /* static so it is zeroed */
13077diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
13078index ba3e100..6501b8f 100644
13079--- a/arch/x86/boot/video-vesa.c
13080+++ b/arch/x86/boot/video-vesa.c
13081@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
13082
13083 boot_params.screen_info.vesapm_seg = oreg.es;
13084 boot_params.screen_info.vesapm_off = oreg.di;
13085+ boot_params.screen_info.vesapm_size = oreg.cx;
13086 }
13087
13088 /*
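
The added store captures %cx from the VBE function 0x4f0a call ("Return VBE Protected Mode Interface"); per the VBE 3.0 spec that register returns the length of the protected-mode interface table, so the existing vesapm_seg/vesapm_off pair gains a bound that later consumers can check. A stand-in for the fields involved (the real screen_info layout, including the vesapm_size member this line fills, is defined elsewhere in the patch):

    #include <stdint.h>

    struct vesapm_info_sketch {
            uint16_t seg;   /* from oreg.es: segment of the PM table */
            uint16_t off;   /* from oreg.di: offset within that segment */
            uint16_t size;  /* from oreg.cx: table length, the new field */
    };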
13089diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13090index 43eda28..5ab5fdb 100644
13091--- a/arch/x86/boot/video.c
13092+++ b/arch/x86/boot/video.c
13093@@ -96,7 +96,7 @@ static void store_mode_params(void)
13094 static unsigned int get_entry(void)
13095 {
13096 char entry_buf[4];
13097- int i, len = 0;
13098+ unsigned int i, len = 0;
13099 int key;
13100 unsigned int v;
13101
13102diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13103index 9105655..41779c1 100644
13104--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13105+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13106@@ -8,6 +8,8 @@
13107 * including this sentence is retained in full.
13108 */
13109
13110+#include <asm/alternative-asm.h>
13111+
13112 .extern crypto_ft_tab
13113 .extern crypto_it_tab
13114 .extern crypto_fl_tab
13115@@ -70,6 +72,8 @@
13116 je B192; \
13117 leaq 32(r9),r9;
13118
13119+#define ret pax_force_retaddr; ret
13120+
13121 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13122 movq r1,r2; \
13123 movq r3,r4; \
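
The "#define ret pax_force_retaddr; ret" line is a preprocessor trick rather than a per-site edit: .S files are run through cpp, so every bare ret written after the define expands to the instrumented sequence, covering all return sites in the file at once. cpp never re-expands a macro's own name inside its replacement list, so the definition is not recursive. Sketched as preprocessor input:

    #define pax_force_retaddr   /* placeholder so this sketch preprocesses */
    #define ret pax_force_retaddr; ret

    /* source line:   ret
     * expands to:    pax_force_retaddr; ret
     */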
13124diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13125index 477e9d7..c92c7d8 100644
13126--- a/arch/x86/crypto/aesni-intel_asm.S
13127+++ b/arch/x86/crypto/aesni-intel_asm.S
13128@@ -31,6 +31,7 @@
13129
13130 #include <linux/linkage.h>
13131 #include <asm/inst.h>
13132+#include <asm/alternative-asm.h>
13133
13134 #ifdef __x86_64__
13135 .data
13136@@ -205,7 +206,7 @@ enc: .octa 0x2
13137 * num_initial_blocks = b mod 4
13138 * encrypt the initial num_initial_blocks blocks and apply ghash on
13139 * the ciphertext
13140-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13141+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13142 * are clobbered
13143 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13144 */
13145@@ -214,8 +215,8 @@ enc: .octa 0x2
13146 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13147 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13148 mov arg7, %r10 # %r10 = AAD
13149- mov arg8, %r12 # %r12 = aadLen
13150- mov %r12, %r11
13151+ mov arg8, %r15 # %r15 = aadLen
13152+ mov %r15, %r11
13153 pxor %xmm\i, %xmm\i
13154 _get_AAD_loop\num_initial_blocks\operation:
13155 movd (%r10), \TMP1
13156@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13157 psrldq $4, %xmm\i
13158 pxor \TMP1, %xmm\i
13159 add $4, %r10
13160- sub $4, %r12
13161+ sub $4, %r15
13162 jne _get_AAD_loop\num_initial_blocks\operation
13163 cmp $16, %r11
13164 je _get_AAD_loop2_done\num_initial_blocks\operation
13165- mov $16, %r12
13166+ mov $16, %r15
13167 _get_AAD_loop2\num_initial_blocks\operation:
13168 psrldq $4, %xmm\i
13169- sub $4, %r12
13170- cmp %r11, %r12
13171+ sub $4, %r15
13172+ cmp %r11, %r15
13173 jne _get_AAD_loop2\num_initial_blocks\operation
13174 _get_AAD_loop2_done\num_initial_blocks\operation:
13175 movdqa SHUF_MASK(%rip), %xmm14
13176@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13177 * num_initial_blocks = b mod 4
13178 * encrypt the initial num_initial_blocks blocks and apply ghash on
13179 * the ciphertext
13180-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13181+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13182 * are clobbered
13183 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13184 */
13185@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13186 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13187 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13188 mov arg7, %r10 # %r10 = AAD
13189- mov arg8, %r12 # %r12 = aadLen
13190- mov %r12, %r11
13191+ mov arg8, %r15 # %r15 = aadLen
13192+ mov %r15, %r11
13193 pxor %xmm\i, %xmm\i
13194 _get_AAD_loop\num_initial_blocks\operation:
13195 movd (%r10), \TMP1
13196@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13197 psrldq $4, %xmm\i
13198 pxor \TMP1, %xmm\i
13199 add $4, %r10
13200- sub $4, %r12
13201+ sub $4, %r15
13202 jne _get_AAD_loop\num_initial_blocks\operation
13203 cmp $16, %r11
13204 je _get_AAD_loop2_done\num_initial_blocks\operation
13205- mov $16, %r12
13206+ mov $16, %r15
13207 _get_AAD_loop2\num_initial_blocks\operation:
13208 psrldq $4, %xmm\i
13209- sub $4, %r12
13210- cmp %r11, %r12
13211+ sub $4, %r15
13212+ cmp %r11, %r15
13213 jne _get_AAD_loop2\num_initial_blocks\operation
13214 _get_AAD_loop2_done\num_initial_blocks\operation:
13215 movdqa SHUF_MASK(%rip), %xmm14
13216@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
13217 *
13218 *****************************************************************************/
13219 ENTRY(aesni_gcm_dec)
13220- push %r12
13221+ push %r15
13222 push %r13
13223 push %r14
13224 mov %rsp, %r14
13225@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
13226 */
13227 sub $VARIABLE_OFFSET, %rsp
13228 and $~63, %rsp # align rsp to 64 bytes
13229- mov %arg6, %r12
13230- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13231+ mov %arg6, %r15
13232+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13233 movdqa SHUF_MASK(%rip), %xmm2
13234 PSHUFB_XMM %xmm2, %xmm13
13235
13236@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
13237 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13238 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13239 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13240- mov %r13, %r12
13241- and $(3<<4), %r12
13242+ mov %r13, %r15
13243+ and $(3<<4), %r15
13244 jz _initial_num_blocks_is_0_decrypt
13245- cmp $(2<<4), %r12
13246+ cmp $(2<<4), %r15
13247 jb _initial_num_blocks_is_1_decrypt
13248 je _initial_num_blocks_is_2_decrypt
13249 _initial_num_blocks_is_3_decrypt:
13250@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
13251 sub $16, %r11
13252 add %r13, %r11
13253 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13254- lea SHIFT_MASK+16(%rip), %r12
13255- sub %r13, %r12
13256+ lea SHIFT_MASK+16(%rip), %r15
13257+ sub %r13, %r15
13258 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13259 # (%r13 is the number of bytes in plaintext mod 16)
13260- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13261+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13262 PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 bytes
13263
13264 movdqa %xmm1, %xmm2
13265 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13266- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13267+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13268 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13269 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13270 pand %xmm1, %xmm2
13271@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
13272 sub $1, %r13
13273 jne _less_than_8_bytes_left_decrypt
13274 _multiple_of_16_bytes_decrypt:
13275- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13276- shl $3, %r12 # convert into number of bits
13277- movd %r12d, %xmm15 # len(A) in %xmm15
13278+ mov arg8, %r15 # %r15 = aadLen (number of bytes)
13279+ shl $3, %r15 # convert into number of bits
13280+ movd %r15d, %xmm15 # len(A) in %xmm15
13281 shl $3, %arg4 # len(C) in bits (*128)
13282 MOVQ_R64_XMM %arg4, %xmm1
13283 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13284@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
13285 mov %r14, %rsp
13286 pop %r14
13287 pop %r13
13288- pop %r12
13289+ pop %r15
13290+ pax_force_retaddr
13291 ret
13292 ENDPROC(aesni_gcm_dec)
13293
13294@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
13295 * poly = x^128 + x^127 + x^126 + x^121 + 1
13296 ***************************************************************************/
13297 ENTRY(aesni_gcm_enc)
13298- push %r12
13299+ push %r15
13300 push %r13
13301 push %r14
13302 mov %rsp, %r14
13303@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
13304 #
13305 sub $VARIABLE_OFFSET, %rsp
13306 and $~63, %rsp
13307- mov %arg6, %r12
13308- movdqu (%r12), %xmm13
13309+ mov %arg6, %r15
13310+ movdqu (%r15), %xmm13
13311 movdqa SHUF_MASK(%rip), %xmm2
13312 PSHUFB_XMM %xmm2, %xmm13
13313
13314@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
13315 movdqa %xmm13, HashKey(%rsp)
13316 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13317 and $-16, %r13
13318- mov %r13, %r12
13319+ mov %r13, %r15
13320
13321 # Encrypt first few blocks
13322
13323- and $(3<<4), %r12
13324+ and $(3<<4), %r15
13325 jz _initial_num_blocks_is_0_encrypt
13326- cmp $(2<<4), %r12
13327+ cmp $(2<<4), %r15
13328 jb _initial_num_blocks_is_1_encrypt
13329 je _initial_num_blocks_is_2_encrypt
13330 _initial_num_blocks_is_3_encrypt:
13331@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
13332 sub $16, %r11
13333 add %r13, %r11
13334 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13335- lea SHIFT_MASK+16(%rip), %r12
13336- sub %r13, %r12
13337+ lea SHIFT_MASK+16(%rip), %r15
13338+ sub %r13, %r15
13339 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13340 # (%r13 is the number of bytes in plaintext mod 16)
13341- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13342+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13343 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 bytes
13344 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13345- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13346+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13347 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13348 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13349 movdqa SHUF_MASK(%rip), %xmm10
13350@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
13351 sub $1, %r13
13352 jne _less_than_8_bytes_left_encrypt
13353 _multiple_of_16_bytes_encrypt:
13354- mov arg8, %r12 # %r12 = addLen (number of bytes)
13355- shl $3, %r12
13356- movd %r12d, %xmm15 # len(A) in %xmm15
13357+ mov arg8, %r15 # %r15 = aadLen (number of bytes)
13358+ shl $3, %r15
13359+ movd %r15d, %xmm15 # len(A) in %xmm15
13360 shl $3, %arg4 # len(C) in bits (*128)
13361 MOVQ_R64_XMM %arg4, %xmm1
13362 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13363@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
13364 mov %r14, %rsp
13365 pop %r14
13366 pop %r13
13367- pop %r12
13368+ pop %r15
13369+ pax_force_retaddr
13370 ret
13371 ENDPROC(aesni_gcm_enc)
13372
13373@@ -1722,6 +1725,7 @@ _key_expansion_256a:
13374 pxor %xmm1, %xmm0
13375 movaps %xmm0, (TKEYP)
13376 add $0x10, TKEYP
13377+ pax_force_retaddr
13378 ret
13379 ENDPROC(_key_expansion_128)
13380 ENDPROC(_key_expansion_256a)
13381@@ -1748,6 +1752,7 @@ _key_expansion_192a:
13382 shufps $0b01001110, %xmm2, %xmm1
13383 movaps %xmm1, 0x10(TKEYP)
13384 add $0x20, TKEYP
13385+ pax_force_retaddr
13386 ret
13387 ENDPROC(_key_expansion_192a)
13388
13389@@ -1768,6 +1773,7 @@ _key_expansion_192b:
13390
13391 movaps %xmm0, (TKEYP)
13392 add $0x10, TKEYP
13393+ pax_force_retaddr
13394 ret
13395 ENDPROC(_key_expansion_192b)
13396
13397@@ -1781,6 +1787,7 @@ _key_expansion_256b:
13398 pxor %xmm1, %xmm2
13399 movaps %xmm2, (TKEYP)
13400 add $0x10, TKEYP
13401+ pax_force_retaddr
13402 ret
13403 ENDPROC(_key_expansion_256b)
13404
13405@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
13406 #ifndef __x86_64__
13407 popl KEYP
13408 #endif
13409+ pax_force_retaddr
13410 ret
13411 ENDPROC(aesni_set_key)
13412
13413@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
13414 popl KLEN
13415 popl KEYP
13416 #endif
13417+ pax_force_retaddr
13418 ret
13419 ENDPROC(aesni_enc)
13420
13421@@ -1974,6 +1983,7 @@ _aesni_enc1:
13422 AESENC KEY STATE
13423 movaps 0x70(TKEYP), KEY
13424 AESENCLAST KEY STATE
13425+ pax_force_retaddr
13426 ret
13427 ENDPROC(_aesni_enc1)
13428
13429@@ -2083,6 +2093,7 @@ _aesni_enc4:
13430 AESENCLAST KEY STATE2
13431 AESENCLAST KEY STATE3
13432 AESENCLAST KEY STATE4
13433+ pax_force_retaddr
13434 ret
13435 ENDPROC(_aesni_enc4)
13436
13437@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
13438 popl KLEN
13439 popl KEYP
13440 #endif
13441+ pax_force_retaddr
13442 ret
13443 ENDPROC(aesni_dec)
13444
13445@@ -2164,6 +2176,7 @@ _aesni_dec1:
13446 AESDEC KEY STATE
13447 movaps 0x70(TKEYP), KEY
13448 AESDECLAST KEY STATE
13449+ pax_force_retaddr
13450 ret
13451 ENDPROC(_aesni_dec1)
13452
13453@@ -2273,6 +2286,7 @@ _aesni_dec4:
13454 AESDECLAST KEY STATE2
13455 AESDECLAST KEY STATE3
13456 AESDECLAST KEY STATE4
13457+ pax_force_retaddr
13458 ret
13459 ENDPROC(_aesni_dec4)
13460
13461@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
13462 popl KEYP
13463 popl LEN
13464 #endif
13465+ pax_force_retaddr
13466 ret
13467 ENDPROC(aesni_ecb_enc)
13468
13469@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
13470 popl KEYP
13471 popl LEN
13472 #endif
13473+ pax_force_retaddr
13474 ret
13475 ENDPROC(aesni_ecb_dec)
13476
13477@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
13478 popl LEN
13479 popl IVP
13480 #endif
13481+ pax_force_retaddr
13482 ret
13483 ENDPROC(aesni_cbc_enc)
13484
13485@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
13486 popl LEN
13487 popl IVP
13488 #endif
13489+ pax_force_retaddr
13490 ret
13491 ENDPROC(aesni_cbc_dec)
13492
13493@@ -2550,6 +2568,7 @@ _aesni_inc_init:
13494 mov $1, TCTR_LOW
13495 MOVQ_R64_XMM TCTR_LOW INC
13496 MOVQ_R64_XMM CTR TCTR_LOW
13497+ pax_force_retaddr
13498 ret
13499 ENDPROC(_aesni_inc_init)
13500
13501@@ -2579,6 +2598,7 @@ _aesni_inc:
13502 .Linc_low:
13503 movaps CTR, IV
13504 PSHUFB_XMM BSWAP_MASK IV
13505+ pax_force_retaddr
13506 ret
13507 ENDPROC(_aesni_inc)
13508
13509@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
13510 .Lctr_enc_ret:
13511 movups IV, (IVP)
13512 .Lctr_enc_just_ret:
13513+ pax_force_retaddr
13514 ret
13515 ENDPROC(aesni_ctr_enc)
13516
13517@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
13518 pxor INC, STATE4
13519 movdqu STATE4, 0x70(OUTP)
13520
13521+ pax_force_retaddr
13522 ret
13523 ENDPROC(aesni_xts_crypt8)
13524
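
Two transforms recur in this file and in the crypto assembly files that follow. First, pax_force_retaddr is emitted before every ret: under the KERNEXEC plugin this re-asserts the top bit of the saved return address, so a return address overwritten with a userland pointer becomes non-canonical and faults instead of returning. Second, %r12 is renamed to %r14 or %r15, presumably because the KERNEXEC "or" method reserves %r12 kernel-wide as the pointer-mask register (the orq %r12,(%rsp) form of the macro). The invariant, sketched in C under those assumptions (an approximation, not code quoted from the patch):

    #include <stdint.h>

    /* Kernel return addresses already have bit 63 set, so forcing it is a
     * no-op for legitimate returns, while a hijacked return into userland
     * becomes a non-canonical address and faults on ret. */
    static inline uint64_t force_kernel_retaddr(uint64_t ra)
    {
            return ra | (1ULL << 63);   /* the "btsq $63,(%rsp)" variant */
    }

    /* 0xffffffff81000000 (kernel text) is unchanged;
     * 0x0000000000401000 (userland) becomes 0x8000000000401000,
     * which is non-canonical on x86_64. */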
13525diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13526index 246c670..466e2d6 100644
13527--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
13528+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
13529@@ -21,6 +21,7 @@
13530 */
13531
13532 #include <linux/linkage.h>
13533+#include <asm/alternative-asm.h>
13534
13535 .file "blowfish-x86_64-asm.S"
13536 .text
13537@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
13538 jnz .L__enc_xor;
13539
13540 write_block();
13541+ pax_force_retaddr
13542 ret;
13543 .L__enc_xor:
13544 xor_block();
13545+ pax_force_retaddr
13546 ret;
13547 ENDPROC(__blowfish_enc_blk)
13548
13549@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
13550
13551 movq %r11, %rbp;
13552
13553+ pax_force_retaddr
13554 ret;
13555 ENDPROC(blowfish_dec_blk)
13556
13557@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
13558
13559 popq %rbx;
13560 popq %rbp;
13561+ pax_force_retaddr
13562 ret;
13563
13564 .L__enc_xor4:
13565@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
13566
13567 popq %rbx;
13568 popq %rbp;
13569+ pax_force_retaddr
13570 ret;
13571 ENDPROC(__blowfish_enc_blk_4way)
13572
13573@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
13574 popq %rbx;
13575 popq %rbp;
13576
13577+ pax_force_retaddr
13578 ret;
13579 ENDPROC(blowfish_dec_blk_4way)
13580diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13581index ce71f92..1dce7ec 100644
13582--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13583+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
13584@@ -16,6 +16,7 @@
13585 */
13586
13587 #include <linux/linkage.h>
13588+#include <asm/alternative-asm.h>
13589
13590 #define CAMELLIA_TABLE_BYTE_LEN 272
13591
13592@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13593 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
13594 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
13595 %rcx, (%r9));
13596+ pax_force_retaddr
13597 ret;
13598 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13599
13600@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13601 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
13602 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
13603 %rax, (%r9));
13604+ pax_force_retaddr
13605 ret;
13606 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13607
13608@@ -780,6 +783,7 @@ __camellia_enc_blk16:
13609 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13610 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
13611
13612+ pax_force_retaddr
13613 ret;
13614
13615 .align 8
13616@@ -865,6 +869,7 @@ __camellia_dec_blk16:
13617 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
13618 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
13619
13620+ pax_force_retaddr
13621 ret;
13622
13623 .align 8
13624@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
13625 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13626 %xmm8, %rsi);
13627
13628+ pax_force_retaddr
13629 ret;
13630 ENDPROC(camellia_ecb_enc_16way)
13631
13632@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
13633 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13634 %xmm8, %rsi);
13635
13636+ pax_force_retaddr
13637 ret;
13638 ENDPROC(camellia_ecb_dec_16way)
13639
13640@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
13641 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13642 %xmm8, %rsi);
13643
13644+ pax_force_retaddr
13645 ret;
13646 ENDPROC(camellia_cbc_dec_16way)
13647
13648@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
13649 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13650 %xmm8, %rsi);
13651
13652+ pax_force_retaddr
13653 ret;
13654 ENDPROC(camellia_ctr_16way)
13655
13656@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
13657 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
13658 %xmm8, %rsi);
13659
13660+ pax_force_retaddr
13661 ret;
13662 ENDPROC(camellia_xts_crypt_16way)
13663
13664diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13665index 0e0b886..5a3123c 100644
13666--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13667+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
13668@@ -11,6 +11,7 @@
13669 */
13670
13671 #include <linux/linkage.h>
13672+#include <asm/alternative-asm.h>
13673
13674 #define CAMELLIA_TABLE_BYTE_LEN 272
13675
13676@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
13677 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
13678 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
13679 %rcx, (%r9));
13680+ pax_force_retaddr
13681 ret;
13682 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
13683
13684@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
13685 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
13686 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
13687 %rax, (%r9));
13688+ pax_force_retaddr
13689 ret;
13690 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
13691
13692@@ -820,6 +823,7 @@ __camellia_enc_blk32:
13693 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13694 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
13695
13696+ pax_force_retaddr
13697 ret;
13698
13699 .align 8
13700@@ -905,6 +909,7 @@ __camellia_dec_blk32:
13701 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
13702 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
13703
13704+ pax_force_retaddr
13705 ret;
13706
13707 .align 8
13708@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
13709
13710 vzeroupper;
13711
13712+ pax_force_retaddr
13713 ret;
13714 ENDPROC(camellia_ecb_enc_32way)
13715
13716@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
13717
13718 vzeroupper;
13719
13720+ pax_force_retaddr
13721 ret;
13722 ENDPROC(camellia_ecb_dec_32way)
13723
13724@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
13725
13726 vzeroupper;
13727
13728+ pax_force_retaddr
13729 ret;
13730 ENDPROC(camellia_cbc_dec_32way)
13731
13732@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
13733
13734 vzeroupper;
13735
13736+ pax_force_retaddr
13737 ret;
13738 ENDPROC(camellia_ctr_32way)
13739
13740@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
13741
13742 vzeroupper;
13743
13744+ pax_force_retaddr
13745 ret;
13746 ENDPROC(camellia_xts_crypt_32way)
13747
13748diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
13749index 310319c..db3d7b5 100644
13750--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
13751+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
13752@@ -21,6 +21,7 @@
13753 */
13754
13755 #include <linux/linkage.h>
13756+#include <asm/alternative-asm.h>
13757
13758 .file "camellia-x86_64-asm_64.S"
13759 .text
13760@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
13761 enc_outunpack(mov, RT1);
13762
13763 movq RRBP, %rbp;
13764+ pax_force_retaddr
13765 ret;
13766
13767 .L__enc_xor:
13768 enc_outunpack(xor, RT1);
13769
13770 movq RRBP, %rbp;
13771+ pax_force_retaddr
13772 ret;
13773 ENDPROC(__camellia_enc_blk)
13774
13775@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
13776 dec_outunpack();
13777
13778 movq RRBP, %rbp;
13779+ pax_force_retaddr
13780 ret;
13781 ENDPROC(camellia_dec_blk)
13782
13783@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
13784
13785 movq RRBP, %rbp;
13786 popq %rbx;
13787+ pax_force_retaddr
13788 ret;
13789
13790 .L__enc2_xor:
13791@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
13792
13793 movq RRBP, %rbp;
13794 popq %rbx;
13795+ pax_force_retaddr
13796 ret;
13797 ENDPROC(__camellia_enc_blk_2way)
13798
13799@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
13800
13801 movq RRBP, %rbp;
13802 movq RXOR, %rbx;
13803+ pax_force_retaddr
13804 ret;
13805 ENDPROC(camellia_dec_blk_2way)
13806diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13807index c35fd5d..2d8c7db 100644
13808--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13809+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
13810@@ -24,6 +24,7 @@
13811 */
13812
13813 #include <linux/linkage.h>
13814+#include <asm/alternative-asm.h>
13815
13816 .file "cast5-avx-x86_64-asm_64.S"
13817
13818@@ -281,6 +282,7 @@ __cast5_enc_blk16:
13819 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13820 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13821
13822+ pax_force_retaddr
13823 ret;
13824 ENDPROC(__cast5_enc_blk16)
13825
13826@@ -352,6 +354,7 @@ __cast5_dec_blk16:
13827 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
13828 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
13829
13830+ pax_force_retaddr
13831 ret;
13832
13833 .L__skip_dec:
13834@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
13835 vmovdqu RR4, (6*4*4)(%r11);
13836 vmovdqu RL4, (7*4*4)(%r11);
13837
13838+ pax_force_retaddr
13839 ret;
13840 ENDPROC(cast5_ecb_enc_16way)
13841
13842@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
13843 vmovdqu RR4, (6*4*4)(%r11);
13844 vmovdqu RL4, (7*4*4)(%r11);
13845
13846+ pax_force_retaddr
13847 ret;
13848 ENDPROC(cast5_ecb_dec_16way)
13849
13850@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
13851 * %rdx: src
13852 */
13853
13854- pushq %r12;
13855+ pushq %r14;
13856
13857 movq %rsi, %r11;
13858- movq %rdx, %r12;
13859+ movq %rdx, %r14;
13860
13861 vmovdqu (0*16)(%rdx), RL1;
13862 vmovdqu (1*16)(%rdx), RR1;
13863@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
13864 call __cast5_dec_blk16;
13865
13866 /* xor with src */
13867- vmovq (%r12), RX;
13868+ vmovq (%r14), RX;
13869 vpshufd $0x4f, RX, RX;
13870 vpxor RX, RR1, RR1;
13871- vpxor 0*16+8(%r12), RL1, RL1;
13872- vpxor 1*16+8(%r12), RR2, RR2;
13873- vpxor 2*16+8(%r12), RL2, RL2;
13874- vpxor 3*16+8(%r12), RR3, RR3;
13875- vpxor 4*16+8(%r12), RL3, RL3;
13876- vpxor 5*16+8(%r12), RR4, RR4;
13877- vpxor 6*16+8(%r12), RL4, RL4;
13878+ vpxor 0*16+8(%r14), RL1, RL1;
13879+ vpxor 1*16+8(%r14), RR2, RR2;
13880+ vpxor 2*16+8(%r14), RL2, RL2;
13881+ vpxor 3*16+8(%r14), RR3, RR3;
13882+ vpxor 4*16+8(%r14), RL3, RL3;
13883+ vpxor 5*16+8(%r14), RR4, RR4;
13884+ vpxor 6*16+8(%r14), RL4, RL4;
13885
13886 vmovdqu RR1, (0*16)(%r11);
13887 vmovdqu RL1, (1*16)(%r11);
13888@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
13889 vmovdqu RR4, (6*16)(%r11);
13890 vmovdqu RL4, (7*16)(%r11);
13891
13892- popq %r12;
13893+ popq %r14;
13894
13895+ pax_force_retaddr
13896 ret;
13897 ENDPROC(cast5_cbc_dec_16way)
13898
13899@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
13900 * %rcx: iv (big endian, 64bit)
13901 */
13902
13903- pushq %r12;
13904+ pushq %r14;
13905
13906 movq %rsi, %r11;
13907- movq %rdx, %r12;
13908+ movq %rdx, %r14;
13909
13910 vpcmpeqd RTMP, RTMP, RTMP;
13911 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
13912@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
13913 call __cast5_enc_blk16;
13914
13915 /* dst = src ^ iv */
13916- vpxor (0*16)(%r12), RR1, RR1;
13917- vpxor (1*16)(%r12), RL1, RL1;
13918- vpxor (2*16)(%r12), RR2, RR2;
13919- vpxor (3*16)(%r12), RL2, RL2;
13920- vpxor (4*16)(%r12), RR3, RR3;
13921- vpxor (5*16)(%r12), RL3, RL3;
13922- vpxor (6*16)(%r12), RR4, RR4;
13923- vpxor (7*16)(%r12), RL4, RL4;
13924+ vpxor (0*16)(%r14), RR1, RR1;
13925+ vpxor (1*16)(%r14), RL1, RL1;
13926+ vpxor (2*16)(%r14), RR2, RR2;
13927+ vpxor (3*16)(%r14), RL2, RL2;
13928+ vpxor (4*16)(%r14), RR3, RR3;
13929+ vpxor (5*16)(%r14), RL3, RL3;
13930+ vpxor (6*16)(%r14), RR4, RR4;
13931+ vpxor (7*16)(%r14), RL4, RL4;
13932 vmovdqu RR1, (0*16)(%r11);
13933 vmovdqu RL1, (1*16)(%r11);
13934 vmovdqu RR2, (2*16)(%r11);
13935@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
13936 vmovdqu RR4, (6*16)(%r11);
13937 vmovdqu RL4, (7*16)(%r11);
13938
13939- popq %r12;
13940+ popq %r14;
13941
13942+ pax_force_retaddr
13943 ret;
13944 ENDPROC(cast5_ctr_16way)
13945diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13946index e3531f8..e123f35 100644
13947--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13948+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13949@@ -24,6 +24,7 @@
13950 */
13951
13952 #include <linux/linkage.h>
13953+#include <asm/alternative-asm.h>
13954 #include "glue_helper-asm-avx.S"
13955
13956 .file "cast6-avx-x86_64-asm_64.S"
13957@@ -295,6 +296,7 @@ __cast6_enc_blk8:
13958 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13959 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13960
13961+ pax_force_retaddr
13962 ret;
13963 ENDPROC(__cast6_enc_blk8)
13964
13965@@ -340,6 +342,7 @@ __cast6_dec_blk8:
13966 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13967 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13968
13969+ pax_force_retaddr
13970 ret;
13971 ENDPROC(__cast6_dec_blk8)
13972
13973@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
13974
13975 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13976
13977+ pax_force_retaddr
13978 ret;
13979 ENDPROC(cast6_ecb_enc_8way)
13980
13981@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
13982
13983 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13984
13985+ pax_force_retaddr
13986 ret;
13987 ENDPROC(cast6_ecb_dec_8way)
13988
13989@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
13990 * %rdx: src
13991 */
13992
13993- pushq %r12;
13994+ pushq %r14;
13995
13996 movq %rsi, %r11;
13997- movq %rdx, %r12;
13998+ movq %rdx, %r14;
13999
14000 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14001
14002 call __cast6_dec_blk8;
14003
14004- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14005+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14006
14007- popq %r12;
14008+ popq %r14;
14009
14010+ pax_force_retaddr
14011 ret;
14012 ENDPROC(cast6_cbc_dec_8way)
14013
14014@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
14015 * %rcx: iv (little endian, 128bit)
14016 */
14017
14018- pushq %r12;
14019+ pushq %r14;
14020
14021 movq %rsi, %r11;
14022- movq %rdx, %r12;
14023+ movq %rdx, %r14;
14024
14025 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14026 RD2, RX, RKR, RKM);
14027
14028 call __cast6_enc_blk8;
14029
14030- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14031+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14032
14033- popq %r12;
14034+ popq %r14;
14035
14036+ pax_force_retaddr
14037 ret;
14038 ENDPROC(cast6_ctr_8way)
14039
14040@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
14041 /* dst <= regs xor IVs(in dst) */
14042 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14043
14044+ pax_force_retaddr
14045 ret;
14046 ENDPROC(cast6_xts_enc_8way)
14047
14048@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
14049 /* dst <= regs xor IVs(in dst) */
14050 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14051
14052+ pax_force_retaddr
14053 ret;
14054 ENDPROC(cast6_xts_dec_8way)
14055diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14056index 26d49eb..c0a8c84 100644
14057--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14058+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14059@@ -45,6 +45,7 @@
14060
14061 #include <asm/inst.h>
14062 #include <linux/linkage.h>
14063+#include <asm/alternative-asm.h>
14064
14065 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
14066
14067@@ -309,6 +310,7 @@ do_return:
14068 popq %rsi
14069 popq %rdi
14070 popq %rbx
14071+ pax_force_retaddr
14072 ret
14073
14074 ################################################################
14075diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14076index 5d1e007..098cb4f 100644
14077--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
14078+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14079@@ -18,6 +18,7 @@
14080
14081 #include <linux/linkage.h>
14082 #include <asm/inst.h>
14083+#include <asm/alternative-asm.h>
14084
14085 .data
14086
14087@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14088 psrlq $1, T2
14089 pxor T2, T1
14090 pxor T1, DATA
14091+ pax_force_retaddr
14092 ret
14093 ENDPROC(__clmul_gf128mul_ble)
14094
14095@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14096 call __clmul_gf128mul_ble
14097 PSHUFB_XMM BSWAP DATA
14098 movups DATA, (%rdi)
14099+ pax_force_retaddr
14100 ret
14101 ENDPROC(clmul_ghash_mul)
14102
14103@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14104 PSHUFB_XMM BSWAP DATA
14105 movups DATA, (%rdi)
14106 .Lupdate_just_ret:
14107+ pax_force_retaddr
14108 ret
14109 ENDPROC(clmul_ghash_update)
14110diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14111index 9279e0b..c4b3d2c 100644
14112--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14113+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14114@@ -1,4 +1,5 @@
14115 #include <linux/linkage.h>
14116+#include <asm/alternative-asm.h>
14117
14118 # enter salsa20_encrypt_bytes
14119 ENTRY(salsa20_encrypt_bytes)
14120@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14121 add %r11,%rsp
14122 mov %rdi,%rax
14123 mov %rsi,%rdx
14124+ pax_force_retaddr
14125 ret
14126 # bytesatleast65:
14127 ._bytesatleast65:
14128@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14129 add %r11,%rsp
14130 mov %rdi,%rax
14131 mov %rsi,%rdx
14132+ pax_force_retaddr
14133 ret
14134 ENDPROC(salsa20_keysetup)
14135
14136@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14137 add %r11,%rsp
14138 mov %rdi,%rax
14139 mov %rsi,%rdx
14140+ pax_force_retaddr
14141 ret
14142 ENDPROC(salsa20_ivsetup)
14143diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14144index 2f202f4..d9164d6 100644
14145--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14146+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14147@@ -24,6 +24,7 @@
14148 */
14149
14150 #include <linux/linkage.h>
14151+#include <asm/alternative-asm.h>
14152 #include "glue_helper-asm-avx.S"
14153
14154 .file "serpent-avx-x86_64-asm_64.S"
14155@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14156 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14157 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14158
14159+ pax_force_retaddr
14160 ret;
14161 ENDPROC(__serpent_enc_blk8_avx)
14162
14163@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14164 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14165 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14166
14167+ pax_force_retaddr
14168 ret;
14169 ENDPROC(__serpent_dec_blk8_avx)
14170
14171@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14172
14173 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14174
14175+ pax_force_retaddr
14176 ret;
14177 ENDPROC(serpent_ecb_enc_8way_avx)
14178
14179@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14180
14181 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14182
14183+ pax_force_retaddr
14184 ret;
14185 ENDPROC(serpent_ecb_dec_8way_avx)
14186
14187@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14188
14189 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14190
14191+ pax_force_retaddr
14192 ret;
14193 ENDPROC(serpent_cbc_dec_8way_avx)
14194
14195@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14196
14197 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14198
14199+ pax_force_retaddr
14200 ret;
14201 ENDPROC(serpent_ctr_8way_avx)
14202
14203@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14204 /* dst <= regs xor IVs(in dst) */
14205 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14206
14207+ pax_force_retaddr
14208 ret;
14209 ENDPROC(serpent_xts_enc_8way_avx)
14210
14211@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14212 /* dst <= regs xor IVs(in dst) */
14213 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14214
14215+ pax_force_retaddr
14216 ret;
14217 ENDPROC(serpent_xts_dec_8way_avx)
14218diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14219index b222085..abd483c 100644
14220--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14221+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14222@@ -15,6 +15,7 @@
14223 */
14224
14225 #include <linux/linkage.h>
14226+#include <asm/alternative-asm.h>
14227 #include "glue_helper-asm-avx2.S"
14228
14229 .file "serpent-avx2-asm_64.S"
14230@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14231 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14232 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14233
14234+ pax_force_retaddr
14235 ret;
14236 ENDPROC(__serpent_enc_blk16)
14237
14238@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14239 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14240 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14241
14242+ pax_force_retaddr
14243 ret;
14244 ENDPROC(__serpent_dec_blk16)
14245
14246@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14247
14248 vzeroupper;
14249
14250+ pax_force_retaddr
14251 ret;
14252 ENDPROC(serpent_ecb_enc_16way)
14253
14254@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14255
14256 vzeroupper;
14257
14258+ pax_force_retaddr
14259 ret;
14260 ENDPROC(serpent_ecb_dec_16way)
14261
14262@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14263
14264 vzeroupper;
14265
14266+ pax_force_retaddr
14267 ret;
14268 ENDPROC(serpent_cbc_dec_16way)
14269
14270@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14271
14272 vzeroupper;
14273
14274+ pax_force_retaddr
14275 ret;
14276 ENDPROC(serpent_ctr_16way)
14277
14278@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14279
14280 vzeroupper;
14281
14282+ pax_force_retaddr
14283 ret;
14284 ENDPROC(serpent_xts_enc_16way)
14285
14286@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14287
14288 vzeroupper;
14289
14290+ pax_force_retaddr
14291 ret;
14292 ENDPROC(serpent_xts_dec_16way)
14293diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14294index acc066c..1559cc4 100644
14295--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14296+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14297@@ -25,6 +25,7 @@
14298 */
14299
14300 #include <linux/linkage.h>
14301+#include <asm/alternative-asm.h>
14302
14303 .file "serpent-sse2-x86_64-asm_64.S"
14304 .text
14305@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14306 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14307 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14308
14309+ pax_force_retaddr
14310 ret;
14311
14312 .L__enc_xor8:
14313 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14314 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14315
14316+ pax_force_retaddr
14317 ret;
14318 ENDPROC(__serpent_enc_blk_8way)
14319
14320@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14321 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14322 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14323
14324+ pax_force_retaddr
14325 ret;
14326 ENDPROC(serpent_dec_blk_8way)
14327diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14328index a410950..9dfe7ad 100644
14329--- a/arch/x86/crypto/sha1_ssse3_asm.S
14330+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14331@@ -29,6 +29,7 @@
14332 */
14333
14334 #include <linux/linkage.h>
14335+#include <asm/alternative-asm.h>
14336
14337 #define CTX %rdi // arg1
14338 #define BUF %rsi // arg2
14339@@ -75,9 +76,9 @@
14340
14341 push %rbx
14342 push %rbp
14343- push %r12
14344+ push %r14
14345
14346- mov %rsp, %r12
14347+ mov %rsp, %r14
14348 sub $64, %rsp # allocate workspace
14349 and $~15, %rsp # align stack
14350
14351@@ -99,11 +100,12 @@
14352 xor %rax, %rax
14353 rep stosq
14354
14355- mov %r12, %rsp # deallocate workspace
14356+ mov %r14, %rsp # deallocate workspace
14357
14358- pop %r12
14359+ pop %r14
14360 pop %rbp
14361 pop %rbx
14362+ pax_force_retaddr
14363 ret
14364
14365 ENDPROC(\name)
14366diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14367index 642f156..51a513c 100644
14368--- a/arch/x86/crypto/sha256-avx-asm.S
14369+++ b/arch/x86/crypto/sha256-avx-asm.S
14370@@ -49,6 +49,7 @@
14371
14372 #ifdef CONFIG_AS_AVX
14373 #include <linux/linkage.h>
14374+#include <asm/alternative-asm.h>
14375
14376 ## assume buffers not aligned
14377 #define VMOVDQ vmovdqu
14378@@ -460,6 +461,7 @@ done_hash:
14379 popq %r13
14380 popq %rbp
14381 popq %rbx
14382+ pax_force_retaddr
14383 ret
14384 ENDPROC(sha256_transform_avx)
14385
14386diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
14387index 9e86944..3795e6a 100644
14388--- a/arch/x86/crypto/sha256-avx2-asm.S
14389+++ b/arch/x86/crypto/sha256-avx2-asm.S
14390@@ -50,6 +50,7 @@
14391
14392 #ifdef CONFIG_AS_AVX2
14393 #include <linux/linkage.h>
14394+#include <asm/alternative-asm.h>
14395
14396 ## assume buffers not aligned
14397 #define VMOVDQ vmovdqu
14398@@ -720,6 +721,7 @@ done_hash:
14399 popq %r12
14400 popq %rbp
14401 popq %rbx
14402+ pax_force_retaddr
14403 ret
14404 ENDPROC(sha256_transform_rorx)
14405
14406diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
14407index f833b74..8c62a9e 100644
14408--- a/arch/x86/crypto/sha256-ssse3-asm.S
14409+++ b/arch/x86/crypto/sha256-ssse3-asm.S
14410@@ -47,6 +47,7 @@
14411 ########################################################################
14412
14413 #include <linux/linkage.h>
14414+#include <asm/alternative-asm.h>
14415
14416 ## assume buffers not aligned
14417 #define MOVDQ movdqu
14418@@ -471,6 +472,7 @@ done_hash:
14419 popq %rbp
14420 popq %rbx
14421
14422+ pax_force_retaddr
14423 ret
14424 ENDPROC(sha256_transform_ssse3)
14425
14426diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
14427index 974dde9..a823ff9 100644
14428--- a/arch/x86/crypto/sha512-avx-asm.S
14429+++ b/arch/x86/crypto/sha512-avx-asm.S
14430@@ -49,6 +49,7 @@
14431
14432 #ifdef CONFIG_AS_AVX
14433 #include <linux/linkage.h>
14434+#include <asm/alternative-asm.h>
14435
14436 .text
14437
14438@@ -364,6 +365,7 @@ updateblock:
14439 mov frame_RSPSAVE(%rsp), %rsp
14440
14441 nowork:
14442+ pax_force_retaddr
14443 ret
14444 ENDPROC(sha512_transform_avx)
14445
14446diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
14447index 568b961..ed20c37 100644
14448--- a/arch/x86/crypto/sha512-avx2-asm.S
14449+++ b/arch/x86/crypto/sha512-avx2-asm.S
14450@@ -51,6 +51,7 @@
14451
14452 #ifdef CONFIG_AS_AVX2
14453 #include <linux/linkage.h>
14454+#include <asm/alternative-asm.h>
14455
14456 .text
14457
14458@@ -678,6 +679,7 @@ done_hash:
14459
14460 # Restore Stack Pointer
14461 mov frame_RSPSAVE(%rsp), %rsp
14462+ pax_force_retaddr
14463 ret
14464 ENDPROC(sha512_transform_rorx)
14465
14466diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
14467index fb56855..6edd768 100644
14468--- a/arch/x86/crypto/sha512-ssse3-asm.S
14469+++ b/arch/x86/crypto/sha512-ssse3-asm.S
14470@@ -48,6 +48,7 @@
14471 ########################################################################
14472
14473 #include <linux/linkage.h>
14474+#include <asm/alternative-asm.h>
14475
14476 .text
14477
14478@@ -363,6 +364,7 @@ updateblock:
14479 mov frame_RSPSAVE(%rsp), %rsp
14480
14481 nowork:
14482+ pax_force_retaddr
14483 ret
14484 ENDPROC(sha512_transform_ssse3)
14485
14486diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14487index 0505813..b067311 100644
14488--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14489+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
14490@@ -24,6 +24,7 @@
14491 */
14492
14493 #include <linux/linkage.h>
14494+#include <asm/alternative-asm.h>
14495 #include "glue_helper-asm-avx.S"
14496
14497 .file "twofish-avx-x86_64-asm_64.S"
14498@@ -284,6 +285,7 @@ __twofish_enc_blk8:
14499 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
14500 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
14501
14502+ pax_force_retaddr
14503 ret;
14504 ENDPROC(__twofish_enc_blk8)
14505
14506@@ -324,6 +326,7 @@ __twofish_dec_blk8:
14507 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
14508 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
14509
14510+ pax_force_retaddr
14511 ret;
14512 ENDPROC(__twofish_dec_blk8)
14513
14514@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
14515
14516 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14517
14518+ pax_force_retaddr
14519 ret;
14520 ENDPROC(twofish_ecb_enc_8way)
14521
14522@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
14523
14524 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14525
14526+ pax_force_retaddr
14527 ret;
14528 ENDPROC(twofish_ecb_dec_8way)
14529
14530@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
14531 * %rdx: src
14532 */
14533
14534- pushq %r12;
14535+ pushq %r14;
14536
14537 movq %rsi, %r11;
14538- movq %rdx, %r12;
14539+ movq %rdx, %r14;
14540
14541 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14542
14543 call __twofish_dec_blk8;
14544
14545- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14546+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14547
14548- popq %r12;
14549+ popq %r14;
14550
14551+ pax_force_retaddr
14552 ret;
14553 ENDPROC(twofish_cbc_dec_8way)
14554
14555@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
14556 * %rcx: iv (little endian, 128bit)
14557 */
14558
14559- pushq %r12;
14560+ pushq %r14;
14561
14562 movq %rsi, %r11;
14563- movq %rdx, %r12;
14564+ movq %rdx, %r14;
14565
14566 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14567 RD2, RX0, RX1, RY0);
14568
14569 call __twofish_enc_blk8;
14570
14571- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14572+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14573
14574- popq %r12;
14575+ popq %r14;
14576
14577+ pax_force_retaddr
14578 ret;
14579 ENDPROC(twofish_ctr_8way)
14580
14581@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
14582 /* dst <= regs xor IVs(in dst) */
14583 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
14584
14585+ pax_force_retaddr
14586 ret;
14587 ENDPROC(twofish_xts_enc_8way)
14588
14589@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
14590 /* dst <= regs xor IVs(in dst) */
14591 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14592
14593+ pax_force_retaddr
14594 ret;
14595 ENDPROC(twofish_xts_dec_8way)
14596diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14597index 1c3b7ce..02f578d 100644
14598--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14599+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
14600@@ -21,6 +21,7 @@
14601 */
14602
14603 #include <linux/linkage.h>
14604+#include <asm/alternative-asm.h>
14605
14606 .file "twofish-x86_64-asm-3way.S"
14607 .text
14608@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
14609 popq %r13;
14610 popq %r14;
14611 popq %r15;
14612+ pax_force_retaddr
14613 ret;
14614
14615 .L__enc_xor3:
14616@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
14617 popq %r13;
14618 popq %r14;
14619 popq %r15;
14620+ pax_force_retaddr
14621 ret;
14622 ENDPROC(__twofish_enc_blk_3way)
14623
14624@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
14625 popq %r13;
14626 popq %r14;
14627 popq %r15;
14628+ pax_force_retaddr
14629 ret;
14630 ENDPROC(twofish_dec_blk_3way)
14631diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
14632index a039d21..524b8b2 100644
14633--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
14634+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
14635@@ -22,6 +22,7 @@
14636
14637 #include <linux/linkage.h>
14638 #include <asm/asm-offsets.h>
14639+#include <asm/alternative-asm.h>
14640
14641 #define a_offset 0
14642 #define b_offset 4
14643@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
14644
14645 popq R1
14646 movq $1,%rax
14647+ pax_force_retaddr
14648 ret
14649 ENDPROC(twofish_enc_blk)
14650
14651@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
14652
14653 popq R1
14654 movq $1,%rax
14655+ pax_force_retaddr
14656 ret
14657 ENDPROC(twofish_dec_blk)
14658diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
14659index ae6aad1..719d6d9 100644
14660--- a/arch/x86/ia32/ia32_aout.c
14661+++ b/arch/x86/ia32/ia32_aout.c
14662@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
14663 unsigned long dump_start, dump_size;
14664 struct user32 dump;
14665
14666+ memset(&dump, 0, sizeof(dump));
14667+
14668 fs = get_fs();
14669 set_fs(KERNEL_DS);
14670 has_dumped = 1;
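
The added memset closes a kernel-stack infoleak: struct user32 dump was only partially initialised before being written into the core file, so compiler padding and any unwritten members leaked whatever the stack last held. The general pattern, with a hypothetical stand-in struct and buffer:

    #include <string.h>

    struct user32_sketch {          /* stand-in for struct user32 */
            unsigned int regs[17];
            char comm[32];
            /* ...plus any padding the compiler inserts... */
    };

    static void dump_sketch(void *corefile_buf)
    {
            struct user32_sketch dump;

            memset(&dump, 0, sizeof(dump)); /* the added hardening line */
            /* ...fill only the fields this dumper knows about... */
            memcpy(corefile_buf, &dump, sizeof(dump)); /* no stale stack bytes */
    }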
14671diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
14672index f9e181a..300544c 100644
14673--- a/arch/x86/ia32/ia32_signal.c
14674+++ b/arch/x86/ia32/ia32_signal.c
14675@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
14676 if (__get_user(set.sig[0], &frame->sc.oldmask)
14677 || (_COMPAT_NSIG_WORDS > 1
14678 && __copy_from_user((((char *) &set.sig) + 4),
14679- &frame->extramask,
14680+ frame->extramask,
14681 sizeof(frame->extramask))))
14682 goto badframe;
14683
14684@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
14685 sp -= frame_size;
14686 /* Align the stack pointer according to the i386 ABI,
14687 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
14688- sp = ((sp + 4) & -16ul) - 4;
14689+ sp = ((sp - 12) & -16ul) - 4;
14690 return (void __user *) sp;
14691 }
14692
14693@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14694 } else {
14695 /* Return stub is in 32bit vsyscall page */
14696 if (current->mm->context.vdso)
14697- restorer = current->mm->context.vdso +
14698- selected_vdso32->sym___kernel_sigreturn;
14699+ restorer = (void __force_user *)(current->mm->context.vdso +
14700+ selected_vdso32->sym___kernel_sigreturn);
14701 else
14702- restorer = &frame->retcode;
14703+ restorer = frame->retcode;
14704 }
14705
14706 put_user_try {
14707@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
14708 * These are actually not used anymore, but left because some
14709 * gdb versions depend on them as a marker.
14710 */
14711- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14712+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14713 } put_user_catch(err);
14714
14715 if (err)
14716@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14717 0xb8,
14718 __NR_ia32_rt_sigreturn,
14719 0x80cd,
14720- 0,
14721+ 0
14722 };
14723
14724 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
14725@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
14726
14727 if (ksig->ka.sa.sa_flags & SA_RESTORER)
14728 restorer = ksig->ka.sa.sa_restorer;
14729+ else if (current->mm->context.vdso)
14730+ /* Return stub is in 32bit vsyscall page */
14731+ restorer = (void __force_user *)(current->mm->context.vdso +
14732+ selected_vdso32->sym___kernel_rt_sigreturn);
14733 else
14734- restorer = current->mm->context.vdso +
14735- selected_vdso32->sym___kernel_rt_sigreturn;
14736+ restorer = frame->retcode;
14737 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
14738
14739 /*
14740 * Not actually used anymore, but left because some gdb
14741 * versions need it.
14742 */
14743- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
14744+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
14745 } put_user_catch(err);
14746
14747 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
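
The get_sigframe() change swaps ((sp + 4) & -16ul) - 4 for ((sp - 12) & -16ul) - 4. Both leave sp equal to 12 modulo 16, satisfying the i386 ABI entry condition ((sp + 4) & 15) == 0 noted in the comment, but the old form could hand back sp unchanged when sp was already so aligned, while the new one always descends by at least 16 bytes. A quick self-contained check of that arithmetic:

    #include <assert.h>

    int main(void)
    {
            for (unsigned long sp = 0x1000; sp < 0x1010; sp++) {
                    unsigned long old = ((sp + 4) & -16ul) - 4;
                    unsigned long new = ((sp - 12) & -16ul) - 4;

                    /* both keep the ABI entry alignment */
                    assert(((old + 4) & 15) == 0 && ((new + 4) & 15) == 0);
                    assert(new <= sp - 16);  /* new form always moves down */
                    assert(old <= sp);       /* old form may stay exactly at sp */
            }
            return 0;
    }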
14748diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
14749index 82e8a1d..4e998d5 100644
14750--- a/arch/x86/ia32/ia32entry.S
14751+++ b/arch/x86/ia32/ia32entry.S
14752@@ -15,8 +15,10 @@
14753 #include <asm/irqflags.h>
14754 #include <asm/asm.h>
14755 #include <asm/smap.h>
14756+#include <asm/pgtable.h>
14757 #include <linux/linkage.h>
14758 #include <linux/err.h>
14759+#include <asm/alternative-asm.h>
14760
14761 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
14762 #include <linux/elf-em.h>
14763@@ -62,12 +64,12 @@
14764 */
14765 .macro LOAD_ARGS32 offset, _r9=0
14766 .if \_r9
14767- movl \offset+16(%rsp),%r9d
14768+ movl \offset+R9(%rsp),%r9d
14769 .endif
14770- movl \offset+40(%rsp),%ecx
14771- movl \offset+48(%rsp),%edx
14772- movl \offset+56(%rsp),%esi
14773- movl \offset+64(%rsp),%edi
14774+ movl \offset+RCX(%rsp),%ecx
14775+ movl \offset+RDX(%rsp),%edx
14776+ movl \offset+RSI(%rsp),%esi
14777+ movl \offset+RDI(%rsp),%edi
14778 movl %eax,%eax /* zero extension */
14779 .endm
14780
14781@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
14782 ENDPROC(native_irq_enable_sysexit)
14783 #endif
14784
14785+ .macro pax_enter_kernel_user
14786+ pax_set_fptr_mask
14787+#ifdef CONFIG_PAX_MEMORY_UDEREF
14788+ call pax_enter_kernel_user
14789+#endif
14790+ .endm
14791+
14792+ .macro pax_exit_kernel_user
14793+#ifdef CONFIG_PAX_MEMORY_UDEREF
14794+ call pax_exit_kernel_user
14795+#endif
14796+#ifdef CONFIG_PAX_RANDKSTACK
14797+ pushq %rax
14798+ pushq %r11
14799+ call pax_randomize_kstack
14800+ popq %r11
14801+ popq %rax
14802+#endif
14803+ .endm
14804+
14805+ .macro pax_erase_kstack
14806+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14807+ call pax_erase_kstack
14808+#endif
14809+ .endm
14810+
14811 /*
14812 * 32bit SYSENTER instruction entry.
14813 *
14814@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
14815 CFI_REGISTER rsp,rbp
14816 SWAPGS_UNSAFE_STACK
14817 movq PER_CPU_VAR(kernel_stack), %rsp
14818- addq $(KERNEL_STACK_OFFSET),%rsp
14819- /*
14820- * No need to follow this irqs on/off section: the syscall
14821- * disabled irqs, here we enable it straight after entry:
14822- */
14823- ENABLE_INTERRUPTS(CLBR_NONE)
14824 movl %ebp,%ebp /* zero extension */
14825 pushq_cfi $__USER32_DS
14826 /*CFI_REL_OFFSET ss,0*/
14827@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target)
14828 CFI_REL_OFFSET rsp,0
14829 pushfq_cfi
14830 /*CFI_REL_OFFSET rflags,0*/
14831- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
14832- CFI_REGISTER rip,r10
14833+ orl $X86_EFLAGS_IF,(%rsp)
14834+ GET_THREAD_INFO(%r11)
14835+ movl TI_sysenter_return(%r11), %r11d
14836+ CFI_REGISTER rip,r11
14837 pushq_cfi $__USER32_CS
14838 /*CFI_REL_OFFSET cs,0*/
14839 movl %eax, %eax
14840- pushq_cfi %r10
14841+ pushq_cfi %r11
14842 CFI_REL_OFFSET rip,0
14843 pushq_cfi %rax
14844 cld
14845 SAVE_ARGS 0,1,0
14846+ pax_enter_kernel_user
14847+
14848+#ifdef CONFIG_PAX_RANDKSTACK
14849+ pax_erase_kstack
14850+#endif
14851+
14852+ /*
14853+ * No need to follow this irqs on/off section: the syscall
14854+ * disabled irqs, here we enable it straight after entry:
14855+ */
14856+ ENABLE_INTERRUPTS(CLBR_NONE)
14857 /* no need to do an access_ok check here because rbp has been
14858 32bit zero extended */
14859+
14860+#ifdef CONFIG_PAX_MEMORY_UDEREF
14861+ addq pax_user_shadow_base,%rbp
14862+ ASM_PAX_OPEN_USERLAND
14863+#endif
14864+
14865 ASM_STAC
14866 1: movl (%rbp),%ebp
14867 _ASM_EXTABLE(1b,ia32_badarg)
14868 ASM_CLAC
14869
14870+#ifdef CONFIG_PAX_MEMORY_UDEREF
14871+ ASM_PAX_CLOSE_USERLAND
14872+#endif
14873+
14874 /*
14875 * Sysenter doesn't filter flags, so we need to clear NT
14876 * ourselves. To save a few cycles, we can check whether
14877@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target)
14878 jnz sysenter_fix_flags
14879 sysenter_flags_fixed:
14880
14881- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14882- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14883+ GET_THREAD_INFO(%r11)
14884+ orl $TS_COMPAT,TI_status(%r11)
14885+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14886 CFI_REMEMBER_STATE
14887 jnz sysenter_tracesys
14888 cmpq $(IA32_NR_syscalls-1),%rax
14889@@ -172,15 +218,18 @@ sysenter_do_call:
14890 sysenter_dispatch:
14891 call *ia32_sys_call_table(,%rax,8)
14892 movq %rax,RAX-ARGOFFSET(%rsp)
14893+ GET_THREAD_INFO(%r11)
14894 DISABLE_INTERRUPTS(CLBR_NONE)
14895 TRACE_IRQS_OFF
14896- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14897+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14898 jnz sysexit_audit
14899 sysexit_from_sys_call:
14900- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14901+ pax_exit_kernel_user
14902+ pax_erase_kstack
14903+ andl $~TS_COMPAT,TI_status(%r11)
14904 /* clear IF, that popfq doesn't enable interrupts early */
14905- andl $~0x200,EFLAGS-R11(%rsp)
14906- movl RIP-R11(%rsp),%edx /* User %eip */
14907+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
14908+ movl RIP(%rsp),%edx /* User %eip */
14909 CFI_REGISTER rip,rdx
14910 RESTORE_ARGS 0,24,0,0,0,0
14911 xorq %r8,%r8
14912@@ -205,6 +254,9 @@ sysexit_from_sys_call:
14913 movl %ebx,%esi /* 2nd arg: 1st syscall arg */
14914 movl %eax,%edi /* 1st arg: syscall number */
14915 call __audit_syscall_entry
14916+
14917+ pax_erase_kstack
14918+
14919 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
14920 cmpq $(IA32_NR_syscalls-1),%rax
14921 ja ia32_badsys
14922@@ -216,7 +268,7 @@ sysexit_from_sys_call:
14923 .endm
14924
14925 .macro auditsys_exit exit
14926- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14927+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14928 jnz ia32_ret_from_sys_call
14929 TRACE_IRQS_ON
14930 ENABLE_INTERRUPTS(CLBR_NONE)
14931@@ -227,11 +279,12 @@ sysexit_from_sys_call:
14932 1: setbe %al /* 1 if error, 0 if not */
14933 movzbl %al,%edi /* zero-extend that into %edi */
14934 call __audit_syscall_exit
14935+ GET_THREAD_INFO(%r11)
14936 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
14937 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
14938 DISABLE_INTERRUPTS(CLBR_NONE)
14939 TRACE_IRQS_OFF
14940- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14941+ testl %edi,TI_flags(%r11)
14942 jz \exit
14943 CLEAR_RREGS -ARGOFFSET
14944 jmp int_with_check
14945@@ -253,7 +306,7 @@ sysenter_fix_flags:
14946
14947 sysenter_tracesys:
14948 #ifdef CONFIG_AUDITSYSCALL
14949- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14950+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14951 jz sysenter_auditsys
14952 #endif
14953 SAVE_REST
14954@@ -265,6 +318,9 @@ sysenter_tracesys:
14955 RESTORE_REST
14956 cmpq $(IA32_NR_syscalls-1),%rax
14957 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
14958+
14959+ pax_erase_kstack
14960+
14961 jmp sysenter_do_call
14962 CFI_ENDPROC
14963 ENDPROC(ia32_sysenter_target)
14964@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target)
14965 ENTRY(ia32_cstar_target)
14966 CFI_STARTPROC32 simple
14967 CFI_SIGNAL_FRAME
14968- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14969+ CFI_DEF_CFA rsp,0
14970 CFI_REGISTER rip,rcx
14971 /*CFI_REGISTER rflags,r11*/
14972 SWAPGS_UNSAFE_STACK
14973 movl %esp,%r8d
14974 CFI_REGISTER rsp,r8
14975 movq PER_CPU_VAR(kernel_stack),%rsp
14976+ SAVE_ARGS 8*6,0,0
14977+ pax_enter_kernel_user
14978+
14979+#ifdef CONFIG_PAX_RANDKSTACK
14980+ pax_erase_kstack
14981+#endif
14982+
14983 /*
14984 * No need to follow this irqs on/off section: the syscall
14985 * disabled irqs and here we enable it straight after entry:
14986 */
14987 ENABLE_INTERRUPTS(CLBR_NONE)
14988- SAVE_ARGS 8,0,0
14989 movl %eax,%eax /* zero extension */
14990 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14991 movq %rcx,RIP-ARGOFFSET(%rsp)
14992@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target)
14993 /* no need to do an access_ok check here because r8 has been
14994 32bit zero extended */
14995 /* hardware stack frame is complete now */
14996+
14997+#ifdef CONFIG_PAX_MEMORY_UDEREF
14998+ ASM_PAX_OPEN_USERLAND
14999+ movq pax_user_shadow_base,%r8
15000+ addq RSP-ARGOFFSET(%rsp),%r8
15001+#endif
15002+
15003 ASM_STAC
15004 1: movl (%r8),%r9d
15005 _ASM_EXTABLE(1b,ia32_badarg)
15006 ASM_CLAC
15007- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15008- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15009+
15010+#ifdef CONFIG_PAX_MEMORY_UDEREF
15011+ ASM_PAX_CLOSE_USERLAND
15012+#endif
15013+
15014+ GET_THREAD_INFO(%r11)
15015+ orl $TS_COMPAT,TI_status(%r11)
15016+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15017 CFI_REMEMBER_STATE
15018 jnz cstar_tracesys
15019 cmpq $IA32_NR_syscalls-1,%rax
15020@@ -335,13 +410,16 @@ cstar_do_call:
15021 cstar_dispatch:
15022 call *ia32_sys_call_table(,%rax,8)
15023 movq %rax,RAX-ARGOFFSET(%rsp)
15024+ GET_THREAD_INFO(%r11)
15025 DISABLE_INTERRUPTS(CLBR_NONE)
15026 TRACE_IRQS_OFF
15027- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15028+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
15029 jnz sysretl_audit
15030 sysretl_from_sys_call:
15031- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15032- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
15033+ pax_exit_kernel_user
15034+ pax_erase_kstack
15035+ andl $~TS_COMPAT,TI_status(%r11)
15036+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
15037 movl RIP-ARGOFFSET(%rsp),%ecx
15038 CFI_REGISTER rip,rcx
15039 movl EFLAGS-ARGOFFSET(%rsp),%r11d
15040@@ -368,7 +446,7 @@ sysretl_audit:
15041
15042 cstar_tracesys:
15043 #ifdef CONFIG_AUDITSYSCALL
15044- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15045+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15046 jz cstar_auditsys
15047 #endif
15048 xchgl %r9d,%ebp
15049@@ -382,11 +460,19 @@ cstar_tracesys:
15050 xchgl %ebp,%r9d
15051 cmpq $(IA32_NR_syscalls-1),%rax
15052 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
15053+
15054+ pax_erase_kstack
15055+
15056 jmp cstar_do_call
15057 END(ia32_cstar_target)
15058
15059 ia32_badarg:
15060 ASM_CLAC
15061+
15062+#ifdef CONFIG_PAX_MEMORY_UDEREF
15063+ ASM_PAX_CLOSE_USERLAND
15064+#endif
15065+
15066 movq $-EFAULT,%rax
15067 jmp ia32_sysret
15068 CFI_ENDPROC
15069@@ -423,19 +509,26 @@ ENTRY(ia32_syscall)
15070 CFI_REL_OFFSET rip,RIP-RIP
15071 PARAVIRT_ADJUST_EXCEPTION_FRAME
15072 SWAPGS
15073- /*
15074- * No need to follow this irqs on/off section: the syscall
15075- * disabled irqs and here we enable it straight after entry:
15076- */
15077- ENABLE_INTERRUPTS(CLBR_NONE)
15078 movl %eax,%eax
15079 pushq_cfi %rax
15080 cld
15081 /* note the registers are not zero extended to the sf.
15082 this could be a problem. */
15083 SAVE_ARGS 0,1,0
15084- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15085- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15086+ pax_enter_kernel_user
15087+
15088+#ifdef CONFIG_PAX_RANDKSTACK
15089+ pax_erase_kstack
15090+#endif
15091+
15092+ /*
15093+ * No need to follow this irqs on/off section: the syscall
15094+ * disabled irqs and here we enable it straight after entry:
15095+ */
15096+ ENABLE_INTERRUPTS(CLBR_NONE)
15097+ GET_THREAD_INFO(%r11)
15098+ orl $TS_COMPAT,TI_status(%r11)
15099+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15100 jnz ia32_tracesys
15101 cmpq $(IA32_NR_syscalls-1),%rax
15102 ja ia32_badsys
15103@@ -458,6 +551,9 @@ ia32_tracesys:
15104 RESTORE_REST
15105 cmpq $(IA32_NR_syscalls-1),%rax
15106 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15107+
15108+ pax_erase_kstack
15109+
15110 jmp ia32_do_call
15111 END(ia32_syscall)
15112
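The entry-path changes above follow one pattern: thread_info can no longer be reached with the THREAD_INFO(%rsp,RIP-ARGOFFSET) stack-relative computation once PAX_RANDKSTACK randomizes the kernel stack pointer, so it is loaded once into %r11 via GET_THREAD_INFO and indexed directly; pax_enter_kernel_user/pax_exit_kernel_user switch the UDEREF state, and pax_erase_kstack sanitizes the kernel stack before returning to userland. A minimal C sketch of the stack-sanitization idea (the poison value and range handling are illustrative assumptions, not PaX's actual implementation):

/* illustrative only: scrub the dead part of the kernel stack so stale
 * data cannot leak through later uninitialized reads */
#define STACK_POISON 0xdeadbeefUL /* assumed value */

static void erase_kstack_sketch(unsigned long *stack_lowest, unsigned long *sp)
{
	unsigned long *p;

	/* everything below the live stack pointer is dead data */
	for (p = stack_lowest; p < sp; p++)
		*p = STACK_POISON;
}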
15113diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15114index 8e0ceec..af13504 100644
15115--- a/arch/x86/ia32/sys_ia32.c
15116+++ b/arch/x86/ia32/sys_ia32.c
15117@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15118 */
15119 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15120 {
15121- typeof(ubuf->st_uid) uid = 0;
15122- typeof(ubuf->st_gid) gid = 0;
15123+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15124+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15125 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15126 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15127 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
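Both forms of the typeof in cp_stat64() name the same member type: like sizeof, typeof does not evaluate its operand, so ((struct stat64 *)0)->st_uid is never actually dereferenced. The rewrite avoids mentioning the __user pointer ubuf inside the type expression, presumably so the PaX GCC plugins and sparse do not see a user-pointer dereference there. A standalone illustration (hypothetical struct):

#include <stdio.h>

struct demo { unsigned short st_uid; }; /* stand-in for stat64 */

int main(void)
{
	/* the operand is unevaluated: no null dereference occurs */
	typeof(((struct demo *)0)->st_uid) uid = 0;

	printf("sizeof(uid) = %zu\n", sizeof(uid)); /* 2 on common ABIs */
	return 0;
}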
15128diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15129index 372231c..51b537d 100644
15130--- a/arch/x86/include/asm/alternative-asm.h
15131+++ b/arch/x86/include/asm/alternative-asm.h
15132@@ -18,6 +18,45 @@
15133 .endm
15134 #endif
15135
15136+#ifdef KERNEXEC_PLUGIN
15137+ .macro pax_force_retaddr_bts rip=0
15138+ btsq $63,\rip(%rsp)
15139+ .endm
15140+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15141+ .macro pax_force_retaddr rip=0, reload=0
15142+ btsq $63,\rip(%rsp)
15143+ .endm
15144+ .macro pax_force_fptr ptr
15145+ btsq $63,\ptr
15146+ .endm
15147+ .macro pax_set_fptr_mask
15148+ .endm
15149+#endif
15150+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15151+ .macro pax_force_retaddr rip=0, reload=0
15152+ .if \reload
15153+ pax_set_fptr_mask
15154+ .endif
15155+ orq %r12,\rip(%rsp)
15156+ .endm
15157+ .macro pax_force_fptr ptr
15158+ orq %r12,\ptr
15159+ .endm
15160+ .macro pax_set_fptr_mask
15161+ movabs $0x8000000000000000,%r12
15162+ .endm
15163+#endif
15164+#else
15165+ .macro pax_force_retaddr rip=0, reload=0
15166+ .endm
15167+ .macro pax_force_fptr ptr
15168+ .endm
15169+ .macro pax_force_retaddr_bts rip=0
15170+ .endm
15171+ .macro pax_set_fptr_mask
15172+ .endm
15173+#endif
15174+
15175 .macro altinstruction_entry orig alt feature orig_len alt_len
15176 .long \orig - .
15177 .long \alt - .
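The pax_force_retaddr/pax_force_fptr macros are the KERNEXEC plugin's return-address and function-pointer hardening: setting bit 63 (btsq, or orq against the mask preloaded into %r12) leaves genuine kernel addresses untouched, since they already have the bit set, but turns any injected userland address into a non-canonical one that faults on use. A quick C illustration of the mask's effect:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t user = 0x00007f0000001000ULL; /* typical userland address */
	uint64_t kern = 0xffffffff81000000ULL; /* typical kernel text address */
	uint64_t mask = 1ULL << 63;            /* what btsq $63 / orq %r12 apply */

	/* the userland target becomes non-canonical; the kernel one is unchanged */
	printf("user: %#llx -> %#llx\n", (unsigned long long)user,
	       (unsigned long long)(user | mask));
	printf("kern: %#llx -> %#llx\n", (unsigned long long)kern,
	       (unsigned long long)(kern | mask));
	return 0;
}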
15178diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15179index 473bdbe..b1e3377 100644
15180--- a/arch/x86/include/asm/alternative.h
15181+++ b/arch/x86/include/asm/alternative.h
15182@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15183 ".pushsection .discard,\"aw\",@progbits\n" \
15184 DISCARD_ENTRY(1) \
15185 ".popsection\n" \
15186- ".pushsection .altinstr_replacement, \"ax\"\n" \
15187+ ".pushsection .altinstr_replacement, \"a\"\n" \
15188 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15189 ".popsection"
15190
15191@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15192 DISCARD_ENTRY(1) \
15193 DISCARD_ENTRY(2) \
15194 ".popsection\n" \
15195- ".pushsection .altinstr_replacement, \"ax\"\n" \
15196+ ".pushsection .altinstr_replacement, \"a\"\n" \
15197 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15198 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15199 ".popsection"
15200diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15201index 465b309..ab7e51f 100644
15202--- a/arch/x86/include/asm/apic.h
15203+++ b/arch/x86/include/asm/apic.h
15204@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15205
15206 #ifdef CONFIG_X86_LOCAL_APIC
15207
15208-extern unsigned int apic_verbosity;
15209+extern int apic_verbosity;
15210 extern int local_apic_timer_c2_ok;
15211
15212 extern int disable_apic;
15213diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15214index 20370c6..a2eb9b0 100644
15215--- a/arch/x86/include/asm/apm.h
15216+++ b/arch/x86/include/asm/apm.h
15217@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15218 __asm__ __volatile__(APM_DO_ZERO_SEGS
15219 "pushl %%edi\n\t"
15220 "pushl %%ebp\n\t"
15221- "lcall *%%cs:apm_bios_entry\n\t"
15222+ "lcall *%%ss:apm_bios_entry\n\t"
15223 "setc %%al\n\t"
15224 "popl %%ebp\n\t"
15225 "popl %%edi\n\t"
15226@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15227 __asm__ __volatile__(APM_DO_ZERO_SEGS
15228 "pushl %%edi\n\t"
15229 "pushl %%ebp\n\t"
15230- "lcall *%%cs:apm_bios_entry\n\t"
15231+ "lcall *%%ss:apm_bios_entry\n\t"
15232 "setc %%bl\n\t"
15233 "popl %%ebp\n\t"
15234 "popl %%edi\n\t"
15235diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15236index 5e5cd12..51cdc93 100644
15237--- a/arch/x86/include/asm/atomic.h
15238+++ b/arch/x86/include/asm/atomic.h
15239@@ -28,6 +28,17 @@ static inline int atomic_read(const atomic_t *v)
15240 }
15241
15242 /**
15243+ * atomic_read_unchecked - read atomic variable
15244+ * @v: pointer of type atomic_unchecked_t
15245+ *
15246+ * Atomically reads the value of @v.
15247+ */
15248+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15249+{
15250+ return ACCESS_ONCE((v)->counter);
15251+}
15252+
15253+/**
15254 * atomic_set - set atomic variable
15255 * @v: pointer of type atomic_t
15256 * @i: required value
15257@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15258 }
15259
15260 /**
15261+ * atomic_set_unchecked - set atomic variable
15262+ * @v: pointer of type atomic_unchecked_t
15263+ * @i: required value
15264+ *
15265+ * Atomically sets the value of @v to @i.
15266+ */
15267+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15268+{
15269+ v->counter = i;
15270+}
15271+
15272+/**
15273 * atomic_add - add integer to atomic variable
15274 * @i: integer value to add
15275 * @v: pointer of type atomic_t
15276@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15277 */
15278 static inline void atomic_add(int i, atomic_t *v)
15279 {
15280- asm volatile(LOCK_PREFIX "addl %1,%0"
15281+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15282+
15283+#ifdef CONFIG_PAX_REFCOUNT
15284+ "jno 0f\n"
15285+ LOCK_PREFIX "subl %1,%0\n"
15286+ "int $4\n0:\n"
15287+ _ASM_EXTABLE(0b, 0b)
15288+#endif
15289+
15290+ : "+m" (v->counter)
15291+ : "ir" (i));
15292+}
15293+
15294+/**
15295+ * atomic_add_unchecked - add integer to atomic variable
15296+ * @i: integer value to add
15297+ * @v: pointer of type atomic_unchecked_t
15298+ *
15299+ * Atomically adds @i to @v.
15300+ */
15301+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15302+{
15303+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15304 : "+m" (v->counter)
15305 : "ir" (i));
15306 }
15307@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15308 */
15309 static inline void atomic_sub(int i, atomic_t *v)
15310 {
15311- asm volatile(LOCK_PREFIX "subl %1,%0"
15312+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15313+
15314+#ifdef CONFIG_PAX_REFCOUNT
15315+ "jno 0f\n"
15316+ LOCK_PREFIX "addl %1,%0\n"
15317+ "int $4\n0:\n"
15318+ _ASM_EXTABLE(0b, 0b)
15319+#endif
15320+
15321+ : "+m" (v->counter)
15322+ : "ir" (i));
15323+}
15324+
15325+/**
15326+ * atomic_sub_unchecked - subtract integer from atomic variable
15327+ * @i: integer value to subtract
15328+ * @v: pointer of type atomic_unchecked_t
15329+ *
15330+ * Atomically subtracts @i from @v.
15331+ */
15332+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15333+{
15334+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15335 : "+m" (v->counter)
15336 : "ir" (i));
15337 }
15338@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15339 */
15340 static inline int atomic_sub_and_test(int i, atomic_t *v)
15341 {
15342- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15343+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15344 }
15345
15346 /**
15347@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15348 */
15349 static inline void atomic_inc(atomic_t *v)
15350 {
15351- asm volatile(LOCK_PREFIX "incl %0"
15352+ asm volatile(LOCK_PREFIX "incl %0\n"
15353+
15354+#ifdef CONFIG_PAX_REFCOUNT
15355+ "jno 0f\n"
15356+ LOCK_PREFIX "decl %0\n"
15357+ "int $4\n0:\n"
15358+ _ASM_EXTABLE(0b, 0b)
15359+#endif
15360+
15361+ : "+m" (v->counter));
15362+}
15363+
15364+/**
15365+ * atomic_inc_unchecked - increment atomic variable
15366+ * @v: pointer of type atomic_unchecked_t
15367+ *
15368+ * Atomically increments @v by 1.
15369+ */
15370+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
15371+{
15372+ asm volatile(LOCK_PREFIX "incl %0\n"
15373 : "+m" (v->counter));
15374 }
15375
15376@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
15377 */
15378 static inline void atomic_dec(atomic_t *v)
15379 {
15380- asm volatile(LOCK_PREFIX "decl %0"
15381+ asm volatile(LOCK_PREFIX "decl %0\n"
15382+
15383+#ifdef CONFIG_PAX_REFCOUNT
15384+ "jno 0f\n"
15385+ LOCK_PREFIX "incl %0\n"
15386+ "int $4\n0:\n"
15387+ _ASM_EXTABLE(0b, 0b)
15388+#endif
15389+
15390+ : "+m" (v->counter));
15391+}
15392+
15393+/**
15394+ * atomic_dec_unchecked - decrement atomic variable
15395+ * @v: pointer of type atomic_unchecked_t
15396+ *
15397+ * Atomically decrements @v by 1.
15398+ */
15399+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
15400+{
15401+ asm volatile(LOCK_PREFIX "decl %0\n"
15402 : "+m" (v->counter));
15403 }
15404
15405@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
15406 */
15407 static inline int atomic_dec_and_test(atomic_t *v)
15408 {
15409- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
15410+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
15411 }
15412
15413 /**
15414@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
15415 */
15416 static inline int atomic_inc_and_test(atomic_t *v)
15417 {
15418- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
15419+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
15420+}
15421+
15422+/**
15423+ * atomic_inc_and_test_unchecked - increment and test
15424+ * @v: pointer of type atomic_unchecked_t
15425+ *
15426+ * Atomically increments @v by 1
15427+ * and returns true if the result is zero, or false for all
15428+ * other cases.
15429+ */
15430+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
15431+{
15432+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
15433 }
15434
15435 /**
15436@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
15437 */
15438 static inline int atomic_add_negative(int i, atomic_t *v)
15439 {
15440- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
15441+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
15442 }
15443
15444 /**
15445@@ -152,7 +272,19 @@ static inline int atomic_add_negative(int i, atomic_t *v)
15446 *
15447 * Atomically adds @i to @v and returns @i + @v
15448 */
15449-static inline int atomic_add_return(int i, atomic_t *v)
15450+static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
15451+{
15452+ return i + xadd_check_overflow(&v->counter, i);
15453+}
15454+
15455+/**
15456+ * atomic_add_return_unchecked - add integer and return
15457+ * @i: integer value to add
15458+ * @v: pointer of type atomic_unchecked_t
15459+ *
15460+ * Atomically adds @i to @v and returns @i + @v
15461+ */
15462+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
15463 {
15464 return i + xadd(&v->counter, i);
15465 }
15466@@ -164,15 +296,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
15467 *
15468 * Atomically subtracts @i from @v and returns @v - @i
15469 */
15470-static inline int atomic_sub_return(int i, atomic_t *v)
15471+static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
15472 {
15473 return atomic_add_return(-i, v);
15474 }
15475
15476 #define atomic_inc_return(v) (atomic_add_return(1, v))
15477+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
15478+{
15479+ return atomic_add_return_unchecked(1, v);
15480+}
15481 #define atomic_dec_return(v) (atomic_sub_return(1, v))
15482
15483-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
15484+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
15485+{
15486+ return cmpxchg(&v->counter, old, new);
15487+}
15488+
15489+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
15490 {
15491 return cmpxchg(&v->counter, old, new);
15492 }
15493@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
15494 return xchg(&v->counter, new);
15495 }
15496
15497+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
15498+{
15499+ return xchg(&v->counter, new);
15500+}
15501+
15502 /**
15503 * __atomic_add_unless - add unless the number is already a given value
15504 * @v: pointer of type atomic_t
15505@@ -193,12 +339,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
15506 */
15507 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15508 {
15509- int c, old;
15510+ int c, old, new;
15511 c = atomic_read(v);
15512 for (;;) {
15513- if (unlikely(c == (u)))
15514+ if (unlikely(c == u))
15515 break;
15516- old = atomic_cmpxchg((v), c, c + (a));
15517+
15518+ asm volatile("addl %2,%0\n"
15519+
15520+#ifdef CONFIG_PAX_REFCOUNT
15521+ "jno 0f\n"
15522+ "subl %2,%0\n"
15523+ "int $4\n0:\n"
15524+ _ASM_EXTABLE(0b, 0b)
15525+#endif
15526+
15527+ : "=r" (new)
15528+ : "0" (c), "ir" (a));
15529+
15530+ old = atomic_cmpxchg(v, c, new);
15531 if (likely(old == c))
15532 break;
15533 c = old;
15534@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15535 }
15536
15537 /**
15538+ * atomic_inc_not_zero_hint - increment if not zero
15539+ * @v: pointer of type atomic_t
15540+ * @hint: probable value of the atomic before the increment
15541+ *
15542+ * This version of atomic_inc_not_zero() gives a hint of the probable
15543+ * value of the atomic. This helps the processor avoid reading the
15544+ * memory before doing the atomic read/modify/write cycle, lowering
15545+ * the number of bus transactions on some arches.
15546+ *
15547+ * Returns: 0 if increment was not done, 1 otherwise.
15548+ */
15549+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
15550+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
15551+{
15552+ int val, c = hint, new;
15553+
15554+ /* sanity test, should be removed by compiler if hint is a constant */
15555+ if (!hint)
15556+ return __atomic_add_unless(v, 1, 0);
15557+
15558+ do {
15559+ asm volatile("incl %0\n"
15560+
15561+#ifdef CONFIG_PAX_REFCOUNT
15562+ "jno 0f\n"
15563+ "decl %0\n"
15564+ "int $4\n0:\n"
15565+ _ASM_EXTABLE(0b, 0b)
15566+#endif
15567+
15568+ : "=r" (new)
15569+ : "0" (c));
15570+
15571+ val = atomic_cmpxchg(v, c, new);
15572+ if (val == c)
15573+ return 1;
15574+ c = val;
15575+ } while (c);
15576+
15577+ return 0;
15578+}
15579+
15580+/**
15581 * atomic_inc_short - increment of a short integer
15582 * @v: pointer to type int
15583 *
15584@@ -220,14 +422,37 @@ static inline short int atomic_inc_short(short int *v)
15585 }
15586
15587 /* These are x86-specific, used by some header files */
15588-#define atomic_clear_mask(mask, addr) \
15589- asm volatile(LOCK_PREFIX "andl %0,%1" \
15590- : : "r" (~(mask)), "m" (*(addr)) : "memory")
15591+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
15592+{
15593+ asm volatile(LOCK_PREFIX "andl %1,%0"
15594+ : "+m" (v->counter)
15595+ : "r" (~(mask))
15596+ : "memory");
15597+}
15598
15599-#define atomic_set_mask(mask, addr) \
15600- asm volatile(LOCK_PREFIX "orl %0,%1" \
15601- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
15602- : "memory")
15603+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15604+{
15605+ asm volatile(LOCK_PREFIX "andl %1,%0"
15606+ : "+m" (v->counter)
15607+ : "r" (~(mask))
15608+ : "memory");
15609+}
15610+
15611+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
15612+{
15613+ asm volatile(LOCK_PREFIX "orl %1,%0"
15614+ : "+m" (v->counter)
15615+ : "r" (mask)
15616+ : "memory");
15617+}
15618+
15619+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
15620+{
15621+ asm volatile(LOCK_PREFIX "orl %1,%0"
15622+ : "+m" (v->counter)
15623+ : "r" (mask)
15624+ : "memory");
15625+}
15626
15627 #ifdef CONFIG_X86_32
15628 # include <asm/atomic64_32.h>
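Every checked operation in atomic.h now follows the PAX_REFCOUNT pattern: perform the locked instruction, jno falls through if the signed-overflow flag is clear, and otherwise the operation is undone and int $4 raises #OF so the handler can terminate the offending task. The *_unchecked twins keep plain wrap-around semantics for counters (statistics and similar) that may legitimately overflow. A portable C sketch of the same idea, using compiler overflow builtins instead of inline asm:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* sketch only: refuse to complete an increment that would wrap */
static bool refcount_inc_checked(int *counter)
{
	int next;

	/* plays the role of the "jno" test, without undefined behaviour */
	if (__builtin_add_overflow(*counter, 1, &next))
		return false; /* the kernel would trap via int $4 here */
	*counter = next;
	return true;
}

int main(void)
{
	int c = INT_MAX;

	printf("%s\n", refcount_inc_checked(&c) ? "ok" : "overflow caught");
	return 0;
}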
15629diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
15630index b154de7..bf18a5a 100644
15631--- a/arch/x86/include/asm/atomic64_32.h
15632+++ b/arch/x86/include/asm/atomic64_32.h
15633@@ -12,6 +12,14 @@ typedef struct {
15634 u64 __aligned(8) counter;
15635 } atomic64_t;
15636
15637+#ifdef CONFIG_PAX_REFCOUNT
15638+typedef struct {
15639+ u64 __aligned(8) counter;
15640+} atomic64_unchecked_t;
15641+#else
15642+typedef atomic64_t atomic64_unchecked_t;
15643+#endif
15644+
15645 #define ATOMIC64_INIT(val) { (val) }
15646
15647 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
15648@@ -37,21 +45,31 @@ typedef struct {
15649 ATOMIC64_DECL_ONE(sym##_386)
15650
15651 ATOMIC64_DECL_ONE(add_386);
15652+ATOMIC64_DECL_ONE(add_unchecked_386);
15653 ATOMIC64_DECL_ONE(sub_386);
15654+ATOMIC64_DECL_ONE(sub_unchecked_386);
15655 ATOMIC64_DECL_ONE(inc_386);
15656+ATOMIC64_DECL_ONE(inc_unchecked_386);
15657 ATOMIC64_DECL_ONE(dec_386);
15658+ATOMIC64_DECL_ONE(dec_unchecked_386);
15659 #endif
15660
15661 #define alternative_atomic64(f, out, in...) \
15662 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
15663
15664 ATOMIC64_DECL(read);
15665+ATOMIC64_DECL(read_unchecked);
15666 ATOMIC64_DECL(set);
15667+ATOMIC64_DECL(set_unchecked);
15668 ATOMIC64_DECL(xchg);
15669 ATOMIC64_DECL(add_return);
15670+ATOMIC64_DECL(add_return_unchecked);
15671 ATOMIC64_DECL(sub_return);
15672+ATOMIC64_DECL(sub_return_unchecked);
15673 ATOMIC64_DECL(inc_return);
15674+ATOMIC64_DECL(inc_return_unchecked);
15675 ATOMIC64_DECL(dec_return);
15676+ATOMIC64_DECL(dec_return_unchecked);
15677 ATOMIC64_DECL(dec_if_positive);
15678 ATOMIC64_DECL(inc_not_zero);
15679 ATOMIC64_DECL(add_unless);
15680@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
15681 }
15682
15683 /**
15684+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
15685+ * @v: pointer to type atomic64_unchecked_t
15686+ * @o: expected value
15687+ * @n: new value
15688+ *
15689+ * Atomically sets @v to @n if it was equal to @o and returns
15690+ * the old value.
15691+ */
15692+
15693+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
15694+{
15695+ return cmpxchg64(&v->counter, o, n);
15696+}
15697+
15698+/**
15699 * atomic64_xchg - xchg atomic64 variable
15700 * @v: pointer to type atomic64_t
15701 * @n: value to assign
15702@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
15703 }
15704
15705 /**
15706+ * atomic64_set_unchecked - set atomic64 variable
15707+ * @v: pointer to type atomic64_unchecked_t
15708+ * @i: value to assign
15709+ *
15710+ * Atomically sets the value of @v to @i.
15711+ */
15712+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
15713+{
15714+ unsigned high = (unsigned)(i >> 32);
15715+ unsigned low = (unsigned)i;
15716+ alternative_atomic64(set, /* no output */,
15717+ "S" (v), "b" (low), "c" (high)
15718+ : "eax", "edx", "memory");
15719+}
15720+
15721+/**
15722 * atomic64_read - read atomic64 variable
15723 * @v: pointer to type atomic64_t
15724 *
15725@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
15726 }
15727
15728 /**
15729+ * atomic64_read_unchecked - read atomic64 variable
15730+ * @v: pointer to type atomic64_unchecked_t
15731+ *
15732+ * Atomically reads the value of @v and returns it.
15733+ */
15734+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
15735+{
15736+ long long r;
15737+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
15738+ return r;
15739+ }
15740+}
15741+/**
15742 * atomic64_add_return - add and return
15743 * @i: integer value to add
15744 * @v: pointer to type atomic64_t
15745@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
15746 return i;
15747 }
15748
15749+/**
15750+ * atomic64_add_return_unchecked - add and return
15751+ * @i: integer value to add
15752+ * @v: pointer to type atomic64_unchecked_t
15753+ *
15754+ * Atomically adds @i to @v and returns @i + *@v
15755+ */
15756+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
15757+{
15758+ alternative_atomic64(add_return_unchecked,
15759+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15760+ ASM_NO_INPUT_CLOBBER("memory"));
15761+ return i;
15762+}
15763+
15764 /*
15765 * Other variants with different arithmetic operators:
15766 */
15767@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
15768 return a;
15769 }
15770
15771+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15772+{
15773+ long long a;
15774+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
15775+ "S" (v) : "memory", "ecx");
15776+ return a;
15777+}
15778+
15779 static inline long long atomic64_dec_return(atomic64_t *v)
15780 {
15781 long long a;
15782@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
15783 }
15784
15785 /**
15786+ * atomic64_add_unchecked - add integer to atomic64 variable
15787+ * @i: integer value to add
15788+ * @v: pointer to type atomic64_unchecked_t
15789+ *
15790+ * Atomically adds @i to @v.
15791+ */
15792+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
15793+{
15794+ __alternative_atomic64(add_unchecked, add_return_unchecked,
15795+ ASM_OUTPUT2("+A" (i), "+c" (v)),
15796+ ASM_NO_INPUT_CLOBBER("memory"));
15797+ return i;
15798+}
15799+
15800+/**
15801 * atomic64_sub - subtract the atomic64 variable
15802 * @i: integer value to subtract
15803 * @v: pointer to type atomic64_t
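32-bit x86 has no 64-bit locked arithmetic apart from cmpxchg8b, so atomic64 operations dispatch through alternative_atomic64() to out-of-line helpers, and the hunks above only have to declare an *_unchecked twin for each entry point. For reference, a sketch of how a 64-bit atomic add is built from compare-and-swap alone (compiler __atomic builtins, not the kernel's helpers):

#include <stdint.h>

static void atomic64_add_sketch(int64_t i, int64_t *v)
{
	int64_t old = __atomic_load_n(v, __ATOMIC_RELAXED);

	/* retry until no other CPU changed *v between the read and the CAS */
	while (!__atomic_compare_exchange_n(v, &old, old + i, 1,
					    __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
		;
}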
15804diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
15805index f8d273e..02f39f3 100644
15806--- a/arch/x86/include/asm/atomic64_64.h
15807+++ b/arch/x86/include/asm/atomic64_64.h
15808@@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
15809 }
15810
15811 /**
15812+ * atomic64_read_unchecked - read atomic64 variable
15813+ * @v: pointer of type atomic64_unchecked_t
15814+ *
15815+ * Atomically reads the value of @v.
15816+ * Doesn't imply a read memory barrier.
15817+ */
15818+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
15819+{
15820+ return ACCESS_ONCE((v)->counter);
15821+}
15822+
15823+/**
15824 * atomic64_set - set atomic64 variable
15825 * @v: pointer to type atomic64_t
15826 * @i: required value
15827@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
15828 }
15829
15830 /**
15831+ * atomic64_set_unchecked - set atomic64 variable
15832+ * @v: pointer to type atomic64_unchecked_t
15833+ * @i: required value
15834+ *
15835+ * Atomically sets the value of @v to @i.
15836+ */
15837+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
15838+{
15839+ v->counter = i;
15840+}
15841+
15842+/**
15843 * atomic64_add - add integer to atomic64 variable
15844 * @i: integer value to add
15845 * @v: pointer to type atomic64_t
15846@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
15847 */
15848 static inline void atomic64_add(long i, atomic64_t *v)
15849 {
15850+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
15851+
15852+#ifdef CONFIG_PAX_REFCOUNT
15853+ "jno 0f\n"
15854+ LOCK_PREFIX "subq %1,%0\n"
15855+ "int $4\n0:\n"
15856+ _ASM_EXTABLE(0b, 0b)
15857+#endif
15858+
15859+ : "=m" (v->counter)
15860+ : "er" (i), "m" (v->counter));
15861+}
15862+
15863+/**
15864+ * atomic64_add_unchecked - add integer to atomic64 variable
15865+ * @i: integer value to add
15866+ * @v: pointer to type atomic64_unchecked_t
15867+ *
15868+ * Atomically adds @i to @v.
15869+ */
15870+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
15871+{
15872 asm volatile(LOCK_PREFIX "addq %1,%0"
15873 : "=m" (v->counter)
15874 : "er" (i), "m" (v->counter));
15875@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
15876 */
15877 static inline void atomic64_sub(long i, atomic64_t *v)
15878 {
15879- asm volatile(LOCK_PREFIX "subq %1,%0"
15880+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15881+
15882+#ifdef CONFIG_PAX_REFCOUNT
15883+ "jno 0f\n"
15884+ LOCK_PREFIX "addq %1,%0\n"
15885+ "int $4\n0:\n"
15886+ _ASM_EXTABLE(0b, 0b)
15887+#endif
15888+
15889+ : "=m" (v->counter)
15890+ : "er" (i), "m" (v->counter));
15891+}
15892+
15893+/**
15894+ * atomic64_sub_unchecked - subtract the atomic64 variable
15895+ * @i: integer value to subtract
15896+ * @v: pointer to type atomic64_unchecked_t
15897+ *
15898+ * Atomically subtracts @i from @v.
15899+ */
15900+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
15901+{
15902+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
15903 : "=m" (v->counter)
15904 : "er" (i), "m" (v->counter));
15905 }
15906@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
15907 */
15908 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15909 {
15910- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
15911+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
15912 }
15913
15914 /**
15915@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15916 */
15917 static inline void atomic64_inc(atomic64_t *v)
15918 {
15919+ asm volatile(LOCK_PREFIX "incq %0\n"
15920+
15921+#ifdef CONFIG_PAX_REFCOUNT
15922+ "jno 0f\n"
15923+ LOCK_PREFIX "decq %0\n"
15924+ "int $4\n0:\n"
15925+ _ASM_EXTABLE(0b, 0b)
15926+#endif
15927+
15928+ : "=m" (v->counter)
15929+ : "m" (v->counter));
15930+}
15931+
15932+/**
15933+ * atomic64_inc_unchecked - increment atomic64 variable
15934+ * @v: pointer to type atomic64_unchecked_t
15935+ *
15936+ * Atomically increments @v by 1.
15937+ */
15938+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15939+{
15940 asm volatile(LOCK_PREFIX "incq %0"
15941 : "=m" (v->counter)
15942 : "m" (v->counter));
15943@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
15944 */
15945 static inline void atomic64_dec(atomic64_t *v)
15946 {
15947- asm volatile(LOCK_PREFIX "decq %0"
15948+ asm volatile(LOCK_PREFIX "decq %0\n"
15949+
15950+#ifdef CONFIG_PAX_REFCOUNT
15951+ "jno 0f\n"
15952+ LOCK_PREFIX "incq %0\n"
15953+ "int $4\n0:\n"
15954+ _ASM_EXTABLE(0b, 0b)
15955+#endif
15956+
15957+ : "=m" (v->counter)
15958+ : "m" (v->counter));
15959+}
15960+
15961+/**
15962+ * atomic64_dec_unchecked - decrement atomic64 variable
15963+ * @v: pointer to type atomic64_unchecked_t
15964+ *
15965+ * Atomically decrements @v by 1.
15966+ */
15967+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15968+{
15969+ asm volatile(LOCK_PREFIX "decq %0\n"
15970 : "=m" (v->counter)
15971 : "m" (v->counter));
15972 }
15973@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
15974 */
15975 static inline int atomic64_dec_and_test(atomic64_t *v)
15976 {
15977- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
15978+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
15979 }
15980
15981 /**
15982@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
15983 */
15984 static inline int atomic64_inc_and_test(atomic64_t *v)
15985 {
15986- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
15987+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
15988 }
15989
15990 /**
15991@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
15992 */
15993 static inline int atomic64_add_negative(long i, atomic64_t *v)
15994 {
15995- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
15996+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
15997 }
15998
15999 /**
16000@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
16001 */
16002 static inline long atomic64_add_return(long i, atomic64_t *v)
16003 {
16004+ return i + xadd_check_overflow(&v->counter, i);
16005+}
16006+
16007+/**
16008+ * atomic64_add_return_unchecked - add and return
16009+ * @i: integer value to add
16010+ * @v: pointer to type atomic64_unchecked_t
16011+ *
16012+ * Atomically adds @i to @v and returns @i + @v
16013+ */
16014+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
16015+{
16016 return i + xadd(&v->counter, i);
16017 }
16018
16019@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
16020 }
16021
16022 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
16023+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
16024+{
16025+ return atomic64_add_return_unchecked(1, v);
16026+}
16027 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
16028
16029 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16030@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16031 return cmpxchg(&v->counter, old, new);
16032 }
16033
16034+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
16035+{
16036+ return cmpxchg(&v->counter, old, new);
16037+}
16038+
16039 static inline long atomic64_xchg(atomic64_t *v, long new)
16040 {
16041 return xchg(&v->counter, new);
16042@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
16043 */
16044 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
16045 {
16046- long c, old;
16047+ long c, old, new;
16048 c = atomic64_read(v);
16049 for (;;) {
16050- if (unlikely(c == (u)))
16051+ if (unlikely(c == u))
16052 break;
16053- old = atomic64_cmpxchg((v), c, c + (a));
16054+
16055+ asm volatile("add %2,%0\n"
16056+
16057+#ifdef CONFIG_PAX_REFCOUNT
16058+ "jno 0f\n"
16059+ "sub %2,%0\n"
16060+ "int $4\n0:\n"
16061+ _ASM_EXTABLE(0b, 0b)
16062+#endif
16063+
16064+ : "=r" (new)
16065+ : "0" (c), "ir" (a));
16066+
16067+ old = atomic64_cmpxchg(v, c, new);
16068 if (likely(old == c))
16069 break;
16070 c = old;
16071 }
16072- return c != (u);
16073+ return c != u;
16074 }
16075
16076 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
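atomic64_add_unless() keeps its retry-loop shape; only the tentative addition now runs through the overflow-trapping asm instead of the plain c + (a) expression. A self-contained sketch of that loop, add @a unless the counter holds @u, reporting whether the add happened:

#include <stdbool.h>
#include <stdint.h>

static bool add_unless_sketch(int64_t *v, int64_t a, int64_t u)
{
	int64_t c = __atomic_load_n(v, __ATOMIC_RELAXED);

	for (;;) {
		if (c == u)
			return false; /* hit the forbidden value */
		/* on CAS failure, c is refreshed with the current value */
		if (__atomic_compare_exchange_n(v, &c, c + a, 0,
						__ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
			return true;
	}
}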
16077diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
16078index 2ab1eb3..1e8cc5d 100644
16079--- a/arch/x86/include/asm/barrier.h
16080+++ b/arch/x86/include/asm/barrier.h
16081@@ -57,7 +57,7 @@
16082 do { \
16083 compiletime_assert_atomic_type(*p); \
16084 smp_mb(); \
16085- ACCESS_ONCE(*p) = (v); \
16086+ ACCESS_ONCE_RW(*p) = (v); \
16087 } while (0)
16088
16089 #define smp_load_acquire(p) \
16090@@ -74,7 +74,7 @@ do { \
16091 do { \
16092 compiletime_assert_atomic_type(*p); \
16093 barrier(); \
16094- ACCESS_ONCE(*p) = (v); \
16095+ ACCESS_ONCE_RW(*p) = (v); \
16096 } while (0)
16097
16098 #define smp_load_acquire(p) \
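smp_store_release() must write through ACCESS_ONCE_RW because PaX's constification makes a plain ACCESS_ONCE read-only, so every intentional store has to name the writable variant explicitly. The assumed shape of the split, inferred from its use here (the real definitions live in the patched include/linux/compiler.h):

#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x)) /* read-only */
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))       /* writable  */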
16099diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16100index cfe3b95..d01b118 100644
16101--- a/arch/x86/include/asm/bitops.h
16102+++ b/arch/x86/include/asm/bitops.h
16103@@ -50,7 +50,7 @@
16104 * a mask operation on a byte.
16105 */
16106 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16107-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16108+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16109 #define CONST_MASK(nr) (1 << ((nr) & 7))
16110
16111 /**
16112@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16113 */
16114 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16115 {
16116- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16117+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16118 }
16119
16120 /**
16121@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16122 */
16123 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16124 {
16125- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16126+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16127 }
16128
16129 /**
16130@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16131 */
16132 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16133 {
16134- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16135+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16136 }
16137
16138 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16139@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16140 *
16141 * Undefined if no bit exists, so code should check against 0 first.
16142 */
16143-static inline unsigned long __ffs(unsigned long word)
16144+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16145 {
16146 asm("rep; bsf %1,%0"
16147 : "=r" (word)
16148@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16149 *
16150 * Undefined if no zero exists, so code should check against ~0UL first.
16151 */
16152-static inline unsigned long ffz(unsigned long word)
16153+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16154 {
16155 asm("rep; bsf %1,%0"
16156 : "=r" (word)
16157@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16158 *
16159 * Undefined if no set bit exists, so code should check against 0 first.
16160 */
16161-static inline unsigned long __fls(unsigned long word)
16162+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16163 {
16164 asm("bsr %1,%0"
16165 : "=r" (word)
16166@@ -434,7 +434,7 @@ static inline int ffs(int x)
16167 * set bit if value is nonzero. The last (most significant) bit is
16168 * at position 32.
16169 */
16170-static inline int fls(int x)
16171+static inline int __intentional_overflow(-1) fls(int x)
16172 {
16173 int r;
16174
16175@@ -476,7 +476,7 @@ static inline int fls(int x)
16176 * at position 64.
16177 */
16178 #ifdef CONFIG_X86_64
16179-static __always_inline int fls64(__u64 x)
16180+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16181 {
16182 int bitpos = -1;
16183 /*
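__intentional_overflow(-1) is consumed by the PaX size_overflow GCC plugin: it marks functions, here the bit-scan helpers, whose arithmetic wraps by design so the plugin leaves them uninstrumented. Without the plugin the marker must expand to nothing; a sketch of that fallback plus one annotated helper (assumed stub, illustrative function):

#ifndef __intentional_overflow
#define __intentional_overflow(...) /* no-op without the plugin (assumed) */
#endif

/* find the first zero bit; undefined for ~0UL, like the real ffz() */
static inline unsigned long __intentional_overflow(-1) ffz_sketch(unsigned long word)
{
	return __builtin_ctzl(~word);
}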
16184diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16185index 4fa687a..60f2d39 100644
16186--- a/arch/x86/include/asm/boot.h
16187+++ b/arch/x86/include/asm/boot.h
16188@@ -6,10 +6,15 @@
16189 #include <uapi/asm/boot.h>
16190
16191 /* Physical address where kernel should be loaded. */
16192-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16193+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16194 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16195 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16196
16197+#ifndef __ASSEMBLY__
16198+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16199+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16200+#endif
16201+
16202 /* Minimum kernel alignment, as a power of two */
16203 #ifdef CONFIG_X86_64
16204 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
16205diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16206index 48f99f1..d78ebf9 100644
16207--- a/arch/x86/include/asm/cache.h
16208+++ b/arch/x86/include/asm/cache.h
16209@@ -5,12 +5,13 @@
16210
16211 /* L1 cache line size */
16212 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16213-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16214+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16215
16216 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16217+#define __read_only __attribute__((__section__(".data..read_only")))
16218
16219 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16220-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16221+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16222
16223 #ifdef CONFIG_X86_VSMP
16224 #ifdef CONFIG_SMP
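__read_only places a variable in .data..read_only, which the PaX linker script maps read-only after boot; any later legitimate write must be bracketed by pax_open_kernel()/pax_close_kernel(), as in the desc.h hunks below. Usage is just an attribute on the definition, e.g. (hypothetical variable):

static int sysctl_example __read_only = 1; /* lands in .data..read_only */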
16225diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16226index 76659b6..72b8439 100644
16227--- a/arch/x86/include/asm/calling.h
16228+++ b/arch/x86/include/asm/calling.h
16229@@ -82,107 +82,117 @@ For 32-bit we have the following conventions - kernel is built with
16230 #define RSP 152
16231 #define SS 160
16232
16233-#define ARGOFFSET R11
16234-#define SWFRAME ORIG_RAX
16235+#define ARGOFFSET R15
16236
16237 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
16238- subq $9*8+\addskip, %rsp
16239- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16240- movq_cfi rdi, 8*8
16241- movq_cfi rsi, 7*8
16242- movq_cfi rdx, 6*8
16243+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16244+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16245+ movq_cfi rdi, RDI
16246+ movq_cfi rsi, RSI
16247+ movq_cfi rdx, RDX
16248
16249 .if \save_rcx
16250- movq_cfi rcx, 5*8
16251+ movq_cfi rcx, RCX
16252 .endif
16253
16254 .if \rax_enosys
16255- movq $-ENOSYS, 4*8(%rsp)
16256+ movq $-ENOSYS, RAX(%rsp)
16257 .else
16258- movq_cfi rax, 4*8
16259+ movq_cfi rax, RAX
16260 .endif
16261
16262 .if \save_r891011
16263- movq_cfi r8, 3*8
16264- movq_cfi r9, 2*8
16265- movq_cfi r10, 1*8
16266- movq_cfi r11, 0*8
16267+ movq_cfi r8, R8
16268+ movq_cfi r9, R9
16269+ movq_cfi r10, R10
16270+ movq_cfi r11, R11
16271 .endif
16272
16273+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16274+ movq_cfi r12, R12
16275+#endif
16276+
16277 .endm
16278
16279-#define ARG_SKIP (9*8)
16280+#define ARG_SKIP ORIG_RAX
16281
16282 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16283 rstor_r8910=1, rstor_rdx=1
16284+
16285+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16286+ movq_cfi_restore R12, r12
16287+#endif
16288+
16289 .if \rstor_r11
16290- movq_cfi_restore 0*8, r11
16291+ movq_cfi_restore R11, r11
16292 .endif
16293
16294 .if \rstor_r8910
16295- movq_cfi_restore 1*8, r10
16296- movq_cfi_restore 2*8, r9
16297- movq_cfi_restore 3*8, r8
16298+ movq_cfi_restore R10, r10
16299+ movq_cfi_restore R9, r9
16300+ movq_cfi_restore R8, r8
16301 .endif
16302
16303 .if \rstor_rax
16304- movq_cfi_restore 4*8, rax
16305+ movq_cfi_restore RAX, rax
16306 .endif
16307
16308 .if \rstor_rcx
16309- movq_cfi_restore 5*8, rcx
16310+ movq_cfi_restore RCX, rcx
16311 .endif
16312
16313 .if \rstor_rdx
16314- movq_cfi_restore 6*8, rdx
16315+ movq_cfi_restore RDX, rdx
16316 .endif
16317
16318- movq_cfi_restore 7*8, rsi
16319- movq_cfi_restore 8*8, rdi
16320+ movq_cfi_restore RSI, rsi
16321+ movq_cfi_restore RDI, rdi
16322
16323- .if ARG_SKIP+\addskip > 0
16324- addq $ARG_SKIP+\addskip, %rsp
16325- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16326+ .if ORIG_RAX+\addskip > 0
16327+ addq $ORIG_RAX+\addskip, %rsp
16328+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16329 .endif
16330 .endm
16331
16332- .macro LOAD_ARGS offset, skiprax=0
16333- movq \offset(%rsp), %r11
16334- movq \offset+8(%rsp), %r10
16335- movq \offset+16(%rsp), %r9
16336- movq \offset+24(%rsp), %r8
16337- movq \offset+40(%rsp), %rcx
16338- movq \offset+48(%rsp), %rdx
16339- movq \offset+56(%rsp), %rsi
16340- movq \offset+64(%rsp), %rdi
16341+ .macro LOAD_ARGS skiprax=0
16342+ movq R11(%rsp), %r11
16343+ movq R10(%rsp), %r10
16344+ movq R9(%rsp), %r9
16345+ movq R8(%rsp), %r8
16346+ movq RCX(%rsp), %rcx
16347+ movq RDX(%rsp), %rdx
16348+ movq RSI(%rsp), %rsi
16349+ movq RDI(%rsp), %rdi
16350 .if \skiprax
16351 .else
16352- movq \offset+72(%rsp), %rax
16353+ movq ORIG_RAX(%rsp), %rax
16354 .endif
16355 .endm
16356
16357-#define REST_SKIP (6*8)
16358-
16359 .macro SAVE_REST
16360- subq $REST_SKIP, %rsp
16361- CFI_ADJUST_CFA_OFFSET REST_SKIP
16362- movq_cfi rbx, 5*8
16363- movq_cfi rbp, 4*8
16364- movq_cfi r12, 3*8
16365- movq_cfi r13, 2*8
16366- movq_cfi r14, 1*8
16367- movq_cfi r15, 0*8
16368+ movq_cfi rbx, RBX
16369+ movq_cfi rbp, RBP
16370+
16371+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16372+ movq_cfi r12, R12
16373+#endif
16374+
16375+ movq_cfi r13, R13
16376+ movq_cfi r14, R14
16377+ movq_cfi r15, R15
16378 .endm
16379
16380 .macro RESTORE_REST
16381- movq_cfi_restore 0*8, r15
16382- movq_cfi_restore 1*8, r14
16383- movq_cfi_restore 2*8, r13
16384- movq_cfi_restore 3*8, r12
16385- movq_cfi_restore 4*8, rbp
16386- movq_cfi_restore 5*8, rbx
16387- addq $REST_SKIP, %rsp
16388- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
16389+ movq_cfi_restore R15, r15
16390+ movq_cfi_restore R14, r14
16391+ movq_cfi_restore R13, r13
16392+
16393+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16394+ movq_cfi_restore R12, r12
16395+#endif
16396+
16397+ movq_cfi_restore RBP, rbp
16398+ movq_cfi_restore RBX, rbx
16399 .endm
16400
16401 .macro SAVE_ALL
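The calling.h rewrite swaps the magic n*8 stack offsets for the named pt_regs offsets (RDI ... ORIG_RAX), so SAVE_ARGS/RESTORE_ARGS always address the full frame and %r12 can be dedicated to the KERNEXEC OR-method mask. Named offsets follow the kernel's asm-offsets technique: generate the constants from the C struct so the assembly cannot drift out of sync with it. A toy generator showing the idea (demo struct, not the kernel's layout):

#include <stddef.h>
#include <stdio.h>

struct pt_regs_demo {
	unsigned long r15, r14, r13, r12, rbp, rbx;
	unsigned long r11, r10, r9, r8, rax, rcx, rdx, rsi, rdi, orig_rax;
};

int main(void)
{
	/* emit #defines an assembler can include, asm-offsets.h style */
	printf("#define R15 %zu\n", offsetof(struct pt_regs_demo, r15));
	printf("#define RDI %zu\n", offsetof(struct pt_regs_demo, rdi));
	printf("#define ORIG_RAX %zu\n", offsetof(struct pt_regs_demo, orig_rax));
	return 0;
}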
16402diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
16403index f50de69..2b0a458 100644
16404--- a/arch/x86/include/asm/checksum_32.h
16405+++ b/arch/x86/include/asm/checksum_32.h
16406@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
16407 int len, __wsum sum,
16408 int *src_err_ptr, int *dst_err_ptr);
16409
16410+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
16411+ int len, __wsum sum,
16412+ int *src_err_ptr, int *dst_err_ptr);
16413+
16414+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
16415+ int len, __wsum sum,
16416+ int *src_err_ptr, int *dst_err_ptr);
16417+
16418 /*
16419 * Note: when you get a NULL pointer exception here this means someone
16420 * passed in an incorrect kernel address to one of these functions.
16421@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
16422
16423 might_sleep();
16424 stac();
16425- ret = csum_partial_copy_generic((__force void *)src, dst,
16426+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
16427 len, sum, err_ptr, NULL);
16428 clac();
16429
16430@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
16431 might_sleep();
16432 if (access_ok(VERIFY_WRITE, dst, len)) {
16433 stac();
16434- ret = csum_partial_copy_generic(src, (__force void *)dst,
16435+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
16436 len, sum, NULL, err_ptr);
16437 clac();
16438 return ret;
16439diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
16440index 99c105d7..2f667ac 100644
16441--- a/arch/x86/include/asm/cmpxchg.h
16442+++ b/arch/x86/include/asm/cmpxchg.h
16443@@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
16444 __compiletime_error("Bad argument size for cmpxchg");
16445 extern void __xadd_wrong_size(void)
16446 __compiletime_error("Bad argument size for xadd");
16447+extern void __xadd_check_overflow_wrong_size(void)
16448+ __compiletime_error("Bad argument size for xadd_check_overflow");
16449 extern void __add_wrong_size(void)
16450 __compiletime_error("Bad argument size for add");
16451+extern void __add_check_overflow_wrong_size(void)
16452+ __compiletime_error("Bad argument size for add_check_overflow");
16453
16454 /*
16455 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
16456@@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
16457 __ret; \
16458 })
16459
16460+#ifdef CONFIG_PAX_REFCOUNT
16461+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
16462+ ({ \
16463+ __typeof__ (*(ptr)) __ret = (arg); \
16464+ switch (sizeof(*(ptr))) { \
16465+ case __X86_CASE_L: \
16466+ asm volatile (lock #op "l %0, %1\n" \
16467+ "jno 0f\n" \
16468+ "mov %0,%1\n" \
16469+ "int $4\n0:\n" \
16470+ _ASM_EXTABLE(0b, 0b) \
16471+ : "+r" (__ret), "+m" (*(ptr)) \
16472+ : : "memory", "cc"); \
16473+ break; \
16474+ case __X86_CASE_Q: \
16475+ asm volatile (lock #op "q %q0, %1\n" \
16476+ "jno 0f\n" \
16477+ "mov %0,%1\n" \
16478+ "int $4\n0:\n" \
16479+ _ASM_EXTABLE(0b, 0b) \
16480+ : "+r" (__ret), "+m" (*(ptr)) \
16481+ : : "memory", "cc"); \
16482+ break; \
16483+ default: \
16484+ __ ## op ## _check_overflow_wrong_size(); \
16485+ } \
16486+ __ret; \
16487+ })
16488+#else
16489+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
16490+#endif
16491+
16492 /*
16493 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
16494 * Since this is generally used to protect other memory information, we
16495@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
16496 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
16497 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
16498
16499+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
16500+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
16501+
16502 #define __add(ptr, inc, lock) \
16503 ({ \
16504 __typeof__ (*(ptr)) __ret = (inc); \
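xadd_check_overflow() is the fetch-and-add form of the same guard: do the locked xadd, and if the signed-overflow flag is set, write the old value back and trap; atomic_add_return() builds on it as return i + xadd_check_overflow(&v->counter, i). A portable C analogue under the same assumptions as the earlier refcount sketch:

#include <stdbool.h>

/* sketch: fetch-and-add that rolls back and reports instead of wrapping */
static bool xadd_checked_sketch(int *ptr, int inc, int *old_out)
{
	int old = __atomic_fetch_add(ptr, inc, __ATOMIC_SEQ_CST);
	int sum;

	if (__builtin_add_overflow(old, inc, &sum)) {
		/* mirrors the "mov %0,%1" undo before the int $4 trap */
		__atomic_store_n(ptr, old, __ATOMIC_SEQ_CST);
		return false;
	}
	*old_out = old;
	return true;
}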
16505diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
16506index 59c6c40..5e0b22c 100644
16507--- a/arch/x86/include/asm/compat.h
16508+++ b/arch/x86/include/asm/compat.h
16509@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
16510 typedef u32 compat_uint_t;
16511 typedef u32 compat_ulong_t;
16512 typedef u64 __attribute__((aligned(4))) compat_u64;
16513-typedef u32 compat_uptr_t;
16514+typedef u32 __user compat_uptr_t;
16515
16516 struct compat_timespec {
16517 compat_time_t tv_sec;
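Tagging compat_uptr_t with __user lets sparse track these 32-bit values as user pointers end to end, so a missed compat_ptr()/copy_from_user() conversion surfaces as an address-space violation. Under sparse the tag is an address-space attribute and otherwise disappears; the stock fallback is roughly:

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user /* expands to nothing in normal builds */
#endif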
16518diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
16519index aede2c3..40d7a8f 100644
16520--- a/arch/x86/include/asm/cpufeature.h
16521+++ b/arch/x86/include/asm/cpufeature.h
16522@@ -212,7 +212,7 @@
16523 #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
16524 #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
16525 #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
16526-
16527+#define X86_FEATURE_STRONGUDEREF ( 8*32+31) /* PaX PCID based strong UDEREF */
16528
16529 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
16530 #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
16531@@ -220,7 +220,7 @@
16532 #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
16533 #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
16534 #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
16535-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
16536+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
16537 #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
16538 #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
16539 #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
16540@@ -388,6 +388,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
16541 #define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
16542 #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
16543 #define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
16544+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
16545
16546 #if __GNUC__ >= 4
16547 extern void warn_pre_alternatives(void);
16548@@ -439,7 +440,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16549
16550 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
16551 t_warn:
16552- warn_pre_alternatives();
16553+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
16554+ warn_pre_alternatives();
16555 return false;
16556 #endif
16557
16558@@ -459,7 +461,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
16559 ".section .discard,\"aw\",@progbits\n"
16560 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16561 ".previous\n"
16562- ".section .altinstr_replacement,\"ax\"\n"
16563+ ".section .altinstr_replacement,\"a\"\n"
16564 "3: movb $1,%0\n"
16565 "4:\n"
16566 ".previous\n"
16567@@ -496,7 +498,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16568 " .byte 2b - 1b\n" /* src len */
16569 " .byte 4f - 3f\n" /* repl len */
16570 ".previous\n"
16571- ".section .altinstr_replacement,\"ax\"\n"
16572+ ".section .altinstr_replacement,\"a\"\n"
16573 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
16574 "4:\n"
16575 ".previous\n"
16576@@ -529,7 +531,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16577 ".section .discard,\"aw\",@progbits\n"
16578 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
16579 ".previous\n"
16580- ".section .altinstr_replacement,\"ax\"\n"
16581+ ".section .altinstr_replacement,\"a\"\n"
16582 "3: movb $0,%0\n"
16583 "4:\n"
16584 ".previous\n"
16585@@ -543,7 +545,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
16586 ".section .discard,\"aw\",@progbits\n"
16587 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
16588 ".previous\n"
16589- ".section .altinstr_replacement,\"ax\"\n"
16590+ ".section .altinstr_replacement,\"a\"\n"
16591 "5: movb $1,%0\n"
16592 "6:\n"
16593 ".previous\n"
16594diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
16595index a94b82e..59ecefa 100644
16596--- a/arch/x86/include/asm/desc.h
16597+++ b/arch/x86/include/asm/desc.h
16598@@ -4,6 +4,7 @@
16599 #include <asm/desc_defs.h>
16600 #include <asm/ldt.h>
16601 #include <asm/mmu.h>
16602+#include <asm/pgtable.h>
16603
16604 #include <linux/smp.h>
16605 #include <linux/percpu.h>
16606@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16607
16608 desc->type = (info->read_exec_only ^ 1) << 1;
16609 desc->type |= info->contents << 2;
16610+ desc->type |= info->seg_not_present ^ 1;
16611
16612 desc->s = 1;
16613 desc->dpl = 0x3;
16614@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
16615 }
16616
16617 extern struct desc_ptr idt_descr;
16618-extern gate_desc idt_table[];
16619-extern struct desc_ptr debug_idt_descr;
16620-extern gate_desc debug_idt_table[];
16621-
16622-struct gdt_page {
16623- struct desc_struct gdt[GDT_ENTRIES];
16624-} __attribute__((aligned(PAGE_SIZE)));
16625-
16626-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
16627+extern gate_desc idt_table[IDT_ENTRIES];
16628+extern const struct desc_ptr debug_idt_descr;
16629+extern gate_desc debug_idt_table[IDT_ENTRIES];
16630
16631+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
16632 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
16633 {
16634- return per_cpu(gdt_page, cpu).gdt;
16635+ return cpu_gdt_table[cpu];
16636 }
16637
16638 #ifdef CONFIG_X86_64
16639@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
16640 unsigned long base, unsigned dpl, unsigned flags,
16641 unsigned short seg)
16642 {
16643- gate->a = (seg << 16) | (base & 0xffff);
16644- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
16645+ gate->gate.offset_low = base;
16646+ gate->gate.seg = seg;
16647+ gate->gate.reserved = 0;
16648+ gate->gate.type = type;
16649+ gate->gate.s = 0;
16650+ gate->gate.dpl = dpl;
16651+ gate->gate.p = 1;
16652+ gate->gate.offset_high = base >> 16;
16653 }
16654
16655 #endif
16656@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
16657
16658 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
16659 {
16660+ pax_open_kernel();
16661 memcpy(&idt[entry], gate, sizeof(*gate));
16662+ pax_close_kernel();
16663 }
16664
16665 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
16666 {
16667+ pax_open_kernel();
16668 memcpy(&ldt[entry], desc, 8);
16669+ pax_close_kernel();
16670 }
16671
16672 static inline void
16673@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
16674 default: size = sizeof(*gdt); break;
16675 }
16676
16677+ pax_open_kernel();
16678 memcpy(&gdt[entry], desc, size);
16679+ pax_close_kernel();
16680 }
16681
16682 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
16683@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
16684
16685 static inline void native_load_tr_desc(void)
16686 {
16687+ pax_open_kernel();
16688 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
16689+ pax_close_kernel();
16690 }
16691
16692 static inline void native_load_gdt(const struct desc_ptr *dtr)
16693@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
16694 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
16695 unsigned int i;
16696
16697+ pax_open_kernel();
16698 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
16699 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
16700+ pax_close_kernel();
16701 }
16702
16703 /* This intentionally ignores lm, since 32-bit apps don't have that field. */
16704@@ -295,7 +308,7 @@ static inline void load_LDT(mm_context_t *pc)
16705 preempt_enable();
16706 }
16707
16708-static inline unsigned long get_desc_base(const struct desc_struct *desc)
16709+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
16710 {
16711 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
16712 }
16713@@ -319,7 +332,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
16714 }
16715
16716 #ifdef CONFIG_X86_64
16717-static inline void set_nmi_gate(int gate, void *addr)
16718+static inline void set_nmi_gate(int gate, const void *addr)
16719 {
16720 gate_desc s;
16721
16722@@ -329,14 +342,14 @@ static inline void set_nmi_gate(int gate, void *addr)
16723 #endif
16724
16725 #ifdef CONFIG_TRACING
16726-extern struct desc_ptr trace_idt_descr;
16727-extern gate_desc trace_idt_table[];
16728+extern const struct desc_ptr trace_idt_descr;
16729+extern gate_desc trace_idt_table[IDT_ENTRIES];
16730 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16731 {
16732 write_idt_entry(trace_idt_table, entry, gate);
16733 }
16734
16735-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
16736+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
16737 unsigned dpl, unsigned ist, unsigned seg)
16738 {
16739 gate_desc s;
16740@@ -356,7 +369,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
16741 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
16742 #endif
16743
16744-static inline void _set_gate(int gate, unsigned type, void *addr,
16745+static inline void _set_gate(int gate, unsigned type, const void *addr,
16746 unsigned dpl, unsigned ist, unsigned seg)
16747 {
16748 gate_desc s;
16749@@ -379,9 +392,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
16750 #define set_intr_gate(n, addr) \
16751 do { \
16752 BUG_ON((unsigned)n > 0xFF); \
16753- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
16754+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
16755 __KERNEL_CS); \
16756- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
16757+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
16758 0, 0, __KERNEL_CS); \
16759 } while (0)
16760
16761@@ -409,19 +422,19 @@ static inline void alloc_system_vector(int vector)
16762 /*
16763 * This routine sets up an interrupt gate at directory privilege level 3.
16764 */
16765-static inline void set_system_intr_gate(unsigned int n, void *addr)
16766+static inline void set_system_intr_gate(unsigned int n, const void *addr)
16767 {
16768 BUG_ON((unsigned)n > 0xFF);
16769 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
16770 }
16771
16772-static inline void set_system_trap_gate(unsigned int n, void *addr)
16773+static inline void set_system_trap_gate(unsigned int n, const void *addr)
16774 {
16775 BUG_ON((unsigned)n > 0xFF);
16776 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
16777 }
16778
16779-static inline void set_trap_gate(unsigned int n, void *addr)
16780+static inline void set_trap_gate(unsigned int n, const void *addr)
16781 {
16782 BUG_ON((unsigned)n > 0xFF);
16783 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
16784@@ -430,16 +443,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
16785 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
16786 {
16787 BUG_ON((unsigned)n > 0xFF);
16788- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
16789+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
16790 }
16791
16792-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
16793+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
16794 {
16795 BUG_ON((unsigned)n > 0xFF);
16796 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
16797 }
16798
16799-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
16800+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
16801 {
16802 BUG_ON((unsigned)n > 0xFF);
16803 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
16804@@ -511,4 +524,17 @@ static inline void load_current_idt(void)
16805 else
16806 load_idt((const struct desc_ptr *)&idt_descr);
16807 }
16808+
16809+#ifdef CONFIG_X86_32
16810+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
16811+{
16812+ struct desc_struct d;
16813+
16814+ if (likely(limit))
16815+ limit = (limit - 1UL) >> PAGE_SHIFT;
16816+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
16817+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
16818+}
16819+#endif
16820+
16821 #endif /* _ASM_X86_DESC_H */
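The set_user_cs() helper added above is how PaX's 32-bit PAGEEXEC/SEGMEXEC shrinks the user code segment so only the intended executable range is fetchable: the byte limit becomes a page-granular limit field ((limit - 1) >> PAGE_SHIFT), packed with type 0xFB (present, DPL 3, code, read/exec, accessed) and flags 0xC (granularity + 32-bit). A minimal user-space sketch of just the limit arithmetic; PAGE_SHIFT and the sample limit are assumptions, not taken from a kernel build:

#include <stdio.h>

#define PAGE_SHIFT 12

/* page-granular limit field, as set_user_cs() computes it (0 stays 0) */
static unsigned long cs_limit_field(unsigned long byte_limit)
{
    return byte_limit ? (byte_limit - 1UL) >> PAGE_SHIFT : 0;
}

/* effective limit the CPU enforces with the G (granularity) bit set:
 * last addressable offset = (field + 1) * 4 KiB - 1 */
static unsigned long cs_effective_limit(unsigned long field)
{
    return ((field + 1UL) << PAGE_SHIFT) - 1UL;
}

int main(void)
{
    unsigned long limit = 0x08049000UL; /* hypothetical end of executable text */
    unsigned long field = cs_limit_field(limit);
    printf("byte limit %#lx -> limit field %#lx -> CPU enforces up to %#lx\n",
           limit, field, cs_effective_limit(field));
    return 0;
}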
16822diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
16823index 278441f..b95a174 100644
16824--- a/arch/x86/include/asm/desc_defs.h
16825+++ b/arch/x86/include/asm/desc_defs.h
16826@@ -31,6 +31,12 @@ struct desc_struct {
16827 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
16828 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
16829 };
16830+ struct {
16831+ u16 offset_low;
16832+ u16 seg;
16833+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
16834+ unsigned offset_high: 16;
16835+ } gate;
16836 };
16837 } __attribute__((packed));
16838
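The anonymous "gate" view added to desc_defs.h above is what lets pack_gate() (rewritten earlier in this patch) fill an interrupt/trap gate by named field instead of the old hand-shifted a/b words. The two encodings are bit-identical; a small sketch (GCC on little-endian x86 assumed, since bitfield layout is ABI-dependent) can check that:

#include <stdint.h>
#include <stdio.h>

union gate {
    struct { uint32_t a, b; };                 /* old two-word view */
    struct {                                   /* new named view from the hunk */
        uint16_t offset_low;
        uint16_t seg;
        unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
        unsigned offset_high : 16;
    } gate;
} __attribute__((packed));

int main(void)
{
    uint32_t base = 0xdeadbeef;
    unsigned type = 0xe /* interrupt gate */, dpl = 0;
    uint16_t seg = 0x10;
    union gate oldg = {{0, 0}}, newg = {{0, 0}};

    /* old pack_gate() */
    oldg.a = ((uint32_t)seg << 16) | (base & 0xffff);
    oldg.b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);

    /* new pack_gate() */
    newg.gate.offset_low = base;  newg.gate.seg = seg;  newg.gate.reserved = 0;
    newg.gate.type = type;        newg.gate.s = 0;      newg.gate.dpl = dpl;
    newg.gate.p = 1;              newg.gate.offset_high = base >> 16;

    printf("old %08x:%08x new %08x:%08x -> %s\n", oldg.b, oldg.a, newg.b, newg.a,
           (oldg.a == newg.a && oldg.b == newg.b) ? "identical" : "DIFFER");
    return 0;
}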
16839diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
16840index ced283a..ffe04cc 100644
16841--- a/arch/x86/include/asm/div64.h
16842+++ b/arch/x86/include/asm/div64.h
16843@@ -39,7 +39,7 @@
16844 __mod; \
16845 })
16846
16847-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16848+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
16849 {
16850 union {
16851 u64 v64;
16852diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
16853index ca3347a..1a5082a 100644
16854--- a/arch/x86/include/asm/elf.h
16855+++ b/arch/x86/include/asm/elf.h
16856@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
16857
16858 #include <asm/vdso.h>
16859
16860-#ifdef CONFIG_X86_64
16861-extern unsigned int vdso64_enabled;
16862-#endif
16863 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
16864 extern unsigned int vdso32_enabled;
16865 #endif
16866@@ -249,7 +246,25 @@ extern int force_personality32;
16867 the loader. We need to make sure that it is out of the way of the program
16868 that it will "exec", and that there is sufficient room for the brk. */
16869
16870+#ifdef CONFIG_PAX_SEGMEXEC
16871+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
16872+#else
16873 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
16874+#endif
16875+
16876+#ifdef CONFIG_PAX_ASLR
16877+#ifdef CONFIG_X86_32
16878+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
16879+
16880+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16881+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
16882+#else
16883+#define PAX_ELF_ET_DYN_BASE 0x400000UL
16884+
16885+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16886+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
16887+#endif
16888+#endif
16889
16890 /* This yields a mask that user programs can use to figure out what
16891 instruction set this CPU supports. This could be done in user space,
16892@@ -298,17 +313,13 @@ do { \
16893
16894 #define ARCH_DLINFO \
16895 do { \
16896- if (vdso64_enabled) \
16897- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16898- (unsigned long __force)current->mm->context.vdso); \
16899+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16900 } while (0)
16901
16902 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
16903 #define ARCH_DLINFO_X32 \
16904 do { \
16905- if (vdso64_enabled) \
16906- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
16907- (unsigned long __force)current->mm->context.vdso); \
16908+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
16909 } while (0)
16910
16911 #define AT_SYSINFO 32
16912@@ -323,10 +334,10 @@ else \
16913
16914 #endif /* !CONFIG_X86_32 */
16915
16916-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
16917+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
16918
16919 #define VDSO_ENTRY \
16920- ((unsigned long)current->mm->context.vdso + \
16921+ (current->mm->context.vdso + \
16922 selected_vdso32->sym___kernel_vsyscall)
16923
16924 struct linux_binprm;
16925@@ -338,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
16926 int uses_interp);
16927 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
16928
16929-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
16930-#define arch_randomize_brk arch_randomize_brk
16931-
16932 /*
16933 * True on X86_32 or when emulating IA32 on X86_64
16934 */
16935diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
16936index 77a99ac..39ff7f5 100644
16937--- a/arch/x86/include/asm/emergency-restart.h
16938+++ b/arch/x86/include/asm/emergency-restart.h
16939@@ -1,6 +1,6 @@
16940 #ifndef _ASM_X86_EMERGENCY_RESTART_H
16941 #define _ASM_X86_EMERGENCY_RESTART_H
16942
16943-extern void machine_emergency_restart(void);
16944+extern void machine_emergency_restart(void) __noreturn;
16945
16946 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
16947diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
16948index 1c7eefe..d0e4702 100644
16949--- a/arch/x86/include/asm/floppy.h
16950+++ b/arch/x86/include/asm/floppy.h
16951@@ -229,18 +229,18 @@ static struct fd_routine_l {
16952 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
16953 } fd_routine[] = {
16954 {
16955- request_dma,
16956- free_dma,
16957- get_dma_residue,
16958- dma_mem_alloc,
16959- hard_dma_setup
16960+ ._request_dma = request_dma,
16961+ ._free_dma = free_dma,
16962+ ._get_dma_residue = get_dma_residue,
16963+ ._dma_mem_alloc = dma_mem_alloc,
16964+ ._dma_setup = hard_dma_setup
16965 },
16966 {
16967- vdma_request_dma,
16968- vdma_nop,
16969- vdma_get_dma_residue,
16970- vdma_mem_alloc,
16971- vdma_dma_setup
16972+ ._request_dma = vdma_request_dma,
16973+ ._free_dma = vdma_nop,
16974+ ._get_dma_residue = vdma_get_dma_residue,
16975+ ._dma_mem_alloc = vdma_mem_alloc,
16976+ ._dma_setup = vdma_dma_setup
16977 }
16978 };
16979
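The floppy.h hunk above is purely defensive: the two fd_routine[] entries switch from positional to designated initializers, so a future reordering of struct fd_routine_l's members can no longer silently bind the wrong function to the wrong slot. The pattern in miniature, with hypothetical names:

#include <stdio.h>

struct ops {
    int (*open)(const char *);
    int (*close)(int);
};

static int my_open(const char *p) { (void)p; return 3; }
static int my_close(int fd) { (void)fd; return 0; }

/* designated form: stays correct even if 'open' and 'close'
 * ever swap places inside struct ops */
static const struct ops fops = {
    .open  = my_open,
    .close = my_close,
};

int main(void)
{
    int fd = fops.open("/dev/fd0");
    printf("open -> %d, close -> %d\n", fd, fops.close(fd));
    return 0;
}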
16980diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
16981index f895358..800c60d 100644
16982--- a/arch/x86/include/asm/fpu-internal.h
16983+++ b/arch/x86/include/asm/fpu-internal.h
16984@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16985 #define user_insn(insn, output, input...) \
16986 ({ \
16987 int err; \
16988+ pax_open_userland(); \
16989 asm volatile(ASM_STAC "\n" \
16990- "1:" #insn "\n\t" \
16991+ "1:" \
16992+ __copyuser_seg \
16993+ #insn "\n\t" \
16994 "2: " ASM_CLAC "\n" \
16995 ".section .fixup,\"ax\"\n" \
16996 "3: movl $-1,%[err]\n" \
16997@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16998 _ASM_EXTABLE(1b, 3b) \
16999 : [err] "=r" (err), output \
17000 : "0"(0), input); \
17001+ pax_close_userland(); \
17002 err; \
17003 })
17004
17005@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
17006 "fnclex\n\t"
17007 "emms\n\t"
17008 "fildl %P[addr]" /* set F?P to defined value */
17009- : : [addr] "m" (tsk->thread.fpu.has_fpu));
17010+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
17011 }
17012
17013 return fpu_restore_checking(&tsk->thread.fpu);
17014diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
17015index b4c1f54..e290c08 100644
17016--- a/arch/x86/include/asm/futex.h
17017+++ b/arch/x86/include/asm/futex.h
17018@@ -12,6 +12,7 @@
17019 #include <asm/smap.h>
17020
17021 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
17022+ typecheck(u32 __user *, uaddr); \
17023 asm volatile("\t" ASM_STAC "\n" \
17024 "1:\t" insn "\n" \
17025 "2:\t" ASM_CLAC "\n" \
17026@@ -20,15 +21,16 @@
17027 "\tjmp\t2b\n" \
17028 "\t.previous\n" \
17029 _ASM_EXTABLE(1b, 3b) \
17030- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
17031+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
17032 : "i" (-EFAULT), "0" (oparg), "1" (0))
17033
17034 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
17035+ typecheck(u32 __user *, uaddr); \
17036 asm volatile("\t" ASM_STAC "\n" \
17037 "1:\tmovl %2, %0\n" \
17038 "\tmovl\t%0, %3\n" \
17039 "\t" insn "\n" \
17040- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
17041+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
17042 "\tjnz\t1b\n" \
17043 "3:\t" ASM_CLAC "\n" \
17044 "\t.section .fixup,\"ax\"\n" \
17045@@ -38,7 +40,7 @@
17046 _ASM_EXTABLE(1b, 4b) \
17047 _ASM_EXTABLE(2b, 4b) \
17048 : "=&a" (oldval), "=&r" (ret), \
17049- "+m" (*uaddr), "=&r" (tem) \
17050+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
17051 : "r" (oparg), "i" (-EFAULT), "1" (0))
17052
17053 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17054@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17055
17056 pagefault_disable();
17057
17058+ pax_open_userland();
17059 switch (op) {
17060 case FUTEX_OP_SET:
17061- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
17062+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
17063 break;
17064 case FUTEX_OP_ADD:
17065- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
17066+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
17067 uaddr, oparg);
17068 break;
17069 case FUTEX_OP_OR:
17070@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17071 default:
17072 ret = -ENOSYS;
17073 }
17074+ pax_close_userland();
17075
17076 pagefault_enable();
17077
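Behind the __futex_atomic_op2 macro above sits a classic compare-and-swap retry loop: read the user word, compute the new value from it, cmpxchg it back, and start over if another thread changed the word in between. The patch's additions are orthogonal to that loop: typecheck() pins uaddr's type at compile time, and the __copyuser_seg override plus pax_open_userland()/pax_close_userland() temporarily re-enable access to the userland range that UDEREF otherwise keeps unreachable. The loop itself, as a portable user-space sketch using GCC atomics:

#include <stdio.h>

/* FUTEX_OP_ADD-style op: *uaddr += oparg, returns the old value.
 * Mirrors the inline-asm structure: load, modify, cmpxchg, retry. */
static int futex_op_add(int *uaddr, int oparg)
{
    int oldval = __atomic_load_n(uaddr, __ATOMIC_RELAXED);
    while (!__atomic_compare_exchange_n(uaddr, &oldval, oldval + oparg,
                                        0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
        ; /* oldval was refreshed by the failed compare-exchange */
    return oldval;
}

int main(void)
{
    int word = 40;
    int old = futex_op_add(&word, 2);
    printf("old %d -> new %d\n", old, word);
    return 0;
}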
17078diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
17079index 9662290..49ca5e5 100644
17080--- a/arch/x86/include/asm/hw_irq.h
17081+++ b/arch/x86/include/asm/hw_irq.h
17082@@ -160,8 +160,8 @@ static inline void unlock_vector_lock(void) {}
17083 #endif /* CONFIG_X86_LOCAL_APIC */
17084
17085 /* Statistics */
17086-extern atomic_t irq_err_count;
17087-extern atomic_t irq_mis_count;
17088+extern atomic_unchecked_t irq_err_count;
17089+extern atomic_unchecked_t irq_mis_count;
17090
17091 /* EISA */
17092 extern void eisa_set_level_irq(unsigned int irq);
17093diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17094index ccffa53..3c90c87 100644
17095--- a/arch/x86/include/asm/i8259.h
17096+++ b/arch/x86/include/asm/i8259.h
17097@@ -62,7 +62,7 @@ struct legacy_pic {
17098 void (*init)(int auto_eoi);
17099 int (*irq_pending)(unsigned int irq);
17100 void (*make_irq)(unsigned int irq);
17101-};
17102+} __do_const;
17103
17104 extern struct legacy_pic *legacy_pic;
17105 extern struct legacy_pic null_legacy_pic;
17106diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17107index 34a5b93..27e40a6 100644
17108--- a/arch/x86/include/asm/io.h
17109+++ b/arch/x86/include/asm/io.h
17110@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17111 "m" (*(volatile type __force *)addr) barrier); }
17112
17113 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17114-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17115-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17116+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17117+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17118
17119 build_mmio_read(__readb, "b", unsigned char, "=q", )
17120-build_mmio_read(__readw, "w", unsigned short, "=r", )
17121-build_mmio_read(__readl, "l", unsigned int, "=r", )
17122+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17123+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17124
17125 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17126 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17127@@ -113,7 +113,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17128 * this function
17129 */
17130
17131-static inline phys_addr_t virt_to_phys(volatile void *address)
17132+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17133 {
17134 return __pa(address);
17135 }
17136@@ -189,7 +189,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17137 return ioremap_nocache(offset, size);
17138 }
17139
17140-extern void iounmap(volatile void __iomem *addr);
17141+extern void iounmap(const volatile void __iomem *addr);
17142
17143 extern void set_iounmap_nonlazy(void);
17144
17145@@ -199,6 +199,17 @@ extern void set_iounmap_nonlazy(void);
17146
17147 #include <linux/vmalloc.h>
17148
17149+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17150+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17151+{
17152+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17153+}
17154+
17155+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17156+{
17157+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17158+}
17159+
17160 /*
17161 * Convert a virtual cached pointer to an uncached pointer
17162 */
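The two helpers added to io.h bound /dev/mem-style accesses by the CPU's reported physical address width: with x86_phys_bits physical bits, the highest valid pfn is 2^(phys_bits - PAGE_SHIFT) - 1, and a [addr, addr + count) range passes only if its last page still lies below that boundary. The same check in standalone form (phys_bits = 36 is just an example value, and 64-bit arithmetic is used throughout to dodge 32-bit overflow):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1ULL << PAGE_SHIFT)

static int valid_phys_addr_range(unsigned long long addr, size_t count,
                                 unsigned phys_bits)
{
    /* the last page touched must fall below the 2^phys_bits boundary */
    return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT)
           < (1ULL << (phys_bits - PAGE_SHIFT)) ? 1 : 0;
}

int main(void)
{
    unsigned phys_bits = 36; /* example: 64 GiB of physical address space */
    printf("%d\n", valid_phys_addr_range(0xfee00000ULL, 4096, phys_bits));      /* 1 */
    printf("%d\n", valid_phys_addr_range((1ULL << 36) - 4096, 8192, phys_bits)); /* 0 */
    return 0;
}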
17163diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17164index 0a8b519..80e7d5b 100644
17165--- a/arch/x86/include/asm/irqflags.h
17166+++ b/arch/x86/include/asm/irqflags.h
17167@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17168 sti; \
17169 sysexit
17170
17171+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17172+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17173+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17174+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17175+
17176 #else
17177 #define INTERRUPT_RETURN iret
17178 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17179diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17180index 4421b5d..8543006 100644
17181--- a/arch/x86/include/asm/kprobes.h
17182+++ b/arch/x86/include/asm/kprobes.h
17183@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
17184 #define RELATIVEJUMP_SIZE 5
17185 #define RELATIVECALL_OPCODE 0xe8
17186 #define RELATIVE_ADDR_SIZE 4
17187-#define MAX_STACK_SIZE 64
17188-#define MIN_STACK_SIZE(ADDR) \
17189- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17190- THREAD_SIZE - (unsigned long)(ADDR))) \
17191- ? (MAX_STACK_SIZE) \
17192- : (((unsigned long)current_thread_info()) + \
17193- THREAD_SIZE - (unsigned long)(ADDR)))
17194+#define MAX_STACK_SIZE 64UL
17195+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17196
17197 #define flush_insn_slot(p) do { } while (0)
17198
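The kprobes change collapses the old three-way conditional into a min(): the number of stack bytes worth snapshotting is the smaller of MAX_STACK_SIZE (64) and the distance from ADDR up to the stack top, which is now taken from thread.sp0 rather than current_thread_info() + THREAD_SIZE (presumably because thread_info no longer marks the stack top under this patch's stack layout). The computation is just:

#include <stdio.h>

#define MAX_STACK_SIZE 64UL

static unsigned long min_stack_size(unsigned long sp0, unsigned long addr)
{
    unsigned long room = sp0 - addr; /* bytes between addr and the stack top */
    return room < MAX_STACK_SIZE ? room : MAX_STACK_SIZE;
}

int main(void)
{
    unsigned long sp0 = 0xffff880000004000UL;        /* hypothetical stack top */
    printf("%lu\n", min_stack_size(sp0, sp0 - 200)); /* 64: plenty of room */
    printf("%lu\n", min_stack_size(sp0, sp0 - 24));  /* 24: capped by the top */
    return 0;
}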
17199diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
17200index d89c6b8..e711c69 100644
17201--- a/arch/x86/include/asm/kvm_host.h
17202+++ b/arch/x86/include/asm/kvm_host.h
17203@@ -51,7 +51,7 @@
17204 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
17205
17206 #define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
17207-#define CR3_PCID_INVD (1UL << 63)
17208+#define CR3_PCID_INVD (1ULL << 63)
17209 #define CR4_RESERVED_BITS \
17210 (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
17211 | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
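The one-character kvm_host.h change above is a genuine bug fix rather than hardening: CR3_PCID_INVD is bit 63, and on a 32-bit build (an i386 KVM host) 1UL << 63 shifts a 32-bit type by more than its width, which is undefined behavior; 1ULL makes the constant 64-bit everywhere. Compilers flag the broken form:

#include <stdio.h>

int main(void)
{
    /* fine everywhere: unsigned long long is at least 64 bits wide */
    unsigned long long ok = 1ULL << 63;

    /* undefined where long is 32 bits (e.g. gcc -m32):
     * "warning: left shift count >= width of type" */
    unsigned long long bad = 1UL << 63;

    printf("%llx %llx (sizeof(long) = %zu)\n", ok, bad, sizeof(long));
    return 0;
}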
17212diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17213index 4ad6560..75c7bdd 100644
17214--- a/arch/x86/include/asm/local.h
17215+++ b/arch/x86/include/asm/local.h
17216@@ -10,33 +10,97 @@ typedef struct {
17217 atomic_long_t a;
17218 } local_t;
17219
17220+typedef struct {
17221+ atomic_long_unchecked_t a;
17222+} local_unchecked_t;
17223+
17224 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17225
17226 #define local_read(l) atomic_long_read(&(l)->a)
17227+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17228 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17229+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17230
17231 static inline void local_inc(local_t *l)
17232 {
17233- asm volatile(_ASM_INC "%0"
17234+ asm volatile(_ASM_INC "%0\n"
17235+
17236+#ifdef CONFIG_PAX_REFCOUNT
17237+ "jno 0f\n"
17238+ _ASM_DEC "%0\n"
17239+ "int $4\n0:\n"
17240+ _ASM_EXTABLE(0b, 0b)
17241+#endif
17242+
17243+ : "+m" (l->a.counter));
17244+}
17245+
17246+static inline void local_inc_unchecked(local_unchecked_t *l)
17247+{
17248+ asm volatile(_ASM_INC "%0\n"
17249 : "+m" (l->a.counter));
17250 }
17251
17252 static inline void local_dec(local_t *l)
17253 {
17254- asm volatile(_ASM_DEC "%0"
17255+ asm volatile(_ASM_DEC "%0\n"
17256+
17257+#ifdef CONFIG_PAX_REFCOUNT
17258+ "jno 0f\n"
17259+ _ASM_INC "%0\n"
17260+ "int $4\n0:\n"
17261+ _ASM_EXTABLE(0b, 0b)
17262+#endif
17263+
17264+ : "+m" (l->a.counter));
17265+}
17266+
17267+static inline void local_dec_unchecked(local_unchecked_t *l)
17268+{
17269+ asm volatile(_ASM_DEC "%0\n"
17270 : "+m" (l->a.counter));
17271 }
17272
17273 static inline void local_add(long i, local_t *l)
17274 {
17275- asm volatile(_ASM_ADD "%1,%0"
17276+ asm volatile(_ASM_ADD "%1,%0\n"
17277+
17278+#ifdef CONFIG_PAX_REFCOUNT
17279+ "jno 0f\n"
17280+ _ASM_SUB "%1,%0\n"
17281+ "int $4\n0:\n"
17282+ _ASM_EXTABLE(0b, 0b)
17283+#endif
17284+
17285+ : "+m" (l->a.counter)
17286+ : "ir" (i));
17287+}
17288+
17289+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17290+{
17291+ asm volatile(_ASM_ADD "%1,%0\n"
17292 : "+m" (l->a.counter)
17293 : "ir" (i));
17294 }
17295
17296 static inline void local_sub(long i, local_t *l)
17297 {
17298- asm volatile(_ASM_SUB "%1,%0"
17299+ asm volatile(_ASM_SUB "%1,%0\n"
17300+
17301+#ifdef CONFIG_PAX_REFCOUNT
17302+ "jno 0f\n"
17303+ _ASM_ADD "%1,%0\n"
17304+ "int $4\n0:\n"
17305+ _ASM_EXTABLE(0b, 0b)
17306+#endif
17307+
17308+ : "+m" (l->a.counter)
17309+ : "ir" (i));
17310+}
17311+
17312+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17313+{
17314+ asm volatile(_ASM_SUB "%1,%0\n"
17315 : "+m" (l->a.counter)
17316 : "ir" (i));
17317 }
17318@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17319 */
17320 static inline int local_sub_and_test(long i, local_t *l)
17321 {
17322- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17323+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17324 }
17325
17326 /**
17327@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17328 */
17329 static inline int local_dec_and_test(local_t *l)
17330 {
17331- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17332+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17333 }
17334
17335 /**
17336@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17337 */
17338 static inline int local_inc_and_test(local_t *l)
17339 {
17340- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17341+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17342 }
17343
17344 /**
17345@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17346 */
17347 static inline int local_add_negative(long i, local_t *l)
17348 {
17349- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17350+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17351 }
17352
17353 /**
17354@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17355 static inline long local_add_return(long i, local_t *l)
17356 {
17357 long __i = i;
17358+ asm volatile(_ASM_XADD "%0, %1\n"
17359+
17360+#ifdef CONFIG_PAX_REFCOUNT
17361+ "jno 0f\n"
17362+ _ASM_MOV "%0,%1\n"
17363+ "int $4\n0:\n"
17364+ _ASM_EXTABLE(0b, 0b)
17365+#endif
17366+
17367+ : "+r" (i), "+m" (l->a.counter)
17368+ : : "memory");
17369+ return i + __i;
17370+}
17371+
17372+/**
17373+ * local_add_return_unchecked - add and return
17374+ * @i: integer value to add
17375+ * @l: pointer to type local_unchecked_t
17376+ *
17377+ * Atomically adds @i to @l and returns @i + @l
17378+ */
17379+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
17380+{
17381+ long __i = i;
17382 asm volatile(_ASM_XADD "%0, %1;"
17383 : "+r" (i), "+m" (l->a.counter)
17384 : : "memory");
17385@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
17386
17387 #define local_cmpxchg(l, o, n) \
17388 (cmpxchg_local(&((l)->a.counter), (o), (n)))
17389+#define local_cmpxchg_unchecked(l, o, n) \
17390+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
17391 /* Always has a lock prefix */
17392 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
17393
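The pattern repeated all through the local.h hunk above is PaX's REFCOUNT check: after each inc/dec/add/sub, "jno" falls through when the signed result did not overflow; on overflow the operation is first undone and "int $4" raises the overflow exception, which the kernel turns into a refcount-overflow report, with the _ASM_EXTABLE entry resuming execution right after. The detection half can be demonstrated in user space by recording the overflow instead of trapping (the int $4 / extable part is kernel-only); x86-64 and GCC inline asm assumed:

#include <stdio.h>
#include <limits.h>

/* increment *l; on signed overflow, undo the increment and flag it --
 * the same inc/jno/dec dance as the hunk, with a flag store standing
 * in for the kernel's "int $4" trap */
static int checked_inc(long *l)
{
    int overflow = 0;
    asm volatile("incq %0\n"
                 "jno 0f\n"
                 "decq %0\n"
                 "movl $1, %1\n"
                 "0:\n"
                 : "+m" (*l), "+r" (overflow)
                 : : "cc");
    return overflow;
}

int main(void)
{
    long a = 41, b = LONG_MAX;
    printf("41       -> overflow=%d (now %ld)\n", checked_inc(&a), a); /* 0, 42 */
    printf("LONG_MAX -> overflow=%d (now %ld)\n", checked_inc(&b), b); /* 1, unchanged */
    return 0;
}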
17394diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
17395new file mode 100644
17396index 0000000..2bfd3ba
17397--- /dev/null
17398+++ b/arch/x86/include/asm/mman.h
17399@@ -0,0 +1,15 @@
17400+#ifndef _X86_MMAN_H
17401+#define _X86_MMAN_H
17402+
17403+#include <uapi/asm/mman.h>
17404+
17405+#ifdef __KERNEL__
17406+#ifndef __ASSEMBLY__
17407+#ifdef CONFIG_X86_32
17408+#define arch_mmap_check i386_mmap_check
17409+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
17410+#endif
17411+#endif
17412+#endif
17413+
17414+#endif /* X86_MMAN_H */
17415diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
17416index 876e74e..e20bfb1 100644
17417--- a/arch/x86/include/asm/mmu.h
17418+++ b/arch/x86/include/asm/mmu.h
17419@@ -9,7 +9,7 @@
17420 * we put the segment information here.
17421 */
17422 typedef struct {
17423- void *ldt;
17424+ struct desc_struct *ldt;
17425 int size;
17426
17427 #ifdef CONFIG_X86_64
17428@@ -18,7 +18,19 @@ typedef struct {
17429 #endif
17430
17431 struct mutex lock;
17432- void __user *vdso;
17433+ unsigned long vdso;
17434+
17435+#ifdef CONFIG_X86_32
17436+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17437+ unsigned long user_cs_base;
17438+ unsigned long user_cs_limit;
17439+
17440+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17441+ cpumask_t cpu_user_cs_mask;
17442+#endif
17443+
17444+#endif
17445+#endif
17446 } mm_context_t;
17447
17448 #ifdef CONFIG_SMP
17449diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
17450index 4b75d59..8ffacb6 100644
17451--- a/arch/x86/include/asm/mmu_context.h
17452+++ b/arch/x86/include/asm/mmu_context.h
17453@@ -27,6 +27,20 @@ void destroy_context(struct mm_struct *mm);
17454
17455 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
17456 {
17457+
17458+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17459+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
17460+ unsigned int i;
17461+ pgd_t *pgd;
17462+
17463+ pax_open_kernel();
17464+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
17465+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
17466+ set_pgd_batched(pgd+i, native_make_pgd(0));
17467+ pax_close_kernel();
17468+ }
17469+#endif
17470+
17471 #ifdef CONFIG_SMP
17472 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
17473 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
17474@@ -37,16 +51,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17475 struct task_struct *tsk)
17476 {
17477 unsigned cpu = smp_processor_id();
17478+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17479+ int tlbstate = TLBSTATE_OK;
17480+#endif
17481
17482 if (likely(prev != next)) {
17483 #ifdef CONFIG_SMP
17484+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17485+ tlbstate = this_cpu_read(cpu_tlbstate.state);
17486+#endif
17487 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17488 this_cpu_write(cpu_tlbstate.active_mm, next);
17489 #endif
17490 cpumask_set_cpu(cpu, mm_cpumask(next));
17491
17492 /* Re-load page tables */
17493+#ifdef CONFIG_PAX_PER_CPU_PGD
17494+ pax_open_kernel();
17495+
17496+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17497+ if (static_cpu_has(X86_FEATURE_PCID))
17498+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17499+ else
17500+#endif
17501+
17502+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17503+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17504+ pax_close_kernel();
17505+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17506+
17507+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17508+ if (static_cpu_has(X86_FEATURE_PCID)) {
17509+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17510+ u64 descriptor[2];
17511+ descriptor[0] = PCID_USER;
17512+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17513+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17514+ descriptor[0] = PCID_KERNEL;
17515+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17516+ }
17517+ } else {
17518+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17519+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17520+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17521+ else
17522+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17523+ }
17524+ } else
17525+#endif
17526+
17527+ load_cr3(get_cpu_pgd(cpu, kernel));
17528+#else
17529 load_cr3(next->pgd);
17530+#endif
17531 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17532
17533 /* Stop flush ipis for the previous mm */
17534@@ -64,9 +121,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17535 */
17536 if (unlikely(prev->context.ldt != next->context.ldt))
17537 load_LDT_nolock(&next->context);
17538+
17539+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17540+ if (!(__supported_pte_mask & _PAGE_NX)) {
17541+ smp_mb__before_atomic();
17542+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
17543+ smp_mb__after_atomic();
17544+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17545+ }
17546+#endif
17547+
17548+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17549+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
17550+ prev->context.user_cs_limit != next->context.user_cs_limit))
17551+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17552+#ifdef CONFIG_SMP
17553+ else if (unlikely(tlbstate != TLBSTATE_OK))
17554+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17555+#endif
17556+#endif
17557+
17558 }
17559+ else {
17560+
17561+#ifdef CONFIG_PAX_PER_CPU_PGD
17562+ pax_open_kernel();
17563+
17564+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17565+ if (static_cpu_has(X86_FEATURE_PCID))
17566+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
17567+ else
17568+#endif
17569+
17570+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
17571+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
17572+ pax_close_kernel();
17573+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
17574+
17575+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17576+ if (static_cpu_has(X86_FEATURE_PCID)) {
17577+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
17578+ u64 descriptor[2];
17579+ descriptor[0] = PCID_USER;
17580+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17581+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
17582+ descriptor[0] = PCID_KERNEL;
17583+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
17584+ }
17585+ } else {
17586+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
17587+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
17588+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
17589+ else
17590+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
17591+ }
17592+ } else
17593+#endif
17594+
17595+ load_cr3(get_cpu_pgd(cpu, kernel));
17596+#endif
17597+
17598 #ifdef CONFIG_SMP
17599- else {
17600 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
17601 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
17602
17603@@ -83,12 +198,29 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
17604 * tlb flush IPI delivery. We must reload CR3
17605 * to make sure to use no freed page tables.
17606 */
17607+
17608+#ifndef CONFIG_PAX_PER_CPU_PGD
17609 load_cr3(next->pgd);
17610 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
17611+#endif
17612+
17613 load_LDT_nolock(&next->context);
17614+
17615+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17616+ if (!(__supported_pte_mask & _PAGE_NX))
17617+ cpu_set(cpu, next->context.cpu_user_cs_mask);
17618+#endif
17619+
17620+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
17621+#ifdef CONFIG_PAX_PAGEEXEC
17622+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
17623+#endif
17624+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
17625+#endif
17626+
17627 }
17628+#endif
17629 }
17630-#endif
17631 }
17632
17633 #define activate_mm(prev, next) \
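The switch_mm() rewrite above juggles two page-table roots per CPU under PAX_PER_CPU_PGD (a kernel PGD and a shadowed user PGD) and, on PCID-capable hardware, switches between them by tagging each CR3 load with an address-space ID; bit 63 of CR3 (PCID_NOFLUSH here) asks the CPU to keep that PCID's cached translations across the load. The values written are plain OR-compositions. A sketch of the arithmetic, with the PaX constants assumed as PCID_KERNEL = 0, PCID_USER = 1, PCID_NOFLUSH = 1ULL << 63 (their definitions live elsewhere in this patch):

#include <stdio.h>
#include <stdint.h>

#define PCID_KERNEL  0x0ULL
#define PCID_USER    0x1ULL        /* assumed PaX values */
#define PCID_NOFLUSH (1ULL << 63)  /* CR3 bit 63: keep this PCID's TLB entries */

static uint64_t make_cr3(uint64_t pgd_pa, uint64_t pcid, int noflush)
{
    return pgd_pa | pcid | (noflush ? PCID_NOFLUSH : 0);
}

int main(void)
{
    uint64_t kernel_pgd = 0x1ab2000, user_pgd = 0x1ab3000; /* hypothetical __pa()s */

    /* the write_cr3() sequence from the non-INVPCID branch above */
    printf("user cr3:   %#llx\n", (unsigned long long)make_cr3(user_pgd, PCID_USER, 0));
    printf("kernel cr3: %#llx\n", (unsigned long long)make_cr3(kernel_pgd, PCID_KERNEL, 1));
    return 0;
}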
17634diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
17635index e3b7819..b257c64 100644
17636--- a/arch/x86/include/asm/module.h
17637+++ b/arch/x86/include/asm/module.h
17638@@ -5,6 +5,7 @@
17639
17640 #ifdef CONFIG_X86_64
17641 /* X86_64 does not define MODULE_PROC_FAMILY */
17642+#define MODULE_PROC_FAMILY ""
17643 #elif defined CONFIG_M486
17644 #define MODULE_PROC_FAMILY "486 "
17645 #elif defined CONFIG_M586
17646@@ -57,8 +58,20 @@
17647 #error unknown processor family
17648 #endif
17649
17650-#ifdef CONFIG_X86_32
17651-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
17652+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
17653+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
17654+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
17655+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
17656+#else
17657+#define MODULE_PAX_KERNEXEC ""
17658 #endif
17659
17660+#ifdef CONFIG_PAX_MEMORY_UDEREF
17661+#define MODULE_PAX_UDEREF "UDEREF "
17662+#else
17663+#define MODULE_PAX_UDEREF ""
17664+#endif
17665+
17666+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
17667+
17668 #endif /* _ASM_X86_MODULE_H */
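With the module.h change, a module's vermagic also encodes which PaX features its kernel was built with, so a KERNEXEC/UDEREF kernel refuses modules built without matching settings (and x86_64 gains an empty MODULE_PROC_FAMILY so the shared definition now compiles there too). The macros combine by adjacent string-literal concatenation:

#include <stdio.h>

/* example configuration: KERNEXEC via the BTS method, UDEREF on, 64-bit */
#define MODULE_PROC_FAMILY ""
#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
#define MODULE_PAX_UDEREF   "UDEREF "
#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
    printf("vermagic arch suffix: \"%s\"\n", MODULE_ARCH_VERMAGIC);
    /* prints: vermagic arch suffix: "KERNEXEC_BTS UDEREF " */
    return 0;
}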
17669diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
17670index 5f2fc44..106caa6 100644
17671--- a/arch/x86/include/asm/nmi.h
17672+++ b/arch/x86/include/asm/nmi.h
17673@@ -36,26 +36,35 @@ enum {
17674
17675 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
17676
17677+struct nmiaction;
17678+
17679+struct nmiwork {
17680+ const struct nmiaction *action;
17681+ u64 max_duration;
17682+ struct irq_work irq_work;
17683+};
17684+
17685 struct nmiaction {
17686 struct list_head list;
17687 nmi_handler_t handler;
17688- u64 max_duration;
17689- struct irq_work irq_work;
17690 unsigned long flags;
17691 const char *name;
17692-};
17693+ struct nmiwork *work;
17694+} __do_const;
17695
17696 #define register_nmi_handler(t, fn, fg, n, init...) \
17697 ({ \
17698- static struct nmiaction init fn##_na = { \
17699+ static struct nmiwork fn##_nw; \
17700+ static const struct nmiaction init fn##_na = { \
17701 .handler = (fn), \
17702 .name = (n), \
17703 .flags = (fg), \
17704+ .work = &fn##_nw, \
17705 }; \
17706 __register_nmi_handler((t), &fn##_na); \
17707 })
17708
17709-int __register_nmi_handler(unsigned int, struct nmiaction *);
17710+int __register_nmi_handler(unsigned int, const struct nmiaction *);
17711
17712 void unregister_nmi_handler(unsigned int, const char *);
17713
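The nmi.h rework above is this patch's recurring constification idiom: the registered descriptor (struct nmiaction) becomes const so it can live in read-only memory, while its genuinely mutable members (max_duration, irq_work) move out into a companion struct nmiwork that the const descriptor merely points to. In outline, with shortened hypothetical names:

#include <stdio.h>

struct work { unsigned long long max_duration; };

struct action {
    int (*handler)(void);
    const char *name;
    struct work *state;  /* mutable state, reached via the const descriptor */
};

static int my_handler(void) { return 1; }

static struct work my_work;               /* writable */
static const struct action my_action = {  /* can sit in .rodata */
    .handler = my_handler,
    .name    = "demo",
    .state   = &my_work,
};

int main(void)
{
    /* the descriptor is immutable, but its runtime stats are not */
    my_action.state->max_duration = 1234;
    printf("%s: handler=%d max_duration=%llu\n", my_action.name,
           my_action.handler(), my_action.state->max_duration);
    return 0;
}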
17714diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
17715index 802dde3..9183e68 100644
17716--- a/arch/x86/include/asm/page.h
17717+++ b/arch/x86/include/asm/page.h
17718@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17719 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
17720
17721 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
17722+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
17723
17724 #define __boot_va(x) __va(x)
17725 #define __boot_pa(x) __pa(x)
17726@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
17727 * virt_to_page(kaddr) returns a valid pointer if and only if
17728 * virt_addr_valid(kaddr) returns true.
17729 */
17730-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17731 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
17732 extern bool __virt_addr_valid(unsigned long kaddr);
17733 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
17734
17735+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
17736+#define virt_to_page(kaddr) \
17737+ ({ \
17738+ const void *__kaddr = (const void *)(kaddr); \
17739+ BUG_ON(!virt_addr_valid(__kaddr)); \
17740+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
17741+ })
17742+#else
17743+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
17744+#endif
17745+
17746 #endif /* __ASSEMBLY__ */
17747
17748 #include <asm-generic/memory_model.h>
17749diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
17750index b3bebf9..13ac22e 100644
17751--- a/arch/x86/include/asm/page_64.h
17752+++ b/arch/x86/include/asm/page_64.h
17753@@ -7,9 +7,9 @@
17754
17755 /* duplicated to the one in bootmem.h */
17756 extern unsigned long max_pfn;
17757-extern unsigned long phys_base;
17758+extern const unsigned long phys_base;
17759
17760-static inline unsigned long __phys_addr_nodebug(unsigned long x)
17761+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
17762 {
17763 unsigned long y = x - __START_KERNEL_map;
17764
17765@@ -20,8 +20,8 @@ static inline unsigned long __phys_addr_nodebug(unsigned long x)
17766 }
17767
17768 #ifdef CONFIG_DEBUG_VIRTUAL
17769-extern unsigned long __phys_addr(unsigned long);
17770-extern unsigned long __phys_addr_symbol(unsigned long);
17771+extern unsigned long __intentional_overflow(-1) __phys_addr(unsigned long);
17772+extern unsigned long __intentional_overflow(-1) __phys_addr_symbol(unsigned long);
17773 #else
17774 #define __phys_addr(x) __phys_addr_nodebug(x)
17775 #define __phys_addr_symbol(x) \
17776diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
17777index 32444ae..1a1624b 100644
17778--- a/arch/x86/include/asm/paravirt.h
17779+++ b/arch/x86/include/asm/paravirt.h
17780@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
17781 return (pmd_t) { ret };
17782 }
17783
17784-static inline pmdval_t pmd_val(pmd_t pmd)
17785+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
17786 {
17787 pmdval_t ret;
17788
17789@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
17790 val);
17791 }
17792
17793+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17794+{
17795+ pgdval_t val = native_pgd_val(pgd);
17796+
17797+ if (sizeof(pgdval_t) > sizeof(long))
17798+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
17799+ val, (u64)val >> 32);
17800+ else
17801+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
17802+ val);
17803+}
17804+
17805 static inline void pgd_clear(pgd_t *pgdp)
17806 {
17807 set_pgd(pgdp, __pgd(0));
17808@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
17809 pv_mmu_ops.set_fixmap(idx, phys, flags);
17810 }
17811
17812+#ifdef CONFIG_PAX_KERNEXEC
17813+static inline unsigned long pax_open_kernel(void)
17814+{
17815+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
17816+}
17817+
17818+static inline unsigned long pax_close_kernel(void)
17819+{
17820+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
17821+}
17822+#else
17823+static inline unsigned long pax_open_kernel(void) { return 0; }
17824+static inline unsigned long pax_close_kernel(void) { return 0; }
17825+#endif
17826+
17827 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
17828
17829 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
17830@@ -906,7 +933,7 @@ extern void default_banner(void);
17831
17832 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
17833 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
17834-#define PARA_INDIRECT(addr) *%cs:addr
17835+#define PARA_INDIRECT(addr) *%ss:addr
17836 #endif
17837
17838 #define INTERRUPT_RETURN \
17839@@ -981,6 +1008,21 @@ extern void default_banner(void);
17840 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
17841 CLBR_NONE, \
17842 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
17843+
17844+#define GET_CR0_INTO_RDI \
17845+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
17846+ mov %rax,%rdi
17847+
17848+#define SET_RDI_INTO_CR0 \
17849+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17850+
17851+#define GET_CR3_INTO_RDI \
17852+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
17853+ mov %rax,%rdi
17854+
17855+#define SET_RDI_INTO_CR3 \
17856+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
17857+
17858 #endif /* CONFIG_X86_32 */
17859
17860 #endif /* __ASSEMBLY__ */
17861diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
17862index 7549b8b..f0edfda 100644
17863--- a/arch/x86/include/asm/paravirt_types.h
17864+++ b/arch/x86/include/asm/paravirt_types.h
17865@@ -84,7 +84,7 @@ struct pv_init_ops {
17866 */
17867 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
17868 unsigned long addr, unsigned len);
17869-};
17870+} __no_const __no_randomize_layout;
17871
17872
17873 struct pv_lazy_ops {
17874@@ -92,13 +92,13 @@ struct pv_lazy_ops {
17875 void (*enter)(void);
17876 void (*leave)(void);
17877 void (*flush)(void);
17878-};
17879+} __no_randomize_layout;
17880
17881 struct pv_time_ops {
17882 unsigned long long (*sched_clock)(void);
17883 unsigned long long (*steal_clock)(int cpu);
17884 unsigned long (*get_tsc_khz)(void);
17885-};
17886+} __no_const __no_randomize_layout;
17887
17888 struct pv_cpu_ops {
17889 /* hooks for various privileged instructions */
17890@@ -192,7 +192,7 @@ struct pv_cpu_ops {
17891
17892 void (*start_context_switch)(struct task_struct *prev);
17893 void (*end_context_switch)(struct task_struct *next);
17894-};
17895+} __no_const __no_randomize_layout;
17896
17897 struct pv_irq_ops {
17898 /*
17899@@ -215,7 +215,7 @@ struct pv_irq_ops {
17900 #ifdef CONFIG_X86_64
17901 void (*adjust_exception_frame)(void);
17902 #endif
17903-};
17904+} __no_randomize_layout;
17905
17906 struct pv_apic_ops {
17907 #ifdef CONFIG_X86_LOCAL_APIC
17908@@ -223,7 +223,7 @@ struct pv_apic_ops {
17909 unsigned long start_eip,
17910 unsigned long start_esp);
17911 #endif
17912-};
17913+} __no_const __no_randomize_layout;
17914
17915 struct pv_mmu_ops {
17916 unsigned long (*read_cr2)(void);
17917@@ -313,6 +313,7 @@ struct pv_mmu_ops {
17918 struct paravirt_callee_save make_pud;
17919
17920 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
17921+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
17922 #endif /* PAGETABLE_LEVELS == 4 */
17923 #endif /* PAGETABLE_LEVELS >= 3 */
17924
17925@@ -324,7 +325,13 @@ struct pv_mmu_ops {
17926 an mfn. We can tell which is which from the index. */
17927 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
17928 phys_addr_t phys, pgprot_t flags);
17929-};
17930+
17931+#ifdef CONFIG_PAX_KERNEXEC
17932+ unsigned long (*pax_open_kernel)(void);
17933+ unsigned long (*pax_close_kernel)(void);
17934+#endif
17935+
17936+} __no_randomize_layout;
17937
17938 struct arch_spinlock;
17939 #ifdef CONFIG_SMP
17940@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
17941 struct pv_lock_ops {
17942 struct paravirt_callee_save lock_spinning;
17943 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
17944-};
17945+} __no_randomize_layout;
17946
17947 /* This contains all the paravirt structures: we get a convenient
17948 * number for each function using the offset which we use to indicate
17949- * what to patch. */
17950+ * what to patch.
17951+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
17952+ */
17953+
17954 struct paravirt_patch_template {
17955 struct pv_init_ops pv_init_ops;
17956 struct pv_time_ops pv_time_ops;
17957@@ -349,7 +359,7 @@ struct paravirt_patch_template {
17958 struct pv_apic_ops pv_apic_ops;
17959 struct pv_mmu_ops pv_mmu_ops;
17960 struct pv_lock_ops pv_lock_ops;
17961-};
17962+} __no_randomize_layout;
17963
17964 extern struct pv_info pv_info;
17965 extern struct pv_init_ops pv_init_ops;
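The __no_randomize_layout annotations above matter because paravirt patching identifies each operation by its byte offset inside paravirt_patch_template (the "NEAT TRICK" comment points at paravirt.c computing patch sites from those offsets); letting the structure-layout randomization plugin shuffle the members would make every offset lie. offsetof() shows the invariant being protected, with a stand-in struct:

#include <stdio.h>
#include <stddef.h>

struct pv_ops_demo {  /* stand-in for paravirt_patch_template */
    void (*op_a)(void);
    void (*op_b)(void);
    void (*op_c)(void);
};

int main(void)
{
    /* the patching code effectively asks: which op lives at offset X? */
    printf("op_a at %zu, op_b at %zu, op_c at %zu\n",
           offsetof(struct pv_ops_demo, op_a),
           offsetof(struct pv_ops_demo, op_b),
           offsetof(struct pv_ops_demo, op_c));
    /* randomizing this layout would silently remap offsets to the wrong ops */
    return 0;
}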
17966diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
17967index c4412e9..90e88c5 100644
17968--- a/arch/x86/include/asm/pgalloc.h
17969+++ b/arch/x86/include/asm/pgalloc.h
17970@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
17971 pmd_t *pmd, pte_t *pte)
17972 {
17973 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17974+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
17975+}
17976+
17977+static inline void pmd_populate_user(struct mm_struct *mm,
17978+ pmd_t *pmd, pte_t *pte)
17979+{
17980+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
17981 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
17982 }
17983
17984@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
17985
17986 #ifdef CONFIG_X86_PAE
17987 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
17988+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
17989+{
17990+ pud_populate(mm, pudp, pmd);
17991+}
17992 #else /* !CONFIG_X86_PAE */
17993 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
17994 {
17995 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
17996 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
17997 }
17998+
17999+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
18000+{
18001+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
18002+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
18003+}
18004 #endif /* CONFIG_X86_PAE */
18005
18006 #if PAGETABLE_LEVELS > 3
18007@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
18008 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
18009 }
18010
18011+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
18012+{
18013+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
18014+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
18015+}
18016+
18017 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
18018 {
18019 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
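The pgalloc.h additions split every "populate" helper into a user and a kernel flavor, and the only difference between _PAGE_TABLE and _KERNPG_TABLE is the _PAGE_USER bit: a table level installed with the kernel flavor cannot be reached by user-mode page walks at all, which is what the per-CPU kernel PGDs want. The standard x86 bit values (stated here as assumptions rather than taken from this hunk):

#include <stdio.h>

#define _PAGE_PRESENT  0x001UL
#define _PAGE_RW       0x002UL
#define _PAGE_USER     0x004UL
#define _PAGE_ACCESSED 0x020UL
#define _PAGE_DIRTY    0x040UL

#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_TABLE   (_KERNPG_TABLE | _PAGE_USER)

int main(void)
{
    printf("_KERNPG_TABLE = %#lx\n", _KERNPG_TABLE);  /* 0x63 */
    printf("_PAGE_TABLE   = %#lx\n", _PAGE_TABLE);    /* 0x67 */
    printf("difference    = %#lx (_PAGE_USER)\n", _PAGE_TABLE ^ _KERNPG_TABLE);
    return 0;
}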
18020diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
18021index 206a87f..1623b06 100644
18022--- a/arch/x86/include/asm/pgtable-2level.h
18023+++ b/arch/x86/include/asm/pgtable-2level.h
18024@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
18025
18026 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18027 {
18028+ pax_open_kernel();
18029 *pmdp = pmd;
18030+ pax_close_kernel();
18031 }
18032
18033 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18034diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
18035index 81bb91b..9392125 100644
18036--- a/arch/x86/include/asm/pgtable-3level.h
18037+++ b/arch/x86/include/asm/pgtable-3level.h
18038@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18039
18040 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18041 {
18042+ pax_open_kernel();
18043 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
18044+ pax_close_kernel();
18045 }
18046
18047 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18048 {
18049+ pax_open_kernel();
18050 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
18051+ pax_close_kernel();
18052 }
18053
18054 /*
18055diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
18056index e8a5454..1539359 100644
18057--- a/arch/x86/include/asm/pgtable.h
18058+++ b/arch/x86/include/asm/pgtable.h
18059@@ -47,6 +47,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18060
18061 #ifndef __PAGETABLE_PUD_FOLDED
18062 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
18063+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
18064 #define pgd_clear(pgd) native_pgd_clear(pgd)
18065 #endif
18066
18067@@ -84,12 +85,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18068
18069 #define arch_end_context_switch(prev) do {} while(0)
18070
18071+#define pax_open_kernel() native_pax_open_kernel()
18072+#define pax_close_kernel() native_pax_close_kernel()
18073 #endif /* CONFIG_PARAVIRT */
18074
18075+#define __HAVE_ARCH_PAX_OPEN_KERNEL
18076+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
18077+
18078+#ifdef CONFIG_PAX_KERNEXEC
18079+static inline unsigned long native_pax_open_kernel(void)
18080+{
18081+ unsigned long cr0;
18082+
18083+ preempt_disable();
18084+ barrier();
18085+ cr0 = read_cr0() ^ X86_CR0_WP;
18086+ BUG_ON(cr0 & X86_CR0_WP);
18087+ write_cr0(cr0);
18088+ barrier();
18089+ return cr0 ^ X86_CR0_WP;
18090+}
18091+
18092+static inline unsigned long native_pax_close_kernel(void)
18093+{
18094+ unsigned long cr0;
18095+
18096+ barrier();
18097+ cr0 = read_cr0() ^ X86_CR0_WP;
18098+ BUG_ON(!(cr0 & X86_CR0_WP));
18099+ write_cr0(cr0);
18100+ barrier();
18101+ preempt_enable_no_resched();
18102+ return cr0 ^ X86_CR0_WP;
18103+}
18104+#else
18105+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18106+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18107+#endif
18108+
18109 /*
18110 * The following only work if pte_present() is true.
18111 * Undefined behaviour if not..
18112 */
18113+static inline int pte_user(pte_t pte)
18114+{
18115+ return pte_val(pte) & _PAGE_USER;
18116+}
18117+
18118 static inline int pte_dirty(pte_t pte)
18119 {
18120 return pte_flags(pte) & _PAGE_DIRTY;
18121@@ -161,6 +203,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18122 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18123 }
18124
18125+static inline unsigned long pgd_pfn(pgd_t pgd)
18126+{
18127+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18128+}
18129+
18130 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18131
18132 static inline int pmd_large(pmd_t pte)
18133@@ -214,9 +261,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18134 return pte_clear_flags(pte, _PAGE_RW);
18135 }
18136
18137+static inline pte_t pte_mkread(pte_t pte)
18138+{
18139+ return __pte(pte_val(pte) | _PAGE_USER);
18140+}
18141+
18142 static inline pte_t pte_mkexec(pte_t pte)
18143 {
18144- return pte_clear_flags(pte, _PAGE_NX);
18145+#ifdef CONFIG_X86_PAE
18146+ if (__supported_pte_mask & _PAGE_NX)
18147+ return pte_clear_flags(pte, _PAGE_NX);
18148+ else
18149+#endif
18150+ return pte_set_flags(pte, _PAGE_USER);
18151+}
18152+
18153+static inline pte_t pte_exprotect(pte_t pte)
18154+{
18155+#ifdef CONFIG_X86_PAE
18156+ if (__supported_pte_mask & _PAGE_NX)
18157+ return pte_set_flags(pte, _PAGE_NX);
18158+ else
18159+#endif
18160+ return pte_clear_flags(pte, _PAGE_USER);
18161 }
18162
18163 static inline pte_t pte_mkdirty(pte_t pte)
18164@@ -446,6 +513,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18165 #endif
18166
18167 #ifndef __ASSEMBLY__
18168+
18169+#ifdef CONFIG_PAX_PER_CPU_PGD
18170+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18171+enum cpu_pgd_type {kernel = 0, user = 1};
18172+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18173+{
18174+ return cpu_pgd[cpu][type];
18175+}
18176+#endif
18177+
18178 #include <linux/mm_types.h>
18179 #include <linux/mmdebug.h>
18180 #include <linux/log2.h>
18181@@ -592,7 +669,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18182 * Currently stuck as a macro due to indirect forward reference to
18183 * linux/mmzone.h's __section_mem_map_addr() definition:
18184 */
18185-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18186+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18187
18188 /* Find an entry in the second-level page table.. */
18189 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18190@@ -632,7 +709,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18191 * Currently stuck as a macro due to indirect forward reference to
18192 * linux/mmzone.h's __section_mem_map_addr() definition:
18193 */
18194-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18195+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18196
18197 /* to find an entry in a page-table-directory. */
18198 static inline unsigned long pud_index(unsigned long address)
18199@@ -647,7 +724,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18200
18201 static inline int pgd_bad(pgd_t pgd)
18202 {
18203- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18204+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18205 }
18206
18207 static inline int pgd_none(pgd_t pgd)
18208@@ -670,7 +747,12 @@ static inline int pgd_none(pgd_t pgd)
18209 * pgd_offset() returns a (pgd_t *)
18210 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18211 */
18212-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18213+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18214+
18215+#ifdef CONFIG_PAX_PER_CPU_PGD
18216+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18217+#endif
18218+
18219 /*
18220 * a shortcut which implies the use of the kernel's pgd, instead
18221 * of a process's
18222@@ -681,6 +763,23 @@ static inline int pgd_none(pgd_t pgd)
18223 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18224 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18225
18226+#ifdef CONFIG_X86_32
18227+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18228+#else
18229+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18230+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18231+
18232+#ifdef CONFIG_PAX_MEMORY_UDEREF
18233+#ifdef __ASSEMBLY__
18234+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18235+#else
18236+extern unsigned long pax_user_shadow_base;
18237+extern pgdval_t clone_pgd_mask;
18238+#endif
18239+#endif
18240+
18241+#endif
18242+
18243 #ifndef __ASSEMBLY__
18244
18245 extern int direct_gbpages;
18246@@ -847,11 +946,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18247 * dst and src can be on the same page, but the range must not overlap,
18248 * and must not cross a page boundary.
18249 */
18250-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18251+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18252 {
18253- memcpy(dst, src, count * sizeof(pgd_t));
18254+ pax_open_kernel();
18255+ while (count--)
18256+ *dst++ = *src++;
18257+ pax_close_kernel();
18258 }
18259
18260+#ifdef CONFIG_PAX_PER_CPU_PGD
18261+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18262+#endif
18263+
18264+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18265+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18266+#else
18267+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18268+#endif
18269+
18270 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18271 static inline int page_level_shift(enum pg_level level)
18272 {
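native_pax_open_kernel()/native_pax_close_kernel() in the pgtable.h hunk above are the core of KERNEXEC's write protection: while CR0.WP is set, supervisor writes honor page protections, so read-only kernel data really is read-only; every legitimate write site in this patch is bracketed by open (clear WP) and close (restore WP), with preemption disabled across the window so the relaxed state cannot leak to another task. The XOR flips exactly that one bit, and the BUG_ON asserts the expected prior state. Actually toggling CR0 needs ring 0, so only the bit arithmetic can be sketched outside the kernel:

#include <stdio.h>

#define X86_CR0_WP (1UL << 16)

int main(void)
{
    unsigned long cr0 = 0x80050033UL; /* hypothetical CR0 value with WP set */

    unsigned long opened = cr0 ^ X86_CR0_WP;    /* pax_open_kernel(): clear WP */
    unsigned long closed = opened ^ X86_CR0_WP; /* pax_close_kernel(): restore it */

    printf("cr0    %#lx (WP=%d)\n", cr0,    !!(cr0 & X86_CR0_WP));
    printf("opened %#lx (WP=%d)\n", opened, !!(opened & X86_CR0_WP));
    printf("closed %#lx (WP=%d)\n", closed, !!(closed & X86_CR0_WP));
    return 0;
}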
18273diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18274index b6c0b40..3535d47 100644
18275--- a/arch/x86/include/asm/pgtable_32.h
18276+++ b/arch/x86/include/asm/pgtable_32.h
18277@@ -25,9 +25,6 @@
18278 struct mm_struct;
18279 struct vm_area_struct;
18280
18281-extern pgd_t swapper_pg_dir[1024];
18282-extern pgd_t initial_page_table[1024];
18283-
18284 static inline void pgtable_cache_init(void) { }
18285 static inline void check_pgt_cache(void) { }
18286 void paging_init(void);
18287@@ -45,6 +42,12 @@ void paging_init(void);
18288 # include <asm/pgtable-2level.h>
18289 #endif
18290
18291+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18292+extern pgd_t initial_page_table[PTRS_PER_PGD];
18293+#ifdef CONFIG_X86_PAE
18294+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18295+#endif
18296+
18297 #if defined(CONFIG_HIGHPTE)
18298 #define pte_offset_map(dir, address) \
18299 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18300@@ -59,12 +62,17 @@ void paging_init(void);
18301 /* Clear a kernel PTE and flush it from the TLB */
18302 #define kpte_clear_flush(ptep, vaddr) \
18303 do { \
18304+ pax_open_kernel(); \
18305 pte_clear(&init_mm, (vaddr), (ptep)); \
18306+ pax_close_kernel(); \
18307 __flush_tlb_one((vaddr)); \
18308 } while (0)
18309
18310 #endif /* !__ASSEMBLY__ */
18311
18312+#define HAVE_ARCH_UNMAPPED_AREA
18313+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18314+
18315 /*
18316 * kern_addr_valid() is (1) for FLATMEM and (0) for
18317 * SPARSEMEM and DISCONTIGMEM
18318diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18319index 9fb2f2b..b04b4bf 100644
18320--- a/arch/x86/include/asm/pgtable_32_types.h
18321+++ b/arch/x86/include/asm/pgtable_32_types.h
18322@@ -8,7 +8,7 @@
18323 */
18324 #ifdef CONFIG_X86_PAE
18325 # include <asm/pgtable-3level_types.h>
18326-# define PMD_SIZE (1UL << PMD_SHIFT)
18327+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18328 # define PMD_MASK (~(PMD_SIZE - 1))
18329 #else
18330 # include <asm/pgtable-2level_types.h>
18331@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18332 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18333 #endif
18334
18335+#ifdef CONFIG_PAX_KERNEXEC
18336+#ifndef __ASSEMBLY__
18337+extern unsigned char MODULES_EXEC_VADDR[];
18338+extern unsigned char MODULES_EXEC_END[];
18339+#endif
18340+#include <asm/boot.h>
18341+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18342+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18343+#else
18344+#define ktla_ktva(addr) (addr)
18345+#define ktva_ktla(addr) (addr)
18346+#endif
18347+
18348 #define MODULES_VADDR VMALLOC_START
18349 #define MODULES_END VMALLOC_END
18350 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
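
The ktla_ktva()/ktva_ktla() macros added above translate between the two views KERNEXEC keeps of kernel text, and they are exact inverses of each other. A round-trip check compiles as plain C; the 16 MiB LOAD_PHYSICAL_ADDR and the classic 3 GiB PAGE_OFFSET below are illustrative assumptions, not values taken from the patch.

#include <assert.h>
#include <stdio.h>

#define LOAD_PHYSICAL_ADDR 0x1000000UL	/* assumed CONFIG_PHYSICAL_START */
#define PAGE_OFFSET        0xc0000000UL	/* assumed 3G/1G split */

#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
	unsigned long text = 0x100000UL;	/* some text address */

	assert(ktva_ktla(ktla_ktva(text)) == text);	/* exact inverse */
	printf("ktla %#lx -> ktva %#lx\n", text, ktla_ktva(text));
	return 0;
}
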
18351diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18352index 4572b2f..4430113 100644
18353--- a/arch/x86/include/asm/pgtable_64.h
18354+++ b/arch/x86/include/asm/pgtable_64.h
18355@@ -16,11 +16,16 @@
18356
18357 extern pud_t level3_kernel_pgt[512];
18358 extern pud_t level3_ident_pgt[512];
18359+extern pud_t level3_vmalloc_start_pgt[512];
18360+extern pud_t level3_vmalloc_end_pgt[512];
18361+extern pud_t level3_vmemmap_pgt[512];
18362+extern pud_t level2_vmemmap_pgt[512];
18363 extern pmd_t level2_kernel_pgt[512];
18364 extern pmd_t level2_fixmap_pgt[512];
18365-extern pmd_t level2_ident_pgt[512];
18366+extern pmd_t level2_ident_pgt[512*2];
18367 extern pte_t level1_fixmap_pgt[512];
18368-extern pgd_t init_level4_pgt[];
18369+extern pte_t level1_vsyscall_pgt[512];
18370+extern pgd_t init_level4_pgt[512];
18371
18372 #define swapper_pg_dir init_level4_pgt
18373
18374@@ -62,7 +67,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18375
18376 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18377 {
18378+ pax_open_kernel();
18379 *pmdp = pmd;
18380+ pax_close_kernel();
18381 }
18382
18383 static inline void native_pmd_clear(pmd_t *pmd)
18384@@ -98,7 +105,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
18385
18386 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18387 {
18388+ pax_open_kernel();
18389 *pudp = pud;
18390+ pax_close_kernel();
18391 }
18392
18393 static inline void native_pud_clear(pud_t *pud)
18394@@ -108,6 +117,13 @@ static inline void native_pud_clear(pud_t *pud)
18395
18396 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
18397 {
18398+ pax_open_kernel();
18399+ *pgdp = pgd;
18400+ pax_close_kernel();
18401+}
18402+
18403+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18404+{
18405 *pgdp = pgd;
18406 }
18407
18408diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
18409index 602b602..acb53ed 100644
18410--- a/arch/x86/include/asm/pgtable_64_types.h
18411+++ b/arch/x86/include/asm/pgtable_64_types.h
18412@@ -61,11 +61,16 @@ typedef struct { pteval_t pte; } pte_t;
18413 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
18414 #define MODULES_END _AC(0xffffffffff000000, UL)
18415 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
18416+#define MODULES_EXEC_VADDR MODULES_VADDR
18417+#define MODULES_EXEC_END MODULES_END
18418 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
18419 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
18420 #define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
18421 #define EFI_VA_END (-68 * (_AC(1, UL) << 30))
18422
18423+#define ktla_ktva(addr) (addr)
18424+#define ktva_ktla(addr) (addr)
18425+
18426 #define EARLY_DYNAMIC_PAGE_TABLES 64
18427
18428 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
18429diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
18430index 25bcd4a..bf3f815 100644
18431--- a/arch/x86/include/asm/pgtable_types.h
18432+++ b/arch/x86/include/asm/pgtable_types.h
18433@@ -110,8 +110,10 @@
18434
18435 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
18436 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
18437-#else
18438+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
18439 #define _PAGE_NX (_AT(pteval_t, 0))
18440+#else
18441+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
18442 #endif
18443
18444 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
18445@@ -167,6 +169,9 @@ enum page_cache_mode {
18446 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
18447 _PAGE_ACCESSED)
18448
18449+#define PAGE_READONLY_NOEXEC PAGE_READONLY
18450+#define PAGE_SHARED_NOEXEC PAGE_SHARED
18451+
18452 #define __PAGE_KERNEL_EXEC \
18453 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
18454 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
18455@@ -174,7 +179,7 @@ enum page_cache_mode {
18456 #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
18457 #define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
18458 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_NOCACHE)
18459-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
18460+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
18461 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
18462 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
18463 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
18464@@ -220,7 +225,7 @@ enum page_cache_mode {
18465 #ifdef CONFIG_X86_64
18466 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
18467 #else
18468-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
18469+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18470 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
18471 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
18472 #endif
18473@@ -259,7 +264,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
18474 {
18475 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
18476 }
18477+#endif
18478
18479+#if PAGETABLE_LEVELS == 3
18480+#include <asm-generic/pgtable-nopud.h>
18481+#endif
18482+
18483+#if PAGETABLE_LEVELS == 2
18484+#include <asm-generic/pgtable-nopmd.h>
18485+#endif
18486+
18487+#ifndef __ASSEMBLY__
18488 #if PAGETABLE_LEVELS > 3
18489 typedef struct { pudval_t pud; } pud_t;
18490
18491@@ -273,8 +288,6 @@ static inline pudval_t native_pud_val(pud_t pud)
18492 return pud.pud;
18493 }
18494 #else
18495-#include <asm-generic/pgtable-nopud.h>
18496-
18497 static inline pudval_t native_pud_val(pud_t pud)
18498 {
18499 return native_pgd_val(pud.pgd);
18500@@ -294,8 +307,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
18501 return pmd.pmd;
18502 }
18503 #else
18504-#include <asm-generic/pgtable-nopmd.h>
18505-
18506 static inline pmdval_t native_pmd_val(pmd_t pmd)
18507 {
18508 return native_pgd_val(pmd.pud.pgd);
18509@@ -402,7 +413,6 @@ typedef struct page *pgtable_t;
18510
18511 extern pteval_t __supported_pte_mask;
18512 extern void set_nx(void);
18513-extern int nx_enabled;
18514
18515 #define pgprot_writecombine pgprot_writecombine
18516 extern pgprot_t pgprot_writecombine(pgprot_t prot);
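
The _PAGE_NX hunk above changes the no-hardware-NX fallback: instead of defining _PAGE_NX to 0 and losing the information, it aliases a software-defined PTE bit so non-executable intent is still recorded in the page tables and can be enforced by SEGMEXEC/KERNEXEC. A sketch of the two masks; treating _PAGE_BIT_HIDDEN as software bit 11 is an assumption from kernels of this era, not something stated in the hunk.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t pteval_t;

#define _PAGE_BIT_NX     63	/* hardware NX, PAE/64-bit only */
#define _PAGE_BIT_HIDDEN 11	/* assumed software bit, ignored by the MMU */

int main(void)
{
	pteval_t nx_hw = (pteval_t)1 << _PAGE_BIT_NX;
	pteval_t nx_sw = (pteval_t)1 << _PAGE_BIT_HIDDEN;

	printf("hardware _PAGE_NX = %#018llx\n", (unsigned long long)nx_hw);
	printf("emulated _PAGE_NX = %#018llx\n", (unsigned long long)nx_sw);
	return 0;
}
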
18517diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
18518index 8f327184..368fb29 100644
18519--- a/arch/x86/include/asm/preempt.h
18520+++ b/arch/x86/include/asm/preempt.h
18521@@ -84,7 +84,7 @@ static __always_inline void __preempt_count_sub(int val)
18522 */
18523 static __always_inline bool __preempt_count_dec_and_test(void)
18524 {
18525- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
18526+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
18527 }
18528
18529 /*
18530diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
18531index a092a0c..8e9640b 100644
18532--- a/arch/x86/include/asm/processor.h
18533+++ b/arch/x86/include/asm/processor.h
18534@@ -127,7 +127,7 @@ struct cpuinfo_x86 {
18535 /* Index into per_cpu list: */
18536 u16 cpu_index;
18537 u32 microcode;
18538-};
18539+} __randomize_layout;
18540
18541 #define X86_VENDOR_INTEL 0
18542 #define X86_VENDOR_CYRIX 1
18543@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
18544 : "memory");
18545 }
18546
18547+/* invpcid (%rdx),%rax */
18548+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
18549+
18550+#define INVPCID_SINGLE_ADDRESS 0UL
18551+#define INVPCID_SINGLE_CONTEXT 1UL
18552+#define INVPCID_ALL_GLOBAL 2UL
18553+#define INVPCID_ALL_NONGLOBAL 3UL
18554+
18555+#define PCID_KERNEL 0UL
18556+#define PCID_USER 1UL
18557+#define PCID_NOFLUSH (1UL << 63)
18558+
18559 static inline void load_cr3(pgd_t *pgdir)
18560 {
18561- write_cr3(__pa(pgdir));
18562+ write_cr3(__pa(pgdir) | PCID_KERNEL);
18563 }
18564
18565 #ifdef CONFIG_X86_32
18566@@ -282,7 +294,7 @@ struct tss_struct {
18567
18568 } ____cacheline_aligned;
18569
18570-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
18571+extern struct tss_struct init_tss[NR_CPUS];
18572
18573 /*
18574 * Save the original ist values for checking stack pointers during debugging
18575@@ -479,6 +491,7 @@ struct thread_struct {
18576 unsigned short ds;
18577 unsigned short fsindex;
18578 unsigned short gsindex;
18579+ unsigned short ss;
18580 #endif
18581 #ifdef CONFIG_X86_32
18582 unsigned long ip;
18583@@ -588,29 +601,8 @@ static inline void load_sp0(struct tss_struct *tss,
18584 extern unsigned long mmu_cr4_features;
18585 extern u32 *trampoline_cr4_features;
18586
18587-static inline void set_in_cr4(unsigned long mask)
18588-{
18589- unsigned long cr4;
18590-
18591- mmu_cr4_features |= mask;
18592- if (trampoline_cr4_features)
18593- *trampoline_cr4_features = mmu_cr4_features;
18594- cr4 = read_cr4();
18595- cr4 |= mask;
18596- write_cr4(cr4);
18597-}
18598-
18599-static inline void clear_in_cr4(unsigned long mask)
18600-{
18601- unsigned long cr4;
18602-
18603- mmu_cr4_features &= ~mask;
18604- if (trampoline_cr4_features)
18605- *trampoline_cr4_features = mmu_cr4_features;
18606- cr4 = read_cr4();
18607- cr4 &= ~mask;
18608- write_cr4(cr4);
18609-}
18610+extern void set_in_cr4(unsigned long mask);
18611+extern void clear_in_cr4(unsigned long mask);
18612
18613 typedef struct {
18614 unsigned long seg;
18615@@ -838,11 +830,18 @@ static inline void spin_lock_prefetch(const void *x)
18616 */
18617 #define TASK_SIZE PAGE_OFFSET
18618 #define TASK_SIZE_MAX TASK_SIZE
18619+
18620+#ifdef CONFIG_PAX_SEGMEXEC
18621+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
18622+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
18623+#else
18624 #define STACK_TOP TASK_SIZE
18625-#define STACK_TOP_MAX STACK_TOP
18626+#endif
18627+
18628+#define STACK_TOP_MAX TASK_SIZE
18629
18630 #define INIT_THREAD { \
18631- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18632+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18633 .vm86_info = NULL, \
18634 .sysenter_cs = __KERNEL_CS, \
18635 .io_bitmap_ptr = NULL, \
18636@@ -856,7 +855,7 @@ static inline void spin_lock_prefetch(const void *x)
18637 */
18638 #define INIT_TSS { \
18639 .x86_tss = { \
18640- .sp0 = sizeof(init_stack) + (long)&init_stack, \
18641+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
18642 .ss0 = __KERNEL_DS, \
18643 .ss1 = __KERNEL_CS, \
18644 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
18645@@ -867,11 +866,7 @@ static inline void spin_lock_prefetch(const void *x)
18646 extern unsigned long thread_saved_pc(struct task_struct *tsk);
18647
18648 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
18649-#define KSTK_TOP(info) \
18650-({ \
18651- unsigned long *__ptr = (unsigned long *)(info); \
18652- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
18653-})
18654+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
18655
18656 /*
18657 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
18658@@ -886,7 +881,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18659 #define task_pt_regs(task) \
18660 ({ \
18661 struct pt_regs *__regs__; \
18662- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
18663+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
18664 __regs__ - 1; \
18665 })
18666
18667@@ -902,13 +897,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18668 * particular problem by preventing anything from being mapped
18669 * at the maximum canonical address.
18670 */
18671-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
18672+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
18673
18674 /* This decides where the kernel will search for a free chunk of vm
18675 * space during mmap's.
18676 */
18677 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
18678- 0xc0000000 : 0xFFFFe000)
18679+ 0xc0000000 : 0xFFFFf000)
18680
18681 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
18682 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
18683@@ -919,11 +914,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
18684 #define STACK_TOP_MAX TASK_SIZE_MAX
18685
18686 #define INIT_THREAD { \
18687- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18688+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18689 }
18690
18691 #define INIT_TSS { \
18692- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
18693+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
18694 }
18695
18696 /*
18697@@ -951,6 +946,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
18698 */
18699 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
18700
18701+#ifdef CONFIG_PAX_SEGMEXEC
18702+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
18703+#endif
18704+
18705 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
18706
18707 /* Get/set a process' ability to use the timestamp counter instruction */
18708@@ -995,7 +994,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
18709 return 0;
18710 }
18711
18712-extern unsigned long arch_align_stack(unsigned long sp);
18713+#define arch_align_stack(x) ((x) & ~0xfUL)
18714 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
18715
18716 void default_idle(void);
18717@@ -1005,6 +1004,6 @@ bool xen_set_default_idle(void);
18718 #define xen_set_default_idle 0
18719 #endif
18720
18721-void stop_this_cpu(void *dummy);
18722+void stop_this_cpu(void *dummy) __noreturn;
18723 void df_debug(struct pt_regs *regs, long error_code);
18724 #endif /* _ASM_X86_PROCESSOR_H */
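
The PCID_* constants and the patched load_cr3() above rely on the CR3 layout: a top-level page table is 4 KiB aligned, so the low 12 bits of its physical address are free to carry the PCID, and bit 63 of the value written (the documented no-flush bit when CR4.PCIDE is set) asks the CPU to keep cached translations for that PCID. A sketch composing such a value; the physical address is made up.

#include <stdio.h>
#include <stdint.h>

#define PCID_KERNEL  0UL
#define PCID_USER    1UL
#define PCID_NOFLUSH (1UL << 63)	/* bit 63: keep this PCID's TLB entries */

int main(void)
{
	uint64_t pgd_pa = 0x1234000;	/* hypothetical page-aligned pgd address */
	uint64_t cr3 = pgd_pa | PCID_USER | PCID_NOFLUSH;

	printf("cr3 = %#llx (pcid=%llu, noflush=%llu)\n",
	       (unsigned long long)cr3,
	       (unsigned long long)(cr3 & 0xfff),
	       (unsigned long long)(cr3 >> 63));
	return 0;
}
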
18725diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
18726index 86fc2bb..bd5049a 100644
18727--- a/arch/x86/include/asm/ptrace.h
18728+++ b/arch/x86/include/asm/ptrace.h
18729@@ -89,28 +89,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
18730 }
18731
18732 /*
18733- * user_mode_vm(regs) determines whether a register set came from user mode.
18734+ * user_mode(regs) determines whether a register set came from user mode.
18735 * This is true if V8086 mode was enabled OR if the register set was from
18736 * protected mode with RPL-3 CS value. This tricky test checks that with
18737 * one comparison. Many places in the kernel can bypass this full check
18738- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
18739+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
18740+ * be used.
18741 */
18742-static inline int user_mode(struct pt_regs *regs)
18743+static inline int user_mode_novm(struct pt_regs *regs)
18744 {
18745 #ifdef CONFIG_X86_32
18746 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
18747 #else
18748- return !!(regs->cs & 3);
18749+ return !!(regs->cs & SEGMENT_RPL_MASK);
18750 #endif
18751 }
18752
18753-static inline int user_mode_vm(struct pt_regs *regs)
18754+static inline int user_mode(struct pt_regs *regs)
18755 {
18756 #ifdef CONFIG_X86_32
18757 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
18758 USER_RPL;
18759 #else
18760- return user_mode(regs);
18761+ return user_mode_novm(regs);
18762 #endif
18763 }
18764
18765@@ -126,15 +127,16 @@ static inline int v8086_mode(struct pt_regs *regs)
18766 #ifdef CONFIG_X86_64
18767 static inline bool user_64bit_mode(struct pt_regs *regs)
18768 {
18769+ unsigned long cs = regs->cs & 0xffff;
18770 #ifndef CONFIG_PARAVIRT
18771 /*
18772 * On non-paravirt systems, this is the only long mode CPL 3
18773 * selector. We do not allow long mode selectors in the LDT.
18774 */
18775- return regs->cs == __USER_CS;
18776+ return cs == __USER_CS;
18777 #else
18778 /* Headers are too twisted for this to go in paravirt.h. */
18779- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
18780+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
18781 #endif
18782 }
18783
18784@@ -185,9 +187,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
18785 * Traps from the kernel do not save sp and ss.
18786 * Use the helper function to retrieve sp.
18787 */
18788- if (offset == offsetof(struct pt_regs, sp) &&
18789- regs->cs == __KERNEL_CS)
18790- return kernel_stack_pointer(regs);
18791+ if (offset == offsetof(struct pt_regs, sp)) {
18792+ unsigned long cs = regs->cs & 0xffff;
18793+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
18794+ return kernel_stack_pointer(regs);
18795+ }
18796 #endif
18797 return *(unsigned long *)((unsigned long)regs + offset);
18798 }
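
The comment rewritten above describes a one-comparison test, and it works because ORing EFLAGS.VM into the RPL bits forces the v8086 case to compare >= USER_RPL even though CS.RPL is meaningless there. A standalone re-derivation of the predicate; 0x73 and 0x10 are illustrative selector values, not constants from the patch.

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3
#define USER_RPL         0x3
#define X86_VM_MASK      (1 << 17)	/* EFLAGS.VM */

/* one comparison covers both protected-mode ring 3 and v8086 mode */
static int user_mode(unsigned cs, unsigned flags)
{
	return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
	printf("%d\n", user_mode(0x73, 0));		/* user CS, RPL 3 -> 1 */
	printf("%d\n", user_mode(0x10, 0));		/* kernel CS      -> 0 */
	printf("%d\n", user_mode(0x10, X86_VM_MASK));	/* v8086          -> 1 */
	return 0;
}
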
18799diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
18800index ae0e241..e80b10b 100644
18801--- a/arch/x86/include/asm/qrwlock.h
18802+++ b/arch/x86/include/asm/qrwlock.h
18803@@ -7,8 +7,8 @@
18804 #define queue_write_unlock queue_write_unlock
18805 static inline void queue_write_unlock(struct qrwlock *lock)
18806 {
18807- barrier();
18808- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
18809+ barrier();
18810+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
18811 }
18812 #endif
18813
18814diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
18815index 9c6b890..5305f53 100644
18816--- a/arch/x86/include/asm/realmode.h
18817+++ b/arch/x86/include/asm/realmode.h
18818@@ -22,16 +22,14 @@ struct real_mode_header {
18819 #endif
18820 /* APM/BIOS reboot */
18821 u32 machine_real_restart_asm;
18822-#ifdef CONFIG_X86_64
18823 u32 machine_real_restart_seg;
18824-#endif
18825 };
18826
18827 /* This must match data at trampoline_32/64.S */
18828 struct trampoline_header {
18829 #ifdef CONFIG_X86_32
18830 u32 start;
18831- u16 gdt_pad;
18832+ u16 boot_cs;
18833 u16 gdt_limit;
18834 u32 gdt_base;
18835 #else
18836diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
18837index a82c4f1..ac45053 100644
18838--- a/arch/x86/include/asm/reboot.h
18839+++ b/arch/x86/include/asm/reboot.h
18840@@ -6,13 +6,13 @@
18841 struct pt_regs;
18842
18843 struct machine_ops {
18844- void (*restart)(char *cmd);
18845- void (*halt)(void);
18846- void (*power_off)(void);
18847+ void (* __noreturn restart)(char *cmd);
18848+ void (* __noreturn halt)(void);
18849+ void (* __noreturn power_off)(void);
18850 void (*shutdown)(void);
18851 void (*crash_shutdown)(struct pt_regs *);
18852- void (*emergency_restart)(void);
18853-};
18854+ void (* __noreturn emergency_restart)(void);
18855+} __no_const;
18856
18857 extern struct machine_ops machine_ops;
18858
18859diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
18860index 8f7866a..e442f20 100644
18861--- a/arch/x86/include/asm/rmwcc.h
18862+++ b/arch/x86/include/asm/rmwcc.h
18863@@ -3,7 +3,34 @@
18864
18865 #ifdef CC_HAVE_ASM_GOTO
18866
18867-#define __GEN_RMWcc(fullop, var, cc, ...) \
18868+#ifdef CONFIG_PAX_REFCOUNT
18869+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18870+do { \
18871+ asm_volatile_goto (fullop \
18872+ ";jno 0f\n" \
18873+ fullantiop \
18874+ ";int $4\n0:\n" \
18875+ _ASM_EXTABLE(0b, 0b) \
18876+ ";j" cc " %l[cc_label]" \
18877+ : : "m" (var), ## __VA_ARGS__ \
18878+ : "memory" : cc_label); \
18879+ return 0; \
18880+cc_label: \
18881+ return 1; \
18882+} while (0)
18883+#else
18884+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18885+do { \
18886+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
18887+ : : "m" (var), ## __VA_ARGS__ \
18888+ : "memory" : cc_label); \
18889+ return 0; \
18890+cc_label: \
18891+ return 1; \
18892+} while (0)
18893+#endif
18894+
18895+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18896 do { \
18897 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
18898 : : "m" (var), ## __VA_ARGS__ \
18899@@ -13,15 +40,46 @@ cc_label: \
18900 return 1; \
18901 } while (0)
18902
18903-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18904- __GEN_RMWcc(op " " arg0, var, cc)
18905+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18906+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18907
18908-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18909- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
18910+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18911+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18912+
18913+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18914+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
18915+
18916+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18917+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
18918
18919 #else /* !CC_HAVE_ASM_GOTO */
18920
18921-#define __GEN_RMWcc(fullop, var, cc, ...) \
18922+#ifdef CONFIG_PAX_REFCOUNT
18923+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18924+do { \
18925+ char c; \
18926+ asm volatile (fullop \
18927+ ";jno 0f\n" \
18928+ fullantiop \
18929+ ";int $4\n0:\n" \
18930+ _ASM_EXTABLE(0b, 0b) \
18931+ "; set" cc " %1" \
18932+ : "+m" (var), "=qm" (c) \
18933+ : __VA_ARGS__ : "memory"); \
18934+ return c != 0; \
18935+} while (0)
18936+#else
18937+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
18938+do { \
18939+ char c; \
18940+ asm volatile (fullop "; set" cc " %1" \
18941+ : "+m" (var), "=qm" (c) \
18942+ : __VA_ARGS__ : "memory"); \
18943+ return c != 0; \
18944+} while (0)
18945+#endif
18946+
18947+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
18948 do { \
18949 char c; \
18950 asm volatile (fullop "; set" cc " %1" \
18951@@ -30,11 +88,17 @@ do { \
18952 return c != 0; \
18953 } while (0)
18954
18955-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
18956- __GEN_RMWcc(op " " arg0, var, cc)
18957+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
18958+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
18959+
18960+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
18961+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
18962+
18963+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
18964+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
18965
18966-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
18967- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
18968+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
18969+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
18970
18971 #endif /* CC_HAVE_ASM_GOTO */
18972
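
The op/anti-op scheme threaded through __GEN_RMWcc above is the PAX_REFCOUNT overflow check: perform the operation, and if OF was set, undo it with the opposite instruction and raise the overflow trap so the counter saturates instead of wrapping. A userspace sketch of the generated incl/decl sequence; from ring 3 the int $4 is delivered as SIGSEGV rather than reaching the kernel's vector-4 handler, so the demo catches that signal instead.

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static int counter = 0x7fffffff;	/* INT_MAX: the next incl overflows */

static void trap(int sig)
{
	(void)sig;
	printf("overflow caught, counter restored to %d\n", counter);
	exit(0);
}

int main(void)
{
	signal(SIGSEGV, trap);

	asm volatile("incl %0\n\t"	/* the real operation        */
		     "jno 1f\n\t"	/* no overflow: fall through */
		     "decl %0\n\t"	/* anti-op: undo the damage  */
		     "int $4\n"		/* raise the overflow trap   */
		     "1:"
		     : "+m" (counter) : : "cc");

	printf("no overflow: %d\n", counter);
	return 0;
}
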
18973diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
18974index cad82c9..2e5c5c1 100644
18975--- a/arch/x86/include/asm/rwsem.h
18976+++ b/arch/x86/include/asm/rwsem.h
18977@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
18978 {
18979 asm volatile("# beginning down_read\n\t"
18980 LOCK_PREFIX _ASM_INC "(%1)\n\t"
18981+
18982+#ifdef CONFIG_PAX_REFCOUNT
18983+ "jno 0f\n"
18984+ LOCK_PREFIX _ASM_DEC "(%1)\n"
18985+ "int $4\n0:\n"
18986+ _ASM_EXTABLE(0b, 0b)
18987+#endif
18988+
18989 /* adds 0x00000001 */
18990 " jns 1f\n"
18991 " call call_rwsem_down_read_failed\n"
18992@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
18993 "1:\n\t"
18994 " mov %1,%2\n\t"
18995 " add %3,%2\n\t"
18996+
18997+#ifdef CONFIG_PAX_REFCOUNT
18998+ "jno 0f\n"
18999+ "sub %3,%2\n"
19000+ "int $4\n0:\n"
19001+ _ASM_EXTABLE(0b, 0b)
19002+#endif
19003+
19004 " jle 2f\n\t"
19005 LOCK_PREFIX " cmpxchg %2,%0\n\t"
19006 " jnz 1b\n\t"
19007@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
19008 long tmp;
19009 asm volatile("# beginning down_write\n\t"
19010 LOCK_PREFIX " xadd %1,(%2)\n\t"
19011+
19012+#ifdef CONFIG_PAX_REFCOUNT
19013+ "jno 0f\n"
19014+ "mov %1,(%2)\n"
19015+ "int $4\n0:\n"
19016+ _ASM_EXTABLE(0b, 0b)
19017+#endif
19018+
19019 /* adds 0xffff0001, returns the old value */
19020 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
19021 /* was the active mask 0 before? */
19022@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
19023 long tmp;
19024 asm volatile("# beginning __up_read\n\t"
19025 LOCK_PREFIX " xadd %1,(%2)\n\t"
19026+
19027+#ifdef CONFIG_PAX_REFCOUNT
19028+ "jno 0f\n"
19029+ "mov %1,(%2)\n"
19030+ "int $4\n0:\n"
19031+ _ASM_EXTABLE(0b, 0b)
19032+#endif
19033+
19034 /* subtracts 1, returns the old value */
19035 " jns 1f\n\t"
19036 " call call_rwsem_wake\n" /* expects old value in %edx */
19037@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
19038 long tmp;
19039 asm volatile("# beginning __up_write\n\t"
19040 LOCK_PREFIX " xadd %1,(%2)\n\t"
19041+
19042+#ifdef CONFIG_PAX_REFCOUNT
19043+ "jno 0f\n"
19044+ "mov %1,(%2)\n"
19045+ "int $4\n0:\n"
19046+ _ASM_EXTABLE(0b, 0b)
19047+#endif
19048+
19049 /* subtracts 0xffff0001, returns the old value */
19050 " jns 1f\n\t"
19051 " call call_rwsem_wake\n" /* expects old value in %edx */
19052@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19053 {
19054 asm volatile("# beginning __downgrade_write\n\t"
19055 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
19056+
19057+#ifdef CONFIG_PAX_REFCOUNT
19058+ "jno 0f\n"
19059+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
19060+ "int $4\n0:\n"
19061+ _ASM_EXTABLE(0b, 0b)
19062+#endif
19063+
19064 /*
19065 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
19066 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
19067@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19068 */
19069 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19070 {
19071- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
19072+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
19073+
19074+#ifdef CONFIG_PAX_REFCOUNT
19075+ "jno 0f\n"
19076+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
19077+ "int $4\n0:\n"
19078+ _ASM_EXTABLE(0b, 0b)
19079+#endif
19080+
19081 : "+m" (sem->count)
19082 : "er" (delta));
19083 }
19084@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19085 */
19086 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
19087 {
19088- return delta + xadd(&sem->count, delta);
19089+ return delta + xadd_check_overflow(&sem->count, delta);
19090 }
19091
19092 #endif /* __KERNEL__ */
19093diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
19094index db257a5..b91bc77 100644
19095--- a/arch/x86/include/asm/segment.h
19096+++ b/arch/x86/include/asm/segment.h
19097@@ -73,10 +73,15 @@
19098 * 26 - ESPFIX small SS
19099 * 27 - per-cpu [ offset to per-cpu data area ]
19100 * 28 - stack_canary-20 [ for stack protector ]
19101- * 29 - unused
19102- * 30 - unused
19103+ * 29 - PCI BIOS CS
19104+ * 30 - PCI BIOS DS
19105 * 31 - TSS for double fault handler
19106 */
19107+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
19108+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
19109+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
19110+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
19111+
19112 #define GDT_ENTRY_TLS_MIN 6
19113 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
19114
19115@@ -88,6 +93,8 @@
19116
19117 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
19118
19119+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
19120+
19121 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
19122
19123 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
19124@@ -113,6 +120,12 @@
19125 #define __KERNEL_STACK_CANARY 0
19126 #endif
19127
19128+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
19129+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
19130+
19131+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19132+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19133+
19134 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19135
19136 /*
19137@@ -140,7 +153,7 @@
19138 */
19139
19140 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19141-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19142+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19143
19144
19145 #else
19146@@ -164,6 +177,8 @@
19147 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19148 #define __USER32_DS __USER_DS
19149
19150+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19151+
19152 #define GDT_ENTRY_TSS 8 /* needs two entries */
19153 #define GDT_ENTRY_LDT 10 /* needs two entries */
19154 #define GDT_ENTRY_TLS_MIN 12
19155@@ -172,6 +187,8 @@
19156 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19157 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19158
19159+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19160+
19161 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19162 #define FS_TLS 0
19163 #define GS_TLS 1
19164@@ -179,12 +196,14 @@
19165 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19166 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19167
19168-#define GDT_ENTRIES 16
19169+#define GDT_ENTRIES 17
19170
19171 #endif
19172
19173 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19174+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19175 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19176+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19177 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19178 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19179 #ifndef CONFIG_PARAVIRT
19180@@ -256,7 +275,7 @@ static inline unsigned long get_limit(unsigned long segment)
19181 {
19182 unsigned long __limit;
19183 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19184- return __limit + 1;
19185+ return __limit;
19186 }
19187
19188 #endif /* !__ASSEMBLY__ */
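
The selector constants added in this hunk all follow the x86 encoding (GDT index << 3) | TI | RPL: the three low bits carry the table indicator and requested privilege level, so multiplying the entry index by 8 yields a ring-0 selector and appending "+ 3" a ring-3 one. A quick check against two of the constants defined above:

#include <stdio.h>

#define GDT_ENTRY_KERNEXEC_EFI_CS 1
#define GDT_ENTRY_KERNEXEC_EFI_DS 2

int main(void)
{
	unsigned cs = GDT_ENTRY_KERNEXEC_EFI_CS * 8;	/* 0x08, TI=0, RPL=0 */
	unsigned ds = GDT_ENTRY_KERNEXEC_EFI_DS * 8;	/* 0x10, TI=0, RPL=0 */

	printf("__KERNEXEC_EFI_CS=%#x __KERNEXEC_EFI_DS=%#x\n", cs, ds);
	printf("cs: index=%u rpl=%u\n", cs >> 3, cs & 3);
	return 0;
}
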
19189diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19190index 8d3120f..352b440 100644
19191--- a/arch/x86/include/asm/smap.h
19192+++ b/arch/x86/include/asm/smap.h
19193@@ -25,11 +25,40 @@
19194
19195 #include <asm/alternative-asm.h>
19196
19197+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19198+#define ASM_PAX_OPEN_USERLAND \
19199+ 661: jmp 663f; \
19200+ .pushsection .altinstr_replacement, "a" ; \
19201+ 662: pushq %rax; nop; \
19202+ .popsection ; \
19203+ .pushsection .altinstructions, "a" ; \
19204+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19205+ .popsection ; \
19206+ call __pax_open_userland; \
19207+ popq %rax; \
19208+ 663:
19209+
19210+#define ASM_PAX_CLOSE_USERLAND \
19211+ 661: jmp 663f; \
19212+ .pushsection .altinstr_replacement, "a" ; \
19213+ 662: pushq %rax; nop; \
19214+ .popsection; \
19215+ .pushsection .altinstructions, "a" ; \
19216+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19217+ .popsection; \
19218+ call __pax_close_userland; \
19219+ popq %rax; \
19220+ 663:
19221+#else
19222+#define ASM_PAX_OPEN_USERLAND
19223+#define ASM_PAX_CLOSE_USERLAND
19224+#endif
19225+
19226 #ifdef CONFIG_X86_SMAP
19227
19228 #define ASM_CLAC \
19229 661: ASM_NOP3 ; \
19230- .pushsection .altinstr_replacement, "ax" ; \
19231+ .pushsection .altinstr_replacement, "a" ; \
19232 662: __ASM_CLAC ; \
19233 .popsection ; \
19234 .pushsection .altinstructions, "a" ; \
19235@@ -38,7 +67,7 @@
19236
19237 #define ASM_STAC \
19238 661: ASM_NOP3 ; \
19239- .pushsection .altinstr_replacement, "ax" ; \
19240+ .pushsection .altinstr_replacement, "a" ; \
19241 662: __ASM_STAC ; \
19242 .popsection ; \
19243 .pushsection .altinstructions, "a" ; \
19244@@ -56,6 +85,37 @@
19245
19246 #include <asm/alternative.h>
19247
19248+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19249+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19250+
19251+extern void __pax_open_userland(void);
19252+static __always_inline unsigned long pax_open_userland(void)
19253+{
19254+
19255+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19256+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19257+ :
19258+ : [open] "i" (__pax_open_userland)
19259+ : "memory", "rax");
19260+#endif
19261+
19262+ return 0;
19263+}
19264+
19265+extern void __pax_close_userland(void);
19266+static __always_inline unsigned long pax_close_userland(void)
19267+{
19268+
19269+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19270+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19271+ :
19272+ : [close] "i" (__pax_close_userland)
19273+ : "memory", "rax");
19274+#endif
19275+
19276+ return 0;
19277+}
19278+
19279 #ifdef CONFIG_X86_SMAP
19280
19281 static __always_inline void clac(void)
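
Both the ASM_PAX_*_USERLAND macros and the pax_open_userland()/pax_close_userland() inlines above use the kernel's alternatives machinery: the call site is assembled as a NOP (or a jump over the call) and patched at boot into a real call only when X86_FEATURE_STRONGUDEREF is present. The kernel rewrites instruction bytes in place; a function pointer selected at startup is the portable conceptual analogue, sketched below with made-up demo names.

#include <stdio.h>
#include <stdbool.h>

static void nop5(void) { /* default path: feature absent, do nothing */ }
static void pax_open_userland_demo(void) { puts("userland window opened"); }

static void (*open_userland_slot)(void) = nop5;

/* stands in for apply_alternatives() consuming the .altinstructions table */
static void apply_alternatives_demo(bool has_stronguderef)
{
	if (has_stronguderef)	/* X86_FEATURE_STRONGUDEREF detected */
		open_userland_slot = pax_open_userland_demo;
}

int main(void)
{
	apply_alternatives_demo(true);
	open_userland_slot();	/* either a no-op or the real call */
	return 0;
}
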
19282diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19283index 8cd1cc3..827e09e 100644
19284--- a/arch/x86/include/asm/smp.h
19285+++ b/arch/x86/include/asm/smp.h
19286@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19287 /* cpus sharing the last level cache: */
19288 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19289 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19290-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19291+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19292
19293 static inline struct cpumask *cpu_sibling_mask(int cpu)
19294 {
19295@@ -78,7 +78,7 @@ struct smp_ops {
19296
19297 void (*send_call_func_ipi)(const struct cpumask *mask);
19298 void (*send_call_func_single_ipi)(int cpu);
19299-};
19300+} __no_const;
19301
19302 /* Globals due to paravirt */
19303 extern void set_cpu_sibling_map(int cpu);
19304@@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
19305 extern int safe_smp_processor_id(void);
19306
19307 #elif defined(CONFIG_X86_64_SMP)
19308-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19309-
19310-#define stack_smp_processor_id() \
19311-({ \
19312- struct thread_info *ti; \
19313- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19314- ti->cpu; \
19315-})
19316+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19317+#define stack_smp_processor_id() raw_smp_processor_id()
19318 #define safe_smp_processor_id() smp_processor_id()
19319
19320 #endif
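
The stack_smp_processor_id() definition deleted above recovered thread_info by masking %rsp with CURRENT_MASK, which only works while thread_info lives at the base of the THREAD_SIZE-aligned kernel stack; once it moves into task_struct, the per-cpu read is the only correct lookup. A sketch of the masking trick being retired, demonstrated on an aligned buffer standing in for a kernel stack (16 KiB THREAD_SIZE is an assumption):

#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE  (16UL * 1024)	/* assumed kernel stack size */
#define CURRENT_MASK (~(THREAD_SIZE - 1))

struct thread_info { int cpu; };

int main(void)
{
	char stack[2 * THREAD_SIZE];	/* room for one aligned block */
	uintptr_t base = ((uintptr_t)stack + THREAD_SIZE - 1) & CURRENT_MASK;
	struct thread_info *ti = (struct thread_info *)base;

	ti->cpu = 3;	/* thread_info sits at the stack base */

	uintptr_t some_sp = base + THREAD_SIZE - 128;	/* a "stack pointer" */
	struct thread_info *found =
		(struct thread_info *)(some_sp & CURRENT_MASK);

	printf("cpu = %d\n", found->cpu);	/* prints 3 */
	return 0;
}
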
19321diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
19322index 6a99859..03cb807 100644
19323--- a/arch/x86/include/asm/stackprotector.h
19324+++ b/arch/x86/include/asm/stackprotector.h
19325@@ -47,7 +47,7 @@
19326 * head_32 for boot CPU and setup_per_cpu_areas() for others.
19327 */
19328 #define GDT_STACK_CANARY_INIT \
19329- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
19330+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
19331
19332 /*
19333 * Initialize the stackprotector canary value.
19334@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
19335
19336 static inline void load_stack_canary_segment(void)
19337 {
19338-#ifdef CONFIG_X86_32
19339+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19340 asm volatile ("mov %0, %%gs" : : "r" (0));
19341 #endif
19342 }
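
The 0x18 -> 0x17 change above is an off-by-one fix that follows from segment limits being inclusive of the last byte: a 24-byte stack-canary segment spans offsets 0x00..0x17, so its limit field is 0x17. The same convention explains the get_limit() change in the segment.h hunk earlier, where lsl already reports size minus one. A one-line arithmetic check:

#include <stdio.h>

int main(void)
{
	unsigned size  = 24;		/* canary segment: 24 bytes     */
	unsigned limit = size - 1;	/* inclusive last offset        */

	printf("size=%#x limit=%#x\n", size, limit);	/* 0x18 / 0x17 */
	return 0;
}
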
19343diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
19344index 70bbe39..4ae2bd4 100644
19345--- a/arch/x86/include/asm/stacktrace.h
19346+++ b/arch/x86/include/asm/stacktrace.h
19347@@ -11,28 +11,20 @@
19348
19349 extern int kstack_depth_to_print;
19350
19351-struct thread_info;
19352+struct task_struct;
19353 struct stacktrace_ops;
19354
19355-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
19356- unsigned long *stack,
19357- unsigned long bp,
19358- const struct stacktrace_ops *ops,
19359- void *data,
19360- unsigned long *end,
19361- int *graph);
19362+typedef unsigned long walk_stack_t(struct task_struct *task,
19363+ void *stack_start,
19364+ unsigned long *stack,
19365+ unsigned long bp,
19366+ const struct stacktrace_ops *ops,
19367+ void *data,
19368+ unsigned long *end,
19369+ int *graph);
19370
19371-extern unsigned long
19372-print_context_stack(struct thread_info *tinfo,
19373- unsigned long *stack, unsigned long bp,
19374- const struct stacktrace_ops *ops, void *data,
19375- unsigned long *end, int *graph);
19376-
19377-extern unsigned long
19378-print_context_stack_bp(struct thread_info *tinfo,
19379- unsigned long *stack, unsigned long bp,
19380- const struct stacktrace_ops *ops, void *data,
19381- unsigned long *end, int *graph);
19382+extern walk_stack_t print_context_stack;
19383+extern walk_stack_t print_context_stack_bp;
19384
19385 /* Generic stack tracer with callbacks */
19386
19387@@ -40,7 +32,7 @@ struct stacktrace_ops {
19388 void (*address)(void *data, unsigned long address, int reliable);
19389 /* On negative return stop dumping */
19390 int (*stack)(void *data, char *name);
19391- walk_stack_t walk_stack;
19392+ walk_stack_t *walk_stack;
19393 };
19394
19395 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
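
The stacktrace.h hunk above switches walk_stack_t from a pointer-to-function typedef to a function-type typedef, so externs read as function declarations (extern walk_stack_t print_context_stack;) and the struct member becomes an explicit pointer (walk_stack_t *walk_stack), which keeps declaration and type in sync with one definition. A minimal illustration of the C idiom, with made-up names:

#include <stdio.h>

typedef int handler_t(int);	/* a function type, not a pointer */

extern handler_t double_it;	/* declares a function of that type */
struct ops { handler_t *fn; };	/* the pointer is now explicit */

int double_it(int x) { return 2 * x; }

int main(void)
{
	struct ops o = { .fn = double_it };
	printf("%d\n", o.fn(21));	/* 42 */
	return 0;
}
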
19396diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
19397index 751bf4b..a1278b5 100644
19398--- a/arch/x86/include/asm/switch_to.h
19399+++ b/arch/x86/include/asm/switch_to.h
19400@@ -112,7 +112,7 @@ do { \
19401 "call __switch_to\n\t" \
19402 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
19403 __switch_canary \
19404- "movq %P[thread_info](%%rsi),%%r8\n\t" \
19405+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
19406 "movq %%rax,%%rdi\n\t" \
19407 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
19408 "jnz ret_from_fork\n\t" \
19409@@ -123,7 +123,7 @@ do { \
19410 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
19411 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
19412 [_tif_fork] "i" (_TIF_FORK), \
19413- [thread_info] "i" (offsetof(struct task_struct, stack)), \
19414+ [thread_info] "m" (current_tinfo), \
19415 [current_task] "m" (current_task) \
19416 __switch_canary_iparam \
19417 : "memory", "cc" __EXTRA_CLOBBER)
19418diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
19419index 547e344..6be1175 100644
19420--- a/arch/x86/include/asm/thread_info.h
19421+++ b/arch/x86/include/asm/thread_info.h
19422@@ -24,7 +24,6 @@ struct exec_domain;
19423 #include <linux/atomic.h>
19424
19425 struct thread_info {
19426- struct task_struct *task; /* main task structure */
19427 struct exec_domain *exec_domain; /* execution domain */
19428 __u32 flags; /* low level flags */
19429 __u32 status; /* thread synchronous flags */
19430@@ -33,13 +32,13 @@ struct thread_info {
19431 mm_segment_t addr_limit;
19432 struct restart_block restart_block;
19433 void __user *sysenter_return;
19434+ unsigned long lowest_stack;
19435 unsigned int sig_on_uaccess_error:1;
19436 unsigned int uaccess_err:1; /* uaccess failed */
19437 };
19438
19439-#define INIT_THREAD_INFO(tsk) \
19440+#define INIT_THREAD_INFO \
19441 { \
19442- .task = &tsk, \
19443 .exec_domain = &default_exec_domain, \
19444 .flags = 0, \
19445 .cpu = 0, \
19446@@ -50,7 +49,7 @@ struct thread_info {
19447 }, \
19448 }
19449
19450-#define init_thread_info (init_thread_union.thread_info)
19451+#define init_thread_info (init_thread_union.stack)
19452 #define init_stack (init_thread_union.stack)
19453
19454 #else /* !__ASSEMBLY__ */
19455@@ -91,6 +90,7 @@ struct thread_info {
19456 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
19457 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
19458 #define TIF_X32 30 /* 32-bit native x86-64 binary */
19459+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
19460
19461 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
19462 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
19463@@ -115,17 +115,18 @@ struct thread_info {
19464 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
19465 #define _TIF_ADDR32 (1 << TIF_ADDR32)
19466 #define _TIF_X32 (1 << TIF_X32)
19467+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
19468
19469 /* work to do in syscall_trace_enter() */
19470 #define _TIF_WORK_SYSCALL_ENTRY \
19471 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
19472 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
19473- _TIF_NOHZ)
19474+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19475
19476 /* work to do in syscall_trace_leave() */
19477 #define _TIF_WORK_SYSCALL_EXIT \
19478 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
19479- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
19480+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
19481
19482 /* work to do on interrupt/exception return */
19483 #define _TIF_WORK_MASK \
19484@@ -136,7 +137,7 @@ struct thread_info {
19485 /* work to do on any return to user space */
19486 #define _TIF_ALLWORK_MASK \
19487 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
19488- _TIF_NOHZ)
19489+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
19490
19491 /* Only used for 64 bit */
19492 #define _TIF_DO_NOTIFY_MASK \
19493@@ -151,7 +152,6 @@ struct thread_info {
19494 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
19495
19496 #define STACK_WARN (THREAD_SIZE/8)
19497-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
19498
19499 /*
19500 * macros/functions for gaining access to the thread information structure
19501@@ -162,26 +162,18 @@ struct thread_info {
19502
19503 DECLARE_PER_CPU(unsigned long, kernel_stack);
19504
19505+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
19506+
19507 static inline struct thread_info *current_thread_info(void)
19508 {
19509- struct thread_info *ti;
19510- ti = (void *)(this_cpu_read_stable(kernel_stack) +
19511- KERNEL_STACK_OFFSET - THREAD_SIZE);
19512- return ti;
19513+ return this_cpu_read_stable(current_tinfo);
19514 }
19515
19516 #else /* !__ASSEMBLY__ */
19517
19518 /* how to get the thread information struct from ASM */
19519 #define GET_THREAD_INFO(reg) \
19520- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
19521- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
19522-
19523-/*
19524- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
19525- * a certain register (to be used in assembler memory operands).
19526- */
19527-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
19528+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
19529
19530 #endif
19531
19532@@ -237,5 +229,12 @@ static inline bool is_ia32_task(void)
19533 extern void arch_task_cache_init(void);
19534 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
19535 extern void arch_release_task_struct(struct task_struct *tsk);
19536+
19537+#define __HAVE_THREAD_FUNCTIONS
19538+#define task_thread_info(task) (&(task)->tinfo)
19539+#define task_stack_page(task) ((task)->stack)
19540+#define setup_thread_stack(p, org) do {} while (0)
19541+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
19542+
19543 #endif
19544 #endif /* _ASM_X86_THREAD_INFO_H */
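
With __HAVE_THREAD_FUNCTIONS defined above, thread_info is embedded in task_struct as ->tinfo, which is why the processor.h hunk earlier could redefine KSTK_TOP() via container_of(): given a thread_info pointer, subtracting the member offset recovers the enclosing task_struct. A minimal re-derivation; the struct layout here is illustrative, not the kernel's.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct thread_info { unsigned long flags; };

struct task_struct {
	int pid;
	struct thread_info tinfo;	/* embedded, as the patch arranges */
	unsigned long sp0;
};

int main(void)
{
	struct task_struct t = { .pid = 1, .sp0 = 0xdeadbeef };
	struct thread_info *ti = &t.tinfo;

	/* walk back from the member to the containing task_struct */
	struct task_struct *back = container_of(ti, struct task_struct, tinfo);

	printf("pid=%d sp0=%#lx\n", back->pid, back->sp0);
	return 0;
}
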
19545diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
19546index 04905bf..1178cdf 100644
19547--- a/arch/x86/include/asm/tlbflush.h
19548+++ b/arch/x86/include/asm/tlbflush.h
19549@@ -17,18 +17,44 @@
19550
19551 static inline void __native_flush_tlb(void)
19552 {
19553+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19554+ u64 descriptor[2];
19555+
19556+ descriptor[0] = PCID_KERNEL;
19557+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
19558+ return;
19559+ }
19560+
19561+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19562+ if (static_cpu_has(X86_FEATURE_PCID)) {
19563+ unsigned int cpu = raw_get_cpu();
19564+
19565+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
19566+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
19567+ raw_put_cpu_no_resched();
19568+ return;
19569+ }
19570+#endif
19571+
19572 native_write_cr3(native_read_cr3());
19573 }
19574
19575 static inline void __native_flush_tlb_global_irq_disabled(void)
19576 {
19577- unsigned long cr4;
19578+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19579+ u64 descriptor[2];
19580
19581- cr4 = native_read_cr4();
19582- /* clear PGE */
19583- native_write_cr4(cr4 & ~X86_CR4_PGE);
19584- /* write old PGE again and flush TLBs */
19585- native_write_cr4(cr4);
19586+ descriptor[0] = PCID_KERNEL;
19587+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
19588+ } else {
19589+ unsigned long cr4;
19590+
19591+ cr4 = native_read_cr4();
19592+ /* clear PGE */
19593+ native_write_cr4(cr4 & ~X86_CR4_PGE);
19594+ /* write old PGE again and flush TLBs */
19595+ native_write_cr4(cr4);
19596+ }
19597 }
19598
19599 static inline void __native_flush_tlb_global(void)
19600@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void)
19601
19602 static inline void __native_flush_tlb_single(unsigned long addr)
19603 {
19604+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
19605+ u64 descriptor[2];
19606+
19607+ descriptor[0] = PCID_KERNEL;
19608+ descriptor[1] = addr;
19609+
19610+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19611+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
19612+ if (addr < TASK_SIZE_MAX)
19613+ descriptor[1] += pax_user_shadow_base;
19614+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19615+ }
19616+
19617+ descriptor[0] = PCID_USER;
19618+ descriptor[1] = addr;
19619+#endif
19620+
19621+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
19622+ return;
19623+ }
19624+
19625+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19626+ if (static_cpu_has(X86_FEATURE_PCID)) {
19627+ unsigned int cpu = raw_get_cpu();
19628+
19629+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
19630+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19631+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
19632+ raw_put_cpu_no_resched();
19633+
19634+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
19635+ addr += pax_user_shadow_base;
19636+ }
19637+#endif
19638+
19639 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
19640 }
19641
19642diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
19643index 0d592e0..cbc90a3 100644
19644--- a/arch/x86/include/asm/uaccess.h
19645+++ b/arch/x86/include/asm/uaccess.h
19646@@ -7,6 +7,7 @@
19647 #include <linux/compiler.h>
19648 #include <linux/thread_info.h>
19649 #include <linux/string.h>
19650+#include <linux/spinlock.h>
19651 #include <asm/asm.h>
19652 #include <asm/page.h>
19653 #include <asm/smap.h>
19654@@ -29,7 +30,12 @@
19655
19656 #define get_ds() (KERNEL_DS)
19657 #define get_fs() (current_thread_info()->addr_limit)
19658+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19659+void __set_fs(mm_segment_t x);
19660+void set_fs(mm_segment_t x);
19661+#else
19662 #define set_fs(x) (current_thread_info()->addr_limit = (x))
19663+#endif
19664
19665 #define segment_eq(a, b) ((a).seg == (b).seg)
19666
19667@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
19668 * checks that the pointer is in the user space range - after calling
19669 * this function, memory access functions may still return -EFAULT.
19670 */
19671-#define access_ok(type, addr, size) \
19672- likely(!__range_not_ok(addr, size, user_addr_max()))
19673+extern int _cond_resched(void);
19674+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
19675+#define access_ok(type, addr, size) \
19676+({ \
19677+ unsigned long __size = size; \
19678+ unsigned long __addr = (unsigned long)addr; \
19679+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
19680+ if (__ret_ao && __size) { \
19681+ unsigned long __addr_ao = __addr & PAGE_MASK; \
19682+ unsigned long __end_ao = __addr + __size - 1; \
19683+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
19684+ while (__addr_ao <= __end_ao) { \
19685+ char __c_ao; \
19686+ __addr_ao += PAGE_SIZE; \
19687+ if (__size > PAGE_SIZE) \
19688+ _cond_resched(); \
19689+ if (__get_user(__c_ao, (char __user *)__addr)) \
19690+ break; \
19691+ if (type != VERIFY_WRITE) { \
19692+ __addr = __addr_ao; \
19693+ continue; \
19694+ } \
19695+ if (__put_user(__c_ao, (char __user *)__addr)) \
19696+ break; \
19697+ __addr = __addr_ao; \
19698+ } \
19699+ } \
19700+ } \
19701+ __ret_ao; \
19702+})
19703
19704 /*
19705 * The exception table consists of pairs of addresses relative to the
19706@@ -134,11 +168,13 @@ extern int __get_user_8(void);
19707 extern int __get_user_bad(void);
19708
19709 /*
19710- * This is a type: either unsigned long, if the argument fits into
19711- * that type, or otherwise unsigned long long.
19712+ * This is a type: either (un)signed int, if the argument fits into
19713+ * that type, or otherwise (un)signed long long.
19714 */
19715 #define __inttype(x) \
19716-__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19717+__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0U), \
19718+ __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0ULL, 0LL),\
19719+ __builtin_choose_expr(__type_is_unsigned(__typeof__(x)), 0U, 0)))
19720
19721 /**
19722 * get_user: - Get a simple variable from user space.
19723@@ -176,10 +212,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19724 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
19725 __chk_user_ptr(ptr); \
19726 might_fault(); \
19727+ pax_open_userland(); \
19728 asm volatile("call __get_user_%P3" \
19729 : "=a" (__ret_gu), "=r" (__val_gu) \
19730 : "0" (ptr), "i" (sizeof(*(ptr)))); \
19731 (x) = (__typeof__(*(ptr))) __val_gu; \
19732+ pax_close_userland(); \
19733 __ret_gu; \
19734 })
19735
19736@@ -187,13 +225,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19737 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
19738 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
19739
19740-
19741+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19742+#define __copyuser_seg "gs;"
19743+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
19744+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
19745+#else
19746+#define __copyuser_seg
19747+#define __COPYUSER_SET_ES
19748+#define __COPYUSER_RESTORE_ES
19749+#endif
19750
19751 #ifdef CONFIG_X86_32
19752 #define __put_user_asm_u64(x, addr, err, errret) \
19753 asm volatile(ASM_STAC "\n" \
19754- "1: movl %%eax,0(%2)\n" \
19755- "2: movl %%edx,4(%2)\n" \
19756+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
19757+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
19758 "3: " ASM_CLAC "\n" \
19759 ".section .fixup,\"ax\"\n" \
19760 "4: movl %3,%0\n" \
19761@@ -206,8 +252,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
19762
19763 #define __put_user_asm_ex_u64(x, addr) \
19764 asm volatile(ASM_STAC "\n" \
19765- "1: movl %%eax,0(%1)\n" \
19766- "2: movl %%edx,4(%1)\n" \
19767+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
19768+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
19769 "3: " ASM_CLAC "\n" \
19770 _ASM_EXTABLE_EX(1b, 2b) \
19771 _ASM_EXTABLE_EX(2b, 3b) \
19772@@ -257,7 +303,8 @@ extern void __put_user_8(void);
19773 __typeof__(*(ptr)) __pu_val; \
19774 __chk_user_ptr(ptr); \
19775 might_fault(); \
19776- __pu_val = x; \
19777+ __pu_val = (x); \
19778+ pax_open_userland(); \
19779 switch (sizeof(*(ptr))) { \
19780 case 1: \
19781 __put_user_x(1, __pu_val, ptr, __ret_pu); \
19782@@ -275,6 +322,7 @@ extern void __put_user_8(void);
19783 __put_user_x(X, __pu_val, ptr, __ret_pu); \
19784 break; \
19785 } \
19786+ pax_close_userland(); \
19787 __ret_pu; \
19788 })
19789
19790@@ -355,8 +403,10 @@ do { \
19791 } while (0)
19792
19793 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19794+do { \
19795+ pax_open_userland(); \
19796 asm volatile(ASM_STAC "\n" \
19797- "1: mov"itype" %2,%"rtype"1\n" \
19798+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
19799 "2: " ASM_CLAC "\n" \
19800 ".section .fixup,\"ax\"\n" \
19801 "3: mov %3,%0\n" \
19802@@ -364,8 +414,10 @@ do { \
19803 " jmp 2b\n" \
19804 ".previous\n" \
19805 _ASM_EXTABLE(1b, 3b) \
19806- : "=r" (err), ltype(x) \
19807- : "m" (__m(addr)), "i" (errret), "0" (err))
19808+ : "=r" (err), ltype (x) \
19809+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
19810+ pax_close_userland(); \
19811+} while (0)
19812
19813 #define __get_user_size_ex(x, ptr, size) \
19814 do { \
19815@@ -389,7 +441,7 @@ do { \
19816 } while (0)
19817
19818 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
19819- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
19820+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
19821 "2:\n" \
19822 _ASM_EXTABLE_EX(1b, 2b) \
19823 : ltype(x) : "m" (__m(addr)))
19824@@ -406,13 +458,24 @@ do { \
19825 int __gu_err; \
19826 unsigned long __gu_val; \
19827 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
19828- (x) = (__force __typeof__(*(ptr)))__gu_val; \
19829+ (x) = (__typeof__(*(ptr)))__gu_val; \
19830 __gu_err; \
19831 })
19832
19833 /* FIXME: this hack is definitely wrong -AK */
19834 struct __large_struct { unsigned long buf[100]; };
19835-#define __m(x) (*(struct __large_struct __user *)(x))
19836+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19837+#define ____m(x) \
19838+({ \
19839+ unsigned long ____x = (unsigned long)(x); \
19840+ if (____x < pax_user_shadow_base) \
19841+ ____x += pax_user_shadow_base; \
19842+ (typeof(x))____x; \
19843+})
19844+#else
19845+#define ____m(x) (x)
19846+#endif
19847+#define __m(x) (*(struct __large_struct __user *)____m(x))
19848
19849 /*
19850 * Tell gcc we read from memory instead of writing: this is because
19851@@ -420,8 +483,10 @@ struct __large_struct { unsigned long buf[100]; };
19852 * aliasing issues.
19853 */
19854 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
19855+do { \
19856+ pax_open_userland(); \
19857 asm volatile(ASM_STAC "\n" \
19858- "1: mov"itype" %"rtype"1,%2\n" \
19859+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
19860 "2: " ASM_CLAC "\n" \
19861 ".section .fixup,\"ax\"\n" \
19862 "3: mov %3,%0\n" \
19863@@ -429,10 +494,12 @@ struct __large_struct { unsigned long buf[100]; };
19864 ".previous\n" \
19865 _ASM_EXTABLE(1b, 3b) \
19866 : "=r"(err) \
19867- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
19868+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
19869+ pax_close_userland(); \
19870+} while (0)
19871
19872 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
19873- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
19874+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
19875 "2:\n" \
19876 _ASM_EXTABLE_EX(1b, 2b) \
19877 : : ltype(x), "m" (__m(addr)))
19878@@ -442,11 +509,13 @@ struct __large_struct { unsigned long buf[100]; };
19879 */
19880 #define uaccess_try do { \
19881 current_thread_info()->uaccess_err = 0; \
19882+ pax_open_userland(); \
19883 stac(); \
19884 barrier();
19885
19886 #define uaccess_catch(err) \
19887 clac(); \
19888+ pax_close_userland(); \
19889 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
19890 } while (0)
19891
19892@@ -471,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
19893 * On error, the variable @x is set to zero.
19894 */
19895
19896+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19897+#define __get_user(x, ptr) get_user((x), (ptr))
19898+#else
19899 #define __get_user(x, ptr) \
19900 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
19901+#endif
19902
19903 /**
19904 * __put_user: - Write a simple value into user space, with less checking.
19905@@ -494,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
19906 * Returns zero on success, or -EFAULT on error.
19907 */
19908
19909+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19910+#define __put_user(x, ptr) put_user((x), (ptr))
19911+#else
19912 #define __put_user(x, ptr) \
19913 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
19914+#endif
19915
19916 #define __get_user_unaligned __get_user
19917 #define __put_user_unaligned __put_user
19918@@ -513,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
19919 #define get_user_ex(x, ptr) do { \
19920 unsigned long __gue_val; \
19921 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
19922- (x) = (__force __typeof__(*(ptr)))__gue_val; \
19923+ (x) = (__typeof__(*(ptr)))__gue_val; \
19924 } while (0)
19925
19926 #define put_user_try uaccess_try
19927@@ -531,7 +608,7 @@ extern __must_check long strlen_user(const char __user *str);
19928 extern __must_check long strnlen_user(const char __user *str, long n);
19929
19930 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
19931-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
19932+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
19933
19934 extern void __cmpxchg_wrong_size(void)
19935 __compiletime_error("Bad argument size for cmpxchg");
19936@@ -542,18 +619,19 @@ extern void __cmpxchg_wrong_size(void)
19937 __typeof__(ptr) __uval = (uval); \
19938 __typeof__(*(ptr)) __old = (old); \
19939 __typeof__(*(ptr)) __new = (new); \
19940+ pax_open_userland(); \
19941 switch (size) { \
19942 case 1: \
19943 { \
19944 asm volatile("\t" ASM_STAC "\n" \
19945- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
19946+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
19947 "2:\t" ASM_CLAC "\n" \
19948 "\t.section .fixup, \"ax\"\n" \
19949 "3:\tmov %3, %0\n" \
19950 "\tjmp 2b\n" \
19951 "\t.previous\n" \
19952 _ASM_EXTABLE(1b, 3b) \
19953- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19954+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19955 : "i" (-EFAULT), "q" (__new), "1" (__old) \
19956 : "memory" \
19957 ); \
19958@@ -562,14 +640,14 @@ extern void __cmpxchg_wrong_size(void)
19959 case 2: \
19960 { \
19961 asm volatile("\t" ASM_STAC "\n" \
19962- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
19963+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
19964 "2:\t" ASM_CLAC "\n" \
19965 "\t.section .fixup, \"ax\"\n" \
19966 "3:\tmov %3, %0\n" \
19967 "\tjmp 2b\n" \
19968 "\t.previous\n" \
19969 _ASM_EXTABLE(1b, 3b) \
19970- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19971+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19972 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19973 : "memory" \
19974 ); \
19975@@ -578,14 +656,14 @@ extern void __cmpxchg_wrong_size(void)
19976 case 4: \
19977 { \
19978 asm volatile("\t" ASM_STAC "\n" \
19979- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
19980+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
19981 "2:\t" ASM_CLAC "\n" \
19982 "\t.section .fixup, \"ax\"\n" \
19983 "3:\tmov %3, %0\n" \
19984 "\tjmp 2b\n" \
19985 "\t.previous\n" \
19986 _ASM_EXTABLE(1b, 3b) \
19987- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
19988+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
19989 : "i" (-EFAULT), "r" (__new), "1" (__old) \
19990 : "memory" \
19991 ); \
19992@@ -597,14 +675,14 @@ extern void __cmpxchg_wrong_size(void)
19993 __cmpxchg_wrong_size(); \
19994 \
19995 asm volatile("\t" ASM_STAC "\n" \
19996- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
19997+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
19998 "2:\t" ASM_CLAC "\n" \
19999 "\t.section .fixup, \"ax\"\n" \
20000 "3:\tmov %3, %0\n" \
20001 "\tjmp 2b\n" \
20002 "\t.previous\n" \
20003 _ASM_EXTABLE(1b, 3b) \
20004- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20005+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20006 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20007 : "memory" \
20008 ); \
20009@@ -613,6 +691,7 @@ extern void __cmpxchg_wrong_size(void)
20010 default: \
20011 __cmpxchg_wrong_size(); \
20012 } \
20013+ pax_close_userland(); \
20014 *__uval = __old; \
20015 __ret; \
20016 })
20017@@ -636,17 +715,6 @@ extern struct movsl_mask {
20018
20019 #define ARCH_HAS_NOCACHE_UACCESS 1
20020
20021-#ifdef CONFIG_X86_32
20022-# include <asm/uaccess_32.h>
20023-#else
20024-# include <asm/uaccess_64.h>
20025-#endif
20026-
20027-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
20028- unsigned n);
20029-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20030- unsigned n);
20031-
20032 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
20033 # define copy_user_diag __compiletime_error
20034 #else
20035@@ -656,7 +724,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20036 extern void copy_user_diag("copy_from_user() buffer size is too small")
20037 copy_from_user_overflow(void);
20038 extern void copy_user_diag("copy_to_user() buffer size is too small")
20039-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20040+copy_to_user_overflow(void);
20041
20042 #undef copy_user_diag
20043
20044@@ -669,7 +737,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
20045
20046 extern void
20047 __compiletime_warning("copy_to_user() buffer size is not provably correct")
20048-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20049+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
20050 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
20051
20052 #else
20053@@ -684,10 +752,16 @@ __copy_from_user_overflow(int size, unsigned long count)
20054
20055 #endif
20056
20057+#ifdef CONFIG_X86_32
20058+# include <asm/uaccess_32.h>
20059+#else
20060+# include <asm/uaccess_64.h>
20061+#endif
20062+
20063 static inline unsigned long __must_check
20064 copy_from_user(void *to, const void __user *from, unsigned long n)
20065 {
20066- int sz = __compiletime_object_size(to);
20067+ size_t sz = __compiletime_object_size(to);
20068
20069 might_fault();
20070
20071@@ -709,12 +783,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20072 * case, and do only runtime checking for non-constant sizes.
20073 */
20074
20075- if (likely(sz < 0 || sz >= n))
20076- n = _copy_from_user(to, from, n);
20077- else if(__builtin_constant_p(n))
20078- copy_from_user_overflow();
20079- else
20080- __copy_from_user_overflow(sz, n);
20081+ if (likely(sz != (size_t)-1 && sz < n)) {
20082+ if(__builtin_constant_p(n))
20083+ copy_from_user_overflow();
20084+ else
20085+ __copy_from_user_overflow(sz, n);
20086+ } else if (access_ok(VERIFY_READ, from, n))
20087+ n = __copy_from_user(to, from, n);
20088+ else if ((long)n > 0)
20089+ memset(to, 0, n);
20090
20091 return n;
20092 }
20093@@ -722,17 +799,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20094 static inline unsigned long __must_check
20095 copy_to_user(void __user *to, const void *from, unsigned long n)
20096 {
20097- int sz = __compiletime_object_size(from);
20098+ size_t sz = __compiletime_object_size(from);
20099
20100 might_fault();
20101
20102 /* See the comment in copy_from_user() above. */
20103- if (likely(sz < 0 || sz >= n))
20104- n = _copy_to_user(to, from, n);
20105- else if(__builtin_constant_p(n))
20106- copy_to_user_overflow();
20107- else
20108- __copy_to_user_overflow(sz, n);
20109+ if (likely(sz != (size_t)-1 && sz < n)) {
20110+ if(__builtin_constant_p(n))
20111+ copy_to_user_overflow();
20112+ else
20113+ __copy_to_user_overflow(sz, n);
20114+ } else if (access_ok(VERIFY_WRITE, to, n))
20115+ n = __copy_to_user(to, from, n);
20116
20117 return n;
20118 }
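
The reworked copy_from_user()/copy_to_user() above invert the old check order: the compile-time object-size test fires first (sz != (size_t)-1 means the size is known), access_ok() then gates the actual copy, and a failed read zero-fills the kernel destination so no stale data survives in the buffer. A minimal userspace model of that ordering follows; object_size, range_ok and memcpy are stand-ins for __compiletime_object_size(), access_ok() and __copy_from_user(), not the kernel implementation itself.

    #include <stddef.h>
    #include <string.h>

    #define SIZE_UNKNOWN ((size_t)-1)

    /* Returns the number of bytes NOT copied, like copy_from_user(). */
    static size_t model_copy_from_user(void *to, const void *from, size_t n,
                                       size_t object_size, int range_ok)
    {
        if (object_size != SIZE_UNKNOWN && object_size < n)
            return n;                  /* destination overflow detected: refuse */
        if (range_ok) {
            memcpy(to, from, n);       /* stands in for __copy_from_user() */
            return 0;
        }
        if ((long)n > 0)
            memset(to, 0, n);          /* failed read: don't leak stale data */
        return n;
    }
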
20119diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
20120index 3c03a5d..edb68ae 100644
20121--- a/arch/x86/include/asm/uaccess_32.h
20122+++ b/arch/x86/include/asm/uaccess_32.h
20123@@ -40,9 +40,14 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
20124 * anything, so this is accurate.
20125 */
20126
20127-static __always_inline unsigned long __must_check
20128+static __always_inline __size_overflow(3) unsigned long __must_check
20129 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20130 {
20131+ if ((long)n < 0)
20132+ return n;
20133+
20134+ check_object_size(from, n, true);
20135+
20136 if (__builtin_constant_p(n)) {
20137 unsigned long ret;
20138
20139@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20140 __copy_to_user(void __user *to, const void *from, unsigned long n)
20141 {
20142 might_fault();
20143+
20144 return __copy_to_user_inatomic(to, from, n);
20145 }
20146
20147-static __always_inline unsigned long
20148+static __always_inline __size_overflow(3) unsigned long
20149 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20150 {
20151+ if ((long)n < 0)
20152+ return n;
20153+
20154 	/* Avoid zeroing the tail if the copy fails.
20155 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20156 * but as the zeroing behaviour is only significant when n is not
20157@@ -137,6 +146,12 @@ static __always_inline unsigned long
20158 __copy_from_user(void *to, const void __user *from, unsigned long n)
20159 {
20160 might_fault();
20161+
20162+ if ((long)n < 0)
20163+ return n;
20164+
20165+ check_object_size(to, n, false);
20166+
20167 if (__builtin_constant_p(n)) {
20168 unsigned long ret;
20169
20170@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20171 const void __user *from, unsigned long n)
20172 {
20173 might_fault();
20174+
20175+ if ((long)n < 0)
20176+ return n;
20177+
20178 if (__builtin_constant_p(n)) {
20179 unsigned long ret;
20180
20181@@ -181,7 +200,10 @@ static __always_inline unsigned long
20182 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20183 unsigned long n)
20184 {
20185- return __copy_from_user_ll_nocache_nozero(to, from, n);
20186+ if ((long)n < 0)
20187+ return n;
20188+
20189+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20190 }
20191
20192 #endif /* _ASM_X86_UACCESS_32_H */
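
Every 32-bit copy helper above gains the same `(long)n < 0` guard: a length whose high bit is set (typically a negative int sign-extended into an unsigned long) is bounced back before any copying happens. A hedged illustration of what that check catches, in plain userspace C:

    #include <stdio.h>

    /* Returns nonzero ("nothing copied") for suspect sizes. */
    static unsigned long refuse_huge_len(unsigned long n)
    {
        if ((long)n < 0)   /* n > LONG_MAX: almost certainly an overflowed length */
            return n;
        return 0;          /* plausible length: the real copy would proceed */
    }

    int main(void)
    {
        int err = -14;     /* e.g. an -EFAULT error code reused as a length */
        printf("%lu\n", refuse_huge_len((unsigned long)err)); /* huge -> refused */
        return 0;
    }
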
20193diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20194index 12a26b9..c36fff5 100644
20195--- a/arch/x86/include/asm/uaccess_64.h
20196+++ b/arch/x86/include/asm/uaccess_64.h
20197@@ -10,6 +10,9 @@
20198 #include <asm/alternative.h>
20199 #include <asm/cpufeature.h>
20200 #include <asm/page.h>
20201+#include <asm/pgtable.h>
20202+
20203+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20204
20205 /*
20206 * Copy To/From Userspace
20207@@ -23,8 +26,8 @@ copy_user_generic_string(void *to, const void *from, unsigned len);
20208 __must_check unsigned long
20209 copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20210
20211-static __always_inline __must_check unsigned long
20212-copy_user_generic(void *to, const void *from, unsigned len)
20213+static __always_inline __must_check __size_overflow(3) unsigned long
20214+copy_user_generic(void *to, const void *from, unsigned long len)
20215 {
20216 unsigned ret;
20217
20218@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20219 }
20220
20221 __must_check unsigned long
20222-copy_in_user(void __user *to, const void __user *from, unsigned len);
20223+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20224
20225 static __always_inline __must_check
20226-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20227+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20228 {
20229- int ret = 0;
20230+ size_t sz = __compiletime_object_size(dst);
20231+ unsigned ret = 0;
20232+
20233+ if (size > INT_MAX)
20234+ return size;
20235+
20236+ check_object_size(dst, size, false);
20237+
20238+#ifdef CONFIG_PAX_MEMORY_UDEREF
20239+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20240+ return size;
20241+#endif
20242+
20243+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20244+ if(__builtin_constant_p(size))
20245+ copy_from_user_overflow();
20246+ else
20247+ __copy_from_user_overflow(sz, size);
20248+ return size;
20249+ }
20250
20251 if (!__builtin_constant_p(size))
20252- return copy_user_generic(dst, (__force void *)src, size);
20253+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20254 switch (size) {
20255- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20256+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20257 ret, "b", "b", "=q", 1);
20258 return ret;
20259- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20260+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20261 ret, "w", "w", "=r", 2);
20262 return ret;
20263- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20264+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20265 ret, "l", "k", "=r", 4);
20266 return ret;
20267- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20268+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20269 ret, "q", "", "=r", 8);
20270 return ret;
20271 case 10:
20272- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20273+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20274 ret, "q", "", "=r", 10);
20275 if (unlikely(ret))
20276 return ret;
20277 __get_user_asm(*(u16 *)(8 + (char *)dst),
20278- (u16 __user *)(8 + (char __user *)src),
20279+ (const u16 __user *)(8 + (const char __user *)src),
20280 ret, "w", "w", "=r", 2);
20281 return ret;
20282 case 16:
20283- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20284+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20285 ret, "q", "", "=r", 16);
20286 if (unlikely(ret))
20287 return ret;
20288 __get_user_asm(*(u64 *)(8 + (char *)dst),
20289- (u64 __user *)(8 + (char __user *)src),
20290+ (const u64 __user *)(8 + (const char __user *)src),
20291 ret, "q", "", "=r", 8);
20292 return ret;
20293 default:
20294- return copy_user_generic(dst, (__force void *)src, size);
20295+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20296 }
20297 }
20298
20299 static __always_inline __must_check
20300-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20301+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20302 {
20303 might_fault();
20304 return __copy_from_user_nocheck(dst, src, size);
20305 }
20306
20307 static __always_inline __must_check
20308-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20309+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20310 {
20311- int ret = 0;
20312+ size_t sz = __compiletime_object_size(src);
20313+ unsigned ret = 0;
20314+
20315+ if (size > INT_MAX)
20316+ return size;
20317+
20318+ check_object_size(src, size, true);
20319+
20320+#ifdef CONFIG_PAX_MEMORY_UDEREF
20321+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20322+ return size;
20323+#endif
20324+
20325+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20326+ if(__builtin_constant_p(size))
20327+ copy_to_user_overflow();
20328+ else
20329+ __copy_to_user_overflow(sz, size);
20330+ return size;
20331+ }
20332
20333 if (!__builtin_constant_p(size))
20334- return copy_user_generic((__force void *)dst, src, size);
20335+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20336 switch (size) {
20337- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
20338+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
20339 ret, "b", "b", "iq", 1);
20340 return ret;
20341- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
20342+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
20343 ret, "w", "w", "ir", 2);
20344 return ret;
20345- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
20346+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
20347 ret, "l", "k", "ir", 4);
20348 return ret;
20349- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
20350+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20351 ret, "q", "", "er", 8);
20352 return ret;
20353 case 10:
20354- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20355+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20356 ret, "q", "", "er", 10);
20357 if (unlikely(ret))
20358 return ret;
20359 asm("":::"memory");
20360- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
20361+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
20362 ret, "w", "w", "ir", 2);
20363 return ret;
20364 case 16:
20365- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
20366+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
20367 ret, "q", "", "er", 16);
20368 if (unlikely(ret))
20369 return ret;
20370 asm("":::"memory");
20371- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
20372+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
20373 ret, "q", "", "er", 8);
20374 return ret;
20375 default:
20376- return copy_user_generic((__force void *)dst, src, size);
20377+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20378 }
20379 }
20380
20381 static __always_inline __must_check
20382-int __copy_to_user(void __user *dst, const void *src, unsigned size)
20383+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
20384 {
20385 might_fault();
20386 return __copy_to_user_nocheck(dst, src, size);
20387 }
20388
20389 static __always_inline __must_check
20390-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20391+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20392 {
20393- int ret = 0;
20394+ unsigned ret = 0;
20395
20396 might_fault();
20397+
20398+ if (size > INT_MAX)
20399+ return size;
20400+
20401+#ifdef CONFIG_PAX_MEMORY_UDEREF
20402+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20403+ return size;
20404+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20405+ return size;
20406+#endif
20407+
20408 if (!__builtin_constant_p(size))
20409- return copy_user_generic((__force void *)dst,
20410- (__force void *)src, size);
20411+ return copy_user_generic((__force_kernel void *)____m(dst),
20412+ (__force_kernel const void *)____m(src), size);
20413 switch (size) {
20414 case 1: {
20415 u8 tmp;
20416- __get_user_asm(tmp, (u8 __user *)src,
20417+ __get_user_asm(tmp, (const u8 __user *)src,
20418 ret, "b", "b", "=q", 1);
20419 if (likely(!ret))
20420 __put_user_asm(tmp, (u8 __user *)dst,
20421@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20422 }
20423 case 2: {
20424 u16 tmp;
20425- __get_user_asm(tmp, (u16 __user *)src,
20426+ __get_user_asm(tmp, (const u16 __user *)src,
20427 ret, "w", "w", "=r", 2);
20428 if (likely(!ret))
20429 __put_user_asm(tmp, (u16 __user *)dst,
20430@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20431
20432 case 4: {
20433 u32 tmp;
20434- __get_user_asm(tmp, (u32 __user *)src,
20435+ __get_user_asm(tmp, (const u32 __user *)src,
20436 ret, "l", "k", "=r", 4);
20437 if (likely(!ret))
20438 __put_user_asm(tmp, (u32 __user *)dst,
20439@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20440 }
20441 case 8: {
20442 u64 tmp;
20443- __get_user_asm(tmp, (u64 __user *)src,
20444+ __get_user_asm(tmp, (const u64 __user *)src,
20445 ret, "q", "", "=r", 8);
20446 if (likely(!ret))
20447 __put_user_asm(tmp, (u64 __user *)dst,
20448@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
20449 return ret;
20450 }
20451 default:
20452- return copy_user_generic((__force void *)dst,
20453- (__force void *)src, size);
20454+ return copy_user_generic((__force_kernel void *)____m(dst),
20455+ (__force_kernel const void *)____m(src), size);
20456 }
20457 }
20458
20459-static __must_check __always_inline int
20460-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
20461+static __must_check __always_inline unsigned long
20462+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
20463 {
20464 return __copy_from_user_nocheck(dst, src, size);
20465 }
20466
20467-static __must_check __always_inline int
20468-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
20469+static __must_check __always_inline unsigned long
20470+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
20471 {
20472 return __copy_to_user_nocheck(dst, src, size);
20473 }
20474
20475-extern long __copy_user_nocache(void *dst, const void __user *src,
20476- unsigned size, int zerorest);
20477+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
20478+ unsigned long size, int zerorest);
20479
20480-static inline int
20481-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
20482+static inline unsigned long
20483+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
20484 {
20485 might_fault();
20486+
20487+ if (size > INT_MAX)
20488+ return size;
20489+
20490+#ifdef CONFIG_PAX_MEMORY_UDEREF
20491+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20492+ return size;
20493+#endif
20494+
20495 return __copy_user_nocache(dst, src, size, 1);
20496 }
20497
20498-static inline int
20499+static inline unsigned long
20500 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
20501- unsigned size)
20502+ unsigned long size)
20503 {
20504+ if (size > INT_MAX)
20505+ return size;
20506+
20507+#ifdef CONFIG_PAX_MEMORY_UDEREF
20508+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20509+ return size;
20510+#endif
20511+
20512 return __copy_user_nocache(dst, src, size, 0);
20513 }
20514
20515 unsigned long
20516-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
20517+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
20518
20519 #endif /* _ASM_X86_UACCESS_64_H */
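
On the 64-bit side, the ____m() wrapper introduced earlier in uaccess.h is what makes the `(__force_kernel ...)____m(src)` casts above work under UDEREF: a raw userland address is rebased above pax_user_shadow_base into the kernel's shadow mapping of userland, so user memory is never dereferenced at its native address. A sketch of that rebasing, with shadow_base as an assumed stand-in for the kernel's pax_user_shadow_base variable:

    /* Illustrative only: the real base is pax_user_shadow_base. */
    static unsigned long shadow_base = 1UL << 46;   /* assumed placement */

    static const void *remap_user_ptr(const void *p)
    {
        unsigned long addr = (unsigned long)p;
        if (addr < shadow_base)      /* a native userland address... */
            addr += shadow_base;     /* ...is moved into the shadow mapping */
        return (const void *)addr;
    }
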
20520diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
20521index 5b238981..77fdd78 100644
20522--- a/arch/x86/include/asm/word-at-a-time.h
20523+++ b/arch/x86/include/asm/word-at-a-time.h
20524@@ -11,7 +11,7 @@
20525 * and shift, for example.
20526 */
20527 struct word_at_a_time {
20528- const unsigned long one_bits, high_bits;
20529+ unsigned long one_bits, high_bits;
20530 };
20531
20532 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
20533diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
20534index f58a9c7..dc378042a 100644
20535--- a/arch/x86/include/asm/x86_init.h
20536+++ b/arch/x86/include/asm/x86_init.h
20537@@ -129,7 +129,7 @@ struct x86_init_ops {
20538 struct x86_init_timers timers;
20539 struct x86_init_iommu iommu;
20540 struct x86_init_pci pci;
20541-};
20542+} __no_const;
20543
20544 /**
20545 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
20546@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
20547 void (*setup_percpu_clockev)(void);
20548 void (*early_percpu_clock_init)(void);
20549 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
20550-};
20551+} __no_const;
20552
20553 struct timespec;
20554
20555@@ -168,7 +168,7 @@ struct x86_platform_ops {
20556 void (*save_sched_clock_state)(void);
20557 void (*restore_sched_clock_state)(void);
20558 void (*apic_post_init)(void);
20559-};
20560+} __no_const;
20561
20562 struct pci_dev;
20563 struct msi_msg;
20564@@ -182,7 +182,7 @@ struct x86_msi_ops {
20565 void (*teardown_msi_irqs)(struct pci_dev *dev);
20566 void (*restore_msi_irqs)(struct pci_dev *dev);
20567 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
20568-};
20569+} __no_const;
20570
20571 struct IO_APIC_route_entry;
20572 struct io_apic_irq_attr;
20573@@ -203,7 +203,7 @@ struct x86_io_apic_ops {
20574 unsigned int destination, int vector,
20575 struct io_apic_irq_attr *attr);
20576 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
20577-};
20578+} __no_const;
20579
20580 extern struct x86_init_ops x86_init;
20581 extern struct x86_cpuinit_ops x86_cpuinit;
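
The __no_const tag added to these ops structures is the escape hatch for PaX's constify gcc plugin: structures consisting purely of function pointers are normally forced const (read-only after boot), and __no_const exempts the few tables that legitimately get rewritten at runtime. A rough model of the convention, with the plugin attribute reduced to a no-op macro:

    #define __no_const   /* stand-in for the constify-plugin attribute */

    struct patchable_ops {
        void (*setup)(void);
        void (*teardown)(void);
    } __no_const;         /* stays writable so boot code can swap members */

    static void noop(void) { }
    static struct patchable_ops boot_ops = { noop, noop };  /* patched early */
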
20582diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
20583index 5eea099..ff7ef8d 100644
20584--- a/arch/x86/include/asm/xen/page.h
20585+++ b/arch/x86/include/asm/xen/page.h
20586@@ -83,7 +83,7 @@ static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
20587 * - get_phys_to_machine() is to be called by __pfn_to_mfn() only in special
20588  * cases needing extended handling.
20589 */
20590-static inline unsigned long __pfn_to_mfn(unsigned long pfn)
20591+static inline unsigned long __intentional_overflow(-1) __pfn_to_mfn(unsigned long pfn)
20592 {
20593 unsigned long mfn;
20594
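
__intentional_overflow(-1) is an annotation consumed by the size_overflow gcc plugin: it marks a function whose return-value arithmetic is allowed to wrap, so the plugin's overflow instrumentation skips it rather than flagging pfn/mfn math. Sketched here with the attribute stubbed out as an empty macro:

    #define __intentional_overflow(...)   /* plugin marker, no-op stand-in */

    static unsigned long __intentional_overflow(-1) pfn_mix(unsigned long pfn)
    {
        return pfn * 2 + 1;   /* may wrap by design; the plugin is told so */
    }
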
20595diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
20596index c9a6d68..cb57f42 100644
20597--- a/arch/x86/include/asm/xsave.h
20598+++ b/arch/x86/include/asm/xsave.h
20599@@ -223,12 +223,16 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20600 if (unlikely(err))
20601 return -EFAULT;
20602
20603+ pax_open_userland();
20604 __asm__ __volatile__(ASM_STAC "\n"
20605- "1:"XSAVE"\n"
20606+ "1:"
20607+ __copyuser_seg
20608+ XSAVE"\n"
20609 "2: " ASM_CLAC "\n"
20610 xstate_fault
20611 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
20612 : "memory");
20613+ pax_close_userland();
20614 return err;
20615 }
20616
20617@@ -238,16 +242,20 @@ static inline int xsave_user(struct xsave_struct __user *buf)
20618 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
20619 {
20620 int err = 0;
20621- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
20622+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
20623 u32 lmask = mask;
20624 u32 hmask = mask >> 32;
20625
20626+ pax_open_userland();
20627 __asm__ __volatile__(ASM_STAC "\n"
20628- "1:"XRSTOR"\n"
20629+ "1:"
20630+ __copyuser_seg
20631+ XRSTOR"\n"
20632 "2: " ASM_CLAC "\n"
20633 xstate_fault
20634 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
20635 : "memory"); /* memory required? */
20636+ pax_close_userland();
20637 return err;
20638 }
20639
20640diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
20641index d993e33..8db1b18 100644
20642--- a/arch/x86/include/uapi/asm/e820.h
20643+++ b/arch/x86/include/uapi/asm/e820.h
20644@@ -58,7 +58,7 @@ struct e820map {
20645 #define ISA_START_ADDRESS 0xa0000
20646 #define ISA_END_ADDRESS 0x100000
20647
20648-#define BIOS_BEGIN 0x000a0000
20649+#define BIOS_BEGIN 0x000c0000
20650 #define BIOS_END 0x00100000
20651
20652 #define BIOS_ROM_BASE 0xffe00000
20653diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
20654index 7b0a55a..ad115bf 100644
20655--- a/arch/x86/include/uapi/asm/ptrace-abi.h
20656+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
20657@@ -49,7 +49,6 @@
20658 #define EFLAGS 144
20659 #define RSP 152
20660 #define SS 160
20661-#define ARGOFFSET R11
20662 #endif /* __ASSEMBLY__ */
20663
20664 /* top of stack page */
20665diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
20666index 5d4502c..a567e09 100644
20667--- a/arch/x86/kernel/Makefile
20668+++ b/arch/x86/kernel/Makefile
20669@@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
20670 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
20671 obj-$(CONFIG_IRQ_WORK) += irq_work.o
20672 obj-y += probe_roms.o
20673-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
20674+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
20675 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
20676 obj-$(CONFIG_X86_64) += mcount_64.o
20677 obj-y += syscall_$(BITS).o vsyscall_gtod.o
20678diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
20679index b5ddc96..490b4e4 100644
20680--- a/arch/x86/kernel/acpi/boot.c
20681+++ b/arch/x86/kernel/acpi/boot.c
20682@@ -1351,7 +1351,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
20683 * If your system is blacklisted here, but you find that acpi=force
20684 * works for you, please contact linux-acpi@vger.kernel.org
20685 */
20686-static struct dmi_system_id __initdata acpi_dmi_table[] = {
20687+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
20688 /*
20689 * Boxes that need ACPI disabled
20690 */
20691@@ -1426,7 +1426,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
20692 };
20693
20694 /* second table for DMI checks that should run after early-quirks */
20695-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
20696+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
20697 /*
20698 * HP laptops which use a DSDT reporting as HP/SB400/10000,
20699 * which includes some code which overrides all temperature
20700diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
20701index 3136820..e2c6577 100644
20702--- a/arch/x86/kernel/acpi/sleep.c
20703+++ b/arch/x86/kernel/acpi/sleep.c
20704@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
20705 #else /* CONFIG_64BIT */
20706 #ifdef CONFIG_SMP
20707 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
20708+
20709+ pax_open_kernel();
20710 early_gdt_descr.address =
20711 (unsigned long)get_cpu_gdt_table(smp_processor_id());
20712+ pax_close_kernel();
20713+
20714 initial_gs = per_cpu_offset(smp_processor_id());
20715 #endif
20716 initial_code = (unsigned long)wakeup_long64;
20717diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
20718index 665c6b7..eae4d56 100644
20719--- a/arch/x86/kernel/acpi/wakeup_32.S
20720+++ b/arch/x86/kernel/acpi/wakeup_32.S
20721@@ -29,13 +29,11 @@ wakeup_pmode_return:
20722 # and restore the stack ... but you need gdt for this to work
20723 movl saved_context_esp, %esp
20724
20725- movl %cs:saved_magic, %eax
20726- cmpl $0x12345678, %eax
20727+ cmpl $0x12345678, saved_magic
20728 jne bogus_magic
20729
20730 # jump to place where we left off
20731- movl saved_eip, %eax
20732- jmp *%eax
20733+ jmp *(saved_eip)
20734
20735 bogus_magic:
20736 jmp bogus_magic
20737diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
20738index 703130f..27a155d 100644
20739--- a/arch/x86/kernel/alternative.c
20740+++ b/arch/x86/kernel/alternative.c
20741@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20742 */
20743 for (a = start; a < end; a++) {
20744 instr = (u8 *)&a->instr_offset + a->instr_offset;
20745+
20746+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20747+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20748+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20749+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20750+#endif
20751+
20752 replacement = (u8 *)&a->repl_offset + a->repl_offset;
20753 BUG_ON(a->replacementlen > a->instrlen);
20754 BUG_ON(a->instrlen > sizeof(insnbuf));
20755@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
20756 add_nops(insnbuf + a->replacementlen,
20757 a->instrlen - a->replacementlen);
20758
20759+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20760+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
20761+ instr = ktva_ktla(instr);
20762+#endif
20763+
20764 text_poke_early(instr, insnbuf, a->instrlen);
20765 }
20766 }
20767@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
20768 for (poff = start; poff < end; poff++) {
20769 u8 *ptr = (u8 *)poff + *poff;
20770
20771+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20772+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20773+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20774+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20775+#endif
20776+
20777 if (!*poff || ptr < text || ptr >= text_end)
20778 continue;
20779 /* turn DS segment override prefix into lock prefix */
20780- if (*ptr == 0x3e)
20781+ if (*ktla_ktva(ptr) == 0x3e)
20782 text_poke(ptr, ((unsigned char []){0xf0}), 1);
20783 }
20784 mutex_unlock(&text_mutex);
20785@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
20786 for (poff = start; poff < end; poff++) {
20787 u8 *ptr = (u8 *)poff + *poff;
20788
20789+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20790+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20791+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
20792+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
20793+#endif
20794+
20795 if (!*poff || ptr < text || ptr >= text_end)
20796 continue;
20797 /* turn lock prefix into DS segment override prefix */
20798- if (*ptr == 0xf0)
20799+ if (*ktla_ktva(ptr) == 0xf0)
20800 text_poke(ptr, ((unsigned char []){0x3E}), 1);
20801 }
20802 mutex_unlock(&text_mutex);
20803@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
20804
20805 BUG_ON(p->len > MAX_PATCH_LEN);
20806 /* prep the buffer with the original instructions */
20807- memcpy(insnbuf, p->instr, p->len);
20808+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
20809 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
20810 (unsigned long)p->instr, p->len);
20811
20812@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
20813 if (!uniproc_patched || num_possible_cpus() == 1)
20814 free_init_pages("SMP alternatives",
20815 (unsigned long)__smp_locks,
20816- (unsigned long)__smp_locks_end);
20817+ PAGE_ALIGN((unsigned long)__smp_locks_end));
20818 #endif
20819
20820 apply_paravirt(__parainstructions, __parainstructions_end);
20821@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
20822 * instructions. And on the local CPU you need to be protected again NMI or MCE
20823 * handlers seeing an inconsistent instruction while you patch.
20824 */
20825-void *__init_or_module text_poke_early(void *addr, const void *opcode,
20826+void *__kprobes text_poke_early(void *addr, const void *opcode,
20827 size_t len)
20828 {
20829 unsigned long flags;
20830 local_irq_save(flags);
20831- memcpy(addr, opcode, len);
20832+
20833+ pax_open_kernel();
20834+ memcpy(ktla_ktva(addr), opcode, len);
20835 sync_core();
20836+ pax_close_kernel();
20837+
20838 local_irq_restore(flags);
20839 /* Could also do a CLFLUSH here to speed up CPU recovery; but
20840 that causes hangs on some VIA CPUs. */
20841@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
20842 */
20843 void *text_poke(void *addr, const void *opcode, size_t len)
20844 {
20845- unsigned long flags;
20846- char *vaddr;
20847+ unsigned char *vaddr = ktla_ktva(addr);
20848 struct page *pages[2];
20849- int i;
20850+ size_t i;
20851
20852 if (!core_kernel_text((unsigned long)addr)) {
20853- pages[0] = vmalloc_to_page(addr);
20854- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
20855+ pages[0] = vmalloc_to_page(vaddr);
20856+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
20857 } else {
20858- pages[0] = virt_to_page(addr);
20859+ pages[0] = virt_to_page(vaddr);
20860 WARN_ON(!PageReserved(pages[0]));
20861- pages[1] = virt_to_page(addr + PAGE_SIZE);
20862+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
20863 }
20864 BUG_ON(!pages[0]);
20865- local_irq_save(flags);
20866- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
20867- if (pages[1])
20868- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
20869- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
20870- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
20871- clear_fixmap(FIX_TEXT_POKE0);
20872- if (pages[1])
20873- clear_fixmap(FIX_TEXT_POKE1);
20874- local_flush_tlb();
20875- sync_core();
20876- /* Could also do a CLFLUSH here to speed up CPU recovery; but
20877- that causes hangs on some VIA CPUs. */
20878+ text_poke_early(addr, opcode, len);
20879 for (i = 0; i < len; i++)
20880- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
20881- local_irq_restore(flags);
20882+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
20883 return addr;
20884 }
20885
20886@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
20887 if (likely(!bp_patching_in_progress))
20888 return 0;
20889
20890- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
20891+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
20892 return 0;
20893
20894 /* set up the specified breakpoint handler */
20895@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
20896 */
20897 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
20898 {
20899- unsigned char int3 = 0xcc;
20900+ const unsigned char int3 = 0xcc;
20901
20902 bp_int3_handler = handler;
20903 bp_int3_addr = (u8 *)addr + sizeof(int3);
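
The rewritten text_poke() above drops the fixmap remapping entirely: under KERNEXEC the kernel image is read-only at its execution address, so patching goes through text_poke_early(), which lifts write protection with pax_open_kernel() and writes via the ktla_ktva() alias; the byte-for-byte BUG_ON loop then verifies the patch landed. A sketch of that write-then-verify flow, with alias_of() as a stand-in for ktla_ktva():

    #include <string.h>
    #include <assert.h>

    static unsigned char *alias_of(unsigned char *addr)
    {
        return addr;   /* identity here; a writable alias in the kernel */
    }

    static void *poke_and_verify(unsigned char *addr, const unsigned char *op,
                                 size_t len)
    {
        unsigned char *vaddr = alias_of(addr);
        size_t i;
        memcpy(vaddr, op, len);             /* done with W^X lifted in the kernel */
        for (i = 0; i < len; i++)
            assert(vaddr[i] == op[i]);      /* mirrors the BUG_ON check above */
        return addr;
    }
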
20904diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
20905index 29b5b18..3bdfc29 100644
20906--- a/arch/x86/kernel/apic/apic.c
20907+++ b/arch/x86/kernel/apic/apic.c
20908@@ -201,7 +201,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
20909 /*
20910 * Debug level, exported for io_apic.c
20911 */
20912-unsigned int apic_verbosity;
20913+int apic_verbosity;
20914
20915 int pic_mode;
20916
20917@@ -1991,7 +1991,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
20918 apic_write(APIC_ESR, 0);
20919 v = apic_read(APIC_ESR);
20920 ack_APIC_irq();
20921- atomic_inc(&irq_err_count);
20922+ atomic_inc_unchecked(&irq_err_count);
20923
20924 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
20925 smp_processor_id(), v);
20926diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
20927index de918c4..32eed23 100644
20928--- a/arch/x86/kernel/apic/apic_flat_64.c
20929+++ b/arch/x86/kernel/apic/apic_flat_64.c
20930@@ -154,7 +154,7 @@ static int flat_probe(void)
20931 return 1;
20932 }
20933
20934-static struct apic apic_flat = {
20935+static struct apic apic_flat __read_only = {
20936 .name = "flat",
20937 .probe = flat_probe,
20938 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
20939@@ -260,7 +260,7 @@ static int physflat_probe(void)
20940 return 0;
20941 }
20942
20943-static struct apic apic_physflat = {
20944+static struct apic apic_physflat __read_only = {
20945
20946 .name = "physical flat",
20947 .probe = physflat_probe,
20948diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
20949index b205cdb..d8503ff 100644
20950--- a/arch/x86/kernel/apic/apic_noop.c
20951+++ b/arch/x86/kernel/apic/apic_noop.c
20952@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
20953 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
20954 }
20955
20956-struct apic apic_noop = {
20957+struct apic apic_noop __read_only = {
20958 .name = "noop",
20959 .probe = noop_probe,
20960 .acpi_madt_oem_check = NULL,
20961diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
20962index c4a8d63..fe893ac 100644
20963--- a/arch/x86/kernel/apic/bigsmp_32.c
20964+++ b/arch/x86/kernel/apic/bigsmp_32.c
20965@@ -147,7 +147,7 @@ static int probe_bigsmp(void)
20966 return dmi_bigsmp;
20967 }
20968
20969-static struct apic apic_bigsmp = {
20970+static struct apic apic_bigsmp __read_only = {
20971
20972 .name = "bigsmp",
20973 .probe = probe_bigsmp,
20974diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
20975index 3f5f604..309c0e6 100644
20976--- a/arch/x86/kernel/apic/io_apic.c
20977+++ b/arch/x86/kernel/apic/io_apic.c
20978@@ -1859,7 +1859,7 @@ int native_ioapic_set_affinity(struct irq_data *data,
20979 return ret;
20980 }
20981
20982-atomic_t irq_mis_count;
20983+atomic_unchecked_t irq_mis_count;
20984
20985 #ifdef CONFIG_GENERIC_PENDING_IRQ
20986 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
20987@@ -2000,7 +2000,7 @@ static void ack_ioapic_level(struct irq_data *data)
20988 * at the cpu.
20989 */
20990 if (!(v & (1 << (i & 0x1f)))) {
20991- atomic_inc(&irq_mis_count);
20992+ atomic_inc_unchecked(&irq_mis_count);
20993
20994 eoi_ioapic_irq(irq, cfg);
20995 }
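
irq_err_count and irq_mis_count become atomic_unchecked_t because they are pure statistics: under PaX REFCOUNT, ordinary atomic_t increments trap on overflow to stop reference-count wraps, and the _unchecked variants opt diagnostic counters out of that trap, since wrapping them is harmless. Roughly, in portable C11 terms:

    #include <stdatomic.h>

    static atomic_long irq_miss_stat;   /* a statistic: wrapping is harmless */

    static void note_missed_eoi(void)
    {
        /* relaxed add, no overflow trap: this is not a reference count */
        atomic_fetch_add_explicit(&irq_miss_stat, 1, memory_order_relaxed);
    }
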
20996diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
20997index bda4886..f9c7195 100644
20998--- a/arch/x86/kernel/apic/probe_32.c
20999+++ b/arch/x86/kernel/apic/probe_32.c
21000@@ -72,7 +72,7 @@ static int probe_default(void)
21001 return 1;
21002 }
21003
21004-static struct apic apic_default = {
21005+static struct apic apic_default __read_only = {
21006
21007 .name = "default",
21008 .probe = probe_default,
21009diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
21010index 6cedd79..023ff8e 100644
21011--- a/arch/x86/kernel/apic/vector.c
21012+++ b/arch/x86/kernel/apic/vector.c
21013@@ -21,7 +21,7 @@
21014
21015 static DEFINE_RAW_SPINLOCK(vector_lock);
21016
21017-void lock_vector_lock(void)
21018+void lock_vector_lock(void) __acquires(vector_lock)
21019 {
21020 	/* Used so the online set of cpus does not change
21021 * during assign_irq_vector.
21022@@ -29,7 +29,7 @@ void lock_vector_lock(void)
21023 raw_spin_lock(&vector_lock);
21024 }
21025
21026-void unlock_vector_lock(void)
21027+void unlock_vector_lock(void) __releases(vector_lock)
21028 {
21029 raw_spin_unlock(&vector_lock);
21030 }
21031diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
21032index e658f21..b695a1a 100644
21033--- a/arch/x86/kernel/apic/x2apic_cluster.c
21034+++ b/arch/x86/kernel/apic/x2apic_cluster.c
21035@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
21036 return notifier_from_errno(err);
21037 }
21038
21039-static struct notifier_block __refdata x2apic_cpu_notifier = {
21040+static struct notifier_block x2apic_cpu_notifier = {
21041 .notifier_call = update_clusterinfo,
21042 };
21043
21044@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
21045 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
21046 }
21047
21048-static struct apic apic_x2apic_cluster = {
21049+static struct apic apic_x2apic_cluster __read_only = {
21050
21051 .name = "cluster x2apic",
21052 .probe = x2apic_cluster_probe,
21053diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
21054index 6fae733..5ca17af 100644
21055--- a/arch/x86/kernel/apic/x2apic_phys.c
21056+++ b/arch/x86/kernel/apic/x2apic_phys.c
21057@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
21058 return apic == &apic_x2apic_phys;
21059 }
21060
21061-static struct apic apic_x2apic_phys = {
21062+static struct apic apic_x2apic_phys __read_only = {
21063
21064 .name = "physical x2apic",
21065 .probe = x2apic_phys_probe,
21066diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
21067index 8e9dcfd..c61b3e4 100644
21068--- a/arch/x86/kernel/apic/x2apic_uv_x.c
21069+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
21070@@ -348,7 +348,7 @@ static int uv_probe(void)
21071 return apic == &apic_x2apic_uv_x;
21072 }
21073
21074-static struct apic __refdata apic_x2apic_uv_x = {
21075+static struct apic apic_x2apic_uv_x __read_only = {
21076
21077 .name = "UV large system",
21078 .probe = uv_probe,
21079diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
21080index 927ec92..0dc3bd4 100644
21081--- a/arch/x86/kernel/apm_32.c
21082+++ b/arch/x86/kernel/apm_32.c
21083@@ -432,7 +432,7 @@ static DEFINE_MUTEX(apm_mutex);
21084 * This is for buggy BIOS's that refer to (real mode) segment 0x40
21085 * even though they are called in protected mode.
21086 */
21087-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
21088+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
21089 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
21090
21091 static const char driver_version[] = "1.16ac"; /* no spaces */
21092@@ -610,7 +610,10 @@ static long __apm_bios_call(void *_call)
21093 BUG_ON(cpu != 0);
21094 gdt = get_cpu_gdt_table(cpu);
21095 save_desc_40 = gdt[0x40 / 8];
21096+
21097+ pax_open_kernel();
21098 gdt[0x40 / 8] = bad_bios_desc;
21099+ pax_close_kernel();
21100
21101 apm_irq_save(flags);
21102 APM_DO_SAVE_SEGS;
21103@@ -619,7 +622,11 @@ static long __apm_bios_call(void *_call)
21104 &call->esi);
21105 APM_DO_RESTORE_SEGS;
21106 apm_irq_restore(flags);
21107+
21108+ pax_open_kernel();
21109 gdt[0x40 / 8] = save_desc_40;
21110+ pax_close_kernel();
21111+
21112 put_cpu();
21113
21114 return call->eax & 0xff;
21115@@ -686,7 +693,10 @@ static long __apm_bios_call_simple(void *_call)
21116 BUG_ON(cpu != 0);
21117 gdt = get_cpu_gdt_table(cpu);
21118 save_desc_40 = gdt[0x40 / 8];
21119+
21120+ pax_open_kernel();
21121 gdt[0x40 / 8] = bad_bios_desc;
21122+ pax_close_kernel();
21123
21124 apm_irq_save(flags);
21125 APM_DO_SAVE_SEGS;
21126@@ -694,7 +704,11 @@ static long __apm_bios_call_simple(void *_call)
21127 &call->eax);
21128 APM_DO_RESTORE_SEGS;
21129 apm_irq_restore(flags);
21130+
21131+ pax_open_kernel();
21132 gdt[0x40 / 8] = save_desc_40;
21133+ pax_close_kernel();
21134+
21135 put_cpu();
21136 return error;
21137 }
21138@@ -2349,12 +2363,15 @@ static int __init apm_init(void)
21139 * code to that CPU.
21140 */
21141 gdt = get_cpu_gdt_table(0);
21142+
21143+ pax_open_kernel();
21144 set_desc_base(&gdt[APM_CS >> 3],
21145 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21146 set_desc_base(&gdt[APM_CS_16 >> 3],
21147 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21148 set_desc_base(&gdt[APM_DS >> 3],
21149 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21150+ pax_close_kernel();
21151
21152 proc_create("apm", 0, NULL, &apm_file_ops);
21153
21154diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21155index 9f6b934..cf5ffb3 100644
21156--- a/arch/x86/kernel/asm-offsets.c
21157+++ b/arch/x86/kernel/asm-offsets.c
21158@@ -32,6 +32,8 @@ void common(void) {
21159 OFFSET(TI_flags, thread_info, flags);
21160 OFFSET(TI_status, thread_info, status);
21161 OFFSET(TI_addr_limit, thread_info, addr_limit);
21162+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21163+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21164
21165 BLANK();
21166 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21167@@ -52,8 +54,26 @@ void common(void) {
21168 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21169 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21170 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21171+
21172+#ifdef CONFIG_PAX_KERNEXEC
21173+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21174 #endif
21175
21176+#ifdef CONFIG_PAX_MEMORY_UDEREF
21177+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21178+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21179+#ifdef CONFIG_X86_64
21180+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21181+#endif
21182+#endif
21183+
21184+#endif
21185+
21186+ BLANK();
21187+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21188+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21189+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21190+
21191 #ifdef CONFIG_XEN
21192 BLANK();
21193 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
21194diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21195index fdcbb4d..036dd93 100644
21196--- a/arch/x86/kernel/asm-offsets_64.c
21197+++ b/arch/x86/kernel/asm-offsets_64.c
21198@@ -80,6 +80,7 @@ int main(void)
21199 BLANK();
21200 #undef ENTRY
21201
21202+ DEFINE(TSS_size, sizeof(struct tss_struct));
21203 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21204 BLANK();
21205
21206diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21207index 80091ae..0c5184f 100644
21208--- a/arch/x86/kernel/cpu/Makefile
21209+++ b/arch/x86/kernel/cpu/Makefile
21210@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21211 CFLAGS_REMOVE_perf_event.o = -pg
21212 endif
21213
21214-# Make sure load_percpu_segment has no stackprotector
21215-nostackp := $(call cc-option, -fno-stack-protector)
21216-CFLAGS_common.o := $(nostackp)
21217-
21218 obj-y := intel_cacheinfo.o scattered.o topology.o
21219 obj-y += common.o
21220 obj-y += rdrand.o
21221diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21222index 15c5df9..d9a604a 100644
21223--- a/arch/x86/kernel/cpu/amd.c
21224+++ b/arch/x86/kernel/cpu/amd.c
21225@@ -717,7 +717,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21226 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21227 {
21228 /* AMD errata T13 (order #21922) */
21229- if ((c->x86 == 6)) {
21230+ if (c->x86 == 6) {
21231 /* Duron Rev A0 */
21232 if (c->x86_model == 3 && c->x86_mask == 0)
21233 size = 64;
21234diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21235index c604965..1558f4a 100644
21236--- a/arch/x86/kernel/cpu/common.c
21237+++ b/arch/x86/kernel/cpu/common.c
21238@@ -90,60 +90,6 @@ static const struct cpu_dev default_cpu = {
21239
21240 static const struct cpu_dev *this_cpu = &default_cpu;
21241
21242-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21243-#ifdef CONFIG_X86_64
21244- /*
21245- * We need valid kernel segments for data and code in long mode too
21246- * IRET will check the segment types kkeil 2000/10/28
21247- * Also sysret mandates a special GDT layout
21248- *
21249- * TLS descriptors are currently at a different place compared to i386.
21250- * Hopefully nobody expects them at a fixed place (Wine?)
21251- */
21252- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21253- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21254- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21255- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21256- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21257- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21258-#else
21259- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21260- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21261- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21262- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21263- /*
21264- * Segments used for calling PnP BIOS have byte granularity.
21265- * They code segments and data segments have fixed 64k limits,
21266- * the transfer segment sizes are set at run time.
21267- */
21268- /* 32-bit code */
21269- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21270- /* 16-bit code */
21271- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21272- /* 16-bit data */
21273- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21274- /* 16-bit data */
21275- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21276- /* 16-bit data */
21277- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21278- /*
21279- * The APM segments have byte granularity and their bases
21280- * are set at run time. All have 64k limits.
21281- */
21282- /* 32-bit code */
21283- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21284- /* 16-bit code */
21285- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21286- /* data */
21287- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21288-
21289- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21290- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21291- GDT_STACK_CANARY_INIT
21292-#endif
21293-} };
21294-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21295-
21296 static int __init x86_xsave_setup(char *s)
21297 {
21298 if (strlen(s))
21299@@ -305,6 +251,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21300 }
21301 }
21302
21303+#ifdef CONFIG_X86_64
21304+static __init int setup_disable_pcid(char *arg)
21305+{
21306+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21307+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21308+
21309+#ifdef CONFIG_PAX_MEMORY_UDEREF
21310+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21311+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21312+#endif
21313+
21314+ return 1;
21315+}
21316+__setup("nopcid", setup_disable_pcid);
21317+
21318+static void setup_pcid(struct cpuinfo_x86 *c)
21319+{
21320+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21321+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
21322+
21323+#ifdef CONFIG_PAX_MEMORY_UDEREF
21324+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
21325+ pax_open_kernel();
21326+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21327+ pax_close_kernel();
21328+ printk("PAX: slow and weak UDEREF enabled\n");
21329+ } else
21330+ printk("PAX: UDEREF disabled\n");
21331+#endif
21332+
21333+ return;
21334+ }
21335+
21336+ printk("PAX: PCID detected\n");
21337+ set_in_cr4(X86_CR4_PCIDE);
21338+
21339+#ifdef CONFIG_PAX_MEMORY_UDEREF
21340+ pax_open_kernel();
21341+ clone_pgd_mask = ~(pgdval_t)0UL;
21342+ pax_close_kernel();
21343+ if (pax_user_shadow_base)
21344+ printk("PAX: weak UDEREF enabled\n");
21345+ else {
21346+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
21347+ printk("PAX: strong UDEREF enabled\n");
21348+ }
21349+#endif
21350+
21351+ if (cpu_has(c, X86_FEATURE_INVPCID))
21352+ printk("PAX: INVPCID detected\n");
21353+}
21354+#endif
21355+
21356 /*
21357 * Some CPU features depend on higher CPUID levels, which may not always
21358 * be available due to CPUID level capping or broken virtualization
21359@@ -405,7 +404,7 @@ void switch_to_new_gdt(int cpu)
21360 {
21361 struct desc_ptr gdt_descr;
21362
21363- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
21364+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
21365 gdt_descr.size = GDT_SIZE - 1;
21366 load_gdt(&gdt_descr);
21367 /* Reload the per-cpu base */
21368@@ -895,6 +894,20 @@ static void identify_cpu(struct cpuinfo_x86 *c)
21369 setup_smep(c);
21370 setup_smap(c);
21371
21372+#ifdef CONFIG_X86_32
21373+#ifdef CONFIG_PAX_PAGEEXEC
21374+ if (!(__supported_pte_mask & _PAGE_NX))
21375+ clear_cpu_cap(c, X86_FEATURE_PSE);
21376+#endif
21377+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21378+ clear_cpu_cap(c, X86_FEATURE_SEP);
21379+#endif
21380+#endif
21381+
21382+#ifdef CONFIG_X86_64
21383+ setup_pcid(c);
21384+#endif
21385+
21386 /*
21387 * The vendor-specific functions might have changed features.
21388 * Now we do "generic changes."
21389@@ -977,7 +990,7 @@ static void syscall32_cpu_init(void)
21390 void enable_sep_cpu(void)
21391 {
21392 int cpu = get_cpu();
21393- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21394+ struct tss_struct *tss = init_tss + cpu;
21395
21396 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21397 put_cpu();
21398@@ -1115,14 +1128,16 @@ static __init int setup_disablecpuid(char *arg)
21399 }
21400 __setup("clearcpuid=", setup_disablecpuid);
21401
21402+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
21403+EXPORT_PER_CPU_SYMBOL(current_tinfo);
21404+
21405 DEFINE_PER_CPU(unsigned long, kernel_stack) =
21406- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
21407+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
21408 EXPORT_PER_CPU_SYMBOL(kernel_stack);
21409
21410 #ifdef CONFIG_X86_64
21411-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21412-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
21413- (unsigned long) debug_idt_table };
21414+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
21415+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
21416
21417 DEFINE_PER_CPU_FIRST(union irq_stack_union,
21418 irq_stack_union) __aligned(PAGE_SIZE) __visible;
21419@@ -1299,7 +1314,7 @@ void cpu_init(void)
21420 */
21421 load_ucode_ap();
21422
21423- t = &per_cpu(init_tss, cpu);
21424+ t = init_tss + cpu;
21425 oist = &per_cpu(orig_ist, cpu);
21426
21427 #ifdef CONFIG_NUMA
21428@@ -1331,7 +1346,6 @@ void cpu_init(void)
21429 wrmsrl(MSR_KERNEL_GS_BASE, 0);
21430 barrier();
21431
21432- x86_configure_nx();
21433 enable_x2apic();
21434
21435 /*
21436@@ -1383,7 +1397,7 @@ void cpu_init(void)
21437 {
21438 int cpu = smp_processor_id();
21439 struct task_struct *curr = current;
21440- struct tss_struct *t = &per_cpu(init_tss, cpu);
21441+ struct tss_struct *t = init_tss + cpu;
21442 struct thread_struct *thread = &curr->thread;
21443
21444 wait_for_master_cpu(cpu);
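
The setup_pcid() hunk above gates the strong-UDEREF path on two CPUID feature bits, PCID and INVPCID. A minimal userspace sketch of the same probe, assuming a GCC/clang <cpuid.h> recent enough to provide __get_cpuid_count (bit positions per the Intel SDM: CPUID.01H:ECX[17] for PCID, CPUID.(EAX=07H,ECX=0):EBX[10] for INVPCID):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                printf("PCID:    %s\n", (ecx & (1u << 17)) ? "yes" : "no");

        if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
                printf("INVPCID: %s\n", (ebx & (1u << 10)) ? "yes" : "no");

        return 0;
}

On hardware without PCID the patch falls back to the slower CR3-switching UDEREF, which is what the printk branches at the top of this hunk report.
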
21445diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
21446index c703507..28535e3 100644
21447--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
21448+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
21449@@ -1026,6 +1026,22 @@ static struct attribute *default_attrs[] = {
21450 };
21451
21452 #ifdef CONFIG_AMD_NB
21453+static struct attribute *default_attrs_amd_nb[] = {
21454+ &type.attr,
21455+ &level.attr,
21456+ &coherency_line_size.attr,
21457+ &physical_line_partition.attr,
21458+ &ways_of_associativity.attr,
21459+ &number_of_sets.attr,
21460+ &size.attr,
21461+ &shared_cpu_map.attr,
21462+ &shared_cpu_list.attr,
21463+ NULL,
21464+ NULL,
21465+ NULL,
21466+ NULL
21467+};
21468+
21469 static struct attribute **amd_l3_attrs(void)
21470 {
21471 static struct attribute **attrs;
21472@@ -1036,18 +1052,7 @@ static struct attribute **amd_l3_attrs(void)
21473
21474 n = ARRAY_SIZE(default_attrs);
21475
21476- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
21477- n += 2;
21478-
21479- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
21480- n += 1;
21481-
21482- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
21483- if (attrs == NULL)
21484- return attrs = default_attrs;
21485-
21486- for (n = 0; default_attrs[n]; n++)
21487- attrs[n] = default_attrs[n];
21488+ attrs = default_attrs_amd_nb;
21489
21490 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
21491 attrs[n++] = &cache_disable_0.attr;
21492@@ -1098,6 +1103,13 @@ static struct kobj_type ktype_cache = {
21493 .default_attrs = default_attrs,
21494 };
21495
21496+#ifdef CONFIG_AMD_NB
21497+static struct kobj_type ktype_cache_amd_nb = {
21498+ .sysfs_ops = &sysfs_ops,
21499+ .default_attrs = default_attrs_amd_nb,
21500+};
21501+#endif
21502+
21503 static struct kobj_type ktype_percpu_entry = {
21504 .sysfs_ops = &sysfs_ops,
21505 };
21506@@ -1163,20 +1175,26 @@ static int cache_add_dev(struct device *dev)
21507 return retval;
21508 }
21509
21510+#ifdef CONFIG_AMD_NB
21511+ amd_l3_attrs();
21512+#endif
21513+
21514 for (i = 0; i < num_cache_leaves; i++) {
21515+ struct kobj_type *ktype;
21516+
21517 this_object = INDEX_KOBJECT_PTR(cpu, i);
21518 this_object->cpu = cpu;
21519 this_object->index = i;
21520
21521 this_leaf = CPUID4_INFO_IDX(cpu, i);
21522
21523- ktype_cache.default_attrs = default_attrs;
21524+ ktype = &ktype_cache;
21525 #ifdef CONFIG_AMD_NB
21526 if (this_leaf->base.nb)
21527- ktype_cache.default_attrs = amd_l3_attrs();
21528+ ktype = &ktype_cache_amd_nb;
21529 #endif
21530 retval = kobject_init_and_add(&(this_object->kobj),
21531- &ktype_cache,
21532+ ktype,
21533 per_cpu(ici_cache_kobject, cpu),
21534 "index%1lu", i);
21535 if (unlikely(retval)) {
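
The intel_cacheinfo.c rewrite above replaces a runtime-built attribute array with the static default_attrs_amd_nb table, which reserves four trailing NULL slots for the optional L3 index-disable and partitioning attributes; ktype_cache_amd_nb can then reference one fixed table instead of patching ktype_cache.default_attrs per leaf. The same reserved-slot pattern in a standalone sketch (all names here are invented, not kernel symbols):

#include <stdio.h>

/* three fixed entries plus spare NULL slots for optional ones */
static const char *attrs[8] = { "type", "level", "size" };

int main(void)
{
        size_t n = 3;                   /* first free slot */
        int have_l3_disable = 1;        /* stand-in for a feature test */

        if (have_l3_disable) {
                attrs[n++] = "cache_disable_0";
                attrs[n++] = "cache_disable_1";
        }

        for (size_t i = 0; attrs[i]; i++)
                puts(attrs[i]);
        return 0;
}
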
21536diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
21537index d2c6116..62fd7aa 100644
21538--- a/arch/x86/kernel/cpu/mcheck/mce.c
21539+++ b/arch/x86/kernel/cpu/mcheck/mce.c
21540@@ -45,6 +45,7 @@
21541 #include <asm/processor.h>
21542 #include <asm/mce.h>
21543 #include <asm/msr.h>
21544+#include <asm/local.h>
21545
21546 #include "mce-internal.h"
21547
21548@@ -259,7 +260,7 @@ static void print_mce(struct mce *m)
21549 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
21550 m->cs, m->ip);
21551
21552- if (m->cs == __KERNEL_CS)
21553+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
21554 print_symbol("{%s}", m->ip);
21555 pr_cont("\n");
21556 }
21557@@ -292,10 +293,10 @@ static void print_mce(struct mce *m)
21558
21559 #define PANIC_TIMEOUT 5 /* 5 seconds */
21560
21561-static atomic_t mce_panicked;
21562+static atomic_unchecked_t mce_panicked;
21563
21564 static int fake_panic;
21565-static atomic_t mce_fake_panicked;
21566+static atomic_unchecked_t mce_fake_panicked;
21567
21568 /* Panic in progress. Enable interrupts and wait for final IPI */
21569 static void wait_for_panic(void)
21570@@ -319,7 +320,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21571 /*
21572 * Make sure only one CPU runs in machine check panic
21573 */
21574- if (atomic_inc_return(&mce_panicked) > 1)
21575+ if (atomic_inc_return_unchecked(&mce_panicked) > 1)
21576 wait_for_panic();
21577 barrier();
21578
21579@@ -327,7 +328,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21580 console_verbose();
21581 } else {
21582 /* Don't log too much for fake panic */
21583- if (atomic_inc_return(&mce_fake_panicked) > 1)
21584+ if (atomic_inc_return_unchecked(&mce_fake_panicked) > 1)
21585 return;
21586 }
21587 /* First print corrected ones that are still unlogged */
21588@@ -366,7 +367,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
21589 if (!fake_panic) {
21590 if (panic_timeout == 0)
21591 panic_timeout = mca_cfg.panic_timeout;
21592- panic(msg);
21593+ panic("%s", msg);
21594 } else
21595 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
21596 }
21597@@ -744,7 +745,7 @@ static int mce_timed_out(u64 *t)
21598 * might have been modified by someone else.
21599 */
21600 rmb();
21601- if (atomic_read(&mce_panicked))
21602+ if (atomic_read_unchecked(&mce_panicked))
21603 wait_for_panic();
21604 if (!mca_cfg.monarch_timeout)
21605 goto out;
21606@@ -1722,7 +1723,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
21607 }
21608
21609 /* Call the installed machine check handler for this CPU setup. */
21610-void (*machine_check_vector)(struct pt_regs *, long error_code) =
21611+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
21612 unexpected_machine_check;
21613
21614 /*
21615@@ -1745,7 +1746,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21616 return;
21617 }
21618
21619+ pax_open_kernel();
21620 machine_check_vector = do_machine_check;
21621+ pax_close_kernel();
21622
21623 __mcheck_cpu_init_generic();
21624 __mcheck_cpu_init_vendor(c);
21625@@ -1759,7 +1762,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
21626 */
21627
21628 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
21629-static int mce_chrdev_open_count; /* #times opened */
21630+static local_t mce_chrdev_open_count; /* #times opened */
21631 static int mce_chrdev_open_exclu; /* already open exclusive? */
21632
21633 static int mce_chrdev_open(struct inode *inode, struct file *file)
21634@@ -1767,7 +1770,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21635 spin_lock(&mce_chrdev_state_lock);
21636
21637 if (mce_chrdev_open_exclu ||
21638- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
21639+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
21640 spin_unlock(&mce_chrdev_state_lock);
21641
21642 return -EBUSY;
21643@@ -1775,7 +1778,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
21644
21645 if (file->f_flags & O_EXCL)
21646 mce_chrdev_open_exclu = 1;
21647- mce_chrdev_open_count++;
21648+ local_inc(&mce_chrdev_open_count);
21649
21650 spin_unlock(&mce_chrdev_state_lock);
21651
21652@@ -1786,7 +1789,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
21653 {
21654 spin_lock(&mce_chrdev_state_lock);
21655
21656- mce_chrdev_open_count--;
21657+ local_dec(&mce_chrdev_open_count);
21658 mce_chrdev_open_exclu = 0;
21659
21660 spin_unlock(&mce_chrdev_state_lock);
21661@@ -2461,7 +2464,7 @@ static __init void mce_init_banks(void)
21662
21663 for (i = 0; i < mca_cfg.banks; i++) {
21664 struct mce_bank *b = &mce_banks[i];
21665- struct device_attribute *a = &b->attr;
21666+ device_attribute_no_const *a = &b->attr;
21667
21668 sysfs_attr_init(&a->attr);
21669 a->attr.name = b->attrname;
21670@@ -2568,7 +2571,7 @@ struct dentry *mce_get_debugfs_dir(void)
21671 static void mce_reset(void)
21672 {
21673 cpu_missing = 0;
21674- atomic_set(&mce_fake_panicked, 0);
21675+ atomic_set_unchecked(&mce_fake_panicked, 0);
21676 atomic_set(&mce_executing, 0);
21677 atomic_set(&mce_callin, 0);
21678 atomic_set(&global_nwo, 0);
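
mce_panicked and mce_fake_panicked become atomic_unchecked_t because they are one-way event counters rather than object reference counts: exempting them keeps PaX's REFCOUNT hardening, which traps on atomic_t overflow, free of false positives, while mce_chrdev_open_count moves to a local_t that is already serialized by mce_chrdev_state_lock. Roughly the arithmetic REFCOUNT enforces on a checked counter, as a plain C sketch (the kernel does this on the atomic itself, in asm):

#include <limits.h>
#include <stdio.h>

/* checked increment: saturates instead of wrapping, roughly what
 * a REFCOUNT-hardened atomic_inc_return() enforces */
static int inc_checked(int *v)
{
        int next;

        if (__builtin_add_overflow(*v, 1, &next))
                return *v;      /* refuse to wrap to INT_MIN */
        return *v = next;
}

int main(void)
{
        int c = INT_MAX - 1;

        printf("%d\n", inc_checked(&c));        /* INT_MAX */
        printf("%d\n", inc_checked(&c));        /* still INT_MAX */
        return 0;
}
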
21679diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
21680index a304298..49b6d06 100644
21681--- a/arch/x86/kernel/cpu/mcheck/p5.c
21682+++ b/arch/x86/kernel/cpu/mcheck/p5.c
21683@@ -10,6 +10,7 @@
21684 #include <asm/processor.h>
21685 #include <asm/mce.h>
21686 #include <asm/msr.h>
21687+#include <asm/pgtable.h>
21688
21689 /* By default disabled */
21690 int mce_p5_enabled __read_mostly;
21691@@ -48,7 +49,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
21692 if (!cpu_has(c, X86_FEATURE_MCE))
21693 return;
21694
21695+ pax_open_kernel();
21696 machine_check_vector = pentium_machine_check;
21697+ pax_close_kernel();
21698 /* Make sure the vector pointer is visible before we enable MCEs: */
21699 wmb();
21700
21701diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
21702index 7dc5564..1273569 100644
21703--- a/arch/x86/kernel/cpu/mcheck/winchip.c
21704+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
21705@@ -9,6 +9,7 @@
21706 #include <asm/processor.h>
21707 #include <asm/mce.h>
21708 #include <asm/msr.h>
21709+#include <asm/pgtable.h>
21710
21711 /* Machine check handler for WinChip C6: */
21712 static void winchip_machine_check(struct pt_regs *regs, long error_code)
21713@@ -22,7 +23,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
21714 {
21715 u32 lo, hi;
21716
21717+ pax_open_kernel();
21718 machine_check_vector = winchip_machine_check;
21719+ pax_close_kernel();
21720 /* Make sure the vector pointer is visible before we enable MCEs: */
21721 wmb();
21722
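
p5.c and winchip.c get the same treatment as mce.c above: since machine_check_vector is now __read_only, every legitimate assignment must be bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection. A userspace analogue of that discipline using mprotect() (the 4 KiB page size and the handler names are assumptions of the sketch):

#include <stdio.h>
#include <sys/mman.h>

static void handler_a(void) { puts("a"); }
static void handler_b(void) { puts("b"); }

int main(void)
{
        const size_t len = 4096;        /* assume 4 KiB pages */
        void (**vec)(void) = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (vec == MAP_FAILED)
                return 1;

        *vec = handler_a;
        mprotect(vec, len, PROT_READ);                  /* "close" */

        mprotect(vec, len, PROT_READ | PROT_WRITE);     /* "open"  */
        *vec = handler_b;                               /* retarget */
        mprotect(vec, len, PROT_READ);                  /* "close" */

        (*vec)();       /* prints "b"; a stray write would now fault */
        return 0;
}
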
21723diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
21724index 36a8361..e7058c2 100644
21725--- a/arch/x86/kernel/cpu/microcode/core.c
21726+++ b/arch/x86/kernel/cpu/microcode/core.c
21727@@ -518,7 +518,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21728 return NOTIFY_OK;
21729 }
21730
21731-static struct notifier_block __refdata mc_cpu_notifier = {
21732+static struct notifier_block mc_cpu_notifier = {
21733 .notifier_call = mc_cpu_callback,
21734 };
21735
21736diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
21737index c6826d1..8dc677e 100644
21738--- a/arch/x86/kernel/cpu/microcode/intel.c
21739+++ b/arch/x86/kernel/cpu/microcode/intel.c
21740@@ -196,6 +196,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
21741 struct microcode_header_intel mc_header;
21742 unsigned int mc_size;
21743
21744+ if (leftover < sizeof(mc_header)) {
21745+ pr_err("error! Truncated header in microcode data file\n");
21746+ break;
21747+ }
21748+
21749 if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
21750 break;
21751
21752@@ -293,13 +298,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21753
21754 static int get_ucode_user(void *to, const void *from, size_t n)
21755 {
21756- return copy_from_user(to, from, n);
21757+ return copy_from_user(to, (const void __force_user *)from, n);
21758 }
21759
21760 static enum ucode_state
21761 request_microcode_user(int cpu, const void __user *buf, size_t size)
21762 {
21763- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21764+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21765 }
21766
21767 static void microcode_fini_cpu(int cpu)
21768diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
21769index ec9df6f..420eb93 100644
21770--- a/arch/x86/kernel/cpu/microcode/intel_early.c
21771+++ b/arch/x86/kernel/cpu/microcode/intel_early.c
21772@@ -321,7 +321,11 @@ get_matching_model_microcode(int cpu, unsigned long start,
21773 unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
21774 int i;
21775
21776- while (leftover) {
21777+ while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
21778+
21779+ if (leftover < sizeof(mc_header))
21780+ break;
21781+
21782 mc_header = (struct microcode_header_intel *)ucode_ptr;
21783
21784 mc_size = get_totalsize(mc_header);
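
Both microcode loaders gain the same defensive shape: refuse to read a header unless at least sizeof(header) bytes are left, and bound the loop by the destination array. The core of such a hardened walk over a length-prefixed record stream, as a self-contained sketch (struct rec_hdr and the little-endian test buffer are inventions of the example):

#include <stdio.h>
#include <string.h>

struct rec_hdr { unsigned int totalsize; };

static void walk(const unsigned char *p, size_t leftover)
{
        while (leftover >= sizeof(struct rec_hdr)) {
                struct rec_hdr h;

                memcpy(&h, p, sizeof(h));
                if (h.totalsize < sizeof(h) || h.totalsize > leftover)
                        break;          /* truncated or lying header */
                printf("record of %u bytes\n", h.totalsize);
                p += h.totalsize;
                leftover -= h.totalsize;
        }
}

int main(void)
{
        unsigned char buf[8] = { 8, 0, 0, 0, 'd', 'a', 't', 'a' };

        walk(buf, sizeof(buf)); /* one 8-byte record, then stop */
        return 0;
}
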
21785diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
21786index ea5f363..cb0e905 100644
21787--- a/arch/x86/kernel/cpu/mtrr/main.c
21788+++ b/arch/x86/kernel/cpu/mtrr/main.c
21789@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
21790 u64 size_or_mask, size_and_mask;
21791 static bool mtrr_aps_delayed_init;
21792
21793-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
21794+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
21795
21796 const struct mtrr_ops *mtrr_if;
21797
21798diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
21799index df5e41f..816c719 100644
21800--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
21801+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
21802@@ -25,7 +25,7 @@ struct mtrr_ops {
21803 int (*validate_add_page)(unsigned long base, unsigned long size,
21804 unsigned int type);
21805 int (*have_wrcomb)(void);
21806-};
21807+} __do_const;
21808
21809 extern int generic_get_free_region(unsigned long base, unsigned long size,
21810 int replace_reg);
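
__do_const is a PaX constify-plugin annotation that forces every struct mtrr_ops instance into read-only memory, and the mtrr_ops[] pointer array gains __read_only for the same reason: ops tables full of function pointers are prime targets for write primitives. In plain C the closest equivalent is simply declaring the table const so the linker places it in .rodata:

#include <stdio.h>

struct ops {
        int (*have_wrcomb)(void);
};

static int no_wrcomb(void) { return 0; }

/* const from birth: the function pointer lives in .rodata and
 * cannot be redirected by a stray or malicious write */
static const struct ops generic_ops = {
        .have_wrcomb = no_wrcomb,
};

int main(void)
{
        printf("wrcomb: %d\n", generic_ops.have_wrcomb());
        return 0;
}
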
21811diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
21812index 143e5f5..5825081 100644
21813--- a/arch/x86/kernel/cpu/perf_event.c
21814+++ b/arch/x86/kernel/cpu/perf_event.c
21815@@ -1374,7 +1374,7 @@ static void __init pmu_check_apic(void)
21816
21817 }
21818
21819-static struct attribute_group x86_pmu_format_group = {
21820+static attribute_group_no_const x86_pmu_format_group = {
21821 .name = "format",
21822 .attrs = NULL,
21823 };
21824@@ -1473,7 +1473,7 @@ static struct attribute *events_attr[] = {
21825 NULL,
21826 };
21827
21828-static struct attribute_group x86_pmu_events_group = {
21829+static attribute_group_no_const x86_pmu_events_group = {
21830 .name = "events",
21831 .attrs = events_attr,
21832 };
21833@@ -1997,7 +1997,7 @@ static unsigned long get_segment_base(unsigned int segment)
21834 if (idx > GDT_ENTRIES)
21835 return 0;
21836
21837- desc = raw_cpu_ptr(gdt_page.gdt);
21838+ desc = get_cpu_gdt_table(smp_processor_id());
21839 }
21840
21841 return get_desc_base(desc + idx);
21842@@ -2087,7 +2087,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
21843 break;
21844
21845 perf_callchain_store(entry, frame.return_address);
21846- fp = frame.next_frame;
21847+ fp = (const void __force_user *)frame.next_frame;
21848 }
21849 }
21850
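
The __force_user cast in the callchain walker makes explicit that frame.next_frame, though typed as a kernel pointer, is being dereferenced as a userspace address. These markers are sparse checker attributes that compile away under gcc; the era's linux/compiler.h defines them roughly as below, shown here with a toy to_user() helper (the helper is illustrative, not a kernel function):

#include <stdio.h>

/* sparse-only annotations; they compile away under plain gcc */
#ifdef __CHECKER__
# define __user         __attribute__((noderef, address_space(1)))
# define __force        __attribute__((force))
#else
# define __user
# define __force
#endif

/* make an address-space crossing explicit so sparse stays quiet */
static inline const void __user *to_user(const void *p)
{
        return (const void __force __user *)p;
}

int main(void)
{
        int x = 42;
        const void __user *up = to_user(&x);

        printf("%p\n", (void *)up);
        return 0;
}
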
21851diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21852index 97242a9..cf9c30e 100644
21853--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21854+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
21855@@ -402,7 +402,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
21856 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
21857 {
21858 struct attribute **attrs;
21859- struct attribute_group *attr_group;
21860+ attribute_group_no_const *attr_group;
21861 int i = 0, j;
21862
21863 while (amd_iommu_v2_event_descs[i].attr.attr.name)
21864diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
21865index 498b6d9..4126515 100644
21866--- a/arch/x86/kernel/cpu/perf_event_intel.c
21867+++ b/arch/x86/kernel/cpu/perf_event_intel.c
21868@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
21869 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
21870
21871 if (boot_cpu_has(X86_FEATURE_PDCM)) {
21872- u64 capabilities;
21873+ u64 capabilities = x86_pmu.intel_cap.capabilities;
21874
21875- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
21876- x86_pmu.intel_cap.capabilities = capabilities;
21877+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
21878+ x86_pmu.intel_cap.capabilities = capabilities;
21879 }
21880
21881 intel_ds_init();
21882diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21883index c4bb8b8..9f7384d 100644
21884--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21885+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
21886@@ -465,7 +465,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
21887 NULL,
21888 };
21889
21890-static struct attribute_group rapl_pmu_events_group = {
21891+static attribute_group_no_const rapl_pmu_events_group __read_only = {
21892 .name = "events",
21893 .attrs = NULL, /* patched at runtime */
21894 };
21895diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21896index c635b8b..b78835e 100644
21897--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21898+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
21899@@ -733,7 +733,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
21900 static int __init uncore_type_init(struct intel_uncore_type *type)
21901 {
21902 struct intel_uncore_pmu *pmus;
21903- struct attribute_group *attr_group;
21904+ attribute_group_no_const *attr_group;
21905 struct attribute **attrs;
21906 int i, j;
21907
21908diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21909index 6c8c1e7..515b98a 100644
21910--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21911+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
21912@@ -114,7 +114,7 @@ struct intel_uncore_box {
21913 struct uncore_event_desc {
21914 struct kobj_attribute attr;
21915 const char *config;
21916-};
21917+} __do_const;
21918
21919 ssize_t uncore_event_show(struct kobject *kobj,
21920 struct kobj_attribute *attr, char *buf);
21921diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
21922index 83741a7..bd3507d 100644
21923--- a/arch/x86/kernel/cpuid.c
21924+++ b/arch/x86/kernel/cpuid.c
21925@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
21926 return notifier_from_errno(err);
21927 }
21928
21929-static struct notifier_block __refdata cpuid_class_cpu_notifier =
21930+static struct notifier_block cpuid_class_cpu_notifier =
21931 {
21932 .notifier_call = cpuid_class_cpu_callback,
21933 };
21934diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
21935index aceb2f9..c76d3e3 100644
21936--- a/arch/x86/kernel/crash.c
21937+++ b/arch/x86/kernel/crash.c
21938@@ -105,7 +105,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
21939 #ifdef CONFIG_X86_32
21940 struct pt_regs fixed_regs;
21941
21942- if (!user_mode_vm(regs)) {
21943+ if (!user_mode(regs)) {
21944 crash_fixup_ss_esp(&fixed_regs, regs);
21945 regs = &fixed_regs;
21946 }
21947diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
21948index afa64ad..dce67dd 100644
21949--- a/arch/x86/kernel/crash_dump_64.c
21950+++ b/arch/x86/kernel/crash_dump_64.c
21951@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
21952 return -ENOMEM;
21953
21954 if (userbuf) {
21955- if (copy_to_user(buf, vaddr + offset, csize)) {
21956+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
21957 iounmap(vaddr);
21958 return -EFAULT;
21959 }
21960diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
21961index f6dfd93..892ade4 100644
21962--- a/arch/x86/kernel/doublefault.c
21963+++ b/arch/x86/kernel/doublefault.c
21964@@ -12,7 +12,7 @@
21965
21966 #define DOUBLEFAULT_STACKSIZE (1024)
21967 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
21968-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
21969+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
21970
21971 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
21972
21973@@ -22,7 +22,7 @@ static void doublefault_fn(void)
21974 unsigned long gdt, tss;
21975
21976 native_store_gdt(&gdt_desc);
21977- gdt = gdt_desc.address;
21978+ gdt = (unsigned long)gdt_desc.address;
21979
21980 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
21981
21982@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
21983 /* 0x2 bit is always set */
21984 .flags = X86_EFLAGS_SF | 0x2,
21985 .sp = STACK_START,
21986- .es = __USER_DS,
21987+ .es = __KERNEL_DS,
21988 .cs = __KERNEL_CS,
21989 .ss = __KERNEL_DS,
21990- .ds = __USER_DS,
21991+ .ds = __KERNEL_DS,
21992 .fs = __KERNEL_PERCPU,
21993
21994 .__cr3 = __pa_nodebug(swapper_pg_dir),
21995diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
21996index b74ebc7..2c95874 100644
21997--- a/arch/x86/kernel/dumpstack.c
21998+++ b/arch/x86/kernel/dumpstack.c
21999@@ -2,6 +2,9 @@
22000 * Copyright (C) 1991, 1992 Linus Torvalds
22001 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
22002 */
22003+#ifdef CONFIG_GRKERNSEC_HIDESYM
22004+#define __INCLUDED_BY_HIDESYM 1
22005+#endif
22006 #include <linux/kallsyms.h>
22007 #include <linux/kprobes.h>
22008 #include <linux/uaccess.h>
22009@@ -33,23 +36,21 @@ static void printk_stack_address(unsigned long address, int reliable)
22010
22011 void printk_address(unsigned long address)
22012 {
22013- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
22014+ pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
22015 }
22016
22017 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
22018 static void
22019 print_ftrace_graph_addr(unsigned long addr, void *data,
22020 const struct stacktrace_ops *ops,
22021- struct thread_info *tinfo, int *graph)
22022+ struct task_struct *task, int *graph)
22023 {
22024- struct task_struct *task;
22025 unsigned long ret_addr;
22026 int index;
22027
22028 if (addr != (unsigned long)return_to_handler)
22029 return;
22030
22031- task = tinfo->task;
22032 index = task->curr_ret_stack;
22033
22034 if (!task->ret_stack || index < *graph)
22035@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22036 static inline void
22037 print_ftrace_graph_addr(unsigned long addr, void *data,
22038 const struct stacktrace_ops *ops,
22039- struct thread_info *tinfo, int *graph)
22040+ struct task_struct *task, int *graph)
22041 { }
22042 #endif
22043
22044@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22045 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
22046 */
22047
22048-static inline int valid_stack_ptr(struct thread_info *tinfo,
22049- void *p, unsigned int size, void *end)
22050+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
22051 {
22052- void *t = tinfo;
22053 if (end) {
22054 if (p < end && p >= (end-THREAD_SIZE))
22055 return 1;
22056@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
22057 }
22058
22059 unsigned long
22060-print_context_stack(struct thread_info *tinfo,
22061+print_context_stack(struct task_struct *task, void *stack_start,
22062 unsigned long *stack, unsigned long bp,
22063 const struct stacktrace_ops *ops, void *data,
22064 unsigned long *end, int *graph)
22065 {
22066 struct stack_frame *frame = (struct stack_frame *)bp;
22067
22068- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
22069+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
22070 unsigned long addr;
22071
22072 addr = *stack;
22073@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
22074 } else {
22075 ops->address(data, addr, 0);
22076 }
22077- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22078+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22079 }
22080 stack++;
22081 }
22082@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
22083 EXPORT_SYMBOL_GPL(print_context_stack);
22084
22085 unsigned long
22086-print_context_stack_bp(struct thread_info *tinfo,
22087+print_context_stack_bp(struct task_struct *task, void *stack_start,
22088 unsigned long *stack, unsigned long bp,
22089 const struct stacktrace_ops *ops, void *data,
22090 unsigned long *end, int *graph)
22091@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22092 struct stack_frame *frame = (struct stack_frame *)bp;
22093 unsigned long *ret_addr = &frame->return_address;
22094
22095- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
22096+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
22097 unsigned long addr = *ret_addr;
22098
22099 if (!__kernel_text_address(addr))
22100@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22101 ops->address(data, addr, 1);
22102 frame = frame->next_frame;
22103 ret_addr = &frame->return_address;
22104- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22105+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22106 }
22107
22108 return (unsigned long)frame;
22109@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
22110 static void print_trace_address(void *data, unsigned long addr, int reliable)
22111 {
22112 touch_nmi_watchdog();
22113- printk(data);
22114+ printk("%s", (char *)data);
22115 printk_stack_address(addr, reliable);
22116 }
22117
22118@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
22119 EXPORT_SYMBOL_GPL(oops_begin);
22120 NOKPROBE_SYMBOL(oops_begin);
22121
22122+extern void gr_handle_kernel_exploit(void);
22123+
22124 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22125 {
22126 if (regs && kexec_should_crash(current))
22127@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22128 panic("Fatal exception in interrupt");
22129 if (panic_on_oops)
22130 panic("Fatal exception");
22131- do_exit(signr);
22132+
22133+ gr_handle_kernel_exploit();
22134+
22135+ do_group_exit(signr);
22136 }
22137 NOKPROBE_SYMBOL(oops_end);
22138
22139@@ -275,7 +279,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
22140 print_modules();
22141 show_regs(regs);
22142 #ifdef CONFIG_X86_32
22143- if (user_mode_vm(regs)) {
22144+ if (user_mode(regs)) {
22145 sp = regs->sp;
22146 ss = regs->ss & 0xffff;
22147 } else {
22148@@ -304,7 +308,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22149 unsigned long flags = oops_begin();
22150 int sig = SIGSEGV;
22151
22152- if (!user_mode_vm(regs))
22153+ if (!user_mode(regs))
22154 report_bug(regs->ip, regs);
22155
22156 if (__die(str, regs, err))
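
The dumpstack.c changes stop deriving stack bounds from a thread_info kept on the stack itself (which an overflow could have corrupted) and instead pass the stack's base address down to valid_stack_ptr(). The check reduces to an interval test against a THREAD_SIZE window; a standalone rendering (the 8 KiB THREAD_SIZE and the omission of the irq-stack end case are simplifications):

#include <stdio.h>

#define THREAD_SIZE 8192

static int valid_stack_ptr(char *start, char *p, unsigned int size)
{
        return p >= start && p + size <= start + THREAD_SIZE;
}

int main(void)
{
        static char stack[THREAD_SIZE];

        printf("%d\n", valid_stack_ptr(stack, stack + 100, 8));  /* 1 */
        printf("%d\n", valid_stack_ptr(stack, stack + 8190, 8)); /* 0 */
        return 0;
}
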
22157diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22158index 5abd4cd..c65733b 100644
22159--- a/arch/x86/kernel/dumpstack_32.c
22160+++ b/arch/x86/kernel/dumpstack_32.c
22161@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22162 bp = stack_frame(task, regs);
22163
22164 for (;;) {
22165- struct thread_info *context;
22166+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22167 void *end_stack;
22168
22169 end_stack = is_hardirq_stack(stack, cpu);
22170 if (!end_stack)
22171 end_stack = is_softirq_stack(stack, cpu);
22172
22173- context = task_thread_info(task);
22174- bp = ops->walk_stack(context, stack, bp, ops, data,
22175+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22176 end_stack, &graph);
22177
22178 /* Stop if not on irq stack */
22179@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22180 int i;
22181
22182 show_regs_print_info(KERN_EMERG);
22183- __show_regs(regs, !user_mode_vm(regs));
22184+ __show_regs(regs, !user_mode(regs));
22185
22186 /*
22187 * When in-kernel, we also print out the stack and code at the
22188 * time of the fault..
22189 */
22190- if (!user_mode_vm(regs)) {
22191+ if (!user_mode(regs)) {
22192 unsigned int code_prologue = code_bytes * 43 / 64;
22193 unsigned int code_len = code_bytes;
22194 unsigned char c;
22195 u8 *ip;
22196+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22197
22198 pr_emerg("Stack:\n");
22199 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22200
22201 pr_emerg("Code:");
22202
22203- ip = (u8 *)regs->ip - code_prologue;
22204+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22205 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22206 /* try starting at IP */
22207- ip = (u8 *)regs->ip;
22208+ ip = (u8 *)regs->ip + cs_base;
22209 code_len = code_len - code_prologue + 1;
22210 }
22211 for (i = 0; i < code_len; i++, ip++) {
22212@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22213 pr_cont(" Bad EIP value.");
22214 break;
22215 }
22216- if (ip == (u8 *)regs->ip)
22217+ if (ip == (u8 *)regs->ip + cs_base)
22218 pr_cont(" <%02x>", c);
22219 else
22220 pr_cont(" %02x", c);
22221@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22222 {
22223 unsigned short ud2;
22224
22225+ ip = ktla_ktva(ip);
22226 if (ip < PAGE_OFFSET)
22227 return 0;
22228 if (probe_kernel_address((unsigned short *)ip, ud2))
22229@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22230
22231 return ud2 == 0x0b0f;
22232 }
22233+
22234+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22235+void pax_check_alloca(unsigned long size)
22236+{
22237+ unsigned long sp = (unsigned long)&sp, stack_left;
22238+
22239+ /* all kernel stacks are of the same size */
22240+ stack_left = sp & (THREAD_SIZE - 1);
22241+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22242+}
22243+EXPORT_SYMBOL(pax_check_alloca);
22244+#endif
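
pax_check_alloca() on 32-bit relies on all kernel stacks being THREAD_SIZE-sized and THREAD_SIZE-aligned, so sp & (THREAD_SIZE - 1) is precisely the space still unused below the stack pointer. The arithmetic in isolation (the base address is invented for the demo):

#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192        /* power of two, as in the kernel */

int main(void)
{
        /* any THREAD_SIZE-aligned base will do for the demo */
        uintptr_t base = (uintptr_t)0x7f0000004000ULL;
        uintptr_t sp = base + 5000;     /* pretend stack pointer */

        /* bytes between the bottom of the stack and sp */
        printf("%lu\n", (unsigned long)(sp & (THREAD_SIZE - 1)));
        return 0;
}

The BUG_ON above then insists any alloca leave at least 256 bytes of that headroom intact.
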
22245diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22246index ff86f19..73eabf4 100644
22247--- a/arch/x86/kernel/dumpstack_64.c
22248+++ b/arch/x86/kernel/dumpstack_64.c
22249@@ -153,12 +153,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22250 const struct stacktrace_ops *ops, void *data)
22251 {
22252 const unsigned cpu = get_cpu();
22253- struct thread_info *tinfo;
22254 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22255 unsigned long dummy;
22256 unsigned used = 0;
22257 int graph = 0;
22258 int done = 0;
22259+ void *stack_start;
22260
22261 if (!task)
22262 task = current;
22263@@ -179,7 +179,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22264 * current stack address. If the stacks consist of nested
22265 * exceptions
22266 */
22267- tinfo = task_thread_info(task);
22268 while (!done) {
22269 unsigned long *stack_end;
22270 enum stack_type stype;
22271@@ -202,7 +201,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22272 if (ops->stack(data, id) < 0)
22273 break;
22274
22275- bp = ops->walk_stack(tinfo, stack, bp, ops,
22276+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22277 data, stack_end, &graph);
22278 ops->stack(data, "<EOE>");
22279 /*
22280@@ -210,6 +209,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22281 * second-to-last pointer (index -2 to end) in the
22282 * exception stack:
22283 */
22284+ if ((u16)stack_end[-1] != __KERNEL_DS)
22285+ goto out;
22286 stack = (unsigned long *) stack_end[-2];
22287 done = 0;
22288 break;
22289@@ -218,7 +219,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22290
22291 if (ops->stack(data, "IRQ") < 0)
22292 break;
22293- bp = ops->walk_stack(tinfo, stack, bp,
22294+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22295 ops, data, stack_end, &graph);
22296 /*
22297 * We link to the next stack (which would be
22298@@ -240,7 +241,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22299 /*
22300 * This handles the process stack:
22301 */
22302- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22303+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22304+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22305+out:
22306 put_cpu();
22307 }
22308 EXPORT_SYMBOL(dump_trace);
22309@@ -344,8 +347,55 @@ int is_valid_bugaddr(unsigned long ip)
22310 {
22311 unsigned short ud2;
22312
22313- if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
22314+ if (probe_kernel_address((unsigned short *)ip, ud2))
22315 return 0;
22316
22317 return ud2 == 0x0b0f;
22318 }
22319+
22320+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22321+void pax_check_alloca(unsigned long size)
22322+{
22323+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22324+ unsigned cpu, used;
22325+ char *id;
22326+
22327+ /* check the process stack first */
22328+ stack_start = (unsigned long)task_stack_page(current);
22329+ stack_end = stack_start + THREAD_SIZE;
22330+ if (likely(stack_start <= sp && sp < stack_end)) {
22331+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22332+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22333+ return;
22334+ }
22335+
22336+ cpu = get_cpu();
22337+
22338+ /* check the irq stacks */
22339+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22340+ stack_start = stack_end - IRQ_STACK_SIZE;
22341+ if (stack_start <= sp && sp < stack_end) {
22342+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22343+ put_cpu();
22344+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22345+ return;
22346+ }
22347+
22348+ /* check the exception stacks */
22349+ used = 0;
22350+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22351+ stack_start = stack_end - EXCEPTION_STKSZ;
22352+ if (stack_end && stack_start <= sp && sp < stack_end) {
22353+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22354+ put_cpu();
22355+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22356+ return;
22357+ }
22358+
22359+ put_cpu();
22360+
22361+ /* unknown stack */
22362+ BUG();
22363+}
22364+EXPORT_SYMBOL(pax_check_alloca);
22365+#endif
22366diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22367index dd2f07a..845dc05 100644
22368--- a/arch/x86/kernel/e820.c
22369+++ b/arch/x86/kernel/e820.c
22370@@ -802,8 +802,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
22371
22372 static void early_panic(char *msg)
22373 {
22374- early_printk(msg);
22375- panic(msg);
22376+ early_printk("%s", msg);
22377+ panic("%s", msg);
22378 }
22379
22380 static int userdef __initdata;
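
early_panic() was forwarding its message, which can carry text from the kernel command line, as the format argument of early_printk() and panic(); the fix routes it through "%s", the same change applied to printk(data) in dumpstack.c above. The bug class in miniature:

#include <stdio.h>

int main(void)
{
        const char *msg = "disk 100% full";     /* payload contains '%' */

        /* printf(msg);  -- would parse "% f" as a conversion: UB */
        printf("%s\n", msg);    /* safe: msg is data, not a format */
        return 0;
}
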
22381diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
22382index 01d1c18..8073693 100644
22383--- a/arch/x86/kernel/early_printk.c
22384+++ b/arch/x86/kernel/early_printk.c
22385@@ -7,6 +7,7 @@
22386 #include <linux/pci_regs.h>
22387 #include <linux/pci_ids.h>
22388 #include <linux/errno.h>
22389+#include <linux/sched.h>
22390 #include <asm/io.h>
22391 #include <asm/processor.h>
22392 #include <asm/fcntl.h>
22393diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
22394index 000d419..8f66802 100644
22395--- a/arch/x86/kernel/entry_32.S
22396+++ b/arch/x86/kernel/entry_32.S
22397@@ -177,13 +177,154 @@
22398 /*CFI_REL_OFFSET gs, PT_GS*/
22399 .endm
22400 .macro SET_KERNEL_GS reg
22401+
22402+#ifdef CONFIG_CC_STACKPROTECTOR
22403 movl $(__KERNEL_STACK_CANARY), \reg
22404+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
22405+ movl $(__USER_DS), \reg
22406+#else
22407+ xorl \reg, \reg
22408+#endif
22409+
22410 movl \reg, %gs
22411 .endm
22412
22413 #endif /* CONFIG_X86_32_LAZY_GS */
22414
22415-.macro SAVE_ALL
22416+.macro pax_enter_kernel
22417+#ifdef CONFIG_PAX_KERNEXEC
22418+ call pax_enter_kernel
22419+#endif
22420+.endm
22421+
22422+.macro pax_exit_kernel
22423+#ifdef CONFIG_PAX_KERNEXEC
22424+ call pax_exit_kernel
22425+#endif
22426+.endm
22427+
22428+#ifdef CONFIG_PAX_KERNEXEC
22429+ENTRY(pax_enter_kernel)
22430+#ifdef CONFIG_PARAVIRT
22431+ pushl %eax
22432+ pushl %ecx
22433+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
22434+ mov %eax, %esi
22435+#else
22436+ mov %cr0, %esi
22437+#endif
22438+ bts $16, %esi
22439+ jnc 1f
22440+ mov %cs, %esi
22441+ cmp $__KERNEL_CS, %esi
22442+ jz 3f
22443+ ljmp $__KERNEL_CS, $3f
22444+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
22445+2:
22446+#ifdef CONFIG_PARAVIRT
22447+ mov %esi, %eax
22448+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
22449+#else
22450+ mov %esi, %cr0
22451+#endif
22452+3:
22453+#ifdef CONFIG_PARAVIRT
22454+ popl %ecx
22455+ popl %eax
22456+#endif
22457+ ret
22458+ENDPROC(pax_enter_kernel)
22459+
22460+ENTRY(pax_exit_kernel)
22461+#ifdef CONFIG_PARAVIRT
22462+ pushl %eax
22463+ pushl %ecx
22464+#endif
22465+ mov %cs, %esi
22466+ cmp $__KERNEXEC_KERNEL_CS, %esi
22467+ jnz 2f
22468+#ifdef CONFIG_PARAVIRT
22469+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
22470+ mov %eax, %esi
22471+#else
22472+ mov %cr0, %esi
22473+#endif
22474+ btr $16, %esi
22475+ ljmp $__KERNEL_CS, $1f
22476+1:
22477+#ifdef CONFIG_PARAVIRT
22478+ mov %esi, %eax
22479+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
22480+#else
22481+ mov %esi, %cr0
22482+#endif
22483+2:
22484+#ifdef CONFIG_PARAVIRT
22485+ popl %ecx
22486+ popl %eax
22487+#endif
22488+ ret
22489+ENDPROC(pax_exit_kernel)
22490+#endif
22491+
22492+ .macro pax_erase_kstack
22493+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22494+ call pax_erase_kstack
22495+#endif
22496+ .endm
22497+
22498+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22499+/*
22500+ * ebp: thread_info
22501+ */
22502+ENTRY(pax_erase_kstack)
22503+ pushl %edi
22504+ pushl %ecx
22505+ pushl %eax
22506+
22507+ mov TI_lowest_stack(%ebp), %edi
22508+ mov $-0xBEEF, %eax
22509+ std
22510+
22511+1: mov %edi, %ecx
22512+ and $THREAD_SIZE_asm - 1, %ecx
22513+ shr $2, %ecx
22514+ repne scasl
22515+ jecxz 2f
22516+
22517+ cmp $2*16, %ecx
22518+ jc 2f
22519+
22520+ mov $2*16, %ecx
22521+ repe scasl
22522+ jecxz 2f
22523+ jne 1b
22524+
22525+2: cld
22526+ or $2*4, %edi
22527+ mov %esp, %ecx
22528+ sub %edi, %ecx
22529+
22530+ cmp $THREAD_SIZE_asm, %ecx
22531+ jb 3f
22532+ ud2
22533+3:
22534+
22535+ shr $2, %ecx
22536+ rep stosl
22537+
22538+ mov TI_task_thread_sp0(%ebp), %edi
22539+ sub $128, %edi
22540+ mov %edi, TI_lowest_stack(%ebp)
22541+
22542+ popl %eax
22543+ popl %ecx
22544+ popl %edi
22545+ ret
22546+ENDPROC(pax_erase_kstack)
22547+#endif
22548+
22549+.macro __SAVE_ALL _DS
22550 cld
22551 PUSH_GS
22552 pushl_cfi %fs
22553@@ -206,7 +347,7 @@
22554 CFI_REL_OFFSET ecx, 0
22555 pushl_cfi %ebx
22556 CFI_REL_OFFSET ebx, 0
22557- movl $(__USER_DS), %edx
22558+ movl $\_DS, %edx
22559 movl %edx, %ds
22560 movl %edx, %es
22561 movl $(__KERNEL_PERCPU), %edx
22562@@ -214,6 +355,15 @@
22563 SET_KERNEL_GS %edx
22564 .endm
22565
22566+.macro SAVE_ALL
22567+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22568+ __SAVE_ALL __KERNEL_DS
22569+ pax_enter_kernel
22570+#else
22571+ __SAVE_ALL __USER_DS
22572+#endif
22573+.endm
22574+
22575 .macro RESTORE_INT_REGS
22576 popl_cfi %ebx
22577 CFI_RESTORE ebx
22578@@ -297,7 +447,7 @@ ENTRY(ret_from_fork)
22579 popfl_cfi
22580 jmp syscall_exit
22581 CFI_ENDPROC
22582-END(ret_from_fork)
22583+ENDPROC(ret_from_fork)
22584
22585 ENTRY(ret_from_kernel_thread)
22586 CFI_STARTPROC
22587@@ -340,7 +490,15 @@ ret_from_intr:
22588 andl $SEGMENT_RPL_MASK, %eax
22589 #endif
22590 cmpl $USER_RPL, %eax
22591+
22592+#ifdef CONFIG_PAX_KERNEXEC
22593+ jae resume_userspace
22594+
22595+ pax_exit_kernel
22596+ jmp resume_kernel
22597+#else
22598 jb resume_kernel # not returning to v8086 or userspace
22599+#endif
22600
22601 ENTRY(resume_userspace)
22602 LOCKDEP_SYS_EXIT
22603@@ -352,8 +510,8 @@ ENTRY(resume_userspace)
22604 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
22605 # int/exception return?
22606 jne work_pending
22607- jmp restore_all
22608-END(ret_from_exception)
22609+ jmp restore_all_pax
22610+ENDPROC(ret_from_exception)
22611
22612 #ifdef CONFIG_PREEMPT
22613 ENTRY(resume_kernel)
22614@@ -365,7 +523,7 @@ need_resched:
22615 jz restore_all
22616 call preempt_schedule_irq
22617 jmp need_resched
22618-END(resume_kernel)
22619+ENDPROC(resume_kernel)
22620 #endif
22621 CFI_ENDPROC
22622
22623@@ -395,30 +553,45 @@ sysenter_past_esp:
22624 /*CFI_REL_OFFSET cs, 0*/
22625 /*
22626 * Push current_thread_info()->sysenter_return to the stack.
22627- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
22628- * pushed above; +8 corresponds to copy_thread's esp0 setting.
22629 */
22630- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
22631+ pushl_cfi $0
22632 CFI_REL_OFFSET eip, 0
22633
22634 pushl_cfi %eax
22635 SAVE_ALL
22636+ GET_THREAD_INFO(%ebp)
22637+ movl TI_sysenter_return(%ebp),%ebp
22638+ movl %ebp,PT_EIP(%esp)
22639 ENABLE_INTERRUPTS(CLBR_NONE)
22640
22641 /*
22642 * Load the potential sixth argument from user stack.
22643 * Careful about security.
22644 */
22645+ movl PT_OLDESP(%esp),%ebp
22646+
22647+#ifdef CONFIG_PAX_MEMORY_UDEREF
22648+ mov PT_OLDSS(%esp),%ds
22649+1: movl %ds:(%ebp),%ebp
22650+ push %ss
22651+ pop %ds
22652+#else
22653 cmpl $__PAGE_OFFSET-3,%ebp
22654 jae syscall_fault
22655 ASM_STAC
22656 1: movl (%ebp),%ebp
22657 ASM_CLAC
22658+#endif
22659+
22660 movl %ebp,PT_EBP(%esp)
22661 _ASM_EXTABLE(1b,syscall_fault)
22662
22663 GET_THREAD_INFO(%ebp)
22664
22665+#ifdef CONFIG_PAX_RANDKSTACK
22666+ pax_erase_kstack
22667+#endif
22668+
22669 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22670 jnz sysenter_audit
22671 sysenter_do_call:
22672@@ -434,12 +607,24 @@ sysenter_after_call:
22673 testl $_TIF_ALLWORK_MASK, %ecx
22674 jne sysexit_audit
22675 sysenter_exit:
22676+
22677+#ifdef CONFIG_PAX_RANDKSTACK
22678+ pushl_cfi %eax
22679+ movl %esp, %eax
22680+ call pax_randomize_kstack
22681+ popl_cfi %eax
22682+#endif
22683+
22684+ pax_erase_kstack
22685+
22686 /* if something modifies registers it must also disable sysexit */
22687 movl PT_EIP(%esp), %edx
22688 movl PT_OLDESP(%esp), %ecx
22689 xorl %ebp,%ebp
22690 TRACE_IRQS_ON
22691 1: mov PT_FS(%esp), %fs
22692+2: mov PT_DS(%esp), %ds
22693+3: mov PT_ES(%esp), %es
22694 PTGS_TO_GS
22695 ENABLE_INTERRUPTS_SYSEXIT
22696
22697@@ -453,6 +638,9 @@ sysenter_audit:
22698 pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
22699 pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
22700 call __audit_syscall_entry
22701+
22702+ pax_erase_kstack
22703+
22704 popl_cfi %ecx /* get that remapped edx off the stack */
22705 popl_cfi %ecx /* get that remapped esi off the stack */
22706 movl PT_EAX(%esp),%eax /* reload syscall number */
22707@@ -479,10 +667,16 @@ sysexit_audit:
22708
22709 CFI_ENDPROC
22710 .pushsection .fixup,"ax"
22711-2: movl $0,PT_FS(%esp)
22712+4: movl $0,PT_FS(%esp)
22713+ jmp 1b
22714+5: movl $0,PT_DS(%esp)
22715+ jmp 1b
22716+6: movl $0,PT_ES(%esp)
22717 jmp 1b
22718 .popsection
22719- _ASM_EXTABLE(1b,2b)
22720+ _ASM_EXTABLE(1b,4b)
22721+ _ASM_EXTABLE(2b,5b)
22722+ _ASM_EXTABLE(3b,6b)
22723 PTGS_TO_GS_EX
22724 ENDPROC(ia32_sysenter_target)
22725
22726@@ -493,6 +687,11 @@ ENTRY(system_call)
22727 pushl_cfi %eax # save orig_eax
22728 SAVE_ALL
22729 GET_THREAD_INFO(%ebp)
22730+
22731+#ifdef CONFIG_PAX_RANDKSTACK
22732+ pax_erase_kstack
22733+#endif
22734+
22735 # system call tracing in operation / emulation
22736 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
22737 jnz syscall_trace_entry
22738@@ -512,6 +711,15 @@ syscall_exit:
22739 testl $_TIF_ALLWORK_MASK, %ecx # current->work
22740 jne syscall_exit_work
22741
22742+restore_all_pax:
22743+
22744+#ifdef CONFIG_PAX_RANDKSTACK
22745+ movl %esp, %eax
22746+ call pax_randomize_kstack
22747+#endif
22748+
22749+ pax_erase_kstack
22750+
22751 restore_all:
22752 TRACE_IRQS_IRET
22753 restore_all_notrace:
22754@@ -566,14 +774,34 @@ ldt_ss:
22755 * compensating for the offset by changing to the ESPFIX segment with
22756 * a base address that matches for the difference.
22757 */
22758-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
22759+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
22760 mov %esp, %edx /* load kernel esp */
22761 mov PT_OLDESP(%esp), %eax /* load userspace esp */
22762 mov %dx, %ax /* eax: new kernel esp */
22763 sub %eax, %edx /* offset (low word is 0) */
22764+#ifdef CONFIG_SMP
22765+ movl PER_CPU_VAR(cpu_number), %ebx
22766+ shll $PAGE_SHIFT_asm, %ebx
22767+ addl $cpu_gdt_table, %ebx
22768+#else
22769+ movl $cpu_gdt_table, %ebx
22770+#endif
22771 shr $16, %edx
22772- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
22773- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
22774+
22775+#ifdef CONFIG_PAX_KERNEXEC
22776+ mov %cr0, %esi
22777+ btr $16, %esi
22778+ mov %esi, %cr0
22779+#endif
22780+
22781+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
22782+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
22783+
22784+#ifdef CONFIG_PAX_KERNEXEC
22785+ bts $16, %esi
22786+ mov %esi, %cr0
22787+#endif
22788+
22789 pushl_cfi $__ESPFIX_SS
22790 pushl_cfi %eax /* new kernel esp */
22791 /* Disable interrupts, but do not irqtrace this section: we
22792@@ -603,20 +831,18 @@ work_resched:
22793 movl TI_flags(%ebp), %ecx
22794 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
22795 # than syscall tracing?
22796- jz restore_all
22797+ jz restore_all_pax
22798 testb $_TIF_NEED_RESCHED, %cl
22799 jnz work_resched
22800
22801 work_notifysig: # deal with pending signals and
22802 # notify-resume requests
22803+ movl %esp, %eax
22804 #ifdef CONFIG_VM86
22805 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
22806- movl %esp, %eax
22807 jne work_notifysig_v86 # returning to kernel-space or
22808 # vm86-space
22809 1:
22810-#else
22811- movl %esp, %eax
22812 #endif
22813 TRACE_IRQS_ON
22814 ENABLE_INTERRUPTS(CLBR_NONE)
22815@@ -637,7 +863,7 @@ work_notifysig_v86:
22816 movl %eax, %esp
22817 jmp 1b
22818 #endif
22819-END(work_pending)
22820+ENDPROC(work_pending)
22821
22822 # perform syscall exit tracing
22823 ALIGN
22824@@ -645,11 +871,14 @@ syscall_trace_entry:
22825 movl $-ENOSYS,PT_EAX(%esp)
22826 movl %esp, %eax
22827 call syscall_trace_enter
22828+
22829+ pax_erase_kstack
22830+
22831 /* What it returned is what we'll actually use. */
22832 cmpl $(NR_syscalls), %eax
22833 jnae syscall_call
22834 jmp syscall_exit
22835-END(syscall_trace_entry)
22836+ENDPROC(syscall_trace_entry)
22837
22838 # perform syscall exit tracing
22839 ALIGN
22840@@ -662,26 +891,30 @@ syscall_exit_work:
22841 movl %esp, %eax
22842 call syscall_trace_leave
22843 jmp resume_userspace
22844-END(syscall_exit_work)
22845+ENDPROC(syscall_exit_work)
22846 CFI_ENDPROC
22847
22848 RING0_INT_FRAME # can't unwind into user space anyway
22849 syscall_fault:
22850+#ifdef CONFIG_PAX_MEMORY_UDEREF
22851+ push %ss
22852+ pop %ds
22853+#endif
22854 ASM_CLAC
22855 GET_THREAD_INFO(%ebp)
22856 movl $-EFAULT,PT_EAX(%esp)
22857 jmp resume_userspace
22858-END(syscall_fault)
22859+ENDPROC(syscall_fault)
22860
22861 syscall_badsys:
22862 movl $-ENOSYS,%eax
22863 jmp syscall_after_call
22864-END(syscall_badsys)
22865+ENDPROC(syscall_badsys)
22866
22867 sysenter_badsys:
22868 movl $-ENOSYS,%eax
22869 jmp sysenter_after_call
22870-END(sysenter_badsys)
22871+ENDPROC(sysenter_badsys)
22872 CFI_ENDPROC
22873
22874 .macro FIXUP_ESPFIX_STACK
22875@@ -694,8 +927,15 @@ END(sysenter_badsys)
22876 */
22877 #ifdef CONFIG_X86_ESPFIX32
22878 /* fixup the stack */
22879- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
22880- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
22881+#ifdef CONFIG_SMP
22882+ movl PER_CPU_VAR(cpu_number), %ebx
22883+ shll $PAGE_SHIFT_asm, %ebx
22884+ addl $cpu_gdt_table, %ebx
22885+#else
22886+ movl $cpu_gdt_table, %ebx
22887+#endif
22888+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
22889+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
22890 shl $16, %eax
22891 addl %esp, %eax /* the adjusted stack pointer */
22892 pushl_cfi $__KERNEL_DS
22893@@ -751,7 +991,7 @@ vector=vector+1
22894 .endr
22895 2: jmp common_interrupt
22896 .endr
22897-END(irq_entries_start)
22898+ENDPROC(irq_entries_start)
22899
22900 .previous
22901 END(interrupt)
22902@@ -808,7 +1048,7 @@ ENTRY(coprocessor_error)
22903 pushl_cfi $do_coprocessor_error
22904 jmp error_code
22905 CFI_ENDPROC
22906-END(coprocessor_error)
22907+ENDPROC(coprocessor_error)
22908
22909 ENTRY(simd_coprocessor_error)
22910 RING0_INT_FRAME
22911@@ -821,7 +1061,7 @@ ENTRY(simd_coprocessor_error)
22912 .section .altinstructions,"a"
22913 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
22914 .previous
22915-.section .altinstr_replacement,"ax"
22916+.section .altinstr_replacement,"a"
22917 663: pushl $do_simd_coprocessor_error
22918 664:
22919 .previous
22920@@ -830,7 +1070,7 @@ ENTRY(simd_coprocessor_error)
22921 #endif
22922 jmp error_code
22923 CFI_ENDPROC
22924-END(simd_coprocessor_error)
22925+ENDPROC(simd_coprocessor_error)
22926
22927 ENTRY(device_not_available)
22928 RING0_INT_FRAME
22929@@ -839,18 +1079,18 @@ ENTRY(device_not_available)
22930 pushl_cfi $do_device_not_available
22931 jmp error_code
22932 CFI_ENDPROC
22933-END(device_not_available)
22934+ENDPROC(device_not_available)
22935
22936 #ifdef CONFIG_PARAVIRT
22937 ENTRY(native_iret)
22938 iret
22939 _ASM_EXTABLE(native_iret, iret_exc)
22940-END(native_iret)
22941+ENDPROC(native_iret)
22942
22943 ENTRY(native_irq_enable_sysexit)
22944 sti
22945 sysexit
22946-END(native_irq_enable_sysexit)
22947+ENDPROC(native_irq_enable_sysexit)
22948 #endif
22949
22950 ENTRY(overflow)
22951@@ -860,7 +1100,7 @@ ENTRY(overflow)
22952 pushl_cfi $do_overflow
22953 jmp error_code
22954 CFI_ENDPROC
22955-END(overflow)
22956+ENDPROC(overflow)
22957
22958 ENTRY(bounds)
22959 RING0_INT_FRAME
22960@@ -869,7 +1109,7 @@ ENTRY(bounds)
22961 pushl_cfi $do_bounds
22962 jmp error_code
22963 CFI_ENDPROC
22964-END(bounds)
22965+ENDPROC(bounds)
22966
22967 ENTRY(invalid_op)
22968 RING0_INT_FRAME
22969@@ -878,7 +1118,7 @@ ENTRY(invalid_op)
22970 pushl_cfi $do_invalid_op
22971 jmp error_code
22972 CFI_ENDPROC
22973-END(invalid_op)
22974+ENDPROC(invalid_op)
22975
22976 ENTRY(coprocessor_segment_overrun)
22977 RING0_INT_FRAME
22978@@ -887,7 +1127,7 @@ ENTRY(coprocessor_segment_overrun)
22979 pushl_cfi $do_coprocessor_segment_overrun
22980 jmp error_code
22981 CFI_ENDPROC
22982-END(coprocessor_segment_overrun)
22983+ENDPROC(coprocessor_segment_overrun)
22984
22985 ENTRY(invalid_TSS)
22986 RING0_EC_FRAME
22987@@ -895,7 +1135,7 @@ ENTRY(invalid_TSS)
22988 pushl_cfi $do_invalid_TSS
22989 jmp error_code
22990 CFI_ENDPROC
22991-END(invalid_TSS)
22992+ENDPROC(invalid_TSS)
22993
22994 ENTRY(segment_not_present)
22995 RING0_EC_FRAME
22996@@ -903,7 +1143,7 @@ ENTRY(segment_not_present)
22997 pushl_cfi $do_segment_not_present
22998 jmp error_code
22999 CFI_ENDPROC
23000-END(segment_not_present)
23001+ENDPROC(segment_not_present)
23002
23003 ENTRY(stack_segment)
23004 RING0_EC_FRAME
23005@@ -911,7 +1151,7 @@ ENTRY(stack_segment)
23006 pushl_cfi $do_stack_segment
23007 jmp error_code
23008 CFI_ENDPROC
23009-END(stack_segment)
23010+ENDPROC(stack_segment)
23011
23012 ENTRY(alignment_check)
23013 RING0_EC_FRAME
23014@@ -919,7 +1159,7 @@ ENTRY(alignment_check)
23015 pushl_cfi $do_alignment_check
23016 jmp error_code
23017 CFI_ENDPROC
23018-END(alignment_check)
23019+ENDPROC(alignment_check)
23020
23021 ENTRY(divide_error)
23022 RING0_INT_FRAME
23023@@ -928,7 +1168,7 @@ ENTRY(divide_error)
23024 pushl_cfi $do_divide_error
23025 jmp error_code
23026 CFI_ENDPROC
23027-END(divide_error)
23028+ENDPROC(divide_error)
23029
23030 #ifdef CONFIG_X86_MCE
23031 ENTRY(machine_check)
23032@@ -938,7 +1178,7 @@ ENTRY(machine_check)
23033 pushl_cfi machine_check_vector
23034 jmp error_code
23035 CFI_ENDPROC
23036-END(machine_check)
23037+ENDPROC(machine_check)
23038 #endif
23039
23040 ENTRY(spurious_interrupt_bug)
23041@@ -948,7 +1188,7 @@ ENTRY(spurious_interrupt_bug)
23042 pushl_cfi $do_spurious_interrupt_bug
23043 jmp error_code
23044 CFI_ENDPROC
23045-END(spurious_interrupt_bug)
23046+ENDPROC(spurious_interrupt_bug)
23047
23048 #ifdef CONFIG_XEN
23049 /* Xen doesn't set %esp to be precisely what the normal sysenter
23050@@ -1054,7 +1294,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
23051
23052 ENTRY(mcount)
23053 ret
23054-END(mcount)
23055+ENDPROC(mcount)
23056
23057 ENTRY(ftrace_caller)
23058 pushl %eax
23059@@ -1084,7 +1324,7 @@ ftrace_graph_call:
23060 .globl ftrace_stub
23061 ftrace_stub:
23062 ret
23063-END(ftrace_caller)
23064+ENDPROC(ftrace_caller)
23065
23066 ENTRY(ftrace_regs_caller)
23067 pushf /* push flags before compare (in cs location) */
23068@@ -1182,7 +1422,7 @@ trace:
23069 popl %ecx
23070 popl %eax
23071 jmp ftrace_stub
23072-END(mcount)
23073+ENDPROC(mcount)
23074 #endif /* CONFIG_DYNAMIC_FTRACE */
23075 #endif /* CONFIG_FUNCTION_TRACER */
23076
23077@@ -1200,7 +1440,7 @@ ENTRY(ftrace_graph_caller)
23078 popl %ecx
23079 popl %eax
23080 ret
23081-END(ftrace_graph_caller)
23082+ENDPROC(ftrace_graph_caller)
23083
23084 .globl return_to_handler
23085 return_to_handler:
23086@@ -1261,15 +1501,18 @@ error_code:
23087 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
23088 REG_TO_PTGS %ecx
23089 SET_KERNEL_GS %ecx
23090- movl $(__USER_DS), %ecx
23091+ movl $(__KERNEL_DS), %ecx
23092 movl %ecx, %ds
23093 movl %ecx, %es
23094+
23095+ pax_enter_kernel
23096+
23097 TRACE_IRQS_OFF
23098 movl %esp,%eax # pt_regs pointer
23099 call *%edi
23100 jmp ret_from_exception
23101 CFI_ENDPROC
23102-END(page_fault)
23103+ENDPROC(page_fault)
23104
23105 /*
23106 * Debug traps and NMI can happen at the one SYSENTER instruction
23107@@ -1312,7 +1555,7 @@ debug_stack_correct:
23108 call do_debug
23109 jmp ret_from_exception
23110 CFI_ENDPROC
23111-END(debug)
23112+ENDPROC(debug)
23113
23114 /*
23115 * NMI is doubly nasty. It can happen _while_ we're handling
23116@@ -1352,6 +1595,9 @@ nmi_stack_correct:
23117 xorl %edx,%edx # zero error code
23118 movl %esp,%eax # pt_regs pointer
23119 call do_nmi
23120+
23121+ pax_exit_kernel
23122+
23123 jmp restore_all_notrace
23124 CFI_ENDPROC
23125
23126@@ -1389,13 +1635,16 @@ nmi_espfix_stack:
23127 FIXUP_ESPFIX_STACK # %eax == %esp
23128 xorl %edx,%edx # zero error code
23129 call do_nmi
23130+
23131+ pax_exit_kernel
23132+
23133 RESTORE_REGS
23134 lss 12+4(%esp), %esp # back to espfix stack
23135 CFI_ADJUST_CFA_OFFSET -24
23136 jmp irq_return
23137 #endif
23138 CFI_ENDPROC
23139-END(nmi)
23140+ENDPROC(nmi)
23141
23142 ENTRY(int3)
23143 RING0_INT_FRAME
23144@@ -1408,14 +1657,14 @@ ENTRY(int3)
23145 call do_int3
23146 jmp ret_from_exception
23147 CFI_ENDPROC
23148-END(int3)
23149+ENDPROC(int3)
23150
23151 ENTRY(general_protection)
23152 RING0_EC_FRAME
23153 pushl_cfi $do_general_protection
23154 jmp error_code
23155 CFI_ENDPROC
23156-END(general_protection)
23157+ENDPROC(general_protection)
23158
23159 #ifdef CONFIG_KVM_GUEST
23160 ENTRY(async_page_fault)
23161@@ -1424,6 +1673,6 @@ ENTRY(async_page_fault)
23162 pushl_cfi $do_async_page_fault
23163 jmp error_code
23164 CFI_ENDPROC
23165-END(async_page_fault)
23166+ENDPROC(async_page_fault)
23167 #endif
23168
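
The largest addition to entry_32.S is pax_erase_kstack, which on the way back to userspace repaints the portion of the kernel stack the syscall dirtied with a poison pattern, scanning for an existing run of poison so it rewrites no more than necessary. With the scan heuristics and register juggling stripped away, the core idea reads like this in C (the poison constant is the asm's $-0xBEEF shown as its unsigned 32-bit value; the bounding logic is heavily simplified):

#include <stdint.h>
#include <stdio.h>

#define POISON 0xffff4111u      /* mov $-0xBEEF, %eax as a u32 */

/* repaint [lowest, sp) so stale stack contents cannot leak */
static void erase_kstack(uint32_t *lowest, uint32_t *sp)
{
        while (lowest < sp)
                *lowest++ = POISON;
}

int main(void)
{
        uint32_t stack[16] = { 1, 2, 3, 4 };

        erase_kstack(stack, stack + 8);
        printf("%#x %#x\n", stack[0], stack[9]);  /* 0xffff4111 0 */
        return 0;
}
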
23169diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23170index 4ee9a23..c786610 100644
23171--- a/arch/x86/kernel/entry_64.S
23172+++ b/arch/x86/kernel/entry_64.S
23173@@ -59,6 +59,8 @@
23174 #include <asm/smap.h>
23175 #include <asm/pgtable_types.h>
23176 #include <linux/err.h>
23177+#include <asm/pgtable.h>
23178+#include <asm/alternative-asm.h>
23179
23180 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23181 #include <linux/elf-em.h>
23182@@ -81,6 +83,431 @@ ENTRY(native_usergs_sysret64)
23183 ENDPROC(native_usergs_sysret64)
23184 #endif /* CONFIG_PARAVIRT */
23185
23186+ .macro ljmpq sel, off
23187+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23188+ .byte 0x48; ljmp *1234f(%rip)
23189+ .pushsection .rodata
23190+ .align 16
23191+ 1234: .quad \off; .word \sel
23192+ .popsection
23193+#else
23194+ pushq $\sel
23195+ pushq $\off
23196+ lretq
23197+#endif
23198+ .endm
23199+
23200+ .macro pax_enter_kernel
23201+ pax_set_fptr_mask
23202+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23203+ call pax_enter_kernel
23204+#endif
23205+ .endm
23206+
23207+ .macro pax_exit_kernel
23208+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23209+ call pax_exit_kernel
23210+#endif
23211+
23212+ .endm
23213+
23214+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23215+ENTRY(pax_enter_kernel)
23216+ pushq %rdi
23217+
23218+#ifdef CONFIG_PARAVIRT
23219+ PV_SAVE_REGS(CLBR_RDI)
23220+#endif
23221+
23222+#ifdef CONFIG_PAX_KERNEXEC
23223+ GET_CR0_INTO_RDI
23224+ bts $16,%rdi
23225+ jnc 3f
23226+ mov %cs,%edi
23227+ cmp $__KERNEL_CS,%edi
23228+ jnz 2f
23229+1:
23230+#endif
23231+
23232+#ifdef CONFIG_PAX_MEMORY_UDEREF
23233+ 661: jmp 111f
23234+ .pushsection .altinstr_replacement, "a"
23235+ 662: ASM_NOP2
23236+ .popsection
23237+ .pushsection .altinstructions, "a"
23238+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23239+ .popsection
23240+ GET_CR3_INTO_RDI
23241+ cmp $0,%dil
23242+ jnz 112f
23243+ mov $__KERNEL_DS,%edi
23244+ mov %edi,%ss
23245+ jmp 111f
23246+112: cmp $1,%dil
23247+ jz 113f
23248+ ud2
23249+113: sub $4097,%rdi
23250+ bts $63,%rdi
23251+ SET_RDI_INTO_CR3
23252+ mov $__UDEREF_KERNEL_DS,%edi
23253+ mov %edi,%ss
23254+111:
23255+#endif
23256+
23257+#ifdef CONFIG_PARAVIRT
23258+ PV_RESTORE_REGS(CLBR_RDI)
23259+#endif
23260+
23261+ popq %rdi
23262+ pax_force_retaddr
23263+ retq
23264+
23265+#ifdef CONFIG_PAX_KERNEXEC
23266+2: ljmpq __KERNEL_CS,1b
23267+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23268+4: SET_RDI_INTO_CR0
23269+ jmp 1b
23270+#endif
23271+ENDPROC(pax_enter_kernel)
23272+
23273+ENTRY(pax_exit_kernel)
23274+ pushq %rdi
23275+
23276+#ifdef CONFIG_PARAVIRT
23277+ PV_SAVE_REGS(CLBR_RDI)
23278+#endif
23279+
23280+#ifdef CONFIG_PAX_KERNEXEC
23281+ mov %cs,%rdi
23282+ cmp $__KERNEXEC_KERNEL_CS,%edi
23283+ jz 2f
23284+ GET_CR0_INTO_RDI
23285+ bts $16,%rdi
23286+ jnc 4f
23287+1:
23288+#endif
23289+
23290+#ifdef CONFIG_PAX_MEMORY_UDEREF
23291+ 661: jmp 111f
23292+ .pushsection .altinstr_replacement, "a"
23293+ 662: ASM_NOP2
23294+ .popsection
23295+ .pushsection .altinstructions, "a"
23296+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23297+ .popsection
23298+ mov %ss,%edi
23299+ cmp $__UDEREF_KERNEL_DS,%edi
23300+ jnz 111f
23301+ GET_CR3_INTO_RDI
23302+ cmp $0,%dil
23303+ jz 112f
23304+ ud2
23305+112: add $4097,%rdi
23306+ bts $63,%rdi
23307+ SET_RDI_INTO_CR3
23308+ mov $__KERNEL_DS,%edi
23309+ mov %edi,%ss
23310+111:
23311+#endif
23312+
23313+#ifdef CONFIG_PARAVIRT
23314+ PV_RESTORE_REGS(CLBR_RDI);
23315+#endif
23316+
23317+ popq %rdi
23318+ pax_force_retaddr
23319+ retq
23320+
23321+#ifdef CONFIG_PAX_KERNEXEC
23322+2: GET_CR0_INTO_RDI
23323+ btr $16,%rdi
23324+ jnc 4f
23325+ ljmpq __KERNEL_CS,3f
23326+3: SET_RDI_INTO_CR0
23327+ jmp 1b
23328+4: ud2
23329+ jmp 4b
23330+#endif
23331+ENDPROC(pax_exit_kernel)
23332+#endif
23333+
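pax_enter_kernel/pax_exit_kernel above bundle two independent protections: KERNEXEC maintains the CR0.WP/CS invariant (bts $16 targets CR0.WP, with far jumps between __KERNEL_CS and __KERNEXEC_KERNEL_CS to match), while UDEREF switches CR3 between the per-CPU user and kernel PGDs. On PCID-capable hardware the 661:/662: alternative NOPs out the jmp so the PCID path runs. The CR3 arithmetic is compact because the two PGDs sit one page apart and userland runs as PCID 1, so a single constant converts between them. A minimal C sketch of what "sub $4097 / bts $63" encodes (names are descriptive, not the patch's):

    #define PCID_USER    1UL
    #define CR3_NOFLUSH  (1UL << 63)   /* bit 63: keep this PCID's TLB entries */

    static unsigned long user_cr3_to_kernel(unsigned long cr3)
    {
            /* kernel PGD sits one page below the user PGD; PCID drops 1 -> 0 */
            return (cr3 - PAGE_SIZE - PCID_USER) | CR3_NOFLUSH;
    }

    static unsigned long kernel_cr3_to_user(unsigned long cr3)
    {
            return (cr3 + PAGE_SIZE + PCID_USER) | CR3_NOFLUSH;
    }

The cmp $0,%dil / cmp $1,%dil checks assert which PCID is currently in CR3 and ud2 on anything unexpected, and the %ss reloads (__UDEREF_KERNEL_DS vs __KERNEL_DS) double as a cheap marker of which PGD is live, which is what pax_exit_kernel tests on the way out.
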
23334+ .macro pax_enter_kernel_user
23335+ pax_set_fptr_mask
23336+#ifdef CONFIG_PAX_MEMORY_UDEREF
23337+ call pax_enter_kernel_user
23338+#endif
23339+ .endm
23340+
23341+ .macro pax_exit_kernel_user
23342+#ifdef CONFIG_PAX_MEMORY_UDEREF
23343+ call pax_exit_kernel_user
23344+#endif
23345+#ifdef CONFIG_PAX_RANDKSTACK
23346+ pushq %rax
23347+ pushq %r11
23348+ call pax_randomize_kstack
23349+ popq %r11
23350+ popq %rax
23351+#endif
23352+ .endm
23353+
23354+#ifdef CONFIG_PAX_MEMORY_UDEREF
23355+ENTRY(pax_enter_kernel_user)
23356+ pushq %rdi
23357+ pushq %rbx
23358+
23359+#ifdef CONFIG_PARAVIRT
23360+ PV_SAVE_REGS(CLBR_RDI)
23361+#endif
23362+
23363+ 661: jmp 111f
23364+ .pushsection .altinstr_replacement, "a"
23365+ 662: ASM_NOP2
23366+ .popsection
23367+ .pushsection .altinstructions, "a"
23368+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23369+ .popsection
23370+ GET_CR3_INTO_RDI
23371+ cmp $1,%dil
23372+ jnz 4f
23373+ sub $4097,%rdi
23374+ bts $63,%rdi
23375+ SET_RDI_INTO_CR3
23376+ jmp 3f
23377+111:
23378+
23379+ GET_CR3_INTO_RDI
23380+ mov %rdi,%rbx
23381+ add $__START_KERNEL_map,%rbx
23382+ sub phys_base(%rip),%rbx
23383+
23384+#ifdef CONFIG_PARAVIRT
23385+ cmpl $0, pv_info+PARAVIRT_enabled
23386+ jz 1f
23387+ pushq %rdi
23388+ i = 0
23389+ .rept USER_PGD_PTRS
23390+ mov i*8(%rbx),%rsi
23391+ mov $0,%sil
23392+ lea i*8(%rbx),%rdi
23393+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23394+ i = i + 1
23395+ .endr
23396+ popq %rdi
23397+ jmp 2f
23398+1:
23399+#endif
23400+
23401+ i = 0
23402+ .rept USER_PGD_PTRS
23403+ movb $0,i*8(%rbx)
23404+ i = i + 1
23405+ .endr
23406+
23407+2: SET_RDI_INTO_CR3
23408+
23409+#ifdef CONFIG_PAX_KERNEXEC
23410+ GET_CR0_INTO_RDI
23411+ bts $16,%rdi
23412+ SET_RDI_INTO_CR0
23413+#endif
23414+
23415+3:
23416+
23417+#ifdef CONFIG_PARAVIRT
23418+ PV_RESTORE_REGS(CLBR_RDI)
23419+#endif
23420+
23421+ popq %rbx
23422+ popq %rdi
23423+ pax_force_retaddr
23424+ retq
23425+4: ud2
23426+ENDPROC(pax_enter_kernel_user)
23427+
23428+ENTRY(pax_exit_kernel_user)
23429+ pushq %rdi
23430+ pushq %rbx
23431+
23432+#ifdef CONFIG_PARAVIRT
23433+ PV_SAVE_REGS(CLBR_RDI)
23434+#endif
23435+
23436+ GET_CR3_INTO_RDI
23437+ 661: jmp 1f
23438+ .pushsection .altinstr_replacement, "a"
23439+ 662: ASM_NOP2
23440+ .popsection
23441+ .pushsection .altinstructions, "a"
23442+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23443+ .popsection
23444+ cmp $0,%dil
23445+ jnz 3f
23446+ add $4097,%rdi
23447+ bts $63,%rdi
23448+ SET_RDI_INTO_CR3
23449+ jmp 2f
23450+1:
23451+
23452+ mov %rdi,%rbx
23453+
23454+#ifdef CONFIG_PAX_KERNEXEC
23455+ GET_CR0_INTO_RDI
23456+ btr $16,%rdi
23457+ jnc 3f
23458+ SET_RDI_INTO_CR0
23459+#endif
23460+
23461+ add $__START_KERNEL_map,%rbx
23462+ sub phys_base(%rip),%rbx
23463+
23464+#ifdef CONFIG_PARAVIRT
23465+ cmpl $0, pv_info+PARAVIRT_enabled
23466+ jz 1f
23467+ i = 0
23468+ .rept USER_PGD_PTRS
23469+ mov i*8(%rbx),%rsi
23470+ mov $0x67,%sil
23471+ lea i*8(%rbx),%rdi
23472+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
23473+ i = i + 1
23474+ .endr
23475+ jmp 2f
23476+1:
23477+#endif
23478+
23479+ i = 0
23480+ .rept USER_PGD_PTRS
23481+ movb $0x67,i*8(%rbx)
23482+ i = i + 1
23483+ .endr
23484+2:
23485+
23486+#ifdef CONFIG_PARAVIRT
23487+ PV_RESTORE_REGS(CLBR_RDI)
23488+#endif
23489+
23490+ popq %rbx
23491+ popq %rdi
23492+ pax_force_retaddr
23493+ retq
23494+3: ud2
23495+ENDPROC(pax_exit_kernel_user)
23496+#endif
23497+
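Without PCID, pax_enter_kernel_user/pax_exit_kernel_user instead edit the active PGD in place: %rbx is pointed at the PGD's kernel-virtual alias (CR3 + __START_KERNEL_map - phys_base), and the low flag byte of each of the USER_PGD_PTRS user-half entries is cleared on kernel entry and rewritten on exit. 0x67 is _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY (1+2+4+32+64), so the restore needs no per-entry state -- it relies on every populated user PGD entry carrying exactly those low flags. As C, the non-paravirt loops are roughly:

    static void uderef_unmap_userland(pgd_t *pgd)    /* movb $0,i*8(%rbx)    */
    {
            unsigned int i;
            for (i = 0; i < USER_PGD_PTRS; i++)
                    ((unsigned char *)&pgd[i])[0] = 0;    /* drops _PAGE_PRESENT */
    }

    static void uderef_remap_userland(pgd_t *pgd)    /* movb $0x67,i*8(%rbx) */
    {
            unsigned int i;
            for (i = 0; i < USER_PGD_PTRS; i++)
                    ((unsigned char *)&pgd[i])[0] = 0x67; /* P|RW|US|A|D */
    }

Under paravirt the same stores are routed through pv_mmu_ops.set_pgd_batched instead, which is why both routines carry the .rept loop twice.
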
23498+ .macro pax_enter_kernel_nmi
23499+ pax_set_fptr_mask
23500+
23501+#ifdef CONFIG_PAX_KERNEXEC
23502+ GET_CR0_INTO_RDI
23503+ bts $16,%rdi
23504+ jc 110f
23505+ SET_RDI_INTO_CR0
23506+ or $2,%ebx
23507+110:
23508+#endif
23509+
23510+#ifdef CONFIG_PAX_MEMORY_UDEREF
23511+ 661: jmp 111f
23512+ .pushsection .altinstr_replacement, "a"
23513+ 662: ASM_NOP2
23514+ .popsection
23515+ .pushsection .altinstructions, "a"
23516+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23517+ .popsection
23518+ GET_CR3_INTO_RDI
23519+ cmp $0,%dil
23520+ jz 111f
23521+ sub $4097,%rdi
23522+ or $4,%ebx
23523+ bts $63,%rdi
23524+ SET_RDI_INTO_CR3
23525+ mov $__UDEREF_KERNEL_DS,%edi
23526+ mov %edi,%ss
23527+111:
23528+#endif
23529+ .endm
23530+
23531+ .macro pax_exit_kernel_nmi
23532+#ifdef CONFIG_PAX_KERNEXEC
23533+ btr $1,%ebx
23534+ jnc 110f
23535+ GET_CR0_INTO_RDI
23536+ btr $16,%rdi
23537+ SET_RDI_INTO_CR0
23538+110:
23539+#endif
23540+
23541+#ifdef CONFIG_PAX_MEMORY_UDEREF
23542+ btr $2,%ebx
23543+ jnc 111f
23544+ GET_CR3_INTO_RDI
23545+ add $4097,%rdi
23546+ bts $63,%rdi
23547+ SET_RDI_INTO_CR3
23548+ mov $__KERNEL_DS,%edi
23549+ mov %edi,%ss
23550+111:
23551+#endif
23552+ .endm
23553+
23554+ .macro pax_erase_kstack
23555+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23556+ call pax_erase_kstack
23557+#endif
23558+ .endm
23559+
23560+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23561+ENTRY(pax_erase_kstack)
23562+ pushq %rdi
23563+ pushq %rcx
23564+ pushq %rax
23565+ pushq %r11
23566+
23567+ GET_THREAD_INFO(%r11)
23568+ mov TI_lowest_stack(%r11), %rdi
23569+ mov $-0xBEEF, %rax
23570+ std
23571+
23572+1: mov %edi, %ecx
23573+ and $THREAD_SIZE_asm - 1, %ecx
23574+ shr $3, %ecx
23575+ repne scasq
23576+ jecxz 2f
23577+
23578+ cmp $2*8, %ecx
23579+ jc 2f
23580+
23581+ mov $2*8, %ecx
23582+ repe scasq
23583+ jecxz 2f
23584+ jne 1b
23585+
23586+2: cld
23587+ or $2*8, %rdi
23588+ mov %esp, %ecx
23589+ sub %edi, %ecx
23590+
23591+ cmp $THREAD_SIZE_asm, %rcx
23592+ jb 3f
23593+ ud2
23594+3:
23595+
23596+ shr $3, %ecx
23597+ rep stosq
23598+
23599+ mov TI_task_thread_sp0(%r11), %rdi
23600+ sub $256, %rdi
23601+ mov %rdi, TI_lowest_stack(%r11)
23602+
23603+ popq %r11
23604+ popq %rax
23605+ popq %rcx
23606+ popq %rdi
23607+ pax_force_retaddr
23608+ ret
23609+ENDPROC(pax_erase_kstack)
23610+#endif
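pax_erase_kstack implements STACKLEAK's per-syscall stack clearing: starting at TI_lowest_stack (the deepest point the kernel stack has reached), it scans downwards (std + repne scasq) for two consecutive copies of the -0xBEEF poison qword -- stack that was never dirtied -- then re-poisons everything from there up to the live stack pointer (rep stosq after cld) and resets the watermark to just under the top of the stack. The cmp $THREAD_SIZE_asm,%rcx / ud2 pair is a sanity check that the fill never exceeds one stack's worth. A rough C rendition, with the alignment fiddling (the or $2*8,%rdi rounding) simplified away:

    #define KSTACK_POISON ((unsigned long)-0xBEEFL)

    static void erase_kstack_sketch(struct thread_info *ti, unsigned long sp)
    {
            unsigned long *p = (unsigned long *)ti->lowest_stack;

            /* walk down until two adjacent poison words mark untouched stack,
               or we run into the bottom of the THREAD_SIZE-aligned area */
            while (((unsigned long)p & (THREAD_SIZE - 1)) &&
                   !(p[0] == KSTACK_POISON && p[-1] == KSTACK_POISON))
                    p--;

            /* re-poison everything between there and the live stack pointer */
            while ((unsigned long)p < sp)
                    *p++ = KSTACK_POISON;

            /* reset the watermark near the stack top (TI_task_thread_sp0) */
            ti->lowest_stack = task_thread_sp0(ti) - 256;  /* accessor assumed */
    }
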
23611
23612 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
23613 #ifdef CONFIG_TRACE_IRQFLAGS
23614@@ -117,7 +544,7 @@ ENDPROC(native_usergs_sysret64)
23615 .endm
23616
23617 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
23618- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
23619+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
23620 jnc 1f
23621 TRACE_IRQS_ON_DEBUG
23622 1:
23623@@ -155,27 +582,6 @@ ENDPROC(native_usergs_sysret64)
23624 movq \tmp,R11+\offset(%rsp)
23625 .endm
23626
23627- .macro FAKE_STACK_FRAME child_rip
23628- /* push in order ss, rsp, eflags, cs, rip */
23629- xorl %eax, %eax
23630- pushq_cfi $__KERNEL_DS /* ss */
23631- /*CFI_REL_OFFSET ss,0*/
23632- pushq_cfi %rax /* rsp */
23633- CFI_REL_OFFSET rsp,0
23634- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
23635- /*CFI_REL_OFFSET rflags,0*/
23636- pushq_cfi $__KERNEL_CS /* cs */
23637- /*CFI_REL_OFFSET cs,0*/
23638- pushq_cfi \child_rip /* rip */
23639- CFI_REL_OFFSET rip,0
23640- pushq_cfi %rax /* orig rax */
23641- .endm
23642-
23643- .macro UNFAKE_STACK_FRAME
23644- addq $8*6, %rsp
23645- CFI_ADJUST_CFA_OFFSET -(6*8)
23646- .endm
23647-
23648 /*
23649 * initial frame state for interrupts (and exceptions without error code)
23650 */
23651@@ -241,25 +647,26 @@ ENDPROC(native_usergs_sysret64)
23652 /* save partial stack frame */
23653 .macro SAVE_ARGS_IRQ
23654 cld
23655- /* start from rbp in pt_regs and jump over */
23656- movq_cfi rdi, (RDI-RBP)
23657- movq_cfi rsi, (RSI-RBP)
23658- movq_cfi rdx, (RDX-RBP)
23659- movq_cfi rcx, (RCX-RBP)
23660- movq_cfi rax, (RAX-RBP)
23661- movq_cfi r8, (R8-RBP)
23662- movq_cfi r9, (R9-RBP)
23663- movq_cfi r10, (R10-RBP)
23664- movq_cfi r11, (R11-RBP)
23665+ /* start from r15 in pt_regs and jump over */
23666+ movq_cfi rdi, RDI
23667+ movq_cfi rsi, RSI
23668+ movq_cfi rdx, RDX
23669+ movq_cfi rcx, RCX
23670+ movq_cfi rax, RAX
23671+ movq_cfi r8, R8
23672+ movq_cfi r9, R9
23673+ movq_cfi r10, R10
23674+ movq_cfi r11, R11
23675+ movq_cfi r12, R12
23676
23677 /* Save rbp so that we can unwind from get_irq_regs() */
23678- movq_cfi rbp, 0
23679+ movq_cfi rbp, RBP
23680
23681 /* Save previous stack value */
23682 movq %rsp, %rsi
23683
23684- leaq -RBP(%rsp),%rdi /* arg1 for handler */
23685- testl $3, CS-RBP(%rsi)
23686+ movq %rsp,%rdi /* arg1 for handler */
23687+ testb $3, CS(%rsi)
23688 je 1f
23689 SWAPGS
23690 /*
23691@@ -279,6 +686,18 @@ ENDPROC(native_usergs_sysret64)
23692 0x06 /* DW_OP_deref */, \
23693 0x08 /* DW_OP_const1u */, SS+8-RBP, \
23694 0x22 /* DW_OP_plus */
23695+
23696+#ifdef CONFIG_PAX_MEMORY_UDEREF
23697+ testb $3, CS(%rdi)
23698+ jnz 1f
23699+ pax_enter_kernel
23700+ jmp 2f
23701+1: pax_enter_kernel_user
23702+2:
23703+#else
23704+ pax_enter_kernel
23705+#endif
23706+
23707 /* We entered an interrupt context - irqs are off: */
23708 TRACE_IRQS_OFF
23709 .endm
23710@@ -308,9 +727,52 @@ ENTRY(save_paranoid)
23711 js 1f /* negative -> in kernel */
23712 SWAPGS
23713 xorl %ebx,%ebx
23714-1: ret
23715+1:
23716+#ifdef CONFIG_PAX_MEMORY_UDEREF
23717+ testb $3, CS+8(%rsp)
23718+ jnz 1f
23719+ pax_enter_kernel
23720+ jmp 2f
23721+1: pax_enter_kernel_user
23722+2:
23723+#else
23724+ pax_enter_kernel
23725+#endif
23726+ pax_force_retaddr
23727+ ret
23728 CFI_ENDPROC
23729-END(save_paranoid)
23730+ENDPROC(save_paranoid)
23731+
23732+ENTRY(save_paranoid_nmi)
23733+ XCPT_FRAME 1 RDI+8
23734+ cld
23735+ movq_cfi rdi, RDI+8
23736+ movq_cfi rsi, RSI+8
23737+ movq_cfi rdx, RDX+8
23738+ movq_cfi rcx, RCX+8
23739+ movq_cfi rax, RAX+8
23740+ movq_cfi r8, R8+8
23741+ movq_cfi r9, R9+8
23742+ movq_cfi r10, R10+8
23743+ movq_cfi r11, R11+8
23744+ movq_cfi rbx, RBX+8
23745+ movq_cfi rbp, RBP+8
23746+ movq_cfi r12, R12+8
23747+ movq_cfi r13, R13+8
23748+ movq_cfi r14, R14+8
23749+ movq_cfi r15, R15+8
23750+ movl $1,%ebx
23751+ movl $MSR_GS_BASE,%ecx
23752+ rdmsr
23753+ testl %edx,%edx
23754+ js 1f /* negative -> in kernel */
23755+ SWAPGS
23756+ xorl %ebx,%ebx
23757+1: pax_enter_kernel_nmi
23758+ pax_force_retaddr
23759+ ret
23760+ CFI_ENDPROC
23761+ENDPROC(save_paranoid_nmi)
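Two conventions recur through the rest of this file. First, testl $3,CS(...) keeps becoming testb: the ring of the saved context lives in the low two bits of CS, so the byte-sized test yields identical flags in a shorter encoding. Second, the paranoid entry helpers use %ebx as a flag word for the exit path -- upstream already used it for "swapgs needed?", and save_paranoid_nmi extends it so pax_exit_kernel_nmi undoes exactly what pax_enter_kernel_nmi changed (a nested NMI can arrive with CR0.WP or the user PGD in either state). Sketched:

    static inline int came_from_user(unsigned long saved_cs)
    {
            return saved_cs & 3;            /* testb $3, CS(%rsp) */
    }

    /* %ebx bits on the paranoid/NMI exit path, per the or/btr pairs above */
    #define EBX_NO_SWAPGS    (1 << 0)  /* GS was already the kernel's        */
    #define EBX_UNDO_CR0_WP  (1 << 1)  /* or $2: entry had to set CR0.WP     */
    #define EBX_UNDO_CR3     (1 << 2)  /* or $4: entry switched to kernel PGD */
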
23762
23763 /*
23764 * A newly forked process directly context switches into this address.
23765@@ -331,7 +793,7 @@ ENTRY(ret_from_fork)
23766
23767 RESTORE_REST
23768
23769- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23770+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
23771 jz 1f
23772
23773 /*
23774@@ -344,15 +806,13 @@ ENTRY(ret_from_fork)
23775 jmp int_ret_from_sys_call
23776
23777 1:
23778- subq $REST_SKIP, %rsp # leave space for volatiles
23779- CFI_ADJUST_CFA_OFFSET REST_SKIP
23780 movq %rbp, %rdi
23781 call *%rbx
23782 movl $0, RAX(%rsp)
23783 RESTORE_REST
23784 jmp int_ret_from_sys_call
23785 CFI_ENDPROC
23786-END(ret_from_fork)
23787+ENDPROC(ret_from_fork)
23788
23789 /*
23790 * System call entry. Up to 6 arguments in registers are supported.
23791@@ -389,7 +849,7 @@ END(ret_from_fork)
23792 ENTRY(system_call)
23793 CFI_STARTPROC simple
23794 CFI_SIGNAL_FRAME
23795- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
23796+ CFI_DEF_CFA rsp,0
23797 CFI_REGISTER rip,rcx
23798 /*CFI_REGISTER rflags,r11*/
23799 SWAPGS_UNSAFE_STACK
23800@@ -402,16 +862,23 @@ GLOBAL(system_call_after_swapgs)
23801
23802 movq %rsp,PER_CPU_VAR(old_rsp)
23803 movq PER_CPU_VAR(kernel_stack),%rsp
23804+ SAVE_ARGS 8*6, 0, rax_enosys=1
23805+ pax_enter_kernel_user
23806+
23807+#ifdef CONFIG_PAX_RANDKSTACK
23808+ pax_erase_kstack
23809+#endif
23810+
23811 /*
23812 * No need to follow this irqs off/on section - it's straight
23813 * and short:
23814 */
23815 ENABLE_INTERRUPTS(CLBR_NONE)
23816- SAVE_ARGS 8, 0, rax_enosys=1
23817 movq_cfi rax,(ORIG_RAX-ARGOFFSET)
23818 movq %rcx,RIP-ARGOFFSET(%rsp)
23819 CFI_REL_OFFSET rip,RIP-ARGOFFSET
23820- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
23821+ GET_THREAD_INFO(%rcx)
23822+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
23823 jnz tracesys
23824 system_call_fastpath:
23825 #if __SYSCALL_MASK == ~0
23826@@ -435,10 +902,13 @@ sysret_check:
23827 LOCKDEP_SYS_EXIT
23828 DISABLE_INTERRUPTS(CLBR_NONE)
23829 TRACE_IRQS_OFF
23830- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
23831+ GET_THREAD_INFO(%rcx)
23832+ movl TI_flags(%rcx),%edx
23833 andl %edi,%edx
23834 jnz sysret_careful
23835 CFI_REMEMBER_STATE
23836+ pax_exit_kernel_user
23837+ pax_erase_kstack
23838 /*
23839 * sysretq will re-enable interrupts:
23840 */
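The syscall entry is reshuffled: SAVE_ARGS now runs immediately (hence CFI_DEF_CFA rsp,0 instead of KERNEL_STACK_OFFSET), so pax_enter_kernel_user can be called before anything else runs, with an optional pax_erase_kstack for CONFIG_PAX_RANDKSTACK. That option re-randomizes the kernel stack pointer per syscall, which is also why the TI_flags accesses stop using the rsp-relative THREAD_INFO(%rsp, off) form -- a fixed offset from %rsp no longer finds thread_info -- and switch to GET_THREAD_INFO(%rcx). The two lookups, as C (3.19-era layout, simplified):

    /* rsp-relative form the patch drops: thread_info sits at the bottom of
       the THREAD_SIZE-aligned stack, a fixed distance from a well-known %rsp */
    ti = (struct thread_info *)(rsp & ~(THREAD_SIZE - 1));

    /* per-CPU form it uses instead, immune to a randomized %rsp */
    ti = (struct thread_info *)(this_cpu_read(kernel_stack)
                                + KERNEL_STACK_OFFSET - THREAD_SIZE);

On the exit side, pax_exit_kernel_user and pax_erase_kstack slot in just before the sysret path re-enables interrupts.
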
23841@@ -497,12 +967,15 @@ sysret_audit:
23842
23843 /* Do syscall tracing */
23844 tracesys:
23845- leaq -REST_SKIP(%rsp), %rdi
23846+ movq %rsp, %rdi
23847 movq $AUDIT_ARCH_X86_64, %rsi
23848 call syscall_trace_enter_phase1
23849 test %rax, %rax
23850 jnz tracesys_phase2 /* if needed, run the slow path */
23851- LOAD_ARGS 0 /* else restore clobbered regs */
23852+
23853+ pax_erase_kstack
23854+
23855+ LOAD_ARGS /* else restore clobbered regs */
23856 jmp system_call_fastpath /* and return to the fast path */
23857
23858 tracesys_phase2:
23859@@ -513,12 +986,14 @@ tracesys_phase2:
23860 movq %rax,%rdx
23861 call syscall_trace_enter_phase2
23862
23863+ pax_erase_kstack
23864+
23865 /*
23866 * Reload arg registers from stack in case ptrace changed them.
23867 * We don't reload %rax because syscall_trace_entry_phase2() returned
23868 * the value it wants us to use in the table lookup.
23869 */
23870- LOAD_ARGS ARGOFFSET, 1
23871+ LOAD_ARGS 1
23872 RESTORE_REST
23873 #if __SYSCALL_MASK == ~0
23874 cmpq $__NR_syscall_max,%rax
23875@@ -548,7 +1023,9 @@ GLOBAL(int_with_check)
23876 andl %edi,%edx
23877 jnz int_careful
23878 andl $~TS_COMPAT,TI_status(%rcx)
23879- jmp retint_swapgs
23880+ pax_exit_kernel_user
23881+ pax_erase_kstack
23882+ jmp retint_swapgs_pax
23883
23884 /* Either reschedule or signal or syscall exit tracking needed. */
23885 /* First do a reschedule test. */
23886@@ -594,7 +1071,7 @@ int_restore_rest:
23887 TRACE_IRQS_OFF
23888 jmp int_with_check
23889 CFI_ENDPROC
23890-END(system_call)
23891+ENDPROC(system_call)
23892
23893 .macro FORK_LIKE func
23894 ENTRY(stub_\func)
23895@@ -607,9 +1084,10 @@ ENTRY(stub_\func)
23896 DEFAULT_FRAME 0 8 /* offset 8: return address */
23897 call sys_\func
23898 RESTORE_TOP_OF_STACK %r11, 8
23899- ret $REST_SKIP /* pop extended registers */
23900+ pax_force_retaddr
23901+ ret
23902 CFI_ENDPROC
23903-END(stub_\func)
23904+ENDPROC(stub_\func)
23905 .endm
23906
23907 .macro FIXED_FRAME label,func
23908@@ -619,9 +1097,10 @@ ENTRY(\label)
23909 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
23910 call \func
23911 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
23912+ pax_force_retaddr
23913 ret
23914 CFI_ENDPROC
23915-END(\label)
23916+ENDPROC(\label)
23917 .endm
23918
23919 FORK_LIKE clone
23920@@ -629,19 +1108,6 @@ END(\label)
23921 FORK_LIKE vfork
23922 FIXED_FRAME stub_iopl, sys_iopl
23923
23924-ENTRY(ptregscall_common)
23925- DEFAULT_FRAME 1 8 /* offset 8: return address */
23926- RESTORE_TOP_OF_STACK %r11, 8
23927- movq_cfi_restore R15+8, r15
23928- movq_cfi_restore R14+8, r14
23929- movq_cfi_restore R13+8, r13
23930- movq_cfi_restore R12+8, r12
23931- movq_cfi_restore RBP+8, rbp
23932- movq_cfi_restore RBX+8, rbx
23933- ret $REST_SKIP /* pop extended registers */
23934- CFI_ENDPROC
23935-END(ptregscall_common)
23936-
23937 ENTRY(stub_execve)
23938 CFI_STARTPROC
23939 addq $8, %rsp
23940@@ -653,7 +1119,7 @@ ENTRY(stub_execve)
23941 RESTORE_REST
23942 jmp int_ret_from_sys_call
23943 CFI_ENDPROC
23944-END(stub_execve)
23945+ENDPROC(stub_execve)
23946
23947 ENTRY(stub_execveat)
23948 CFI_STARTPROC
23949@@ -667,7 +1133,7 @@ ENTRY(stub_execveat)
23950 RESTORE_REST
23951 jmp int_ret_from_sys_call
23952 CFI_ENDPROC
23953-END(stub_execveat)
23954+ENDPROC(stub_execveat)
23955
23956 /*
23957 * sigreturn is special because it needs to restore all registers on return.
23958@@ -684,7 +1150,7 @@ ENTRY(stub_rt_sigreturn)
23959 RESTORE_REST
23960 jmp int_ret_from_sys_call
23961 CFI_ENDPROC
23962-END(stub_rt_sigreturn)
23963+ENDPROC(stub_rt_sigreturn)
23964
23965 #ifdef CONFIG_X86_X32_ABI
23966 ENTRY(stub_x32_rt_sigreturn)
23967@@ -698,7 +1164,7 @@ ENTRY(stub_x32_rt_sigreturn)
23968 RESTORE_REST
23969 jmp int_ret_from_sys_call
23970 CFI_ENDPROC
23971-END(stub_x32_rt_sigreturn)
23972+ENDPROC(stub_x32_rt_sigreturn)
23973
23974 ENTRY(stub_x32_execve)
23975 CFI_STARTPROC
23976@@ -763,7 +1229,7 @@ vector=vector+1
23977 2: jmp common_interrupt
23978 .endr
23979 CFI_ENDPROC
23980-END(irq_entries_start)
23981+ENDPROC(irq_entries_start)
23982
23983 .previous
23984 END(interrupt)
23985@@ -780,8 +1246,8 @@ END(interrupt)
23986 /* 0(%rsp): ~(interrupt number) */
23987 .macro interrupt func
23988 /* reserve pt_regs for scratch regs and rbp */
23989- subq $ORIG_RAX-RBP, %rsp
23990- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
23991+ subq $ORIG_RAX, %rsp
23992+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
23993 SAVE_ARGS_IRQ
23994 call \func
23995 .endm
23996@@ -804,14 +1270,14 @@ ret_from_intr:
23997
23998 /* Restore saved previous stack */
23999 popq %rsi
24000- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
24001- leaq ARGOFFSET-RBP(%rsi), %rsp
24002+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
24003+ movq %rsi, %rsp
24004 CFI_DEF_CFA_REGISTER rsp
24005- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
24006+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
24007
24008 exit_intr:
24009 GET_THREAD_INFO(%rcx)
24010- testl $3,CS-ARGOFFSET(%rsp)
24011+ testb $3,CS-ARGOFFSET(%rsp)
24012 je retint_kernel
24013
24014 /* Interrupt came from user space */
24015@@ -833,12 +1299,35 @@ retint_swapgs: /* return to user-space */
24016 * The iretq could re-enable interrupts:
24017 */
24018 DISABLE_INTERRUPTS(CLBR_ANY)
24019+ pax_exit_kernel_user
24020+retint_swapgs_pax:
24021 TRACE_IRQS_IRETQ
24022 SWAPGS
24023 jmp restore_args
24024
24025 retint_restore_args: /* return to kernel space */
24026 DISABLE_INTERRUPTS(CLBR_ANY)
24027+ pax_exit_kernel
24028+
24029+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
24030+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
24031+ * namely calling EFI runtime services with a phys mapping. We're
24032+ * starting off with NOPs and patch in the real instrumentation
24033+ * (BTS/OR) before starting any userland process; even before starting
24034+ * up the APs.
24035+ */
24036+ .pushsection .altinstr_replacement, "a"
24037+ 601: pax_force_retaddr (RIP-ARGOFFSET)
24038+ 602:
24039+ .popsection
24040+ 603: .fill 602b-601b, 1, 0x90
24041+ .pushsection .altinstructions, "a"
24042+ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
24043+ .popsection
24044+#else
24045+ pax_force_retaddr (RIP-ARGOFFSET)
24046+#endif
24047+
24048 /*
24049 * The iretq could re-enable interrupts:
24050 */
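retint_restore_args wants a pax_force_retaddr on the saved RIP, but early EFI runtime calls execute the kernel through a physical mapping where that instrumentation misbehaves, so with EFI+KERNEXEC the patch plants NOPs in-line (.fill 602b-601b,1,0x90) and registers the real instructions in .altinstructions keyed on X86_FEATURE_ALWAYS -- a synthetic feature bit that is always set, so apply_alternatives() patches them in unconditionally; the only point is deferring the write to boot-time patching, after the EFI phys-mapping window and before any userland runs. Each altinstruction_entry emits one record of roughly this shape (3.19-era struct, before the padlen field was added):

    struct alt_instr {
            s32 instr_offset;        /* original instruction, relative offset */
            s32 repl_offset;         /* replacement instruction, relative     */
            u16 cpuid;               /* feature bit gating the replacement    */
            u8  instrlen;            /* length of the original                */
            u8  replacementlen;      /* length of the replacement             */
    };
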
24051@@ -876,15 +1365,15 @@ native_irq_return_ldt:
24052 SWAPGS
24053 movq PER_CPU_VAR(espfix_waddr),%rdi
24054 movq %rax,(0*8)(%rdi) /* RAX */
24055- movq (2*8)(%rsp),%rax /* RIP */
24056+ movq (2*8 + RIP-RIP)(%rsp),%rax /* RIP */
24057 movq %rax,(1*8)(%rdi)
24058- movq (3*8)(%rsp),%rax /* CS */
24059+ movq (2*8 + CS-RIP)(%rsp),%rax /* CS */
24060 movq %rax,(2*8)(%rdi)
24061- movq (4*8)(%rsp),%rax /* RFLAGS */
24062+ movq (2*8 + EFLAGS-RIP)(%rsp),%rax /* RFLAGS */
24063 movq %rax,(3*8)(%rdi)
24064- movq (6*8)(%rsp),%rax /* SS */
24065+ movq (2*8 + SS-RIP)(%rsp),%rax /* SS */
24066 movq %rax,(5*8)(%rdi)
24067- movq (5*8)(%rsp),%rax /* RSP */
24068+ movq (2*8 + RSP-RIP)(%rsp),%rax /* RSP */
24069 movq %rax,(4*8)(%rdi)
24070 andl $0xffff0000,%eax
24071 popq_cfi %rdi
24072@@ -938,7 +1427,7 @@ ENTRY(retint_kernel)
24073 jmp exit_intr
24074 #endif
24075 CFI_ENDPROC
24076-END(common_interrupt)
24077+ENDPROC(common_interrupt)
24078
24079 /*
24080 * APIC interrupts.
24081@@ -952,7 +1441,7 @@ ENTRY(\sym)
24082 interrupt \do_sym
24083 jmp ret_from_intr
24084 CFI_ENDPROC
24085-END(\sym)
24086+ENDPROC(\sym)
24087 .endm
24088
24089 #ifdef CONFIG_TRACING
24090@@ -1025,7 +1514,7 @@ apicinterrupt IRQ_WORK_VECTOR \
24091 /*
24092 * Exception entry points.
24093 */
24094-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
24095+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
24096
24097 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
24098 ENTRY(\sym)
24099@@ -1076,6 +1565,12 @@ ENTRY(\sym)
24100 .endif
24101
24102 .if \shift_ist != -1
24103+#ifdef CONFIG_SMP
24104+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
24105+ lea init_tss(%r13), %r13
24106+#else
24107+ lea init_tss(%rip), %r13
24108+#endif
24109 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
24110 .endif
24111
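The shift_ist machinery (used by #DB/#BP so a nested exception gets a fresh IST stack) must adjust this CPU's TSS. Upstream reached init_tss through %gs-based per-CPU addressing; this patch turns init_tss into a plain NR_CPUS-indexed array (part of moving the GDT/TSS into fixed, eventually read-only pages), so the address is computed by hand: cpu_number * TSS_size into %r13, then INIT_TSS_IST indexes off it. As C, the SMP branch amounts to:

    struct tss_struct *tss = &init_tss[raw_smp_processor_id()];
    tss->x86_tss.ist[shift_ist - 1] -= EXCEPTION_STKSZ;  /* before the handler */
    /* ... handler runs; the matching addq restores it after the call,
       outside the quoted hunk ... */
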
24112@@ -1092,7 +1587,7 @@ ENTRY(\sym)
24113 .endif
24114
24115 CFI_ENDPROC
24116-END(\sym)
24117+ENDPROC(\sym)
24118 .endm
24119
24120 #ifdef CONFIG_TRACING
24121@@ -1133,9 +1628,10 @@ gs_change:
24122 2: mfence /* workaround */
24123 SWAPGS
24124 popfq_cfi
24125+ pax_force_retaddr
24126 ret
24127 CFI_ENDPROC
24128-END(native_load_gs_index)
24129+ENDPROC(native_load_gs_index)
24130
24131 _ASM_EXTABLE(gs_change,bad_gs)
24132 .section .fixup,"ax"
24133@@ -1163,9 +1659,10 @@ ENTRY(do_softirq_own_stack)
24134 CFI_DEF_CFA_REGISTER rsp
24135 CFI_ADJUST_CFA_OFFSET -8
24136 decl PER_CPU_VAR(irq_count)
24137+ pax_force_retaddr
24138 ret
24139 CFI_ENDPROC
24140-END(do_softirq_own_stack)
24141+ENDPROC(do_softirq_own_stack)
24142
24143 #ifdef CONFIG_XEN
24144 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
24145@@ -1203,7 +1700,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
24146 decl PER_CPU_VAR(irq_count)
24147 jmp error_exit
24148 CFI_ENDPROC
24149-END(xen_do_hypervisor_callback)
24150+ENDPROC(xen_do_hypervisor_callback)
24151
24152 /*
24153 * Hypervisor uses this for application faults while it executes.
24154@@ -1262,7 +1759,7 @@ ENTRY(xen_failsafe_callback)
24155 SAVE_ALL
24156 jmp error_exit
24157 CFI_ENDPROC
24158-END(xen_failsafe_callback)
24159+ENDPROC(xen_failsafe_callback)
24160
24161 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24162 xen_hvm_callback_vector xen_evtchn_do_upcall
24163@@ -1309,18 +1806,33 @@ ENTRY(paranoid_exit)
24164 DEFAULT_FRAME
24165 DISABLE_INTERRUPTS(CLBR_NONE)
24166 TRACE_IRQS_OFF_DEBUG
24167- testl %ebx,%ebx /* swapgs needed? */
24168+ testl $1,%ebx /* swapgs needed? */
24169 jnz paranoid_restore
24170- testl $3,CS(%rsp)
24171+ testb $3,CS(%rsp)
24172 jnz paranoid_userspace
24173+#ifdef CONFIG_PAX_MEMORY_UDEREF
24174+ pax_exit_kernel
24175+ TRACE_IRQS_IRETQ 0
24176+ SWAPGS_UNSAFE_STACK
24177+ RESTORE_ALL 8
24178+ pax_force_retaddr_bts
24179+ jmp irq_return
24180+#endif
24181 paranoid_swapgs:
24182+#ifdef CONFIG_PAX_MEMORY_UDEREF
24183+ pax_exit_kernel_user
24184+#else
24185+ pax_exit_kernel
24186+#endif
24187 TRACE_IRQS_IRETQ 0
24188 SWAPGS_UNSAFE_STACK
24189 RESTORE_ALL 8
24190 jmp irq_return
24191 paranoid_restore:
24192+ pax_exit_kernel
24193 TRACE_IRQS_IRETQ_DEBUG 0
24194 RESTORE_ALL 8
24195+ pax_force_retaddr_bts
24196 jmp irq_return
24197 paranoid_userspace:
24198 GET_THREAD_INFO(%rcx)
24199@@ -1349,7 +1861,7 @@ paranoid_schedule:
24200 TRACE_IRQS_OFF
24201 jmp paranoid_userspace
24202 CFI_ENDPROC
24203-END(paranoid_exit)
24204+ENDPROC(paranoid_exit)
24205
24206 /*
24207 * Exception entry point. This expects an error code/orig_rax on the stack.
24208@@ -1376,12 +1888,23 @@ ENTRY(error_entry)
24209 movq %r14, R14+8(%rsp)
24210 movq %r15, R15+8(%rsp)
24211 xorl %ebx,%ebx
24212- testl $3,CS+8(%rsp)
24213+ testb $3,CS+8(%rsp)
24214 je error_kernelspace
24215 error_swapgs:
24216 SWAPGS
24217 error_sti:
24218+#ifdef CONFIG_PAX_MEMORY_UDEREF
24219+ testb $3, CS+8(%rsp)
24220+ jnz 1f
24221+ pax_enter_kernel
24222+ jmp 2f
24223+1: pax_enter_kernel_user
24224+2:
24225+#else
24226+ pax_enter_kernel
24227+#endif
24228 TRACE_IRQS_OFF
24229+ pax_force_retaddr
24230 ret
24231
24232 /*
24233@@ -1416,7 +1939,7 @@ error_bad_iret:
24234 decl %ebx /* Return to usergs */
24235 jmp error_sti
24236 CFI_ENDPROC
24237-END(error_entry)
24238+ENDPROC(error_entry)
24239
24240
24241 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24242@@ -1427,7 +1950,7 @@ ENTRY(error_exit)
24243 DISABLE_INTERRUPTS(CLBR_NONE)
24244 TRACE_IRQS_OFF
24245 GET_THREAD_INFO(%rcx)
24246- testl %eax,%eax
24247+ testl $1,%eax
24248 jne retint_kernel
24249 LOCKDEP_SYS_EXIT_IRQ
24250 movl TI_flags(%rcx),%edx
24251@@ -1436,7 +1959,7 @@ ENTRY(error_exit)
24252 jnz retint_careful
24253 jmp retint_swapgs
24254 CFI_ENDPROC
24255-END(error_exit)
24256+ENDPROC(error_exit)
24257
24258 /*
24259 * Test if a given stack is an NMI stack or not.
24260@@ -1494,9 +2017,11 @@ ENTRY(nmi)
24261 * If %cs was not the kernel segment, then the NMI triggered in user
24262 * space, which means it is definitely not nested.
24263 */
24264+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24265+ je 1f
24266 cmpl $__KERNEL_CS, 16(%rsp)
24267 jne first_nmi
24268-
24269+1:
24270 /*
24271 * Check the special variable on the stack to see if NMIs are
24272 * executing.
24273@@ -1530,8 +2055,7 @@ nested_nmi:
24274
24275 1:
24276 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
24277- leaq -1*8(%rsp), %rdx
24278- movq %rdx, %rsp
24279+ subq $8, %rsp
24280 CFI_ADJUST_CFA_OFFSET 1*8
24281 leaq -10*8(%rsp), %rdx
24282 pushq_cfi $__KERNEL_DS
24283@@ -1549,6 +2073,7 @@ nested_nmi_out:
24284 CFI_RESTORE rdx
24285
24286 /* No need to check faults here */
24287+# pax_force_retaddr_bts
24288 INTERRUPT_RETURN
24289
24290 CFI_RESTORE_STATE
24291@@ -1645,13 +2170,13 @@ end_repeat_nmi:
24292 subq $ORIG_RAX-R15, %rsp
24293 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24294 /*
24295- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24296+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24297 * as we should not be calling schedule in NMI context.
24298 * Even with normal interrupts enabled. An NMI should not be
24299 * setting NEED_RESCHED or anything that normal interrupts and
24300 * exceptions might do.
24301 */
24302- call save_paranoid
24303+ call save_paranoid_nmi
24304 DEFAULT_FRAME 0
24305
24306 /*
24307@@ -1661,9 +2186,9 @@ end_repeat_nmi:
24308 * NMI itself takes a page fault, the page fault that was preempted
24309 * will read the information from the NMI page fault and not the
24310 * origin fault. Save it off and restore it if it changes.
24311- * Use the r12 callee-saved register.
24312+ * Use the r13 callee-saved register.
24313 */
24314- movq %cr2, %r12
24315+ movq %cr2, %r13
24316
24317 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24318 movq %rsp,%rdi
24319@@ -1672,29 +2197,34 @@ end_repeat_nmi:
24320
24321 /* Did the NMI take a page fault? Restore cr2 if it did */
24322 movq %cr2, %rcx
24323- cmpq %rcx, %r12
24324+ cmpq %rcx, %r13
24325 je 1f
24326- movq %r12, %cr2
24327+ movq %r13, %cr2
24328 1:
24329
24330- testl %ebx,%ebx /* swapgs needed? */
24331+ testl $1,%ebx /* swapgs needed? */
24332 jnz nmi_restore
24333 nmi_swapgs:
24334 SWAPGS_UNSAFE_STACK
24335 nmi_restore:
24336+ pax_exit_kernel_nmi
24337 /* Pop the extra iret frame at once */
24338 RESTORE_ALL 6*8
24339+ testb $3, 8(%rsp)
24340+ jnz 1f
24341+ pax_force_retaddr_bts
24342+1:
24343
24344 /* Clear the NMI executing stack variable */
24345 movq $0, 5*8(%rsp)
24346 jmp irq_return
24347 CFI_ENDPROC
24348-END(nmi)
24349+ENDPROC(nmi)
24350
24351 ENTRY(ignore_sysret)
24352 CFI_STARTPROC
24353 mov $-ENOSYS,%eax
24354 sysret
24355 CFI_ENDPROC
24356-END(ignore_sysret)
24357+ENDPROC(ignore_sysret)
24358
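The tail of the NMI path is the subtle part of this file. An NMI can interrupt a page-fault handler after the fault but before it reads CR2, and if do_nmi itself takes a fault, CR2 would be destroyed; the handler therefore parks CR2 in a callee-saved register (moved from %r12 to %r13 by the patch) and restores it only if it changed:

    /* the CR2 dance around do_nmi, as C */
    unsigned long saved_cr2 = read_cr2();
    do_nmi(regs);                           /* may itself fault */
    if (read_cr2() != saved_cr2)
            write_cr2(saved_cr2);           /* give the preempted fault its CR2 back */

On the way out, pax_exit_kernel_nmi unwinds whatever the matching enter did (per the %ebx bits noted earlier), and pax_force_retaddr_bts is applied only when returning to kernel mode -- the testb $3,8(%rsp) skips it for user frames, whose return address was never instrumented.
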
24359diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24360index f5d0730..5bce89c 100644
24361--- a/arch/x86/kernel/espfix_64.c
24362+++ b/arch/x86/kernel/espfix_64.c
24363@@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex);
24364 #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
24365 static void *espfix_pages[ESPFIX_MAX_PAGES];
24366
24367-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
24368- __aligned(PAGE_SIZE);
24369+static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata;
24370
24371 static unsigned int page_random, slot_random;
24372
24373@@ -122,11 +121,17 @@ static void init_espfix_random(void)
24374 void __init init_espfix_bsp(void)
24375 {
24376 pgd_t *pgd_p;
24377+ unsigned long index = pgd_index(ESPFIX_BASE_ADDR);
24378
24379 /* Install the espfix pud into the kernel page directory */
24380- pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
24381+ pgd_p = &init_level4_pgt[index];
24382 pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
24383
24384+#ifdef CONFIG_PAX_PER_CPU_PGD
24385+ clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1);
24386+ clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1);
24387+#endif
24388+
24389 /* Randomize the locations */
24390 init_espfix_random();
24391
24392@@ -194,7 +199,7 @@ void init_espfix_ap(void)
24393 set_pte(&pte_p[n*PTE_STRIDE], pte);
24394
24395 /* Job is done for this CPU and any CPU which shares this page */
24396- ACCESS_ONCE(espfix_pages[page]) = stack_page;
24397+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
24398
24399 unlock_done:
24400 mutex_unlock(&espfix_init_mutex);
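Three espfix tweaks: the PUD page moves from BSS to __page_aligned_rodata (it is initialized once and then only read), ACCESS_ONCE becomes ACCESS_ONCE_RW (grsecurity constifies ACCESS_ONCE, so intentional writers must say so), and -- the functional one -- with CONFIG_PAX_PER_CPU_PGD each CPU runs on its own kernel/user PGD pair, so installing the espfix entry only in init_level4_pgt would leave it invisible; the entry is cloned into CPU 0's shadow PGDs as well. clone_pgd_range is upstream's plain memcpy helper:

    /* arch/x86/include/asm/pgtable.h (upstream) */
    static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
    {
            memcpy(dst, src, count * sizeof(pgd_t));
    }
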
24401diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24402index 8b7b0a5..2395f29 100644
24403--- a/arch/x86/kernel/ftrace.c
24404+++ b/arch/x86/kernel/ftrace.c
24405@@ -89,7 +89,7 @@ static unsigned long text_ip_addr(unsigned long ip)
24406 * kernel identity mapping to modify code.
24407 */
24408 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24409- ip = (unsigned long)__va(__pa_symbol(ip));
24410+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24411
24412 return ip;
24413 }
24414@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24415 {
24416 unsigned char replaced[MCOUNT_INSN_SIZE];
24417
24418+ ip = ktla_ktva(ip);
24419+
24420 /*
24421 * Note: Due to modules and __init, code can
24422 * disappear and change, we need to protect against faulting
24423@@ -230,7 +232,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
24424 unsigned char old[MCOUNT_INSN_SIZE];
24425 int ret;
24426
24427- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
24428+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
24429
24430 ftrace_update_func = ip;
24431 /* Make sure the breakpoints see the ftrace_update_func update */
24432@@ -311,7 +313,7 @@ static int add_break(unsigned long ip, const char *old)
24433 unsigned char replaced[MCOUNT_INSN_SIZE];
24434 unsigned char brk = BREAKPOINT_INSTRUCTION;
24435
24436- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
24437+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
24438 return -EFAULT;
24439
24440 /* Make sure it is what we expect it to be */
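All four ftrace hunks are the same fix: ftrace reads and rewrites instruction bytes, and under KERNEXEC (32-bit) the kernel's text is linked at one linear address but actually accessible at another, so every text address is passed through ktla_ktva() ("kernel text linear address to kernel text virtual address") first. Conceptually -- the real definition lives in the PaX pgtable headers, and the offset name below is illustrative:

    #ifdef CONFIG_PAX_KERNEXEC
    #define ktla_ktva(addr) ((addr) + __KERNEL_TEXT_OFFSET)  /* accessible alias */
    #define ktva_ktla(addr) ((addr) - __KERNEL_TEXT_OFFSET)
    #else
    #define ktla_ktva(addr) (addr)                           /* identity otherwise */
    #define ktva_ktla(addr) (addr)
    #endif
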
24441diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
24442index eda1a86..8f6df48 100644
24443--- a/arch/x86/kernel/head64.c
24444+++ b/arch/x86/kernel/head64.c
24445@@ -67,12 +67,12 @@ again:
24446 pgd = *pgd_p;
24447
24448 /*
24449- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
24450- * critical -- __PAGE_OFFSET would point us back into the dynamic
24451+ * The use of __early_va rather than __va here is critical:
24452+ * __va would point us back into the dynamic
24453 * range and we might end up looping forever...
24454 */
24455 if (pgd)
24456- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24457+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
24458 else {
24459 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24460 reset_early_page_tables();
24461@@ -82,13 +82,13 @@ again:
24462 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
24463 for (i = 0; i < PTRS_PER_PUD; i++)
24464 pud_p[i] = 0;
24465- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24466+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
24467 }
24468 pud_p += pud_index(address);
24469 pud = *pud_p;
24470
24471 if (pud)
24472- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
24473+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
24474 else {
24475 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
24476 reset_early_page_tables();
24477@@ -98,7 +98,7 @@ again:
24478 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
24479 for (i = 0; i < PTRS_PER_PMD; i++)
24480 pmd_p[i] = 0;
24481- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
24482+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
24483 }
24484 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
24485 pmd_p[pmd_index(address)] = pmd;
24486@@ -175,7 +175,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
24487 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
24488 early_printk("Kernel alive\n");
24489
24490- clear_page(init_level4_pgt);
24491 /* set init_level4_pgt kernel high mapping*/
24492 init_level4_pgt[511] = early_level4_pgt[511];
24493
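The head64.c rewrite is an equivalence: for anything mapped in the kernel image, virt = phys + __START_KERNEL_map - phys_base, so the old open-coded expressions were hand-rolled phys-to-virt (and virt-to-phys) conversions; the patch hides them behind __early_va()/__pa(). The assumed shape of the new helper, which is exactly the arithmetic it replaces:

    /* presumed definition of the PaX helper */
    #define __early_va(x) \
            ((void *)((unsigned long)(x) + __START_KERNEL_map - phys_base))

The dropped clear_page(init_level4_pgt) is deliberate, not an optimization: in this patch init_level4_pgt is fully populated at build time and lives in .rodata (see the head_64.S hunks below), so clearing it would wipe out the static entries.
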
24494diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
24495index f36bd42..0ab4474 100644
24496--- a/arch/x86/kernel/head_32.S
24497+++ b/arch/x86/kernel/head_32.S
24498@@ -26,6 +26,12 @@
24499 /* Physical address */
24500 #define pa(X) ((X) - __PAGE_OFFSET)
24501
24502+#ifdef CONFIG_PAX_KERNEXEC
24503+#define ta(X) (X)
24504+#else
24505+#define ta(X) ((X) - __PAGE_OFFSET)
24506+#endif
24507+
24508 /*
24509 * References to members of the new_cpu_data structure.
24510 */
24511@@ -55,11 +61,7 @@
24512 * and small than max_low_pfn, otherwise will waste some page table entries
24513 */
24514
24515-#if PTRS_PER_PMD > 1
24516-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
24517-#else
24518-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
24519-#endif
24520+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
24521
24522 /* Number of possible pages in the lowmem region */
24523 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
24524@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
24525 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24526
24527 /*
24528+ * Real beginning of normal "text" segment
24529+ */
24530+ENTRY(stext)
24531+ENTRY(_stext)
24532+
24533+/*
24534 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
24535 * %esi points to the real-mode code as a 32-bit pointer.
24536 * CS and DS must be 4 GB flat segments, but we don't depend on
24537@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
24538 * can.
24539 */
24540 __HEAD
24541+
24542+#ifdef CONFIG_PAX_KERNEXEC
24543+ jmp startup_32
24544+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
24545+.fill PAGE_SIZE-5,1,0xcc
24546+#endif
24547+
24548 ENTRY(startup_32)
24549 movl pa(stack_start),%ecx
24550
24551@@ -106,6 +121,59 @@ ENTRY(startup_32)
24552 2:
24553 leal -__PAGE_OFFSET(%ecx),%esp
24554
24555+#ifdef CONFIG_SMP
24556+ movl $pa(cpu_gdt_table),%edi
24557+ movl $__per_cpu_load,%eax
24558+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
24559+ rorl $16,%eax
24560+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
24561+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
24562+ movl $__per_cpu_end - 1,%eax
24563+ subl $__per_cpu_start,%eax
24564+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
24565+#endif
24566+
24567+#ifdef CONFIG_PAX_MEMORY_UDEREF
24568+ movl $NR_CPUS,%ecx
24569+ movl $pa(cpu_gdt_table),%edi
24570+1:
24571+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
24572+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
24573+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
24574+ addl $PAGE_SIZE_asm,%edi
24575+ loop 1b
24576+#endif
24577+
24578+#ifdef CONFIG_PAX_KERNEXEC
24579+ movl $pa(boot_gdt),%edi
24580+ movl $__LOAD_PHYSICAL_ADDR,%eax
24581+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
24582+ rorl $16,%eax
24583+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
24584+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
24585+ rorl $16,%eax
24586+
24587+ ljmp $(__BOOT_CS),$1f
24588+1:
24589+
24590+ movl $NR_CPUS,%ecx
24591+ movl $pa(cpu_gdt_table),%edi
24592+ addl $__PAGE_OFFSET,%eax
24593+1:
24594+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
24595+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
24596+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
24597+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
24598+ rorl $16,%eax
24599+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
24600+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
24601+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
24602+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
24603+ rorl $16,%eax
24604+ addl $PAGE_SIZE_asm,%edi
24605+ loop 1b
24606+#endif
24607+
24608 /*
24609 * Clear BSS first so that there are no surprises...
24610 */
24611@@ -201,8 +269,11 @@ ENTRY(startup_32)
24612 movl %eax, pa(max_pfn_mapped)
24613
24614 /* Do early initialization of the fixmap area */
24615- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24616- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
24617+#ifdef CONFIG_COMPAT_VDSO
24618+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
24619+#else
24620+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
24621+#endif
24622 #else /* Not PAE */
24623
24624 page_pde_offset = (__PAGE_OFFSET >> 20);
24625@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24626 movl %eax, pa(max_pfn_mapped)
24627
24628 /* Do early initialization of the fixmap area */
24629- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
24630- movl %eax,pa(initial_page_table+0xffc)
24631+#ifdef CONFIG_COMPAT_VDSO
24632+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
24633+#else
24634+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
24635+#endif
24636 #endif
24637
24638 #ifdef CONFIG_PARAVIRT
24639@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
24640 cmpl $num_subarch_entries, %eax
24641 jae bad_subarch
24642
24643- movl pa(subarch_entries)(,%eax,4), %eax
24644- subl $__PAGE_OFFSET, %eax
24645- jmp *%eax
24646+ jmp *pa(subarch_entries)(,%eax,4)
24647
24648 bad_subarch:
24649 WEAK(lguest_entry)
24650@@ -261,10 +333,10 @@ WEAK(xen_entry)
24651 __INITDATA
24652
24653 subarch_entries:
24654- .long default_entry /* normal x86/PC */
24655- .long lguest_entry /* lguest hypervisor */
24656- .long xen_entry /* Xen hypervisor */
24657- .long default_entry /* Moorestown MID */
24658+ .long ta(default_entry) /* normal x86/PC */
24659+ .long ta(lguest_entry) /* lguest hypervisor */
24660+ .long ta(xen_entry) /* Xen hypervisor */
24661+ .long ta(default_entry) /* Moorestown MID */
24662 num_subarch_entries = (. - subarch_entries) / 4
24663 .previous
24664 #else
24665@@ -354,6 +426,7 @@ default_entry:
24666 movl pa(mmu_cr4_features),%eax
24667 movl %eax,%cr4
24668
24669+#ifdef CONFIG_X86_PAE
24670 testb $X86_CR4_PAE, %al # check if PAE is enabled
24671 jz enable_paging
24672
24673@@ -382,6 +455,9 @@ default_entry:
24674 /* Make changes effective */
24675 wrmsr
24676
24677+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
24678+#endif
24679+
24680 enable_paging:
24681
24682 /*
24683@@ -449,14 +525,20 @@ is486:
24684 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
24685 movl %eax,%ss # after changing gdt.
24686
24687- movl $(__USER_DS),%eax # DS/ES contains default USER segment
24688+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
24689 movl %eax,%ds
24690 movl %eax,%es
24691
24692 movl $(__KERNEL_PERCPU), %eax
24693 movl %eax,%fs # set this cpu's percpu
24694
24695+#ifdef CONFIG_CC_STACKPROTECTOR
24696 movl $(__KERNEL_STACK_CANARY),%eax
24697+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
24698+ movl $(__USER_DS),%eax
24699+#else
24700+ xorl %eax,%eax
24701+#endif
24702 movl %eax,%gs
24703
24704 xorl %eax,%eax # Clear LDT
24705@@ -512,8 +594,11 @@ setup_once:
24706 * relocation. Manually set base address in stack canary
24707 * segment descriptor.
24708 */
24709- movl $gdt_page,%eax
24710+ movl $cpu_gdt_table,%eax
24711 movl $stack_canary,%ecx
24712+#ifdef CONFIG_SMP
24713+ addl $__per_cpu_load,%ecx
24714+#endif
24715 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
24716 shrl $16, %ecx
24717 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
24718@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
24719 cmpl $2,(%esp) # X86_TRAP_NMI
24720 je is_nmi # Ignore NMI
24721
24722- cmpl $2,%ss:early_recursion_flag
24723+ cmpl $1,%ss:early_recursion_flag
24724 je hlt_loop
24725 incl %ss:early_recursion_flag
24726
24727@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
24728 pushl (20+6*4)(%esp) /* trapno */
24729 pushl $fault_msg
24730 call printk
24731-#endif
24732 call dump_stack
24733+#endif
24734 hlt_loop:
24735 hlt
24736 jmp hlt_loop
24737@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
24738 /* This is the default interrupt "handler" :-) */
24739 ALIGN
24740 ignore_int:
24741- cld
24742 #ifdef CONFIG_PRINTK
24743+ cmpl $2,%ss:early_recursion_flag
24744+ je hlt_loop
24745+ incl %ss:early_recursion_flag
24746+ cld
24747 pushl %eax
24748 pushl %ecx
24749 pushl %edx
24750@@ -617,9 +705,6 @@ ignore_int:
24751 movl $(__KERNEL_DS),%eax
24752 movl %eax,%ds
24753 movl %eax,%es
24754- cmpl $2,early_recursion_flag
24755- je hlt_loop
24756- incl early_recursion_flag
24757 pushl 16(%esp)
24758 pushl 24(%esp)
24759 pushl 32(%esp)
24760@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
24761 /*
24762 * BSS section
24763 */
24764-__PAGE_ALIGNED_BSS
24765- .align PAGE_SIZE
24766 #ifdef CONFIG_X86_PAE
24767+.section .initial_pg_pmd,"a",@progbits
24768 initial_pg_pmd:
24769 .fill 1024*KPMDS,4,0
24770 #else
24771+.section .initial_page_table,"a",@progbits
24772 ENTRY(initial_page_table)
24773 .fill 1024,4,0
24774 #endif
24775+.section .initial_pg_fixmap,"a",@progbits
24776 initial_pg_fixmap:
24777 .fill 1024,4,0
24778+.section .empty_zero_page,"a",@progbits
24779 ENTRY(empty_zero_page)
24780 .fill 4096,1,0
24781+.section .swapper_pg_dir,"a",@progbits
24782 ENTRY(swapper_pg_dir)
24783+#ifdef CONFIG_X86_PAE
24784+ .fill 4,8,0
24785+#else
24786 .fill 1024,4,0
24787+#endif
24788
24789 /*
24790 * This starts the data section.
24791 */
24792 #ifdef CONFIG_X86_PAE
24793-__PAGE_ALIGNED_DATA
24794- /* Page-aligned for the benefit of paravirt? */
24795- .align PAGE_SIZE
24796+.section .initial_page_table,"a",@progbits
24797 ENTRY(initial_page_table)
24798 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
24799 # if KPMDS == 3
24800@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
24801 # error "Kernel PMDs should be 1, 2 or 3"
24802 # endif
24803 .align PAGE_SIZE /* needs to be page-sized too */
24804+
24805+#ifdef CONFIG_PAX_PER_CPU_PGD
24806+ENTRY(cpu_pgd)
24807+ .rept 2*NR_CPUS
24808+ .fill 4,8,0
24809+ .endr
24810+#endif
24811+
24812 #endif
24813
24814 .data
24815 .balign 4
24816 ENTRY(stack_start)
24817- .long init_thread_union+THREAD_SIZE
24818+ .long init_thread_union+THREAD_SIZE-8
24819
24820 __INITRODATA
24821 int_msg:
24822@@ -727,7 +825,7 @@ fault_msg:
24823 * segment size, and 32-bit linear address value:
24824 */
24825
24826- .data
24827+.section .rodata,"a",@progbits
24828 .globl boot_gdt_descr
24829 .globl idt_descr
24830
24831@@ -736,7 +834,7 @@ fault_msg:
24832 .word 0 # 32 bit align gdt_desc.address
24833 boot_gdt_descr:
24834 .word __BOOT_DS+7
24835- .long boot_gdt - __PAGE_OFFSET
24836+ .long pa(boot_gdt)
24837
24838 .word 0 # 32-bit align idt_desc.address
24839 idt_descr:
24840@@ -747,7 +845,7 @@ idt_descr:
24841 .word 0 # 32 bit align gdt_desc.address
24842 ENTRY(early_gdt_descr)
24843 .word GDT_ENTRIES*8-1
24844- .long gdt_page /* Overwritten for secondary CPUs */
24845+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
24846
24847 /*
24848 * The boot_gdt must mirror the equivalent in setup.S and is
24849@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
24850 .align L1_CACHE_BYTES
24851 ENTRY(boot_gdt)
24852 .fill GDT_ENTRY_BOOT_CS,8,0
24853- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
24854- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
24855+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
24856+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
24857+
24858+ .align PAGE_SIZE_asm
24859+ENTRY(cpu_gdt_table)
24860+ .rept NR_CPUS
24861+ .quad 0x0000000000000000 /* NULL descriptor */
24862+ .quad 0x0000000000000000 /* 0x0b reserved */
24863+ .quad 0x0000000000000000 /* 0x13 reserved */
24864+ .quad 0x0000000000000000 /* 0x1b reserved */
24865+
24866+#ifdef CONFIG_PAX_KERNEXEC
24867+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
24868+#else
24869+ .quad 0x0000000000000000 /* 0x20 unused */
24870+#endif
24871+
24872+ .quad 0x0000000000000000 /* 0x28 unused */
24873+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
24874+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
24875+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
24876+ .quad 0x0000000000000000 /* 0x4b reserved */
24877+ .quad 0x0000000000000000 /* 0x53 reserved */
24878+ .quad 0x0000000000000000 /* 0x5b reserved */
24879+
24880+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
24881+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
24882+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
24883+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
24884+
24885+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
24886+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
24887+
24888+ /*
24889+ * Segments used for calling PnP BIOS have byte granularity.
24890+ * The code segments and data segments have fixed 64k limits,
24891+ * the transfer segment sizes are set at run time.
24892+ */
24893+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
24894+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
24895+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
24896+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
24897+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
24898+
24899+ /*
24900+ * The APM segments have byte granularity and their bases
24901+ * are set at run time. All have 64k limits.
24902+ */
24903+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
24904+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
24905+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
24906+
24907+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
24908+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
24909+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
24910+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
24911+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
24912+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
24913+
24914+ /* Be sure this is zeroed to avoid false validations in Xen */
24915+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
24916+ .endr
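Two things worth decoding in the head_32.S GDT work. First, the descriptor constants change 0x...9a to 0x...9b and 0x...92 to 0x...93: the only delta is the "accessed" bit of the type field, which the CPU otherwise writes into the descriptor the first time the selector is loaded -- pre-setting it means the hardware never needs to store into a GDT that grsecurity intends to keep read-only. Annotated:

    /* 0x00cf9b000000ffff decodes as:
     *   base  = 0x00000000, limit = 0xfffff (G=1 -> 4 GiB)
     *   type  = 0xb: code, execute/read, accessed   (0xa = same but !accessed)
     *   S=1, DPL=0, P=1; flags 0xc: G=1 (4K granularity), D=1 (32-bit)
     */

Second, gdt_page (a per-CPU C variable) is replaced by cpu_gdt_table, one full page per CPU emitted with .rept NR_CPUS; the UDEREF loop then rewrites the high dwords of the kernel-DS and user-CS/DS descriptors on each page to plant segment limits derived from __PAGE_OFFSET, so kernel and user segments cover disjoint address ranges -- on i386, UDEREF is segmentation-based.
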
24917diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
24918index a468c0a..8b5a879 100644
24919--- a/arch/x86/kernel/head_64.S
24920+++ b/arch/x86/kernel/head_64.S
24921@@ -20,6 +20,8 @@
24922 #include <asm/processor-flags.h>
24923 #include <asm/percpu.h>
24924 #include <asm/nops.h>
24925+#include <asm/cpufeature.h>
24926+#include <asm/alternative-asm.h>
24927
24928 #ifdef CONFIG_PARAVIRT
24929 #include <asm/asm-offsets.h>
24930@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
24931 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
24932 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
24933 L3_START_KERNEL = pud_index(__START_KERNEL_map)
24934+L4_VMALLOC_START = pgd_index(VMALLOC_START)
24935+L3_VMALLOC_START = pud_index(VMALLOC_START)
24936+L4_VMALLOC_END = pgd_index(VMALLOC_END)
24937+L3_VMALLOC_END = pud_index(VMALLOC_END)
24938+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
24939+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
24940
24941 .text
24942 __HEAD
24943@@ -89,11 +97,24 @@ startup_64:
24944 * Fixup the physical addresses in the page table
24945 */
24946 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
24947+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
24948+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
24949+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
24950+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
24951+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
24952
24953- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
24954- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
24955+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
24956+#ifndef CONFIG_XEN
24957+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
24958+#endif
24959+
24960+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
24961+
24962+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
24963+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
24964
24965 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
24966+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
24967
24968 /*
24969 * Set up the identity mapping for the switchover. These
24970@@ -174,11 +195,12 @@ ENTRY(secondary_startup_64)
24971 * after the boot processor executes this code.
24972 */
24973
24974+ orq $-1, %rbp
24975 movq $(init_level4_pgt - __START_KERNEL_map), %rax
24976 1:
24977
24978- /* Enable PAE mode and PGE */
24979- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
24980+ /* Enable PAE mode and PSE/PGE */
24981+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
24982 movq %rcx, %cr4
24983
24984 /* Setup early boot stage 4 level pagetables. */
24985@@ -199,10 +221,19 @@ ENTRY(secondary_startup_64)
24986 movl $MSR_EFER, %ecx
24987 rdmsr
24988 btsl $_EFER_SCE, %eax /* Enable System Call */
24989- btl $20,%edi /* No Execute supported? */
24990+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
24991 jnc 1f
24992 btsl $_EFER_NX, %eax
24993+ cmpq $-1, %rbp
24994+ je 1f
24995 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
24996+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
24997+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
24998+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
24999+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
25000+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
25001+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
25002+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
25003 1: wrmsr /* Make changes effective */
25004
25005 /* Setup cr0 */
25006@@ -282,6 +313,7 @@ ENTRY(secondary_startup_64)
25007 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
25008 * address given in m16:64.
25009 */
25010+ pax_set_fptr_mask
25011 movq initial_code(%rip),%rax
25012 pushq $0 # fake return address to stop unwinder
25013 pushq $__KERNEL_CS # set correct cs
25014@@ -313,7 +345,7 @@ ENDPROC(start_cpu0)
25015 .quad INIT_PER_CPU_VAR(irq_stack_union)
25016
25017 GLOBAL(stack_start)
25018- .quad init_thread_union+THREAD_SIZE-8
25019+ .quad init_thread_union+THREAD_SIZE-16
25020 .word 0
25021 __FINITDATA
25022
25023@@ -391,7 +423,7 @@ ENTRY(early_idt_handler)
25024 call dump_stack
25025 #ifdef CONFIG_KALLSYMS
25026 leaq early_idt_ripmsg(%rip),%rdi
25027- movq 40(%rsp),%rsi # %rip again
25028+ movq 88(%rsp),%rsi # %rip again
25029 call __print_symbol
25030 #endif
25031 #endif /* EARLY_PRINTK */
25032@@ -420,6 +452,7 @@ ENDPROC(early_idt_handler)
25033 early_recursion_flag:
25034 .long 0
25035
25036+ .section .rodata,"a",@progbits
25037 #ifdef CONFIG_EARLY_PRINTK
25038 early_idt_msg:
25039 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
25040@@ -447,29 +480,52 @@ NEXT_PAGE(early_level4_pgt)
25041 NEXT_PAGE(early_dynamic_pgts)
25042 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
25043
25044- .data
25045+ .section .rodata,"a",@progbits
25046
25047-#ifndef CONFIG_XEN
25048 NEXT_PAGE(init_level4_pgt)
25049- .fill 512,8,0
25050-#else
25051-NEXT_PAGE(init_level4_pgt)
25052- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25053 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
25054 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25055+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
25056+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
25057+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
25058+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
25059+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
25060+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25061 .org init_level4_pgt + L4_START_KERNEL*8, 0
25062 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
25063 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
25064
25065+#ifdef CONFIG_PAX_PER_CPU_PGD
25066+NEXT_PAGE(cpu_pgd)
25067+ .rept 2*NR_CPUS
25068+ .fill 512,8,0
25069+ .endr
25070+#endif
25071+
25072 NEXT_PAGE(level3_ident_pgt)
25073 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25074+#ifdef CONFIG_XEN
25075 .fill 511, 8, 0
25076+#else
25077+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
25078+ .fill 510,8,0
25079+#endif
25080+
25081+NEXT_PAGE(level3_vmalloc_start_pgt)
25082+ .fill 512,8,0
25083+
25084+NEXT_PAGE(level3_vmalloc_end_pgt)
25085+ .fill 512,8,0
25086+
25087+NEXT_PAGE(level3_vmemmap_pgt)
25088+ .fill L3_VMEMMAP_START,8,0
25089+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25090+
25091 NEXT_PAGE(level2_ident_pgt)
25092- /* Since I easily can, map the first 1G.
25093+ /* Since I easily can, map the first 2G.
25094 * Don't set NX because code runs from these pages.
25095 */
25096- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
25097-#endif
25098+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
25099
25100 NEXT_PAGE(level3_kernel_pgt)
25101 .fill L3_START_KERNEL,8,0
25102@@ -477,6 +533,9 @@ NEXT_PAGE(level3_kernel_pgt)
25103 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
25104 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25105
25106+NEXT_PAGE(level2_vmemmap_pgt)
25107+ .fill 512,8,0
25108+
25109 NEXT_PAGE(level2_kernel_pgt)
25110 /*
25111 * 512 MB kernel mapping. We spend a full page on this pagetable
25112@@ -494,28 +553,64 @@ NEXT_PAGE(level2_kernel_pgt)
25113 NEXT_PAGE(level2_fixmap_pgt)
25114 .fill 506,8,0
25115 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25116- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
25117- .fill 5,8,0
25118+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
25119+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
25120+ .fill 4,8,0
25121
25122 NEXT_PAGE(level1_fixmap_pgt)
25123 .fill 512,8,0
25124
25125+NEXT_PAGE(level1_vsyscall_pgt)
25126+ .fill 512,8,0
25127+
25128 #undef PMDS
25129
25130- .data
25131+ .align PAGE_SIZE
25132+ENTRY(cpu_gdt_table)
25133+ .rept NR_CPUS
25134+ .quad 0x0000000000000000 /* NULL descriptor */
25135+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
25136+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
25137+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
25138+ .quad 0x00cffb000000ffff /* __USER32_CS */
25139+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
25140+ .quad 0x00affb000000ffff /* __USER_CS */
25141+
25142+#ifdef CONFIG_PAX_KERNEXEC
25143+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
25144+#else
25145+ .quad 0x0 /* unused */
25146+#endif
25147+
25148+ .quad 0,0 /* TSS */
25149+ .quad 0,0 /* LDT */
25150+ .quad 0,0,0 /* three TLS descriptors */
25151+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
25152+ /* asm/segment.h:GDT_ENTRIES must match this */
25153+
25154+#ifdef CONFIG_PAX_MEMORY_UDEREF
25155+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
25156+#else
25157+ .quad 0x0 /* unused */
25158+#endif
25159+
25160+ /* zero the remaining page */
25161+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25162+ .endr
25163+
25164 .align 16
25165 .globl early_gdt_descr
25166 early_gdt_descr:
25167 .word GDT_ENTRIES*8-1
25168 early_gdt_descr_base:
25169- .quad INIT_PER_CPU_VAR(gdt_page)
25170+ .quad cpu_gdt_table
25171
25172 ENTRY(phys_base)
25173 /* This must match the first entry in level2_kernel_pgt */
25174 .quad 0x0000000000000000
25175
25176 #include "../../x86/xen/xen-head.S"
25177-
25178- __PAGE_ALIGNED_BSS
25179+
25180+ .section .rodata,"a",@progbits
25181 NEXT_PAGE(empty_zero_page)
25182 .skip PAGE_SIZE
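
The cpu_gdt_table block above replaces the per-cpu gdt_page with a flat array of raw 8-byte segment descriptors, one GDT per CPU. To make the magic constants readable, here is a small stand-alone C sketch that decodes them using the architectural descriptor layout (limit in bits 0-15 and 48-51, base in bits 16-39 and 56-63, access byte in bits 40-47, flags nibble in bits 52-55); the hex values are copied from the hunk, the decoder itself is illustrative only:

#include <stdio.h>
#include <stdint.h>

/* Decode one raw x86 GDT descriptor into its architectural fields. */
static void decode_desc(uint64_t d)
{
	uint32_t limit  = (uint32_t)((d & 0xffff) | ((d >> 32) & 0xf0000));
	uint32_t base   = (uint32_t)(((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24));
	unsigned access = (unsigned)((d >> 40) & 0xff);
	unsigned flags  = (unsigned)((d >> 52) & 0xf);

	if (flags & 0x8)			/* G: limit counted in 4 KiB units */
		limit = (limit << 12) | 0xfff;

	printf("%016llx: base=%#010x limit=%#010x access=%#04x%s%s\n",
	       (unsigned long long)d, base, limit, access,
	       (flags & 0x2) ? " L(64-bit)" : "",
	       (flags & 0x4) ? " D/B(32-bit)" : "");
}

int main(void)
{
	decode_desc(0x00af9b000000ffffULL);	/* __KERNEL_CS */
	decode_desc(0x00cf9b000000ffffULL);	/* __KERNEL32_CS */
	decode_desc(0x00cf93000000ffffULL);	/* __KERNEL_DS */
	decode_desc(0x00cffb000000ffffULL);	/* __USER32_CS */
	return 0;
}

Running it shows why __KERNEL_CS (0x00af9b...) differs from __KERNEL32_CS (0x00cf9b...) only in the flags nibble: 0xa sets G+L (64-bit code) while 0xc sets G+D/B (32-bit code).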
25183diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25184index 05fd74f..c3548b1 100644
25185--- a/arch/x86/kernel/i386_ksyms_32.c
25186+++ b/arch/x86/kernel/i386_ksyms_32.c
25187@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25188 EXPORT_SYMBOL(cmpxchg8b_emu);
25189 #endif
25190
25191+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25192+
25193 /* Networking helper routines. */
25194 EXPORT_SYMBOL(csum_partial_copy_generic);
25195+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25196+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25197
25198 EXPORT_SYMBOL(__get_user_1);
25199 EXPORT_SYMBOL(__get_user_2);
25200@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25201 EXPORT_SYMBOL(___preempt_schedule_context);
25202 #endif
25203 #endif
25204+
25205+#ifdef CONFIG_PAX_KERNEXEC
25206+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25207+#endif
25208+
25209+#ifdef CONFIG_PAX_PER_CPU_PGD
25210+EXPORT_SYMBOL(cpu_pgd);
25211+#endif
25212diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25213index a9a4229..6f4d476 100644
25214--- a/arch/x86/kernel/i387.c
25215+++ b/arch/x86/kernel/i387.c
25216@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25217 static inline bool interrupted_user_mode(void)
25218 {
25219 struct pt_regs *regs = get_irq_regs();
25220- return regs && user_mode_vm(regs);
25221+ return regs && user_mode(regs);
25222 }
25223
25224 /*
25225diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25226index e7cc537..67d7372 100644
25227--- a/arch/x86/kernel/i8259.c
25228+++ b/arch/x86/kernel/i8259.c
25229@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25230 static void make_8259A_irq(unsigned int irq)
25231 {
25232 disable_irq_nosync(irq);
25233- io_apic_irqs &= ~(1<<irq);
25234+ io_apic_irqs &= ~(1UL<<irq);
25235 irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
25236 enable_irq(irq);
25237 }
25238@@ -208,7 +208,7 @@ spurious_8259A_irq:
25239 "spurious 8259A interrupt: IRQ%d.\n", irq);
25240 spurious_irq_mask |= irqmask;
25241 }
25242- atomic_inc(&irq_err_count);
25243+ atomic_inc_unchecked(&irq_err_count);
25244 /*
25245 * Theoretically we do not have to handle this IRQ,
25246 * but in Linux this does not cause problems and is
25247@@ -349,14 +349,16 @@ static void init_8259A(int auto_eoi)
25248 /* (slave's support for AEOI in flat mode is to be investigated) */
25249 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25250
25251+ pax_open_kernel();
25252 if (auto_eoi)
25253 /*
25254 * In AEOI mode we just have to mask the interrupt
25255 * when acking.
25256 */
25257- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25258+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25259 else
25260- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25261+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25262+ pax_close_kernel();
25263
25264 udelay(100); /* wait for 8259A to initialize */
25265
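
The pax_open_kernel()/pax_close_kernel() pair added around the i8259A_chip assignments exists because KERNEXEC moves ops structures such as struct irq_chip into read-only memory; the write has to go through a briefly opened window (the native x86 implementation elsewhere in this patch toggles CR0.WP). A rough user-space analogue of the same discipline, using mprotect() on an ordinary page in place of the CR0 flip (error handling omitted):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void say_hi(void) { puts("patched handler ran"); }

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	/* A function-pointer slot living in its own "read-only after init" page. */
	void *page = mmap(NULL, (size_t)psz, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	void (**slot)(void) = page;

	mprotect(page, (size_t)psz, PROT_READ);			/* seal it */

	mprotect(page, (size_t)psz, PROT_READ | PROT_WRITE);	/* pax_open_kernel() */
	*slot = say_hi;
	mprotect(page, (size_t)psz, PROT_READ);			/* pax_close_kernel() */

	(*slot)();
	return 0;
}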
25266diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25267index a979b5b..1d6db75 100644
25268--- a/arch/x86/kernel/io_delay.c
25269+++ b/arch/x86/kernel/io_delay.c
25270@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25271 * Quirk table for systems that misbehave (lock up, etc.) if port
25272 * 0x80 is used:
25273 */
25274-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25275+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25276 {
25277 .callback = dmi_io_delay_0xed_port,
25278 .ident = "Compaq Presario V6000",
25279diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25280index 4ddaf66..49d5c18 100644
25281--- a/arch/x86/kernel/ioport.c
25282+++ b/arch/x86/kernel/ioport.c
25283@@ -6,6 +6,7 @@
25284 #include <linux/sched.h>
25285 #include <linux/kernel.h>
25286 #include <linux/capability.h>
25287+#include <linux/security.h>
25288 #include <linux/errno.h>
25289 #include <linux/types.h>
25290 #include <linux/ioport.h>
25291@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25292 return -EINVAL;
25293 if (turn_on && !capable(CAP_SYS_RAWIO))
25294 return -EPERM;
25295+#ifdef CONFIG_GRKERNSEC_IO
25296+ if (turn_on && grsec_disable_privio) {
25297+ gr_handle_ioperm();
25298+ return -ENODEV;
25299+ }
25300+#endif
25301
25302 /*
25303 * If it's the first ioperm() call in this thread's lifetime, set the
25304@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25305 * because the ->io_bitmap_max value must match the bitmap
25306 * contents:
25307 */
25308- tss = &per_cpu(init_tss, get_cpu());
25309+ tss = init_tss + get_cpu();
25310
25311 if (turn_on)
25312 bitmap_clear(t->io_bitmap_ptr, from, num);
25313@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25314 if (level > old) {
25315 if (!capable(CAP_SYS_RAWIO))
25316 return -EPERM;
25317+#ifdef CONFIG_GRKERNSEC_IO
25318+ if (grsec_disable_privio) {
25319+ gr_handle_iopl();
25320+ return -ENODEV;
25321+ }
25322+#endif
25323 }
25324 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25325 t->iopl = level << 12;
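
Both sys_ioperm() and sys_iopl() now pass through the same two-stage gate: the stock CAP_SYS_RAWIO capability check, then (under CONFIG_GRKERNSEC_IO) a policy knob that can refuse raw port access outright, logging the attempt and returning -ENODEV as if the facility were absent. A toy model of just that ordering, with plain booleans standing in for capable() and the grsecurity state:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool cap_sys_rawio = true;	/* stand-in for capable(CAP_SYS_RAWIO) */
static bool disable_privio = true;	/* stand-in for grsec_disable_privio */

static int privio_gate(bool turn_on)
{
	if (turn_on && !cap_sys_rawio)
		return -EPERM;		/* ordinary permission failure */
	if (turn_on && disable_privio)
		return -ENODEV;		/* policy veto: the real patch logs via
					   gr_handle_ioperm() first */
	return 0;
}

int main(void)
{
	printf("gate(turn_on=1) = %d (-ENODEV is %d)\n", privio_gate(true), -ENODEV);
	return 0;
}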
25326diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25327index 705ef8d..8672c9d 100644
25328--- a/arch/x86/kernel/irq.c
25329+++ b/arch/x86/kernel/irq.c
25330@@ -22,7 +22,7 @@
25331 #define CREATE_TRACE_POINTS
25332 #include <asm/trace/irq_vectors.h>
25333
25334-atomic_t irq_err_count;
25335+atomic_unchecked_t irq_err_count;
25336
25337 /* Function pointer for generic interrupt vector handling */
25338 void (*x86_platform_ipi_callback)(void) = NULL;
25339@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25340 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25341 seq_puts(p, " Hypervisor callback interrupts\n");
25342 #endif
25343- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25344+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25345 #if defined(CONFIG_X86_IO_APIC)
25346- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25347+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25348 #endif
25349 return 0;
25350 }
25351@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25352
25353 u64 arch_irq_stat(void)
25354 {
25355- u64 sum = atomic_read(&irq_err_count);
25356+ u64 sum = atomic_read_unchecked(&irq_err_count);
25357 return sum;
25358 }
25359
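
irq_err_count switching from atomic_t to atomic_unchecked_t is part of the PaX REFCOUNT scheme introduced elsewhere in this patch: ordinary atomic_t increments are instrumented to trap on signed overflow (protecting reference counts), so counters that are pure statistics and may legitimately wrap must opt out via the _unchecked variants. A stand-alone model of the two behaviours; the names here are mine and the overflow test uses a GCC/Clang builtin, while the real kernel primitives are inline assembly:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int v; } atomic_model_t;

/* REFCOUNT-style increment: trap rather than let a refcount overflow. */
static void inc_checked(atomic_model_t *a)
{
	int r;
	if (__builtin_add_overflow(a->v, 1, &r))
		abort();	/* the kernel logs and contains it instead */
	a->v = r;
}

/* _unchecked increment: wraparound is expected and harmless for stats. */
static void inc_unchecked(atomic_model_t *a)
{
	a->v = (int)((unsigned)a->v + 1u);	/* defined via unsigned math */
}

int main(void)
{
	atomic_model_t stat = { INT_MAX };
	inc_unchecked(&stat);
	printf("unchecked counter wrapped to %d\n", stat.v);

	atomic_model_t ref = { INT_MAX };
	inc_checked(&ref);	/* aborts: a refcount must never get here */
	return 0;
}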
25360diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25361index 63ce838..2ea3e06 100644
25362--- a/arch/x86/kernel/irq_32.c
25363+++ b/arch/x86/kernel/irq_32.c
25364@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25365
25366 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25367
25368+extern void gr_handle_kernel_exploit(void);
25369+
25370 int sysctl_panic_on_stackoverflow __read_mostly;
25371
25372 /* Debugging check for stack overflow: is there less than 1KB free? */
25373@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25374 __asm__ __volatile__("andl %%esp,%0" :
25375 "=r" (sp) : "0" (THREAD_SIZE - 1));
25376
25377- return sp < (sizeof(struct thread_info) + STACK_WARN);
25378+ return sp < STACK_WARN;
25379 }
25380
25381 static void print_stack_overflow(void)
25382 {
25383 printk(KERN_WARNING "low stack detected by irq handler\n");
25384 dump_stack();
25385+ gr_handle_kernel_exploit();
25386 if (sysctl_panic_on_stackoverflow)
25387 panic("low stack detected by irq handler - check messages\n");
25388 }
25389@@ -84,10 +87,9 @@ static inline void *current_stack(void)
25390 static inline int
25391 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25392 {
25393- struct irq_stack *curstk, *irqstk;
25394+ struct irq_stack *irqstk;
25395 u32 *isp, *prev_esp, arg1, arg2;
25396
25397- curstk = (struct irq_stack *) current_stack();
25398 irqstk = __this_cpu_read(hardirq_stack);
25399
25400 /*
25401@@ -96,15 +98,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25402 * handler) we can't do that and just have to keep using the
25403 * current stack (which is the irq stack already after all)
25404 */
25405- if (unlikely(curstk == irqstk))
25406+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
25407 return 0;
25408
25409- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
25410+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
25411
25412 /* Save the next esp at the bottom of the stack */
25413 prev_esp = (u32 *)irqstk;
25414 *prev_esp = current_stack_pointer;
25415
25416+#ifdef CONFIG_PAX_MEMORY_UDEREF
25417+ __set_fs(MAKE_MM_SEG(0));
25418+#endif
25419+
25420 if (unlikely(overflow))
25421 call_on_stack(print_stack_overflow, isp);
25422
25423@@ -115,6 +121,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25424 : "0" (irq), "1" (desc), "2" (isp),
25425 "D" (desc->handle_irq)
25426 : "memory", "cc", "ecx");
25427+
25428+#ifdef CONFIG_PAX_MEMORY_UDEREF
25429+ __set_fs(current_thread_info()->addr_limit);
25430+#endif
25431+
25432 return 1;
25433 }
25434
25435@@ -123,32 +134,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25436 */
25437 void irq_ctx_init(int cpu)
25438 {
25439- struct irq_stack *irqstk;
25440-
25441 if (per_cpu(hardirq_stack, cpu))
25442 return;
25443
25444- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25445- THREADINFO_GFP,
25446- THREAD_SIZE_ORDER));
25447- per_cpu(hardirq_stack, cpu) = irqstk;
25448-
25449- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
25450- THREADINFO_GFP,
25451- THREAD_SIZE_ORDER));
25452- per_cpu(softirq_stack, cpu) = irqstk;
25453-
25454- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
25455- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
25456+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25457+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
25458 }
25459
25460 void do_softirq_own_stack(void)
25461 {
25462- struct thread_info *curstk;
25463 struct irq_stack *irqstk;
25464 u32 *isp, *prev_esp;
25465
25466- curstk = current_stack();
25467 irqstk = __this_cpu_read(softirq_stack);
25468
25469 /* build the stack frame on the softirq stack */
25470@@ -158,7 +155,16 @@ void do_softirq_own_stack(void)
25471 prev_esp = (u32 *)irqstk;
25472 *prev_esp = current_stack_pointer;
25473
25474+#ifdef CONFIG_PAX_MEMORY_UDEREF
25475+ __set_fs(MAKE_MM_SEG(0));
25476+#endif
25477+
25478 call_on_stack(__do_softirq, isp);
25479+
25480+#ifdef CONFIG_PAX_MEMORY_UDEREF
25481+ __set_fs(current_thread_info()->addr_limit);
25482+#endif
25483+
25484 }
25485
25486 bool handle_irq(unsigned irq, struct pt_regs *regs)
25487@@ -172,7 +178,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
25488 if (unlikely(!desc))
25489 return false;
25490
25491- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25492+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
25493 if (unlikely(overflow))
25494 print_stack_overflow();
25495 desc->handle_irq(irq, desc);
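
The UDEREF additions around the IRQ and softirq stack switches follow one discipline: while handler code runs on the per-cpu interrupt stack, the userland access window is collapsed with __set_fs(MAKE_MM_SEG(0)), and the interrupted task's own addr_limit is reinstated from its thread_info afterwards, so a stray userland dereference inside a handler faults instead of silently succeeding. A toy of that save-lower-restore pattern, with a plain variable standing in for the segment limit:

#include <stdio.h>
#include <stdint.h>

static uintptr_t addr_limit = 0xc0000000u;	/* task's usual user-space limit */

static void run_on_irq_stack(void (*fn)(void))
{
	uintptr_t task_limit = addr_limit;	/* held in thread_info in reality */

	addr_limit = 0;			/* __set_fs(MAKE_MM_SEG(0)): no user window */
	fn();
	addr_limit = task_limit;	/* __set_fs(...->addr_limit) on the way out */
}

static void handler(void)
{
	printf("inside handler, limit = %#lx\n", (unsigned long)addr_limit);
}

int main(void)
{
	run_on_irq_stack(handler);
	printf("after handler, limit = %#lx\n", (unsigned long)addr_limit);
	return 0;
}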
25496diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
25497index e4b503d..824fce8 100644
25498--- a/arch/x86/kernel/irq_64.c
25499+++ b/arch/x86/kernel/irq_64.c
25500@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
25501 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
25502 EXPORT_PER_CPU_SYMBOL(irq_regs);
25503
25504+extern void gr_handle_kernel_exploit(void);
25505+
25506 int sysctl_panic_on_stackoverflow;
25507
25508 /*
25509@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25510 u64 estack_top, estack_bottom;
25511 u64 curbase = (u64)task_stack_page(current);
25512
25513- if (user_mode_vm(regs))
25514+ if (user_mode(regs))
25515 return;
25516
25517 if (regs->sp >= curbase + sizeof(struct thread_info) +
25518@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
25519 irq_stack_top, irq_stack_bottom,
25520 estack_top, estack_bottom);
25521
25522+ gr_handle_kernel_exploit();
25523+
25524 if (sysctl_panic_on_stackoverflow)
25525 panic("low stack detected by irq handler - check messages\n");
25526 #endif
25527diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
25528index 26d5a55..a01160a 100644
25529--- a/arch/x86/kernel/jump_label.c
25530+++ b/arch/x86/kernel/jump_label.c
25531@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25532 * Jump label is enabled for the first time.
25533 * So we expect a default_nop...
25534 */
25535- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
25536+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
25537 != 0))
25538 bug_at((void *)entry->code, __LINE__);
25539 } else {
25540@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
25541 * ...otherwise expect an ideal_nop. Otherwise
25542 * something went horribly wrong.
25543 */
25544- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
25545+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
25546 != 0))
25547 bug_at((void *)entry->code, __LINE__);
25548 }
25549@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
25550 * are converting the default nop to the ideal nop.
25551 */
25552 if (init) {
25553- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
25554+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
25555 bug_at((void *)entry->code, __LINE__);
25556 } else {
25557 code.jump = 0xe9;
25558 code.offset = entry->target -
25559 (entry->code + JUMP_LABEL_NOP_SIZE);
25560- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
25561+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
25562 bug_at((void *)entry->code, __LINE__);
25563 }
25564 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
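
Every memcmp() in the jump-label hunk now reads through ktla_ktva(entry->code) rather than entry->code. Under KERNEXEC the kernel image is reachable at two addresses, the executable mapping and an alias used for reading and patching the bytes; ktla_ktva() translates a kernel text linear address to the alias, ktva_ktla() goes back, and both degenerate to the identity when the feature is off. A sketch of that pure address arithmetic, with a made-up offset:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical offset between the executable mapping of kernel text and
 * its alias; with KERNEXEC off both helpers are the identity. */
#define TEXT_ALIAS_OFFSET 0x10000000ul

static uintptr_t ktla_ktva(uintptr_t addr) { return addr + TEXT_ALIAS_OFFSET; }
static uintptr_t ktva_ktla(uintptr_t addr) { return addr - TEXT_ALIAS_OFFSET; }

int main(void)
{
	uintptr_t code  = 0xc1000000ul;		/* some kernel text address */
	uintptr_t alias = ktla_ktva(code);

	printf("text %#lx -> alias %#lx -> back %#lx\n",
	       (unsigned long)code, (unsigned long)alias,
	       (unsigned long)ktva_ktla(alias));
	return 0;
}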
25565diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
25566index 7ec1d5f..5a7d130 100644
25567--- a/arch/x86/kernel/kgdb.c
25568+++ b/arch/x86/kernel/kgdb.c
25569@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
25570 #ifdef CONFIG_X86_32
25571 switch (regno) {
25572 case GDB_SS:
25573- if (!user_mode_vm(regs))
25574+ if (!user_mode(regs))
25575 *(unsigned long *)mem = __KERNEL_DS;
25576 break;
25577 case GDB_SP:
25578- if (!user_mode_vm(regs))
25579+ if (!user_mode(regs))
25580 *(unsigned long *)mem = kernel_stack_pointer(regs);
25581 break;
25582 case GDB_GS:
25583@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
25584 bp->attr.bp_addr = breakinfo[breakno].addr;
25585 bp->attr.bp_len = breakinfo[breakno].len;
25586 bp->attr.bp_type = breakinfo[breakno].type;
25587- info->address = breakinfo[breakno].addr;
25588+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
25589+ info->address = ktla_ktva(breakinfo[breakno].addr);
25590+ else
25591+ info->address = breakinfo[breakno].addr;
25592 info->len = breakinfo[breakno].len;
25593 info->type = breakinfo[breakno].type;
25594 val = arch_install_hw_breakpoint(bp);
25595@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
25596 case 'k':
25597 /* clear the trace bit */
25598 linux_regs->flags &= ~X86_EFLAGS_TF;
25599- atomic_set(&kgdb_cpu_doing_single_step, -1);
25600+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
25601
25602 /* set the trace bit if we're stepping */
25603 if (remcomInBuffer[0] == 's') {
25604 linux_regs->flags |= X86_EFLAGS_TF;
25605- atomic_set(&kgdb_cpu_doing_single_step,
25606+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
25607 raw_smp_processor_id());
25608 }
25609
25610@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
25611
25612 switch (cmd) {
25613 case DIE_DEBUG:
25614- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
25615+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
25616 if (user_mode(regs))
25617 return single_step_cont(regs, args);
25618 break;
25619@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25620 #endif /* CONFIG_DEBUG_RODATA */
25621
25622 bpt->type = BP_BREAKPOINT;
25623- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
25624+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
25625 BREAK_INSTR_SIZE);
25626 if (err)
25627 return err;
25628- err = probe_kernel_write((char *)bpt->bpt_addr,
25629+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25630 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
25631 #ifdef CONFIG_DEBUG_RODATA
25632 if (!err)
25633@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
25634 return -EBUSY;
25635 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
25636 BREAK_INSTR_SIZE);
25637- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25638+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25639 if (err)
25640 return err;
25641 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
25642@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
25643 if (mutex_is_locked(&text_mutex))
25644 goto knl_write;
25645 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
25646- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
25647+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
25648 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
25649 goto knl_write;
25650 return err;
25651 knl_write:
25652 #endif /* CONFIG_DEBUG_RODATA */
25653- return probe_kernel_write((char *)bpt->bpt_addr,
25654+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
25655 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
25656 }
25657
25658diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
25659index 98f654d..ac04352 100644
25660--- a/arch/x86/kernel/kprobes/core.c
25661+++ b/arch/x86/kernel/kprobes/core.c
25662@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
25663 s32 raddr;
25664 } __packed *insn;
25665
25666- insn = (struct __arch_relative_insn *)from;
25667+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
25668+
25669+ pax_open_kernel();
25670 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
25671 insn->op = op;
25672+ pax_close_kernel();
25673 }
25674
25675 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
25676@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
25677 kprobe_opcode_t opcode;
25678 kprobe_opcode_t *orig_opcodes = opcodes;
25679
25680- if (search_exception_tables((unsigned long)opcodes))
25681+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
25682 return 0; /* Page fault may occur on this address. */
25683
25684 retry:
25685@@ -242,9 +245,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
25686 * for the first byte, we can recover the original instruction
25687 * from it and kp->opcode.
25688 */
25689- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25690+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
25691 buf[0] = kp->opcode;
25692- return (unsigned long)buf;
25693+ return ktva_ktla((unsigned long)buf);
25694 }
25695
25696 /*
25697@@ -338,7 +341,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25698 /* Another subsystem puts a breakpoint, failed to recover */
25699 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
25700 return 0;
25701+ pax_open_kernel();
25702 memcpy(dest, insn.kaddr, insn.length);
25703+ pax_close_kernel();
25704
25705 #ifdef CONFIG_X86_64
25706 if (insn_rip_relative(&insn)) {
25707@@ -365,7 +370,9 @@ int __copy_instruction(u8 *dest, u8 *src)
25708 return 0;
25709 }
25710 disp = (u8 *) dest + insn_offset_displacement(&insn);
25711+ pax_open_kernel();
25712 *(s32 *) disp = (s32) newdisp;
25713+ pax_close_kernel();
25714 }
25715 #endif
25716 return insn.length;
25717@@ -507,7 +514,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25718 * nor set current_kprobe, because it doesn't use single
25719 * stepping.
25720 */
25721- regs->ip = (unsigned long)p->ainsn.insn;
25722+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25723 preempt_enable_no_resched();
25724 return;
25725 }
25726@@ -524,9 +531,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
25727 regs->flags &= ~X86_EFLAGS_IF;
25728 /* single step inline if the instruction is an int3 */
25729 if (p->opcode == BREAKPOINT_INSTRUCTION)
25730- regs->ip = (unsigned long)p->addr;
25731+ regs->ip = ktla_ktva((unsigned long)p->addr);
25732 else
25733- regs->ip = (unsigned long)p->ainsn.insn;
25734+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
25735 }
25736 NOKPROBE_SYMBOL(setup_singlestep);
25737
25738@@ -576,7 +583,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25739 struct kprobe *p;
25740 struct kprobe_ctlblk *kcb;
25741
25742- if (user_mode_vm(regs))
25743+ if (user_mode(regs))
25744 return 0;
25745
25746 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
25747@@ -611,7 +618,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
25748 setup_singlestep(p, regs, kcb, 0);
25749 return 1;
25750 }
25751- } else if (*addr != BREAKPOINT_INSTRUCTION) {
25752+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
25753 /*
25754 * The breakpoint instruction was removed right
25755 * after we hit it. Another cpu has removed
25756@@ -658,6 +665,9 @@ static void __used kretprobe_trampoline_holder(void)
25757 " movq %rax, 152(%rsp)\n"
25758 RESTORE_REGS_STRING
25759 " popfq\n"
25760+#ifdef KERNEXEC_PLUGIN
25761+ " btsq $63,(%rsp)\n"
25762+#endif
25763 #else
25764 " pushf\n"
25765 SAVE_REGS_STRING
25766@@ -798,7 +808,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
25767 struct kprobe_ctlblk *kcb)
25768 {
25769 unsigned long *tos = stack_addr(regs);
25770- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
25771+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
25772 unsigned long orig_ip = (unsigned long)p->addr;
25773 kprobe_opcode_t *insn = p->ainsn.insn;
25774
25775@@ -981,7 +991,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
25776 struct die_args *args = data;
25777 int ret = NOTIFY_DONE;
25778
25779- if (args->regs && user_mode_vm(args->regs))
25780+ if (args->regs && user_mode(args->regs))
25781 return ret;
25782
25783 if (val == DIE_GPF) {
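
One hunk above, __copy_instruction() recomputes the displacement after copying a RIP-relative instruction into the out-of-line buffer: the absolute target has to stay fixed, so the copy's displacement becomes the old one adjusted by the distance between source and destination. The invariant in a few self-checking lines (addresses invented for the example):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t src  = 0xffffffff81000000ull;	/* original instruction */
	uint64_t dest = 0xffffffffa0000000ull;	/* out-of-line copy buffer */
	uint64_t len  = 7;			/* instruction length */
	int32_t  disp = 0x1234;			/* RIP-relative displacement */

	/* The target is relative to the *next* instruction's address. */
	uint64_t target = src + len + (int64_t)disp;

	/* Choose newdisp so the copy still reaches the same target. */
	int32_t newdisp = (int32_t)(int64_t)(target - (dest + len));

	assert(dest + len + (int64_t)newdisp == target);
	printf("disp %#x -> newdisp %#x, same target %#llx\n",
	       (uint32_t)disp, (uint32_t)newdisp, (unsigned long long)target);
	return 0;
}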
25784diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
25785index 7c523bb..01b051b 100644
25786--- a/arch/x86/kernel/kprobes/opt.c
25787+++ b/arch/x86/kernel/kprobes/opt.c
25788@@ -79,6 +79,7 @@ found:
25789 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
25790 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25791 {
25792+ pax_open_kernel();
25793 #ifdef CONFIG_X86_64
25794 *addr++ = 0x48;
25795 *addr++ = 0xbf;
25796@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
25797 *addr++ = 0xb8;
25798 #endif
25799 *(unsigned long *)addr = val;
25800+ pax_close_kernel();
25801 }
25802
25803 asm (
25804@@ -339,7 +341,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25805 * Verify if the address gap is in 2GB range, because this uses
25806 * a relative jump.
25807 */
25808- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
25809+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
25810 if (abs(rel) > 0x7fffffff) {
25811 __arch_remove_optimized_kprobe(op, 0);
25812 return -ERANGE;
25813@@ -356,16 +358,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
25814 op->optinsn.size = ret;
25815
25816 /* Copy arch-dep-instance from template */
25817- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
25818+ pax_open_kernel();
25819+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
25820+ pax_close_kernel();
25821
25822 /* Set probe information */
25823 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
25824
25825 /* Set probe function call */
25826- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
25827+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
25828
25829 /* Set returning jmp instruction at the tail of out-of-line buffer */
25830- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
25831+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
25832 (u8 *)op->kp.addr + op->optinsn.size);
25833
25834 flush_icache_range((unsigned long) buf,
25835@@ -390,7 +394,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
25836 WARN_ON(kprobe_disabled(&op->kp));
25837
25838 /* Backup instructions which will be replaced by jump address */
25839- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
25840+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
25841 RELATIVE_ADDR_SIZE);
25842
25843 insn_buf[0] = RELATIVEJUMP_OPCODE;
25844@@ -438,7 +442,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
25845 /* This kprobe is really able to run optimized path. */
25846 op = container_of(p, struct optimized_kprobe, kp);
25847 /* Detour through copied instructions */
25848- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
25849+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
25850 if (!reenter)
25851 reset_current_kprobe();
25852 preempt_enable_no_resched();
25853diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
25854index c2bedae..25e7ab60 100644
25855--- a/arch/x86/kernel/ksysfs.c
25856+++ b/arch/x86/kernel/ksysfs.c
25857@@ -184,7 +184,7 @@ out:
25858
25859 static struct kobj_attribute type_attr = __ATTR_RO(type);
25860
25861-static struct bin_attribute data_attr = {
25862+static bin_attribute_no_const data_attr __read_only = {
25863 .attr = {
25864 .name = "data",
25865 .mode = S_IRUGO,
25866diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
25867index c37886d..d851d32 100644
25868--- a/arch/x86/kernel/ldt.c
25869+++ b/arch/x86/kernel/ldt.c
25870@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
25871 if (reload) {
25872 #ifdef CONFIG_SMP
25873 preempt_disable();
25874- load_LDT(pc);
25875+ load_LDT_nolock(pc);
25876 if (!cpumask_equal(mm_cpumask(current->mm),
25877 cpumask_of(smp_processor_id())))
25878 smp_call_function(flush_ldt, current->mm, 1);
25879 preempt_enable();
25880 #else
25881- load_LDT(pc);
25882+ load_LDT_nolock(pc);
25883 #endif
25884 }
25885 if (oldsize) {
25886@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
25887 return err;
25888
25889 for (i = 0; i < old->size; i++)
25890- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
25891+ write_ldt_entry(new->ldt, i, old->ldt + i);
25892 return 0;
25893 }
25894
25895@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
25896 retval = copy_ldt(&mm->context, &old_mm->context);
25897 mutex_unlock(&old_mm->context.lock);
25898 }
25899+
25900+ if (tsk == current) {
25901+ mm->context.vdso = 0;
25902+
25903+#ifdef CONFIG_X86_32
25904+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25905+ mm->context.user_cs_base = 0UL;
25906+ mm->context.user_cs_limit = ~0UL;
25907+
25908+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
25909+ cpus_clear(mm->context.cpu_user_cs_mask);
25910+#endif
25911+
25912+#endif
25913+#endif
25914+
25915+ }
25916+
25917 return retval;
25918 }
25919
25920@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
25921 }
25922 }
25923
25924+#ifdef CONFIG_PAX_SEGMEXEC
25925+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
25926+ error = -EINVAL;
25927+ goto out_unlock;
25928+ }
25929+#endif
25930+
25931 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
25932 error = -EINVAL;
25933 goto out_unlock;
25934diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
25935index 469b23d..5449cfe 100644
25936--- a/arch/x86/kernel/machine_kexec_32.c
25937+++ b/arch/x86/kernel/machine_kexec_32.c
25938@@ -26,7 +26,7 @@
25939 #include <asm/cacheflush.h>
25940 #include <asm/debugreg.h>
25941
25942-static void set_idt(void *newidt, __u16 limit)
25943+static void set_idt(struct desc_struct *newidt, __u16 limit)
25944 {
25945 struct desc_ptr curidt;
25946
25947@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
25948 }
25949
25950
25951-static void set_gdt(void *newgdt, __u16 limit)
25952+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
25953 {
25954 struct desc_ptr curgdt;
25955
25956@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
25957 }
25958
25959 control_page = page_address(image->control_code_page);
25960- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
25961+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
25962
25963 relocate_kernel_ptr = control_page;
25964 page_list[PA_CONTROL_PAGE] = __pa(control_page);
25965diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
25966index 94ea120..4154cea 100644
25967--- a/arch/x86/kernel/mcount_64.S
25968+++ b/arch/x86/kernel/mcount_64.S
25969@@ -7,7 +7,7 @@
25970 #include <linux/linkage.h>
25971 #include <asm/ptrace.h>
25972 #include <asm/ftrace.h>
25973-
25974+#include <asm/alternative-asm.h>
25975
25976 .code64
25977 .section .entry.text, "ax"
25978@@ -148,8 +148,9 @@
25979 #ifdef CONFIG_DYNAMIC_FTRACE
25980
25981 ENTRY(function_hook)
25982+ pax_force_retaddr
25983 retq
25984-END(function_hook)
25985+ENDPROC(function_hook)
25986
25987 ENTRY(ftrace_caller)
25988 /* save_mcount_regs fills in first two parameters */
25989@@ -181,8 +182,9 @@ GLOBAL(ftrace_graph_call)
25990 #endif
25991
25992 GLOBAL(ftrace_stub)
25993+ pax_force_retaddr
25994 retq
25995-END(ftrace_caller)
25996+ENDPROC(ftrace_caller)
25997
25998 ENTRY(ftrace_regs_caller)
25999 /* Save the current flags before any operations that can change them */
26000@@ -253,7 +255,7 @@ GLOBAL(ftrace_regs_caller_end)
26001
26002 jmp ftrace_return
26003
26004-END(ftrace_regs_caller)
26005+ENDPROC(ftrace_regs_caller)
26006
26007
26008 #else /* ! CONFIG_DYNAMIC_FTRACE */
26009@@ -272,18 +274,20 @@ fgraph_trace:
26010 #endif
26011
26012 GLOBAL(ftrace_stub)
26013+ pax_force_retaddr
26014 retq
26015
26016 trace:
26017 /* save_mcount_regs fills in first two parameters */
26018 save_mcount_regs
26019
26020+ pax_force_fptr ftrace_trace_function
26021 call *ftrace_trace_function
26022
26023 restore_mcount_regs
26024
26025 jmp fgraph_trace
26026-END(function_hook)
26027+ENDPROC(function_hook)
26028 #endif /* CONFIG_DYNAMIC_FTRACE */
26029 #endif /* CONFIG_FUNCTION_TRACER */
26030
26031@@ -305,8 +309,9 @@ ENTRY(ftrace_graph_caller)
26032
26033 restore_mcount_regs
26034
26035+ pax_force_retaddr
26036 retq
26037-END(ftrace_graph_caller)
26038+ENDPROC(ftrace_graph_caller)
26039
26040 GLOBAL(return_to_handler)
26041 subq $24, %rsp
26042@@ -322,5 +327,7 @@ GLOBAL(return_to_handler)
26043 movq 8(%rsp), %rdx
26044 movq (%rsp), %rax
26045 addq $24, %rsp
26046+ pax_force_fptr %rdi
26047 jmp *%rdi
26048+ENDPROC(return_to_handler)
26049 #endif
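
The pax_force_retaddr inserted before each retq is where the KERNEXEC plugin's return-address protection shows up in hand-written assembly: btsq $63,(%rsp) sets bit 63 of the saved return address. Kernel text already lives with that bit set, so the honest case is unchanged, while a forged userland address becomes non-canonical and the retq faults. The same masking in a couple of lines of C:

#include <stdio.h>
#include <stdint.h>

static uint64_t force_retaddr(uint64_t ra) { return ra | (1ull << 63); }

int main(void)
{
	uint64_t kernel_ra = 0xffffffff81234567ull;	/* bit 63 already set */
	uint64_t forged_ra = 0x0000000000401000ull;	/* userland target */

	printf("%#llx -> %#llx (kernel address: unchanged)\n",
	       (unsigned long long)kernel_ra,
	       (unsigned long long)force_retaddr(kernel_ra));
	printf("%#llx -> %#llx (now non-canonical: faults on ret)\n",
	       (unsigned long long)forged_ra,
	       (unsigned long long)force_retaddr(forged_ra));
	return 0;
}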
26050diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
26051index e69f988..72902b7 100644
26052--- a/arch/x86/kernel/module.c
26053+++ b/arch/x86/kernel/module.c
26054@@ -81,17 +81,62 @@ static unsigned long int get_module_load_offset(void)
26055 }
26056 #endif
26057
26058-void *module_alloc(unsigned long size)
26059+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
26060 {
26061- if (PAGE_ALIGN(size) > MODULES_LEN)
26062+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
26063 return NULL;
26064 return __vmalloc_node_range(size, 1,
26065 MODULES_VADDR + get_module_load_offset(),
26066- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
26067- PAGE_KERNEL_EXEC, NUMA_NO_NODE,
26068+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
26069+ prot, NUMA_NO_NODE,
26070 __builtin_return_address(0));
26071 }
26072
26073+void *module_alloc(unsigned long size)
26074+{
26075+
26076+#ifdef CONFIG_PAX_KERNEXEC
26077+ return __module_alloc(size, PAGE_KERNEL);
26078+#else
26079+ return __module_alloc(size, PAGE_KERNEL_EXEC);
26080+#endif
26081+
26082+}
26083+
26084+#ifdef CONFIG_PAX_KERNEXEC
26085+#ifdef CONFIG_X86_32
26086+void *module_alloc_exec(unsigned long size)
26087+{
26088+ struct vm_struct *area;
26089+
26090+ if (size == 0)
26091+ return NULL;
26092+
26093+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
26094+	return area ? area->addr : NULL;
26095+}
26096+EXPORT_SYMBOL(module_alloc_exec);
26097+
26098+void module_memfree_exec(void *module_region)
26099+{
26100+ vunmap(module_region);
26101+}
26102+EXPORT_SYMBOL(module_memfree_exec);
26103+#else
26104+void module_memfree_exec(void *module_region)
26105+{
26106+ module_memfree(module_region);
26107+}
26108+EXPORT_SYMBOL(module_memfree_exec);
26109+
26110+void *module_alloc_exec(unsigned long size)
26111+{
26112+ return __module_alloc(size, PAGE_KERNEL_RX);
26113+}
26114+EXPORT_SYMBOL(module_alloc_exec);
26115+#endif
26116+#endif
26117+
26118 #ifdef CONFIG_X86_32
26119 int apply_relocate(Elf32_Shdr *sechdrs,
26120 const char *strtab,
26121@@ -102,14 +147,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26122 unsigned int i;
26123 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26124 Elf32_Sym *sym;
26125- uint32_t *location;
26126+ uint32_t *plocation, location;
26127
26128 DEBUGP("Applying relocate section %u to %u\n",
26129 relsec, sechdrs[relsec].sh_info);
26130 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26131 /* This is where to make the change */
26132- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26133- + rel[i].r_offset;
26134+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26135+ location = (uint32_t)plocation;
26136+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26137+ plocation = ktla_ktva((void *)plocation);
26138 /* This is the symbol it is referring to. Note that all
26139 undefined symbols have been resolved. */
26140 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26141@@ -118,11 +165,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26142 switch (ELF32_R_TYPE(rel[i].r_info)) {
26143 case R_386_32:
26144 /* We add the value into the location given */
26145- *location += sym->st_value;
26146+ pax_open_kernel();
26147+ *plocation += sym->st_value;
26148+ pax_close_kernel();
26149 break;
26150 case R_386_PC32:
26151 /* Add the value, subtract its position */
26152- *location += sym->st_value - (uint32_t)location;
26153+ pax_open_kernel();
26154+ *plocation += sym->st_value - location;
26155+ pax_close_kernel();
26156 break;
26157 default:
26158 pr_err("%s: Unknown relocation: %u\n",
26159@@ -167,21 +218,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26160 case R_X86_64_NONE:
26161 break;
26162 case R_X86_64_64:
26163+ pax_open_kernel();
26164 *(u64 *)loc = val;
26165+ pax_close_kernel();
26166 break;
26167 case R_X86_64_32:
26168+ pax_open_kernel();
26169 *(u32 *)loc = val;
26170+ pax_close_kernel();
26171 if (val != *(u32 *)loc)
26172 goto overflow;
26173 break;
26174 case R_X86_64_32S:
26175+ pax_open_kernel();
26176 *(s32 *)loc = val;
26177+ pax_close_kernel();
26178 if ((s64)val != *(s32 *)loc)
26179 goto overflow;
26180 break;
26181 case R_X86_64_PC32:
26182 val -= (u64)loc;
26183+ pax_open_kernel();
26184 *(u32 *)loc = val;
26185+ pax_close_kernel();
26186+
26187 #if 0
26188 if ((s64)val != *(s32 *)loc)
26189 goto overflow;
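
The reworked 32-bit apply_relocate() above separates where to write (plocation, routed through ktla_ktva() for executable sections) from the value P used in the arithmetic (location). The two relocation kinds it handles are the standard ELF formulas: R_386_32 stores A + S, R_386_PC32 stores A + S - P, where A is the addend already in place, S the resolved symbol value, and P the address being patched. Worked out with concrete numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t S = 0xc1020304u;	/* sym->st_value after resolution */
	uint32_t P = 0xc1001000u;	/* address of the word being patched */
	uint32_t abs_word = 0x10u;		/* in-place addend A for R_386_32 */
	uint32_t pc_word  = 0xfffffffcu;	/* A = -4, typical for call/jmp */

	abs_word += S;			/* R_386_32:   A + S     */
	pc_word  += S - P;		/* R_386_PC32: A + S - P */

	printf("R_386_32   -> %#x\n", abs_word);
	printf("R_386_PC32 -> %#x\n", pc_word);
	return 0;
}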
26190diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26191index 113e707..0a690e1 100644
26192--- a/arch/x86/kernel/msr.c
26193+++ b/arch/x86/kernel/msr.c
26194@@ -39,6 +39,7 @@
26195 #include <linux/notifier.h>
26196 #include <linux/uaccess.h>
26197 #include <linux/gfp.h>
26198+#include <linux/grsecurity.h>
26199
26200 #include <asm/processor.h>
26201 #include <asm/msr.h>
26202@@ -105,6 +106,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26203 int err = 0;
26204 ssize_t bytes = 0;
26205
26206+#ifdef CONFIG_GRKERNSEC_KMEM
26207+ gr_handle_msr_write();
26208+ return -EPERM;
26209+#endif
26210+
26211 if (count % 8)
26212 return -EINVAL; /* Invalid chunk size */
26213
26214@@ -152,6 +158,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26215 err = -EBADF;
26216 break;
26217 }
26218+#ifdef CONFIG_GRKERNSEC_KMEM
26219+ gr_handle_msr_write();
26220+ return -EPERM;
26221+#endif
26222 if (copy_from_user(&regs, uregs, sizeof regs)) {
26223 err = -EFAULT;
26224 break;
26225@@ -235,7 +245,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26226 return notifier_from_errno(err);
26227 }
26228
26229-static struct notifier_block __refdata msr_class_cpu_notifier = {
26230+static struct notifier_block msr_class_cpu_notifier = {
26231 .notifier_call = msr_class_cpu_callback,
26232 };
26233
26234diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26235index c3e985d..110a36a 100644
26236--- a/arch/x86/kernel/nmi.c
26237+++ b/arch/x86/kernel/nmi.c
26238@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26239
26240 static void nmi_max_handler(struct irq_work *w)
26241 {
26242- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26243+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26244 int remainder_ns, decimal_msecs;
26245- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26246+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26247
26248 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26249 decimal_msecs = remainder_ns / 1000;
26250
26251 printk_ratelimited(KERN_INFO
26252 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26253- a->handler, whole_msecs, decimal_msecs);
26254+ n->action->handler, whole_msecs, decimal_msecs);
26255 }
26256
26257 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26258@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26259 delta = sched_clock() - delta;
26260 trace_nmi_handler(a->handler, (int)delta, thishandled);
26261
26262- if (delta < nmi_longest_ns || delta < a->max_duration)
26263+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26264 continue;
26265
26266- a->max_duration = delta;
26267- irq_work_queue(&a->irq_work);
26268+ a->work->max_duration = delta;
26269+ irq_work_queue(&a->work->irq_work);
26270 }
26271
26272 rcu_read_unlock();
26273@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26274 }
26275 NOKPROBE_SYMBOL(nmi_handle);
26276
26277-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26278+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26279 {
26280 struct nmi_desc *desc = nmi_to_desc(type);
26281 unsigned long flags;
26282@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26283 if (!action->handler)
26284 return -EINVAL;
26285
26286- init_irq_work(&action->irq_work, nmi_max_handler);
26287+ action->work->action = action;
26288+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26289
26290 spin_lock_irqsave(&desc->lock, flags);
26291
26292@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26293 * event confuses some handlers (kdump uses this flag)
26294 */
26295 if (action->flags & NMI_FLAG_FIRST)
26296- list_add_rcu(&action->list, &desc->head);
26297+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26298 else
26299- list_add_tail_rcu(&action->list, &desc->head);
26300+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26301
26302 spin_unlock_irqrestore(&desc->lock, flags);
26303 return 0;
26304@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26305 if (!strcmp(n->name, name)) {
26306 WARN(in_nmi(),
26307 "Trying to free NMI (%s) from NMI context!\n", n->name);
26308- list_del_rcu(&n->list);
26309+ pax_list_del_rcu((struct list_head *)&n->list);
26310 break;
26311 }
26312 }
26313@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26314 dotraplinkage notrace void
26315 do_nmi(struct pt_regs *regs, long error_code)
26316 {
26317+
26318+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26319+ if (!user_mode(regs)) {
26320+ unsigned long cs = regs->cs & 0xFFFF;
26321+ unsigned long ip = ktva_ktla(regs->ip);
26322+
26323+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26324+ regs->ip = ip;
26325+ }
26326+#endif
26327+
26328 nmi_nesting_preprocess(regs);
26329
26330 nmi_enter();
26331diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26332index 6d9582e..f746287 100644
26333--- a/arch/x86/kernel/nmi_selftest.c
26334+++ b/arch/x86/kernel/nmi_selftest.c
26335@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26336 {
26337 /* trap all the unknown NMIs we may generate */
26338 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26339- __initdata);
26340+ __initconst);
26341 }
26342
26343 static void __init cleanup_nmi_testsuite(void)
26344@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26345 unsigned long timeout;
26346
26347 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26348- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26349+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26350 nmi_fail = FAILURE;
26351 return;
26352 }
26353diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26354index bbb6c73..24a58ef 100644
26355--- a/arch/x86/kernel/paravirt-spinlocks.c
26356+++ b/arch/x86/kernel/paravirt-spinlocks.c
26357@@ -8,7 +8,7 @@
26358
26359 #include <asm/paravirt.h>
26360
26361-struct pv_lock_ops pv_lock_ops = {
26362+struct pv_lock_ops pv_lock_ops __read_only = {
26363 #ifdef CONFIG_SMP
26364 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26365 .unlock_kick = paravirt_nop,
26366diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26367index 548d25f..f8fb99c 100644
26368--- a/arch/x86/kernel/paravirt.c
26369+++ b/arch/x86/kernel/paravirt.c
26370@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26371 {
26372 return x;
26373 }
26374+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26375+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26376+#endif
26377
26378 void __init default_banner(void)
26379 {
26380@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26381
26382 if (opfunc == NULL)
26383 /* If there's no function, patch it with a ud2a (BUG) */
26384- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26385- else if (opfunc == _paravirt_nop)
26386+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26387+ else if (opfunc == (void *)_paravirt_nop)
26388 /* If the operation is a nop, then nop the callsite */
26389 ret = paravirt_patch_nop();
26390
26391 /* identity functions just return their single argument */
26392- else if (opfunc == _paravirt_ident_32)
26393+ else if (opfunc == (void *)_paravirt_ident_32)
26394 ret = paravirt_patch_ident_32(insnbuf, len);
26395- else if (opfunc == _paravirt_ident_64)
26396+ else if (opfunc == (void *)_paravirt_ident_64)
26397 ret = paravirt_patch_ident_64(insnbuf, len);
26398+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26399+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26400+ ret = paravirt_patch_ident_64(insnbuf, len);
26401+#endif
26402
26403 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26404 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26405@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26406 if (insn_len > len || start == NULL)
26407 insn_len = len;
26408 else
26409- memcpy(insnbuf, start, insn_len);
26410+ memcpy(insnbuf, ktla_ktva(start), insn_len);
26411
26412 return insn_len;
26413 }
26414@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26415 return this_cpu_read(paravirt_lazy_mode);
26416 }
26417
26418-struct pv_info pv_info = {
26419+struct pv_info pv_info __read_only = {
26420 .name = "bare hardware",
26421 .paravirt_enabled = 0,
26422 .kernel_rpl = 0,
26423@@ -311,16 +318,16 @@ struct pv_info pv_info = {
26424 #endif
26425 };
26426
26427-struct pv_init_ops pv_init_ops = {
26428+struct pv_init_ops pv_init_ops __read_only = {
26429 .patch = native_patch,
26430 };
26431
26432-struct pv_time_ops pv_time_ops = {
26433+struct pv_time_ops pv_time_ops __read_only = {
26434 .sched_clock = native_sched_clock,
26435 .steal_clock = native_steal_clock,
26436 };
26437
26438-__visible struct pv_irq_ops pv_irq_ops = {
26439+__visible struct pv_irq_ops pv_irq_ops __read_only = {
26440 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
26441 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
26442 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
26443@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
26444 #endif
26445 };
26446
26447-__visible struct pv_cpu_ops pv_cpu_ops = {
26448+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
26449 .cpuid = native_cpuid,
26450 .get_debugreg = native_get_debugreg,
26451 .set_debugreg = native_set_debugreg,
26452@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
26453 NOKPROBE_SYMBOL(native_set_debugreg);
26454 NOKPROBE_SYMBOL(native_load_idt);
26455
26456-struct pv_apic_ops pv_apic_ops = {
26457+struct pv_apic_ops pv_apic_ops __read_only = {
26458 #ifdef CONFIG_X86_LOCAL_APIC
26459 .startup_ipi_hook = paravirt_nop,
26460 #endif
26461 };
26462
26463-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
26464+#ifdef CONFIG_X86_32
26465+#ifdef CONFIG_X86_PAE
26466+/* 64-bit pagetable entries */
26467+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
26468+#else
26469 /* 32-bit pagetable entries */
26470 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
26471+#endif
26472 #else
26473 /* 64-bit pagetable entries */
26474 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
26475 #endif
26476
26477-struct pv_mmu_ops pv_mmu_ops = {
26478+struct pv_mmu_ops pv_mmu_ops __read_only = {
26479
26480 .read_cr2 = native_read_cr2,
26481 .write_cr2 = native_write_cr2,
26482@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
26483 .make_pud = PTE_IDENT,
26484
26485 .set_pgd = native_set_pgd,
26486+ .set_pgd_batched = native_set_pgd_batched,
26487 #endif
26488 #endif /* PAGETABLE_LEVELS >= 3 */
26489
26490@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
26491 },
26492
26493 .set_fixmap = native_set_fixmap,
26494+
26495+#ifdef CONFIG_PAX_KERNEXEC
26496+ .pax_open_kernel = native_pax_open_kernel,
26497+ .pax_close_kernel = native_pax_close_kernel,
26498+#endif
26499+
26500 };
26501
26502 EXPORT_SYMBOL_GPL(pv_time_ops);
26503diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
26504index a1da673..b6f5831 100644
26505--- a/arch/x86/kernel/paravirt_patch_64.c
26506+++ b/arch/x86/kernel/paravirt_patch_64.c
26507@@ -9,7 +9,11 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
26508 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
26509 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
26510 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
26511+
26512+#ifndef CONFIG_PAX_MEMORY_UDEREF
26513 DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
26514+#endif
26515+
26516 DEF_NATIVE(pv_cpu_ops, clts, "clts");
26517 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
26518
26519@@ -57,7 +61,11 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
26520 PATCH_SITE(pv_mmu_ops, read_cr3);
26521 PATCH_SITE(pv_mmu_ops, write_cr3);
26522 PATCH_SITE(pv_cpu_ops, clts);
26523+
26524+#ifndef CONFIG_PAX_MEMORY_UDEREF
26525 PATCH_SITE(pv_mmu_ops, flush_tlb_single);
26526+#endif
26527+
26528 PATCH_SITE(pv_cpu_ops, wbinvd);
26529
26530 patch_site:
26531diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
26532index 0497f71..7186c0d 100644
26533--- a/arch/x86/kernel/pci-calgary_64.c
26534+++ b/arch/x86/kernel/pci-calgary_64.c
26535@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
26536 tce_space = be64_to_cpu(readq(target));
26537 tce_space = tce_space & TAR_SW_BITS;
26538
26539- tce_space = tce_space & (~specified_table_size);
26540+ tce_space = tce_space & (~(unsigned long)specified_table_size);
26541 info->tce_space = (u64 *)__va(tce_space);
26542 }
26543 }
26544diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
26545index 35ccf75..7a15747 100644
26546--- a/arch/x86/kernel/pci-iommu_table.c
26547+++ b/arch/x86/kernel/pci-iommu_table.c
26548@@ -2,7 +2,7 @@
26549 #include <asm/iommu_table.h>
26550 #include <linux/string.h>
26551 #include <linux/kallsyms.h>
26552-
26553+#include <linux/sched.h>
26554
26555 #define DEBUG 1
26556
26557diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
26558index 77dd0ad..9ec4723 100644
26559--- a/arch/x86/kernel/pci-swiotlb.c
26560+++ b/arch/x86/kernel/pci-swiotlb.c
26561@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
26562 struct dma_attrs *attrs)
26563 {
26564 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
26565- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
26566+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
26567 else
26568 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
26569 }
26570diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
26571index e127dda..94e384d 100644
26572--- a/arch/x86/kernel/process.c
26573+++ b/arch/x86/kernel/process.c
26574@@ -36,7 +36,8 @@
26575 * section. Since TSS's are completely CPU-local, we want them
26576 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
26577 */
26578-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
26579+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
26580+EXPORT_SYMBOL(init_tss);
26581
26582 #ifdef CONFIG_X86_64
26583 static DEFINE_PER_CPU(unsigned char, is_idle);
26584@@ -94,7 +95,7 @@ void arch_task_cache_init(void)
26585 task_xstate_cachep =
26586 kmem_cache_create("task_xstate", xstate_size,
26587 __alignof__(union thread_xstate),
26588- SLAB_PANIC | SLAB_NOTRACK, NULL);
26589+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
26590 setup_xstate_comp();
26591 }
26592
26593@@ -108,7 +109,7 @@ void exit_thread(void)
26594 unsigned long *bp = t->io_bitmap_ptr;
26595
26596 if (bp) {
26597- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
26598+ struct tss_struct *tss = init_tss + get_cpu();
26599
26600 t->io_bitmap_ptr = NULL;
26601 clear_thread_flag(TIF_IO_BITMAP);
26602@@ -128,6 +129,9 @@ void flush_thread(void)
26603 {
26604 struct task_struct *tsk = current;
26605
26606+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
26607+ loadsegment(gs, 0);
26608+#endif
26609 flush_ptrace_hw_breakpoint(tsk);
26610 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
26611 drop_init_fpu(tsk);
26612@@ -274,7 +278,7 @@ static void __exit_idle(void)
26613 void exit_idle(void)
26614 {
26615 /* idle loop has pid 0 */
26616- if (current->pid)
26617+ if (task_pid_nr(current))
26618 return;
26619 __exit_idle();
26620 }
26621@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
26622 return ret;
26623 }
26624 #endif
26625-void stop_this_cpu(void *dummy)
26626+__noreturn void stop_this_cpu(void *dummy)
26627 {
26628 local_irq_disable();
26629 /*
26630@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
26631 }
26632 early_param("idle", idle_setup);
26633
26634-unsigned long arch_align_stack(unsigned long sp)
26635+#ifdef CONFIG_PAX_RANDKSTACK
26636+void pax_randomize_kstack(struct pt_regs *regs)
26637 {
26638- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
26639- sp -= get_random_int() % 8192;
26640- return sp & ~0xf;
26641-}
26642+ struct thread_struct *thread = &current->thread;
26643+ unsigned long time;
26644
26645-unsigned long arch_randomize_brk(struct mm_struct *mm)
26646-{
26647- unsigned long range_end = mm->brk + 0x02000000;
26648- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
26649-}
26650+ if (!randomize_va_space)
26651+ return;
26652+
26653+ if (v8086_mode(regs))
26654+ return;
26655
26656+ rdtscl(time);
26657+
26658+ /* P4 seems to return a 0 LSB, ignore it */
26659+#ifdef CONFIG_MPENTIUM4
26660+ time &= 0x3EUL;
26661+ time <<= 2;
26662+#elif defined(CONFIG_X86_64)
26663+ time &= 0xFUL;
26664+ time <<= 4;
26665+#else
26666+ time &= 0x1FUL;
26667+ time <<= 3;
26668+#endif
26669+
26670+ thread->sp0 ^= time;
26671+ load_sp0(init_tss + smp_processor_id(), thread);
26672+
26673+#ifdef CONFIG_X86_64
26674+ this_cpu_write(kernel_stack, thread->sp0);
26675+#endif
26676+}
26677+#endif
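
pax_randomize_kstack() above draws its entropy from the low TSC bits and XORs them into thread->sp0 on the way back to userland, so the kernel stack top shifts a little on every entry. The masks keep the perturbation aligned and bounded: 4 bits at 16-byte granularity on x86-64 (0..240 bytes), 5 bits at 8-byte granularity on i386, and the Pentium 4 path skips the TSC LSB, which that CPU reportedly keeps at zero. The bit arithmetic on its own:

#include <stdio.h>

int main(void)
{
	unsigned long tsc = 0x1234567f;	/* pretend low word of rdtsc */

	unsigned long x64 = (tsc & 0xF)  << 4;	/* 16 slots, 16 bytes apart */
	unsigned long x32 = (tsc & 0x1F) << 3;	/* 32 slots,  8 bytes apart */
	unsigned long p4  = (tsc & 0x3E) << 2;	/* bits 1..5, 8-byte aligned */

	printf("x86-64 perturbation: %3lu (max 240)\n", x64);
	printf("i386   perturbation: %3lu (max 248)\n", x32);
	printf("P4     perturbation: %3lu (max 248)\n", p4);
	return 0;
}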
26678diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
26679index 8f3ebfe..cbc731b 100644
26680--- a/arch/x86/kernel/process_32.c
26681+++ b/arch/x86/kernel/process_32.c
26682@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
26683 unsigned long thread_saved_pc(struct task_struct *tsk)
26684 {
26685 return ((unsigned long *)tsk->thread.sp)[3];
26686+//XXX return tsk->thread.eip;
26687 }
26688
26689 void __show_regs(struct pt_regs *regs, int all)
26690@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
26691 unsigned long sp;
26692 unsigned short ss, gs;
26693
26694- if (user_mode_vm(regs)) {
26695+ if (user_mode(regs)) {
26696 sp = regs->sp;
26697 ss = regs->ss & 0xffff;
26698- gs = get_user_gs(regs);
26699 } else {
26700 sp = kernel_stack_pointer(regs);
26701 savesegment(ss, ss);
26702- savesegment(gs, gs);
26703 }
26704+ gs = get_user_gs(regs);
26705
26706 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
26707 (u16)regs->cs, regs->ip, regs->flags,
26708- smp_processor_id());
26709+ raw_smp_processor_id());
26710 print_symbol("EIP is at %s\n", regs->ip);
26711
26712 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
26713@@ -132,21 +132,22 @@ void release_thread(struct task_struct *dead_task)
26714 int copy_thread(unsigned long clone_flags, unsigned long sp,
26715 unsigned long arg, struct task_struct *p)
26716 {
26717- struct pt_regs *childregs = task_pt_regs(p);
26718+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
26719 struct task_struct *tsk;
26720 int err;
26721
26722 p->thread.sp = (unsigned long) childregs;
26723 p->thread.sp0 = (unsigned long) (childregs+1);
26724+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26725 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26726
26727 if (unlikely(p->flags & PF_KTHREAD)) {
26728 /* kernel thread */
26729 memset(childregs, 0, sizeof(struct pt_regs));
26730 p->thread.ip = (unsigned long) ret_from_kernel_thread;
26731- task_user_gs(p) = __KERNEL_STACK_CANARY;
26732- childregs->ds = __USER_DS;
26733- childregs->es = __USER_DS;
26734+ savesegment(gs, childregs->gs);
26735+ childregs->ds = __KERNEL_DS;
26736+ childregs->es = __KERNEL_DS;
26737 childregs->fs = __KERNEL_PERCPU;
26738 childregs->bx = sp; /* function */
26739 childregs->bp = arg;
26740@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26741 struct thread_struct *prev = &prev_p->thread,
26742 *next = &next_p->thread;
26743 int cpu = smp_processor_id();
26744- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26745+ struct tss_struct *tss = init_tss + cpu;
26746 fpu_switch_t fpu;
26747
26748 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
26749@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26750 */
26751 lazy_save_gs(prev->gs);
26752
26753+#ifdef CONFIG_PAX_MEMORY_UDEREF
26754+ __set_fs(task_thread_info(next_p)->addr_limit);
26755+#endif
26756+
26757 /*
26758 * Load the per-thread Thread-Local Storage descriptor.
26759 */
26760@@ -310,9 +315,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26761 */
26762 arch_end_context_switch(next_p);
26763
26764- this_cpu_write(kernel_stack,
26765- (unsigned long)task_stack_page(next_p) +
26766- THREAD_SIZE - KERNEL_STACK_OFFSET);
26767+ this_cpu_write(current_task, next_p);
26768+ this_cpu_write(current_tinfo, &next_p->tinfo);
26769+ this_cpu_write(kernel_stack, next->sp0);
26770
26771 /*
26772 * Restore %gs if needed (which is common)
26773@@ -322,8 +327,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26774
26775 switch_fpu_finish(next_p, fpu);
26776
26777- this_cpu_write(current_task, next_p);
26778-
26779 return prev_p;
26780 }
26781
26782@@ -353,4 +356,3 @@ unsigned long get_wchan(struct task_struct *p)
26783 } while (count++ < 16);
26784 return 0;
26785 }
26786-
26787diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
26788index 5a2c029..ec8611d 100644
26789--- a/arch/x86/kernel/process_64.c
26790+++ b/arch/x86/kernel/process_64.c
26791@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26792 struct pt_regs *childregs;
26793 struct task_struct *me = current;
26794
26795- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
26796+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
26797 childregs = task_pt_regs(p);
26798 p->thread.sp = (unsigned long) childregs;
26799 p->thread.usersp = me->thread.usersp;
26800+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p) + 2 * sizeof(unsigned long);
26801 set_tsk_thread_flag(p, TIF_FORK);
26802 p->thread.io_bitmap_ptr = NULL;
26803
26804@@ -171,6 +172,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
26805 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
26806 savesegment(es, p->thread.es);
26807 savesegment(ds, p->thread.ds);
26808+ savesegment(ss, p->thread.ss);
26809+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
26810 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
26811
26812 if (unlikely(p->flags & PF_KTHREAD)) {
26813@@ -277,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26814 struct thread_struct *prev = &prev_p->thread;
26815 struct thread_struct *next = &next_p->thread;
26816 int cpu = smp_processor_id();
26817- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26818+ struct tss_struct *tss = init_tss + cpu;
26819 unsigned fsindex, gsindex;
26820 fpu_switch_t fpu;
26821
26822@@ -331,6 +334,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26823 if (unlikely(next->ds | prev->ds))
26824 loadsegment(ds, next->ds);
26825
26826+ savesegment(ss, prev->ss);
26827+ if (unlikely(next->ss != prev->ss))
26828+ loadsegment(ss, next->ss);
26829+
26830 /*
26831 * Switch FS and GS.
26832 *
26833@@ -404,6 +411,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26834 prev->usersp = this_cpu_read(old_rsp);
26835 this_cpu_write(old_rsp, next->usersp);
26836 this_cpu_write(current_task, next_p);
26837+ this_cpu_write(current_tinfo, &next_p->tinfo);
26838
26839 /*
26840 * If it were not for PREEMPT_ACTIVE we could guarantee that the
26841@@ -413,9 +421,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
26842 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
26843 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
26844
26845- this_cpu_write(kernel_stack,
26846- (unsigned long)task_stack_page(next_p) +
26847- THREAD_SIZE - KERNEL_STACK_OFFSET);
26848+ this_cpu_write(kernel_stack, next->sp0);
26849
26850 /*
26851 * Now maybe reload the debug registers and handle I/O bitmaps
26852@@ -485,12 +491,11 @@ unsigned long get_wchan(struct task_struct *p)
26853 if (!p || p == current || p->state == TASK_RUNNING)
26854 return 0;
26855 stack = (unsigned long)task_stack_page(p);
26856- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
26857+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
26858 return 0;
26859 fp = *(u64 *)(p->thread.sp);
26860 do {
26861- if (fp < (unsigned long)stack ||
26862- fp >= (unsigned long)stack+THREAD_SIZE)
26863+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
26864 return 0;
26865 ip = *(u64 *)(fp+8);
26866 if (!in_sched_functions(ip))
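
A minimal sketch of the tightened bounds get_wchan() uses above, assuming a 16 KiB stack; the 16-byte pad at the top is the room this patch reserves above pt_regs:

#include <assert.h>

static int sp_in_stack(unsigned long sp, unsigned long stack,
                       unsigned long thread_size)
{
        /* the saved frame pointer (a u64) must fit below the pad */
        return sp >= stack &&
               sp <= stack + thread_size - 16 - sizeof(unsigned long long);
}

int main(void)
{
        /* 16K stack at 0x10000: the top 24 bytes are out of bounds */
        assert(sp_in_stack(0x10000, 0x10000, 16384));
        assert(!sp_in_stack(0x10000 + 16384 - 8, 0x10000, 16384));
        return 0;
}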
26867diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
26868index e510618..5165ac0 100644
26869--- a/arch/x86/kernel/ptrace.c
26870+++ b/arch/x86/kernel/ptrace.c
26871@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
26872 unsigned long sp = (unsigned long)&regs->sp;
26873 u32 *prev_esp;
26874
26875- if (context == (sp & ~(THREAD_SIZE - 1)))
26876+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
26877 return sp;
26878
26879- prev_esp = (u32 *)(context);
26880+ prev_esp = *(u32 **)(context);
26881 if (prev_esp)
26882 return (unsigned long)prev_esp;
26883
26884@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
26885 if (child->thread.gs != value)
26886 return do_arch_prctl(child, ARCH_SET_GS, value);
26887 return 0;
26888+
26889+ case offsetof(struct user_regs_struct,ip):
26890+ /*
26891+ * Protect against any attempt to set ip to an
26892+ * impossible address. There are dragons lurking if the
26893+ * address is noncanonical. (This explicitly allows
26894+ * setting ip to TASK_SIZE_MAX, because user code can do
26895+ * that all by itself by running off the end of its
26896+ * address space.
26897+ */
26898+ if (value > TASK_SIZE_MAX)
26899+ return -EIO;
26900+ break;
26901+
26902 #endif
26903 }
26904
26905@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
26906 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
26907 {
26908 int i;
26909- int dr7 = 0;
26910+ unsigned long dr7 = 0;
26911 struct arch_hw_breakpoint *info;
26912
26913 for (i = 0; i < HBP_NUM; i++) {
26914@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
26915 unsigned long addr, unsigned long data)
26916 {
26917 int ret;
26918- unsigned long __user *datap = (unsigned long __user *)data;
26919+ unsigned long __user *datap = (__force unsigned long __user *)data;
26920
26921 switch (request) {
26922 /* read the word at location addr in the USER area. */
26923@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
26924 if ((int) addr < 0)
26925 return -EIO;
26926 ret = do_get_thread_area(child, addr,
26927- (struct user_desc __user *)data);
26928+ (__force struct user_desc __user *) data);
26929 break;
26930
26931 case PTRACE_SET_THREAD_AREA:
26932 if ((int) addr < 0)
26933 return -EIO;
26934 ret = do_set_thread_area(child, addr,
26935- (struct user_desc __user *)data, 0);
26936+ (__force struct user_desc __user *) data, 0);
26937 break;
26938 #endif
26939
26940@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
26941
26942 #ifdef CONFIG_X86_64
26943
26944-static struct user_regset x86_64_regsets[] __read_mostly = {
26945+static user_regset_no_const x86_64_regsets[] __read_only = {
26946 [REGSET_GENERAL] = {
26947 .core_note_type = NT_PRSTATUS,
26948 .n = sizeof(struct user_regs_struct) / sizeof(long),
26949@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
26950 #endif /* CONFIG_X86_64 */
26951
26952 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
26953-static struct user_regset x86_32_regsets[] __read_mostly = {
26954+static user_regset_no_const x86_32_regsets[] __read_only = {
26955 [REGSET_GENERAL] = {
26956 .core_note_type = NT_PRSTATUS,
26957 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
26958@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
26959 */
26960 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
26961
26962-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26963+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
26964 {
26965 #ifdef CONFIG_X86_64
26966 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
26967@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
26968 memset(info, 0, sizeof(*info));
26969 info->si_signo = SIGTRAP;
26970 info->si_code = si_code;
26971- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
26972+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
26973 }
26974
26975 void user_single_step_siginfo(struct task_struct *tsk,
26976@@ -1455,6 +1469,10 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
26977 }
26978 }
26979
26980+#ifdef CONFIG_GRKERNSEC_SETXID
26981+extern void gr_delayed_cred_worker(void);
26982+#endif
26983+
26984 /*
26985 * We can return 0 to resume the syscall or anything else to go to phase
26986 * 2. If we resume the syscall, we need to put something appropriate in
26987@@ -1562,6 +1580,11 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
26988
26989 BUG_ON(regs != task_pt_regs(current));
26990
26991+#ifdef CONFIG_GRKERNSEC_SETXID
26992+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
26993+ gr_delayed_cred_worker();
26994+#endif
26995+
26996 /*
26997 * If we stepped into a sysenter/syscall insn, it trapped in
26998 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
26999@@ -1620,6 +1643,11 @@ void syscall_trace_leave(struct pt_regs *regs)
27000 */
27001 user_exit();
27002
27003+#ifdef CONFIG_GRKERNSEC_SETXID
27004+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
27005+ gr_delayed_cred_worker();
27006+#endif
27007+
27008 audit_syscall_exit(regs);
27009
27010 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
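
A sketch of the ip validation added to putreg() above; the TASK_SIZE_MAX value is illustrative (x86-64 with 4-level paging), and -EIO mirrors the hunk's return code:

#include <assert.h>
#include <errno.h>

#define TASK_SIZE_MAX 0x00007ffffffff000UL

static int validate_ip(unsigned long value)
{
        /* values above TASK_SIZE_MAX would be noncanonical kernel-side;
         * TASK_SIZE_MAX itself stays legal, since user code can reach
         * it by running off the end of its address space */
        if (value > TASK_SIZE_MAX)
                return -EIO;
        return 0;
}

int main(void)
{
        assert(validate_ip(TASK_SIZE_MAX) == 0);
        assert(validate_ip(0xffffffff81000000UL) == -EIO);
        return 0;
}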
27011diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
27012index 2f355d2..e75ed0a 100644
27013--- a/arch/x86/kernel/pvclock.c
27014+++ b/arch/x86/kernel/pvclock.c
27015@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
27016 reset_hung_task_detector();
27017 }
27018
27019-static atomic64_t last_value = ATOMIC64_INIT(0);
27020+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
27021
27022 void pvclock_resume(void)
27023 {
27024- atomic64_set(&last_value, 0);
27025+ atomic64_set_unchecked(&last_value, 0);
27026 }
27027
27028 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
27029@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
27030 * updating at the same time, and one of them could be slightly behind,
27031 * making the assumption that last_value always goes forward fail to hold.
27032 */
27033- last = atomic64_read(&last_value);
27034+ last = atomic64_read_unchecked(&last_value);
27035 do {
27036 if (ret < last)
27037 return last;
27038- last = atomic64_cmpxchg(&last_value, last, ret);
27039+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
27040 } while (unlikely(last != ret));
27041
27042 return ret;
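
The loop above keeps last_value monotonic under concurrent updates. A userspace sketch with C11 atomics standing in for atomic64_cmpxchg_unchecked:

#include <stdatomic.h>

static _Atomic unsigned long long last_value;

static unsigned long long monotonic_read(unsigned long long ret)
{
        unsigned long long last = atomic_load(&last_value);

        do {
                if (ret < last)
                        return last;    /* another CPU saw a later time */
                /* try to publish ret; on failure last is reloaded */
        } while (!atomic_compare_exchange_weak(&last_value, &last, ret));

        return ret;
}

int main(void)
{
        return monotonic_read(42) == 42 ? 0 : 1;
}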
27043diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
27044index 86db4bc..531675b 100644
27045--- a/arch/x86/kernel/reboot.c
27046+++ b/arch/x86/kernel/reboot.c
27047@@ -70,6 +70,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
27048
27049 void __noreturn machine_real_restart(unsigned int type)
27050 {
27051+
27052+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
27053+ struct desc_struct *gdt;
27054+#endif
27055+
27056 local_irq_disable();
27057
27058 /*
27059@@ -97,7 +102,29 @@ void __noreturn machine_real_restart(unsigned int type)
27060
27061 /* Jump to the identity-mapped low memory code */
27062 #ifdef CONFIG_X86_32
27063- asm volatile("jmpl *%0" : :
27064+
27065+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
27066+ gdt = get_cpu_gdt_table(smp_processor_id());
27067+ pax_open_kernel();
27068+#ifdef CONFIG_PAX_MEMORY_UDEREF
27069+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
27070+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
27071+ loadsegment(ds, __KERNEL_DS);
27072+ loadsegment(es, __KERNEL_DS);
27073+ loadsegment(ss, __KERNEL_DS);
27074+#endif
27075+#ifdef CONFIG_PAX_KERNEXEC
27076+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
27077+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
27078+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
27079+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
27080+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
27081+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
27082+#endif
27083+ pax_close_kernel();
27084+#endif
27085+
27086+ asm volatile("ljmpl *%0" : :
27087 "rm" (real_mode_header->machine_real_restart_asm),
27088 "a" (type));
27089 #else
27090@@ -511,7 +538,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
27091 * This means that this function can never return; it can misbehave
27092 * by not rebooting properly and hanging.
27093 */
27094-static void native_machine_emergency_restart(void)
27095+static void __noreturn native_machine_emergency_restart(void)
27096 {
27097 int i;
27098 int attempt = 0;
27099@@ -631,13 +658,13 @@ void native_machine_shutdown(void)
27100 #endif
27101 }
27102
27103-static void __machine_emergency_restart(int emergency)
27104+static void __noreturn __machine_emergency_restart(int emergency)
27105 {
27106 reboot_emergency = emergency;
27107 machine_ops.emergency_restart();
27108 }
27109
27110-static void native_machine_restart(char *__unused)
27111+static void __noreturn native_machine_restart(char *__unused)
27112 {
27113 pr_notice("machine restart\n");
27114
27115@@ -646,7 +673,7 @@ static void native_machine_restart(char *__unused)
27116 __machine_emergency_restart(0);
27117 }
27118
27119-static void native_machine_halt(void)
27120+static void __noreturn native_machine_halt(void)
27121 {
27122 /* Stop other cpus and apics */
27123 machine_shutdown();
27124@@ -656,7 +683,7 @@ static void native_machine_halt(void)
27125 stop_this_cpu(NULL);
27126 }
27127
27128-static void native_machine_power_off(void)
27129+static void __noreturn native_machine_power_off(void)
27130 {
27131 if (pm_power_off) {
27132 if (!reboot_force)
27133@@ -665,9 +692,10 @@ static void native_machine_power_off(void)
27134 }
27135 /* A fallback in case there is no PM info available */
27136 tboot_shutdown(TB_SHUTDOWN_HALT);
27137+ unreachable();
27138 }
27139
27140-struct machine_ops machine_ops = {
27141+struct machine_ops machine_ops __read_only = {
27142 .power_off = native_machine_power_off,
27143 .shutdown = native_machine_shutdown,
27144 .emergency_restart = native_machine_emergency_restart,
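
A sketch of the __noreturn pattern the hunk applies to the power-off and restart paths; unreachable() maps to __builtin_unreachable() here, and the exit() fallback is illustrative:

#include <stdlib.h>

__attribute__((noreturn)) static void machine_power_off(void)
{
        /* preferred platform method would go here ... */

        exit(0);        /* fallback shutdown, like tboot_shutdown() */

        /* tells the compiler the fallback does not fall through, so
         * the noreturn promise above can be verified */
        __builtin_unreachable();
}

int main(void)
{
        machine_power_off();
}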
27145diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27146index c8e41e9..64049ef 100644
27147--- a/arch/x86/kernel/reboot_fixups_32.c
27148+++ b/arch/x86/kernel/reboot_fixups_32.c
27149@@ -57,7 +57,7 @@ struct device_fixup {
27150 unsigned int vendor;
27151 unsigned int device;
27152 void (*reboot_fixup)(struct pci_dev *);
27153-};
27154+} __do_const;
27155
27156 /*
27157 * PCI ids solely used for fixups_table go here
27158diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27159index 3fd2c69..a444264 100644
27160--- a/arch/x86/kernel/relocate_kernel_64.S
27161+++ b/arch/x86/kernel/relocate_kernel_64.S
27162@@ -96,8 +96,7 @@ relocate_kernel:
27163
27164 /* jump to identity mapped page */
27165 addq $(identity_mapped - relocate_kernel), %r8
27166- pushq %r8
27167- ret
27168+ jmp *%r8
27169
27170 identity_mapped:
27171 /* set return address to 0 if not preserving context */
27172diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27173index ab4734e..c4ca0eb 100644
27174--- a/arch/x86/kernel/setup.c
27175+++ b/arch/x86/kernel/setup.c
27176@@ -110,6 +110,7 @@
27177 #include <asm/mce.h>
27178 #include <asm/alternative.h>
27179 #include <asm/prom.h>
27180+#include <asm/boot.h>
27181
27182 /*
27183 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27184@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
27185 #endif
27186
27187
27188-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27189-__visible unsigned long mmu_cr4_features;
27190+#ifdef CONFIG_X86_64
27191+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27192+#elif defined(CONFIG_X86_PAE)
27193+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27194 #else
27195-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27196+__visible unsigned long mmu_cr4_features __read_only;
27197 #endif
27198
27199+void set_in_cr4(unsigned long mask)
27200+{
27201+ unsigned long cr4 = read_cr4();
27202+
27203+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
27204+ return;
27205+
27206+ pax_open_kernel();
27207+ mmu_cr4_features |= mask;
27208+ pax_close_kernel();
27209+
27210+ if (trampoline_cr4_features)
27211+ *trampoline_cr4_features = mmu_cr4_features;
27212+ cr4 |= mask;
27213+ write_cr4(cr4);
27214+}
27215+EXPORT_SYMBOL(set_in_cr4);
27216+
27217+void clear_in_cr4(unsigned long mask)
27218+{
27219+ unsigned long cr4 = read_cr4();
27220+
27221+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
27222+ return;
27223+
27224+ pax_open_kernel();
27225+ mmu_cr4_features &= ~mask;
27226+ pax_close_kernel();
27227+
27228+ if (trampoline_cr4_features)
27229+ *trampoline_cr4_features = mmu_cr4_features;
27230+ cr4 &= ~mask;
27231+ write_cr4(cr4);
27232+}
27233+EXPORT_SYMBOL(clear_in_cr4);
27234+
27235 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27236 int bootloader_type, bootloader_version;
27237
27238@@ -772,7 +811,7 @@ static void __init trim_bios_range(void)
27239 * area (640->1Mb) as ram even though it is not.
27240 * take them out.
27241 */
27242- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27243+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27244
27245 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27246 }
27247@@ -780,7 +819,7 @@ static void __init trim_bios_range(void)
27248 /* called before trim_bios_range() to spare extra sanitize */
27249 static void __init e820_add_kernel_range(void)
27250 {
27251- u64 start = __pa_symbol(_text);
27252+ u64 start = __pa_symbol(ktla_ktva(_text));
27253 u64 size = __pa_symbol(_end) - start;
27254
27255 /*
27256@@ -856,8 +895,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27257
27258 void __init setup_arch(char **cmdline_p)
27259 {
27260+#ifdef CONFIG_X86_32
27261+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27262+#else
27263 memblock_reserve(__pa_symbol(_text),
27264 (unsigned long)__bss_stop - (unsigned long)_text);
27265+#endif
27266
27267 early_reserve_initrd();
27268
27269@@ -955,16 +998,16 @@ void __init setup_arch(char **cmdline_p)
27270
27271 if (!boot_params.hdr.root_flags)
27272 root_mountflags &= ~MS_RDONLY;
27273- init_mm.start_code = (unsigned long) _text;
27274- init_mm.end_code = (unsigned long) _etext;
27275+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27276+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27277 init_mm.end_data = (unsigned long) _edata;
27278 init_mm.brk = _brk_end;
27279
27280 mpx_mm_init(&init_mm);
27281
27282- code_resource.start = __pa_symbol(_text);
27283- code_resource.end = __pa_symbol(_etext)-1;
27284- data_resource.start = __pa_symbol(_etext);
27285+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27286+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27287+ data_resource.start = __pa_symbol(_sdata);
27288 data_resource.end = __pa_symbol(_edata)-1;
27289 bss_resource.start = __pa_symbol(__bss_start);
27290 bss_resource.end = __pa_symbol(__bss_stop)-1;
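
The helpers above keep a read-only shadow (mmu_cr4_features) in sync with CR4 so secondary CPUs and the resume trampoline can be programmed identically. A minimal sketch with a plain variable standing in for the register:

#include <assert.h>

static unsigned long cr4_hw;            /* stands in for the real CR4 */
static unsigned long cr4_shadow;        /* mmu_cr4_features analogue */

static void set_in_cr4(unsigned long mask)
{
        unsigned long cr4 = cr4_hw;

        if ((cr4 & mask) == mask && cr4 == cr4_shadow)
                return;                 /* already set and in sync */

        cr4_shadow |= mask;             /* kernel does this between
                                         * pax_open/close_kernel() */
        cr4 |= mask;
        cr4_hw = cr4;                   /* write_cr4() analogue */
}

int main(void)
{
        set_in_cr4(1UL << 7);           /* e.g. X86_CR4_PGE */
        assert(cr4_hw == cr4_shadow);
        return 0;
}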
27291diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27292index e4fcb87..9c06c55 100644
27293--- a/arch/x86/kernel/setup_percpu.c
27294+++ b/arch/x86/kernel/setup_percpu.c
27295@@ -21,19 +21,17 @@
27296 #include <asm/cpu.h>
27297 #include <asm/stackprotector.h>
27298
27299-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27300+#ifdef CONFIG_SMP
27301+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27302 EXPORT_PER_CPU_SYMBOL(cpu_number);
27303+#endif
27304
27305-#ifdef CONFIG_X86_64
27306 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27307-#else
27308-#define BOOT_PERCPU_OFFSET 0
27309-#endif
27310
27311 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27312 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27313
27314-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27315+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27316 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27317 };
27318 EXPORT_SYMBOL(__per_cpu_offset);
27319@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27320 {
27321 #ifdef CONFIG_NEED_MULTIPLE_NODES
27322 pg_data_t *last = NULL;
27323- unsigned int cpu;
27324+ int cpu;
27325
27326 for_each_possible_cpu(cpu) {
27327 int node = early_cpu_to_node(cpu);
27328@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27329 {
27330 #ifdef CONFIG_X86_32
27331 struct desc_struct gdt;
27332+ unsigned long base = per_cpu_offset(cpu);
27333
27334- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27335- 0x2 | DESCTYPE_S, 0x8);
27336- gdt.s = 1;
27337+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27338+ 0x83 | DESCTYPE_S, 0xC);
27339 write_gdt_entry(get_cpu_gdt_table(cpu),
27340 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27341 #endif
27342@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27343 /* alrighty, percpu areas up and running */
27344 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27345 for_each_possible_cpu(cpu) {
27346+#ifdef CONFIG_CC_STACKPROTECTOR
27347+#ifdef CONFIG_X86_32
27348+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27349+#endif
27350+#endif
27351 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27352 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27353 per_cpu(cpu_number, cpu) = cpu;
27354@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27355 */
27356 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27357 #endif
27358+#ifdef CONFIG_CC_STACKPROTECTOR
27359+#ifdef CONFIG_X86_32
27360+ if (!cpu)
27361+ per_cpu(stack_canary.canary, cpu) = canary;
27362+#endif
27363+#endif
27364 /*
27365 * Up to this point, the boot CPU has been using .init.data
27366 * area. Reload any changed state for the boot CPU.
27367diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27368index ed37a76..39f936e 100644
27369--- a/arch/x86/kernel/signal.c
27370+++ b/arch/x86/kernel/signal.c
27371@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27372 * Align the stack pointer according to the i386 ABI,
27373 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27374 */
27375- sp = ((sp + 4) & -16ul) - 4;
27376+ sp = ((sp - 12) & -16ul) - 4;
27377 #else /* !CONFIG_X86_32 */
27378 sp = round_down(sp, 16) - 8;
27379 #endif
27380@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27381 }
27382
27383 if (current->mm->context.vdso)
27384- restorer = current->mm->context.vdso +
27385- selected_vdso32->sym___kernel_sigreturn;
27386+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27387 else
27388- restorer = &frame->retcode;
27389+ restorer = (void __user *)&frame->retcode;
27390 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27391 restorer = ksig->ka.sa.sa_restorer;
27392
27393@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27394 * reasons and because gdb uses it as a signature to notice
27395 * signal handler stack frames.
27396 */
27397- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27398+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27399
27400 if (err)
27401 return -EFAULT;
27402@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27403 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27404
27405 /* Set up to return from userspace. */
27406- restorer = current->mm->context.vdso +
27407- selected_vdso32->sym___kernel_rt_sigreturn;
27408+ if (current->mm->context.vdso)
27409+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
27410+ else
27411+ restorer = (void __user *)&frame->retcode;
27412 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27413 restorer = ksig->ka.sa.sa_restorer;
27414 put_user_ex(restorer, &frame->pretcode);
27415@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27416 * reasons and because gdb uses it as a signature to notice
27417 * signal handler stack frames.
27418 */
27419- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27420+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27421 } put_user_catch(err);
27422
27423 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
27424@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27425 {
27426 int usig = signr_convert(ksig->sig);
27427 sigset_t *set = sigmask_to_save();
27428- compat_sigset_t *cset = (compat_sigset_t *) set;
27429+ sigset_t sigcopy;
27430+ compat_sigset_t *cset;
27431+
27432+ sigcopy = *set;
27433+
27434+ cset = (compat_sigset_t *) &sigcopy;
27435
27436 /* Set up the stack frame */
27437 if (is_ia32_frame()) {
27438@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27439 } else if (is_x32_frame()) {
27440 return x32_setup_rt_frame(ksig, cset, regs);
27441 } else {
27442- return __setup_rt_frame(ksig->sig, ksig, set, regs);
27443+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
27444 }
27445 }
27446
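
The alignment change above can be checked by hand: the i386 ABI wants ((sp + 4) & 15) == 0 on handler entry, and the new formula additionally guarantees the frame lands strictly below the original sp. A small self-check:

#include <assert.h>

/* i386 ABI: on function entry ((sp + 4) & 15) == 0, i.e. sp % 16 == 12 */
static unsigned long align_sigframe(unsigned long sp)
{
        return ((sp - 12) & -16UL) - 4; /* patched form */
}

int main(void)
{
        unsigned long sp = 0x100c;      /* already ABI-aligned */

        /* the old form ((sp + 4) & -16UL) - 4 would return sp itself
         * here; the patched form always lands strictly below sp */
        assert(align_sigframe(sp) == 0xffc);
        assert((align_sigframe(sp) + 4) % 16 == 0);
        return 0;
}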
27447diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
27448index be8e1bd..a3d93fa 100644
27449--- a/arch/x86/kernel/smp.c
27450+++ b/arch/x86/kernel/smp.c
27451@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
27452
27453 __setup("nonmi_ipi", nonmi_ipi_setup);
27454
27455-struct smp_ops smp_ops = {
27456+struct smp_ops smp_ops __read_only = {
27457 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
27458 .smp_prepare_cpus = native_smp_prepare_cpus,
27459 .smp_cpus_done = native_smp_cpus_done,
27460diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
27461index 6d7022c..4feb6be 100644
27462--- a/arch/x86/kernel/smpboot.c
27463+++ b/arch/x86/kernel/smpboot.c
27464@@ -194,14 +194,17 @@ static void notrace start_secondary(void *unused)
27465
27466 enable_start_cpu0 = 0;
27467
27468-#ifdef CONFIG_X86_32
27469+ /* otherwise gcc will move up smp_processor_id before the cpu_init */
27470+ barrier();
27471+
27472 /* switch away from the initial page table */
27473+#ifdef CONFIG_PAX_PER_CPU_PGD
27474+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
27475+#else
27476 load_cr3(swapper_pg_dir);
27477+#endif
27478 __flush_tlb_all();
27479-#endif
27480
27481- /* otherwise gcc will move up smp_processor_id before the cpu_init */
27482- barrier();
27483 /*
27484 * Check TSC synchronization with the BP:
27485 */
27486@@ -765,8 +768,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27487 alternatives_enable_smp();
27488
27489 idle->thread.sp = (unsigned long) (((struct pt_regs *)
27490- (THREAD_SIZE + task_stack_page(idle))) - 1);
27491+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
27492 per_cpu(current_task, cpu) = idle;
27493+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27494
27495 #ifdef CONFIG_X86_32
27496 /* Stack for startup_32 can be just as for start_secondary onwards */
27497@@ -775,10 +779,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
27498 clear_tsk_thread_flag(idle, TIF_FORK);
27499 initial_gs = per_cpu_offset(cpu);
27500 #endif
27501- per_cpu(kernel_stack, cpu) =
27502- (unsigned long)task_stack_page(idle) -
27503- KERNEL_STACK_OFFSET + THREAD_SIZE;
27504+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27505+ pax_open_kernel();
27506 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
27507+ pax_close_kernel();
27508 initial_code = (unsigned long)start_secondary;
27509 stack_start = idle->thread.sp;
27510
27511@@ -918,6 +922,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
27512 /* the FPU context is blank, nobody can own it */
27513 __cpu_disable_lazy_restore(cpu);
27514
27515+#ifdef CONFIG_PAX_PER_CPU_PGD
27516+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
27517+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27518+ KERNEL_PGD_PTRS);
27519+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
27520+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27521+ KERNEL_PGD_PTRS);
27522+#endif
27523+
27524 err = do_boot_cpu(apicid, cpu, tidle);
27525 if (err) {
27526 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
27527diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
27528index 9b4d51d..5d28b58 100644
27529--- a/arch/x86/kernel/step.c
27530+++ b/arch/x86/kernel/step.c
27531@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27532 struct desc_struct *desc;
27533 unsigned long base;
27534
27535- seg &= ~7UL;
27536+ seg >>= 3;
27537
27538 mutex_lock(&child->mm->context.lock);
27539- if (unlikely((seg >> 3) >= child->mm->context.size))
27540+ if (unlikely(seg >= child->mm->context.size))
27541 addr = -1L; /* bogus selector, access would fault */
27542 else {
27543 desc = child->mm->context.ldt + seg;
27544@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27545 addr += base;
27546 }
27547 mutex_unlock(&child->mm->context.lock);
27548- }
27549+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
27550+ addr = ktla_ktva(addr);
27551
27552 return addr;
27553 }
27554@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
27555 unsigned char opcode[15];
27556 unsigned long addr = convert_ip_to_linear(child, regs);
27557
27558+ if (addr == -EINVAL)
27559+ return 0;
27560+
27561 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
27562 for (i = 0; i < copied; i++) {
27563 switch (opcode[i]) {
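
The selector arithmetic change above computes the same descriptor index either way; a self-check (selector 0x37 = index 6, table bit set, RPL 3):

#include <assert.h>

int main(void)
{
        unsigned short seg = 0x37;      /* index 6, LDT, RPL 3 */

        /* a selector's low 3 bits are the RPL and table bit */
        assert((seg & ~7U) == 6 * 8);   /* byte offset into the table */
        assert((seg >> 3) == 6);        /* index into a typed array of
                                         * 8-byte descriptors, matching
                                         * context.ldt + seg above */
        return 0;
}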
27564diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
27565new file mode 100644
27566index 0000000..5877189
27567--- /dev/null
27568+++ b/arch/x86/kernel/sys_i386_32.c
27569@@ -0,0 +1,189 @@
27570+/*
27571+ * This file contains various random system calls that
27572+ * have a non-standard calling sequence on the Linux/i386
27573+ * platform.
27574+ */
27575+
27576+#include <linux/errno.h>
27577+#include <linux/sched.h>
27578+#include <linux/mm.h>
27579+#include <linux/fs.h>
27580+#include <linux/smp.h>
27581+#include <linux/sem.h>
27582+#include <linux/msg.h>
27583+#include <linux/shm.h>
27584+#include <linux/stat.h>
27585+#include <linux/syscalls.h>
27586+#include <linux/mman.h>
27587+#include <linux/file.h>
27588+#include <linux/utsname.h>
27589+#include <linux/ipc.h>
27590+#include <linux/elf.h>
27591+
27592+#include <linux/uaccess.h>
27593+#include <linux/unistd.h>
27594+
27595+#include <asm/syscalls.h>
27596+
27597+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
27598+{
27599+ unsigned long pax_task_size = TASK_SIZE;
27600+
27601+#ifdef CONFIG_PAX_SEGMEXEC
27602+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
27603+ pax_task_size = SEGMEXEC_TASK_SIZE;
27604+#endif
27605+
27606+ if (flags & MAP_FIXED)
27607+ if (len > pax_task_size || addr > pax_task_size - len)
27608+ return -EINVAL;
27609+
27610+ return 0;
27611+}
27612+
27613+/*
27614+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
27615+ */
27616+static unsigned long get_align_mask(void)
27617+{
27618+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
27619+ return 0;
27620+
27621+ if (!(current->flags & PF_RANDOMIZE))
27622+ return 0;
27623+
27624+ return va_align.mask;
27625+}
27626+
27627+unsigned long
27628+arch_get_unmapped_area(struct file *filp, unsigned long addr,
27629+ unsigned long len, unsigned long pgoff, unsigned long flags)
27630+{
27631+ struct mm_struct *mm = current->mm;
27632+ struct vm_area_struct *vma;
27633+ unsigned long pax_task_size = TASK_SIZE;
27634+ struct vm_unmapped_area_info info;
27635+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27636+
27637+#ifdef CONFIG_PAX_SEGMEXEC
27638+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27639+ pax_task_size = SEGMEXEC_TASK_SIZE;
27640+#endif
27641+
27642+ pax_task_size -= PAGE_SIZE;
27643+
27644+ if (len > pax_task_size)
27645+ return -ENOMEM;
27646+
27647+ if (flags & MAP_FIXED)
27648+ return addr;
27649+
27650+#ifdef CONFIG_PAX_RANDMMAP
27651+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27652+#endif
27653+
27654+ if (addr) {
27655+ addr = PAGE_ALIGN(addr);
27656+ if (pax_task_size - len >= addr) {
27657+ vma = find_vma(mm, addr);
27658+ if (check_heap_stack_gap(vma, addr, len, offset))
27659+ return addr;
27660+ }
27661+ }
27662+
27663+ info.flags = 0;
27664+ info.length = len;
27665+ info.align_mask = filp ? get_align_mask() : 0;
27666+ info.align_offset = pgoff << PAGE_SHIFT;
27667+ info.threadstack_offset = offset;
27668+
27669+#ifdef CONFIG_PAX_PAGEEXEC
27670+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
27671+ info.low_limit = 0x00110000UL;
27672+ info.high_limit = mm->start_code;
27673+
27674+#ifdef CONFIG_PAX_RANDMMAP
27675+ if (mm->pax_flags & MF_PAX_RANDMMAP)
27676+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
27677+#endif
27678+
27679+ if (info.low_limit < info.high_limit) {
27680+ addr = vm_unmapped_area(&info);
27681+ if (!IS_ERR_VALUE(addr))
27682+ return addr;
27683+ }
27684+ } else
27685+#endif
27686+
27687+ info.low_limit = mm->mmap_base;
27688+ info.high_limit = pax_task_size;
27689+
27690+ return vm_unmapped_area(&info);
27691+}
27692+
27693+unsigned long
27694+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27695+ const unsigned long len, const unsigned long pgoff,
27696+ const unsigned long flags)
27697+{
27698+ struct vm_area_struct *vma;
27699+ struct mm_struct *mm = current->mm;
27700+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
27701+ struct vm_unmapped_area_info info;
27702+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27703+
27704+#ifdef CONFIG_PAX_SEGMEXEC
27705+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27706+ pax_task_size = SEGMEXEC_TASK_SIZE;
27707+#endif
27708+
27709+ pax_task_size -= PAGE_SIZE;
27710+
27711+ /* requested length too big for entire address space */
27712+ if (len > pax_task_size)
27713+ return -ENOMEM;
27714+
27715+ if (flags & MAP_FIXED)
27716+ return addr;
27717+
27718+#ifdef CONFIG_PAX_PAGEEXEC
27719+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
27720+ goto bottomup;
27721+#endif
27722+
27723+#ifdef CONFIG_PAX_RANDMMAP
27724+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27725+#endif
27726+
27727+ /* requesting a specific address */
27728+ if (addr) {
27729+ addr = PAGE_ALIGN(addr);
27730+ if (pax_task_size - len >= addr) {
27731+ vma = find_vma(mm, addr);
27732+ if (check_heap_stack_gap(vma, addr, len, offset))
27733+ return addr;
27734+ }
27735+ }
27736+
27737+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
27738+ info.length = len;
27739+ info.low_limit = PAGE_SIZE;
27740+ info.high_limit = mm->mmap_base;
27741+ info.align_mask = filp ? get_align_mask() : 0;
27742+ info.align_offset = pgoff << PAGE_SHIFT;
27743+ info.threadstack_offset = offset;
27744+
27745+ addr = vm_unmapped_area(&info);
27746+ if (!(addr & ~PAGE_MASK))
27747+ return addr;
27748+ VM_BUG_ON(addr != -ENOMEM);
27749+
27750+bottomup:
27751+ /*
27752+ * A failed mmap() very likely causes application failure,
27753+ * so fall back to the bottom-up function here. This scenario
27754+ * can happen with large stack limits and large mmap()
27755+ * allocations.
27756+ */
27757+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
27758+}
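
check_heap_stack_gap() used throughout the new file is a PaX helper; a simplified sketch of the idea behind it (replacing the plain "!vma || addr + len <= vma->vm_start" test), with the struct and gap policy reduced to illustration:

struct vma { unsigned long vm_start, vm_end; };

static int gap_ok(const struct vma *next, unsigned long addr,
                  unsigned long len, unsigned long gap)
{
        if (!next)                      /* nothing above the hint */
                return 1;
        /* the mapping must end at least `gap` bytes below the next
         * VMA, so the heap cannot grow flush against a stack */
        return addr + len + gap <= next->vm_start;
}

int main(void)
{
        struct vma next = { 0x20000, 0x30000 };

        return gap_ok(&next, 0x10000, 0x1000, 0x1000) ? 0 : 1;
}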
27759diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
27760index 30277e2..5664a29 100644
27761--- a/arch/x86/kernel/sys_x86_64.c
27762+++ b/arch/x86/kernel/sys_x86_64.c
27763@@ -81,8 +81,8 @@ out:
27764 return error;
27765 }
27766
27767-static void find_start_end(unsigned long flags, unsigned long *begin,
27768- unsigned long *end)
27769+static void find_start_end(struct mm_struct *mm, unsigned long flags,
27770+ unsigned long *begin, unsigned long *end)
27771 {
27772 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
27773 unsigned long new_begin;
27774@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
27775 *begin = new_begin;
27776 }
27777 } else {
27778- *begin = current->mm->mmap_legacy_base;
27779+ *begin = mm->mmap_legacy_base;
27780 *end = TASK_SIZE;
27781 }
27782 }
27783@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27784 struct vm_area_struct *vma;
27785 struct vm_unmapped_area_info info;
27786 unsigned long begin, end;
27787+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27788
27789 if (flags & MAP_FIXED)
27790 return addr;
27791
27792- find_start_end(flags, &begin, &end);
27793+ find_start_end(mm, flags, &begin, &end);
27794
27795 if (len > end)
27796 return -ENOMEM;
27797
27798+#ifdef CONFIG_PAX_RANDMMAP
27799+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27800+#endif
27801+
27802 if (addr) {
27803 addr = PAGE_ALIGN(addr);
27804 vma = find_vma(mm, addr);
27805- if (end - len >= addr &&
27806- (!vma || addr + len <= vma->vm_start))
27807+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27808 return addr;
27809 }
27810
27811@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
27812 info.high_limit = end;
27813 info.align_mask = filp ? get_align_mask() : 0;
27814 info.align_offset = pgoff << PAGE_SHIFT;
27815+ info.threadstack_offset = offset;
27816 return vm_unmapped_area(&info);
27817 }
27818
27819@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27820 struct mm_struct *mm = current->mm;
27821 unsigned long addr = addr0;
27822 struct vm_unmapped_area_info info;
27823+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
27824
27825 /* requested length too big for entire address space */
27826 if (len > TASK_SIZE)
27827@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27828 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
27829 goto bottomup;
27830
27831+#ifdef CONFIG_PAX_RANDMMAP
27832+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27833+#endif
27834+
27835 /* requesting a specific address */
27836 if (addr) {
27837 addr = PAGE_ALIGN(addr);
27838 vma = find_vma(mm, addr);
27839- if (TASK_SIZE - len >= addr &&
27840- (!vma || addr + len <= vma->vm_start))
27841+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27842 return addr;
27843 }
27844
27845@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
27846 info.high_limit = mm->mmap_base;
27847 info.align_mask = filp ? get_align_mask() : 0;
27848 info.align_offset = pgoff << PAGE_SHIFT;
27849+ info.threadstack_offset = offset;
27850 addr = vm_unmapped_area(&info);
27851 if (!(addr & ~PAGE_MASK))
27852 return addr;
27853diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
27854index 91a4496..bb87552 100644
27855--- a/arch/x86/kernel/tboot.c
27856+++ b/arch/x86/kernel/tboot.c
27857@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
27858
27859 void tboot_shutdown(u32 shutdown_type)
27860 {
27861- void (*shutdown)(void);
27862+ void (* __noreturn shutdown)(void);
27863
27864 if (!tboot_enabled())
27865 return;
27866@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
27867
27868 switch_to_tboot_pt();
27869
27870- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
27871+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
27872 shutdown();
27873
27874 /* should not reach here */
27875@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
27876 return -ENODEV;
27877 }
27878
27879-static atomic_t ap_wfs_count;
27880+static atomic_unchecked_t ap_wfs_count;
27881
27882 static int tboot_wait_for_aps(int num_aps)
27883 {
27884@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
27885 {
27886 switch (action) {
27887 case CPU_DYING:
27888- atomic_inc(&ap_wfs_count);
27889+ atomic_inc_unchecked(&ap_wfs_count);
27890 if (num_online_cpus() == 1)
27891- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
27892+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
27893 return NOTIFY_BAD;
27894 break;
27895 }
27896@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
27897
27898 tboot_create_trampoline();
27899
27900- atomic_set(&ap_wfs_count, 0);
27901+ atomic_set_unchecked(&ap_wfs_count, 0);
27902 register_hotcpu_notifier(&tboot_cpu_notifier);
27903
27904 #ifdef CONFIG_DEBUG_FS
27905diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
27906index 25adc0e..1df4349 100644
27907--- a/arch/x86/kernel/time.c
27908+++ b/arch/x86/kernel/time.c
27909@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
27910 {
27911 unsigned long pc = instruction_pointer(regs);
27912
27913- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
27914+ if (!user_mode(regs) && in_lock_functions(pc)) {
27915 #ifdef CONFIG_FRAME_POINTER
27916- return *(unsigned long *)(regs->bp + sizeof(long));
27917+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
27918 #else
27919 unsigned long *sp =
27920 (unsigned long *)kernel_stack_pointer(regs);
27921@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
27922 * or above a saved flags. Eflags has bits 22-31 zero,
27923 * kernel addresses don't.
27924 */
27925+
27926+#ifdef CONFIG_PAX_KERNEXEC
27927+ return ktla_ktva(sp[0]);
27928+#else
27929 if (sp[0] >> 22)
27930 return sp[0];
27931 if (sp[1] >> 22)
27932 return sp[1];
27933 #endif
27934+
27935+#endif
27936 }
27937 return pc;
27938 }
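
The frame-pointer branch above fetches the caller's return address one word above the saved bp. A userspace analogue using GCC builtins in place of regs->bp (build with -fno-omit-frame-pointer):

#include <stdio.h>

static void show_caller(void)
{
        void *bp = __builtin_frame_address(0);
        /* equivalent of *(unsigned long *)(regs->bp + sizeof(long)) */
        void *ra = __builtin_return_address(0);

        printf("bp=%p return address=%p\n", bp, ra);
}

int main(void)
{
        show_caller();
        return 0;
}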
27939diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
27940index 7fc5e84..c6e445a 100644
27941--- a/arch/x86/kernel/tls.c
27942+++ b/arch/x86/kernel/tls.c
27943@@ -139,6 +139,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
27944 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
27945 return -EINVAL;
27946
27947+#ifdef CONFIG_PAX_SEGMEXEC
27948+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
27949+ return -EINVAL;
27950+#endif
27951+
27952 set_tls_desc(p, idx, &info, 1);
27953
27954 return 0;
27955@@ -256,7 +261,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
27956
27957 if (kbuf)
27958 info = kbuf;
27959- else if (__copy_from_user(infobuf, ubuf, count))
27960+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
27961 return -EFAULT;
27962 else
27963 info = infobuf;
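
The added check above caps the user-controlled length before the copy reaches a fixed-size stack buffer. A sketch with memcpy standing in for __copy_from_user and an illustrative struct:

#include <errno.h>
#include <string.h>

struct tls_info { unsigned int a, b; }; /* illustrative layout */

static int copy_tls_info(struct tls_info *dst, size_t dst_size,
                         const void *src, size_t count)
{
        if (count > dst_size)   /* reject oversized requests up front */
                return -EFAULT;
        memcpy(dst, src, count);        /* __copy_from_user analogue */
        return 0;
}

int main(void)
{
        struct tls_info info;
        char junk[64] = { 0 };

        return copy_tls_info(&info, sizeof(info), junk, sizeof(junk))
                == -EFAULT ? 0 : 1;
}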
27964diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
27965index 1c113db..287b42e 100644
27966--- a/arch/x86/kernel/tracepoint.c
27967+++ b/arch/x86/kernel/tracepoint.c
27968@@ -9,11 +9,11 @@
27969 #include <linux/atomic.h>
27970
27971 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
27972-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27973+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
27974 (unsigned long) trace_idt_table };
27975
27976 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27977-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
27978+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
27979
27980 static int trace_irq_vector_refcount;
27981 static DEFINE_MUTEX(irq_vector_mutex);
27982diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
27983index 89f4e64..aa4149d 100644
27984--- a/arch/x86/kernel/traps.c
27985+++ b/arch/x86/kernel/traps.c
27986@@ -68,7 +68,7 @@
27987 #include <asm/proto.h>
27988
27989 /* No need to be aligned, but done to keep all IDTs defined the same way. */
27990-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
27991+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
27992 #else
27993 #include <asm/processor-flags.h>
27994 #include <asm/setup.h>
27995@@ -77,7 +77,7 @@ asmlinkage int system_call(void);
27996 #endif
27997
27998 /* Must be page-aligned because the real IDT is used in a fixmap. */
27999-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
28000+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
28001
28002 DECLARE_BITMAP(used_vectors, NR_VECTORS);
28003 EXPORT_SYMBOL_GPL(used_vectors);
28004@@ -109,11 +109,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
28005 }
28006
28007 static nokprobe_inline int
28008-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28009+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
28010 struct pt_regs *regs, long error_code)
28011 {
28012 #ifdef CONFIG_X86_32
28013- if (regs->flags & X86_VM_MASK) {
28014+ if (v8086_mode(regs)) {
28015 /*
28016 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
28017 * On nmi (interrupt 2), do_trap should not be called.
28018@@ -126,12 +126,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28019 return -1;
28020 }
28021 #endif
28022- if (!user_mode(regs)) {
28023+ if (!user_mode_novm(regs)) {
28024 if (!fixup_exception(regs)) {
28025 tsk->thread.error_code = error_code;
28026 tsk->thread.trap_nr = trapnr;
28027+
28028+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28029+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
28030+ str = "PAX: suspicious stack segment fault";
28031+#endif
28032+
28033 die(str, regs, error_code);
28034 }
28035+
28036+#ifdef CONFIG_PAX_REFCOUNT
28037+ if (trapnr == X86_TRAP_OF)
28038+ pax_report_refcount_overflow(regs);
28039+#endif
28040+
28041 return 0;
28042 }
28043
28044@@ -170,7 +182,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
28045 }
28046
28047 static void
28048-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28049+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
28050 long error_code, siginfo_t *info)
28051 {
28052 struct task_struct *tsk = current;
28053@@ -194,7 +206,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28054 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
28055 printk_ratelimit()) {
28056 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
28057- tsk->comm, tsk->pid, str,
28058+ tsk->comm, task_pid_nr(tsk), str,
28059 regs->ip, regs->sp, error_code);
28060 print_vma_addr(" in ", regs->ip);
28061 pr_cont("\n");
28062@@ -274,6 +286,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
28063 tsk->thread.error_code = error_code;
28064 tsk->thread.trap_nr = X86_TRAP_DF;
28065
28066+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
28067+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
28068+ die("grsec: kernel stack overflow detected", regs, error_code);
28069+#endif
28070+
28071 #ifdef CONFIG_DOUBLEFAULT
28072 df_debug(regs, error_code);
28073 #endif
28074@@ -300,7 +317,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
28075 goto exit;
28076 conditional_sti(regs);
28077
28078- if (!user_mode_vm(regs))
28079+ if (!user_mode(regs))
28080 die("bounds", regs, error_code);
28081
28082 if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
28083@@ -379,7 +396,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
28084 conditional_sti(regs);
28085
28086 #ifdef CONFIG_X86_32
28087- if (regs->flags & X86_VM_MASK) {
28088+ if (v8086_mode(regs)) {
28089 local_irq_enable();
28090 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
28091 goto exit;
28092@@ -387,18 +404,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
28093 #endif
28094
28095 tsk = current;
28096- if (!user_mode(regs)) {
28097+ if (!user_mode_novm(regs)) {
28098 if (fixup_exception(regs))
28099 goto exit;
28100
28101 tsk->thread.error_code = error_code;
28102 tsk->thread.trap_nr = X86_TRAP_GP;
28103 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
28104- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
28105+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
28106+
28107+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28108+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
28109+ die("PAX: suspicious general protection fault", regs, error_code);
28110+ else
28111+#endif
28112+
28113 die("general protection fault", regs, error_code);
28114+ }
28115 goto exit;
28116 }
28117
28118+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28119+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28120+ struct mm_struct *mm = tsk->mm;
28121+ unsigned long limit;
28122+
28123+ down_write(&mm->mmap_sem);
28124+ limit = mm->context.user_cs_limit;
28125+ if (limit < TASK_SIZE) {
28126+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28127+ up_write(&mm->mmap_sem);
28128+ return;
28129+ }
28130+ up_write(&mm->mmap_sem);
28131+ }
28132+#endif
28133+
28134 tsk->thread.error_code = error_code;
28135 tsk->thread.trap_nr = X86_TRAP_GP;
28136
28137@@ -510,13 +551,16 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
28138 container_of(task_pt_regs(current),
28139 struct bad_iret_stack, regs);
28140
28141+ if ((current->thread.sp0 ^ (unsigned long)s) < THREAD_SIZE)
28142+ new_stack = s;
28143+
28144 /* Copy the IRET target to the new stack. */
28145 memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
28146
28147 /* Copy the remainder of the stack from the current stack. */
28148 memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
28149
28150- BUG_ON(!user_mode_vm(&new_stack->regs));
28151+ BUG_ON(!user_mode(&new_stack->regs));
28152 return new_stack;
28153 }
28154 NOKPROBE_SYMBOL(fixup_bad_iret);
28155@@ -566,7 +610,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28156 * then it's very likely the result of an icebp/int01 trap.
28157 * User wants a sigtrap for that.
28158 */
28159- if (!dr6 && user_mode_vm(regs))
28160+ if (!dr6 && user_mode(regs))
28161 user_icebp = 1;
28162
28163 /* Catch kmemcheck conditions first of all! */
28164@@ -602,7 +646,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28165 /* It's safe to allow irq's after DR6 has been saved */
28166 preempt_conditional_sti(regs);
28167
28168- if (regs->flags & X86_VM_MASK) {
28169+ if (v8086_mode(regs)) {
28170 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28171 X86_TRAP_DB);
28172 preempt_conditional_cli(regs);
28173@@ -617,7 +661,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28174 * We already checked v86 mode above, so we can check for kernel mode
28175 * by just checking the CPL of CS.
28176 */
28177- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28178+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28179 tsk->thread.debugreg6 &= ~DR_STEP;
28180 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28181 regs->flags &= ~X86_EFLAGS_TF;
28182@@ -650,7 +694,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28183 return;
28184 conditional_sti(regs);
28185
28186- if (!user_mode_vm(regs))
28187+ if (!user_mode(regs))
28188 {
28189 if (!fixup_exception(regs)) {
28190 task->thread.error_code = error_code;
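
The double-fault check above flags a kernel stack overflow when sp has run just past the bottom of the stack allocation (x86 stacks grow down, and tsk->stack is the lowest address). A self-check of the arithmetic:

#include <assert.h>

#define PAGE_SIZE 4096UL

static int looks_like_stack_overflow(unsigned long stack_base,
                                     unsigned long sp)
{
        /* an sp just below stack_base means the thread ran off the
         * bottom of its stack; unsigned wrap rules out sp above it */
        return stack_base - sp <= PAGE_SIZE;
}

int main(void)
{
        assert(looks_like_stack_overflow(0x10000, 0x10000 - 64));
        assert(!looks_like_stack_overflow(0x10000, 0x10000 + 512));
        return 0;
}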
28191diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28192index 5054497..139f8f8 100644
28193--- a/arch/x86/kernel/tsc.c
28194+++ b/arch/x86/kernel/tsc.c
28195@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28196 */
28197 smp_wmb();
28198
28199- ACCESS_ONCE(c2n->head) = data;
28200+ ACCESS_ONCE_RW(c2n->head) = data;
28201 }
28202
28203 /*
28204diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28205index 8b96a94..792b410 100644
28206--- a/arch/x86/kernel/uprobes.c
28207+++ b/arch/x86/kernel/uprobes.c
28208@@ -845,7 +845,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28209 int ret = NOTIFY_DONE;
28210
28211 /* We are only interested in userspace traps */
28212- if (regs && !user_mode_vm(regs))
28213+ if (regs && !user_mode(regs))
28214 return NOTIFY_DONE;
28215
28216 switch (val) {
28217@@ -919,7 +919,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28218
28219 if (nleft != rasize) {
28220 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28221- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28222+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28223
28224 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28225 }
28226diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28227index b9242ba..50c5edd 100644
28228--- a/arch/x86/kernel/verify_cpu.S
28229+++ b/arch/x86/kernel/verify_cpu.S
28230@@ -20,6 +20,7 @@
28231 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28232 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28233 * arch/x86/kernel/head_32.S: processor startup
28234+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28235 *
28236 * verify_cpu, returns the status of longmode and SSE in register %eax.
28237 * 0: Success 1: Failure
28238diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28239index e8edcf5..27f9344 100644
28240--- a/arch/x86/kernel/vm86_32.c
28241+++ b/arch/x86/kernel/vm86_32.c
28242@@ -44,6 +44,7 @@
28243 #include <linux/ptrace.h>
28244 #include <linux/audit.h>
28245 #include <linux/stddef.h>
28246+#include <linux/grsecurity.h>
28247
28248 #include <asm/uaccess.h>
28249 #include <asm/io.h>
28250@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28251 do_exit(SIGSEGV);
28252 }
28253
28254- tss = &per_cpu(init_tss, get_cpu());
28255+ tss = init_tss + get_cpu();
28256 current->thread.sp0 = current->thread.saved_sp0;
28257 current->thread.sysenter_cs = __KERNEL_CS;
28258 load_sp0(tss, &current->thread);
28259@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28260
28261 if (tsk->thread.saved_sp0)
28262 return -EPERM;
28263+
28264+#ifdef CONFIG_GRKERNSEC_VM86
28265+ if (!capable(CAP_SYS_RAWIO)) {
28266+ gr_handle_vm86();
28267+ return -EPERM;
28268+ }
28269+#endif
28270+
28271 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28272 offsetof(struct kernel_vm86_struct, vm86plus) -
28273 sizeof(info.regs));
28274@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28275 int tmp;
28276 struct vm86plus_struct __user *v86;
28277
28278+#ifdef CONFIG_GRKERNSEC_VM86
28279+ if (!capable(CAP_SYS_RAWIO)) {
28280+ gr_handle_vm86();
28281+ return -EPERM;
28282+ }
28283+#endif
28284+
28285 tsk = current;
28286 switch (cmd) {
28287 case VM86_REQUEST_IRQ:
28288@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28289 tsk->thread.saved_fs = info->regs32->fs;
28290 tsk->thread.saved_gs = get_user_gs(info->regs32);
28291
28292- tss = &per_cpu(init_tss, get_cpu());
28293+ tss = init_tss + get_cpu();
28294 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28295 if (cpu_has_sep)
28296 tsk->thread.sysenter_cs = 0;
28297@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28298 goto cannot_handle;
28299 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28300 goto cannot_handle;
28301- intr_ptr = (unsigned long __user *) (i << 2);
28302+ intr_ptr = (__force unsigned long __user *) (i << 2);
28303 if (get_user(segoffs, intr_ptr))
28304 goto cannot_handle;
28305 if ((segoffs >> 16) == BIOSSEG)
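
The cast above indexes the real-mode interrupt vector table: vector i's 4-byte segment:offset pair lives at linear address i << 2, and the high 16 bits of the loaded value are the segment compared against BIOSSEG. A self-check:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        unsigned int i = 0x21;                  /* DOS services vector */
        uintptr_t entry = (uintptr_t)(i << 2);  /* linear IVT address */

        assert(entry == 0x84);
        return 0;
}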
28306diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28307index 00bf300..129df8e 100644
28308--- a/arch/x86/kernel/vmlinux.lds.S
28309+++ b/arch/x86/kernel/vmlinux.lds.S
28310@@ -26,6 +26,13 @@
28311 #include <asm/page_types.h>
28312 #include <asm/cache.h>
28313 #include <asm/boot.h>
28314+#include <asm/segment.h>
28315+
28316+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28317+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28318+#else
28319+#define __KERNEL_TEXT_OFFSET 0
28320+#endif
28321
28322 #undef i386 /* in case the preprocessor is a 32bit one */
28323
28324@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28325
28326 PHDRS {
28327 text PT_LOAD FLAGS(5); /* R_E */
28328+#ifdef CONFIG_X86_32
28329+ module PT_LOAD FLAGS(5); /* R_E */
28330+#endif
28331+#ifdef CONFIG_XEN
28332+ rodata PT_LOAD FLAGS(5); /* R_E */
28333+#else
28334+ rodata PT_LOAD FLAGS(4); /* R__ */
28335+#endif
28336 data PT_LOAD FLAGS(6); /* RW_ */
28337-#ifdef CONFIG_X86_64
28338+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28339 #ifdef CONFIG_SMP
28340 percpu PT_LOAD FLAGS(6); /* RW_ */
28341 #endif
28342+ text.init PT_LOAD FLAGS(5); /* R_E */
28343+ text.exit PT_LOAD FLAGS(5); /* R_E */
28344 init PT_LOAD FLAGS(7); /* RWE */
28345-#endif
28346 note PT_NOTE FLAGS(0); /* ___ */
28347 }
28348
28349 SECTIONS
28350 {
28351 #ifdef CONFIG_X86_32
28352- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28353- phys_startup_32 = startup_32 - LOAD_OFFSET;
28354+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28355 #else
28356- . = __START_KERNEL;
28357- phys_startup_64 = startup_64 - LOAD_OFFSET;
28358+ . = __START_KERNEL;
28359 #endif
28360
28361 /* Text and read-only data */
28362- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28363- _text = .;
28364+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28365 /* bootstrapping code */
28366+#ifdef CONFIG_X86_32
28367+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28368+#else
28369+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28370+#endif
28371+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28372+ _text = .;
28373 HEAD_TEXT
28374 . = ALIGN(8);
28375 _stext = .;
28376@@ -104,13 +124,47 @@ SECTIONS
28377 IRQENTRY_TEXT
28378 *(.fixup)
28379 *(.gnu.warning)
28380- /* End of text section */
28381- _etext = .;
28382 } :text = 0x9090
28383
28384- NOTES :text :note
28385+ . += __KERNEL_TEXT_OFFSET;
28386
28387- EXCEPTION_TABLE(16) :text = 0x9090
28388+#ifdef CONFIG_X86_32
28389+ . = ALIGN(PAGE_SIZE);
28390+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28391+
28392+#ifdef CONFIG_PAX_KERNEXEC
28393+ MODULES_EXEC_VADDR = .;
28394+ BYTE(0)
28395+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28396+ . = ALIGN(HPAGE_SIZE) - 1;
28397+ MODULES_EXEC_END = .;
28398+#endif
28399+
28400+ } :module
28401+#endif
28402+
28403+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28404+ /* End of text section */
28405+ BYTE(0)
28406+ _etext = . - __KERNEL_TEXT_OFFSET;
28407+ }
28408+
28409+#ifdef CONFIG_X86_32
28410+ . = ALIGN(PAGE_SIZE);
28411+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28412+ . = ALIGN(PAGE_SIZE);
28413+ *(.empty_zero_page)
28414+ *(.initial_pg_fixmap)
28415+ *(.initial_pg_pmd)
28416+ *(.initial_page_table)
28417+ *(.swapper_pg_dir)
28418+ } :rodata
28419+#endif
28420+
28421+ . = ALIGN(PAGE_SIZE);
28422+ NOTES :rodata :note
28423+
28424+ EXCEPTION_TABLE(16) :rodata
28425
28426 #if defined(CONFIG_DEBUG_RODATA)
28427 /* .text should occupy whole number of pages */
28428@@ -122,16 +176,20 @@ SECTIONS
28429
28430 /* Data */
28431 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28432+
28433+#ifdef CONFIG_PAX_KERNEXEC
28434+ . = ALIGN(HPAGE_SIZE);
28435+#else
28436+ . = ALIGN(PAGE_SIZE);
28437+#endif
28438+
28439 /* Start of data section */
28440 _sdata = .;
28441
28442 /* init_task */
28443 INIT_TASK_DATA(THREAD_SIZE)
28444
28445-#ifdef CONFIG_X86_32
28446- /* 32 bit has nosave before _edata */
28447 NOSAVE_DATA
28448-#endif
28449
28450 PAGE_ALIGNED_DATA(PAGE_SIZE)
28451
28452@@ -174,12 +232,19 @@ SECTIONS
28453 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
28454
28455 /* Init code and data - will be freed after init */
28456- . = ALIGN(PAGE_SIZE);
28457 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
28458+ BYTE(0)
28459+
28460+#ifdef CONFIG_PAX_KERNEXEC
28461+ . = ALIGN(HPAGE_SIZE);
28462+#else
28463+ . = ALIGN(PAGE_SIZE);
28464+#endif
28465+
28466 __init_begin = .; /* paired with __init_end */
28467- }
28468+ } :init.begin
28469
28470-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
28471+#ifdef CONFIG_SMP
28472 /*
28473 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
28474 * output PHDR, so the next output section - .init.text - should
28475@@ -190,12 +255,27 @@ SECTIONS
28476 "per-CPU data too large - increase CONFIG_PHYSICAL_START")
28477 #endif
28478
28479- INIT_TEXT_SECTION(PAGE_SIZE)
28480-#ifdef CONFIG_X86_64
28481- :init
28482-#endif
28483+ . = ALIGN(PAGE_SIZE);
28484+ init_begin = .;
28485+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
28486+ VMLINUX_SYMBOL(_sinittext) = .;
28487+ INIT_TEXT
28488+ . = ALIGN(PAGE_SIZE);
28489+ } :text.init
28490
28491- INIT_DATA_SECTION(16)
28492+ /*
28493+	 * .exit.text is discarded at runtime, not link time, to deal with
28494+ * references from .altinstructions and .eh_frame
28495+ */
28496+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28497+ EXIT_TEXT
28498+ VMLINUX_SYMBOL(_einittext) = .;
28499+ . = ALIGN(16);
28500+ } :text.exit
28501+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
28502+
28503+ . = ALIGN(PAGE_SIZE);
28504+ INIT_DATA_SECTION(16) :init
28505
28506 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
28507 __x86_cpu_dev_start = .;
28508@@ -266,19 +346,12 @@ SECTIONS
28509 }
28510
28511 . = ALIGN(8);
28512- /*
28513- * .exit.text is discard at runtime, not link time, to deal with
28514- * references from .altinstructions and .eh_frame
28515- */
28516- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
28517- EXIT_TEXT
28518- }
28519
28520 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
28521 EXIT_DATA
28522 }
28523
28524-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
28525+#ifndef CONFIG_SMP
28526 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
28527 #endif
28528
28529@@ -297,16 +370,10 @@ SECTIONS
28530 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
28531 __smp_locks = .;
28532 *(.smp_locks)
28533- . = ALIGN(PAGE_SIZE);
28534 __smp_locks_end = .;
28535+ . = ALIGN(PAGE_SIZE);
28536 }
28537
28538-#ifdef CONFIG_X86_64
28539- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
28540- NOSAVE_DATA
28541- }
28542-#endif
28543-
28544 /* BSS */
28545 . = ALIGN(PAGE_SIZE);
28546 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
28547@@ -322,6 +389,7 @@ SECTIONS
28548 __brk_base = .;
28549 . += 64 * 1024; /* 64k alignment slop space */
28550 *(.brk_reservation) /* areas brk users have reserved */
28551+ . = ALIGN(HPAGE_SIZE);
28552 __brk_limit = .;
28553 }
28554
28555@@ -348,13 +416,12 @@ SECTIONS
28556 * for the boot processor.
28557 */
28558 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
28559-INIT_PER_CPU(gdt_page);
28560 INIT_PER_CPU(irq_stack_union);
28561
28562 /*
28563 * Build-time check on the image size:
28564 */
28565-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
28566+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
28567 "kernel image bigger than KERNEL_IMAGE_SIZE");
28568
28569 #ifdef CONFIG_SMP
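Note on the vmlinux.lds.S changes above: under 32-bit KERNEXEC the script emits .text at `. - __KERNEL_TEXT_OFFSET` while the load addresses stay put, so any check that compares symbol differences has to add the offset back; the rewritten image-size ASSERT at the bottom of the script is the canonical case. The same arithmetic as a host-side sanity check, with every constant a placeholder for _text, _end, __KERNEL_TEXT_OFFSET and KERNEL_IMAGE_SIZE:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Placeholder values only; the real symbols come from the link. */
        uintptr_t text = 0xc2000000u, end = 0xc3000000u;
        uintptr_t kernel_text_offset = 0x01000000u;
        uintptr_t kernel_image_size = 512u * 1024 * 1024;

        /* Mirrors: ASSERT(_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE) */
        assert(end - text - kernel_text_offset <= kernel_image_size);
        return 0;
    }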
28570diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
28571index 2dcc6ff..082dc7a 100644
28572--- a/arch/x86/kernel/vsyscall_64.c
28573+++ b/arch/x86/kernel/vsyscall_64.c
28574@@ -38,15 +38,13 @@
28575 #define CREATE_TRACE_POINTS
28576 #include "vsyscall_trace.h"
28577
28578-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
28579+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
28580
28581 static int __init vsyscall_setup(char *str)
28582 {
28583 if (str) {
28584 if (!strcmp("emulate", str))
28585 vsyscall_mode = EMULATE;
28586- else if (!strcmp("native", str))
28587- vsyscall_mode = NATIVE;
28588 else if (!strcmp("none", str))
28589 vsyscall_mode = NONE;
28590 else
28591@@ -264,8 +262,7 @@ do_ret:
28592 return true;
28593
28594 sigsegv:
28595- force_sig(SIGSEGV, current);
28596- return true;
28597+ do_group_exit(SIGKILL);
28598 }
28599
28600 /*
28601@@ -283,8 +280,8 @@ static struct vm_operations_struct gate_vma_ops = {
28602 static struct vm_area_struct gate_vma = {
28603 .vm_start = VSYSCALL_ADDR,
28604 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
28605- .vm_page_prot = PAGE_READONLY_EXEC,
28606- .vm_flags = VM_READ | VM_EXEC,
28607+ .vm_page_prot = PAGE_READONLY,
28608+ .vm_flags = VM_READ,
28609 .vm_ops = &gate_vma_ops,
28610 };
28611
28612@@ -325,10 +322,7 @@ void __init map_vsyscall(void)
28613 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
28614
28615 if (vsyscall_mode != NONE)
28616- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
28617- vsyscall_mode == NATIVE
28618- ? PAGE_KERNEL_VSYSCALL
28619- : PAGE_KERNEL_VVAR);
28620+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
28621
28622 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
28623 (unsigned long)VSYSCALL_ADDR);
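Note on the vsyscall_64.c hunks above: the NATIVE mode is removed entirely, so the fixmap is always installed with the non-executable PAGE_KERNEL_VVAR protection, the gate VMA loses VM_EXEC, and a fault in the emulation path now terminates the whole thread group instead of raising a catchable SIGSEGV. A standalone analogue of the reduced boot-parameter parser, mirroring the patched vsyscall_setup():

    #include <stdio.h>
    #include <string.h>

    enum vsyscall_mode { EMULATE, NONE };

    /* "native" is no longer a recognized value and falls to the error path. */
    static int vsyscall_setup(const char *str, enum vsyscall_mode *mode)
    {
        if (!str)
            return -1;
        if (!strcmp(str, "emulate"))
            *mode = EMULATE;
        else if (!strcmp(str, "none"))
            *mode = NONE;
        else
            return -1;
        return 0;
    }

    int main(void)
    {
        enum vsyscall_mode mode = EMULATE;
        printf("native accepted: %d\n", vsyscall_setup("native", &mode) == 0);
        return 0;
    }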
28624diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
28625index 04068192..4d75aa6 100644
28626--- a/arch/x86/kernel/x8664_ksyms_64.c
28627+++ b/arch/x86/kernel/x8664_ksyms_64.c
28628@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
28629 EXPORT_SYMBOL(copy_user_generic_unrolled);
28630 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
28631 EXPORT_SYMBOL(__copy_user_nocache);
28632-EXPORT_SYMBOL(_copy_from_user);
28633-EXPORT_SYMBOL(_copy_to_user);
28634
28635 EXPORT_SYMBOL(copy_page);
28636 EXPORT_SYMBOL(clear_page);
28637@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
28638 EXPORT_SYMBOL(___preempt_schedule_context);
28639 #endif
28640 #endif
28641+
28642+#ifdef CONFIG_PAX_PER_CPU_PGD
28643+EXPORT_SYMBOL(cpu_pgd);
28644+#endif
28645diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
28646index 234b072..b7ab191 100644
28647--- a/arch/x86/kernel/x86_init.c
28648+++ b/arch/x86/kernel/x86_init.c
28649@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
28650 static void default_nmi_init(void) { };
28651 static int default_i8042_detect(void) { return 1; };
28652
28653-struct x86_platform_ops x86_platform = {
28654+struct x86_platform_ops x86_platform __read_only = {
28655 .calibrate_tsc = native_calibrate_tsc,
28656 .get_wallclock = mach_get_cmos_time,
28657 .set_wallclock = mach_set_rtc_mmss,
28658@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
28659 EXPORT_SYMBOL_GPL(x86_platform);
28660
28661 #if defined(CONFIG_PCI_MSI)
28662-struct x86_msi_ops x86_msi = {
28663+struct x86_msi_ops x86_msi __read_only = {
28664 .setup_msi_irqs = native_setup_msi_irqs,
28665 .compose_msi_msg = native_compose_msi_msg,
28666 .teardown_msi_irq = native_teardown_msi_irq,
28667@@ -140,7 +140,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
28668 }
28669 #endif
28670
28671-struct x86_io_apic_ops x86_io_apic_ops = {
28672+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
28673 .init = native_io_apic_init_mappings,
28674 .read = native_io_apic_read,
28675 .write = native_io_apic_write,
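Note on the x86_init.c hunks above: tagging the ops tables __read_only moves them into memory that is write-protected after boot, so any later legitimate update has to be bracketed by pax_open_kernel()/pax_close_kernel(), which is exactly what the vmx.c hunks further down do for kvm_x86_ops. A sketch of that update pattern; the helper names are the real ones from the patch, but the empty bodies are stand-ins so the sketch compiles, and __read_only is shown as a comment because it is a kernel-only attribute:

    struct msi_ops {
        int (*setup)(void);
        void (*teardown)(void);
    };

    static void pax_open_kernel(void)  { /* briefly lift kernel write protection */ }
    static void pax_close_kernel(void) { /* restore write protection */ }

    static struct msi_ops x86_msi /* __read_only */;

    static void disable_setup_hook(void)
    {
        pax_open_kernel();
        /* Same cast trick the vmx.c hunks below use for kvm_x86_ops. */
        *(void **)&x86_msi.setup = 0;
        pax_close_kernel();
    }

    int main(void) { disable_setup_hook(); return x86_msi.setup != 0; }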
28676diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
28677index 8be1e17..07dd990 100644
28678--- a/arch/x86/kernel/xsave.c
28679+++ b/arch/x86/kernel/xsave.c
28680@@ -167,18 +167,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28681
28682 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
28683 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
28684- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28685+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
28686
28687 if (!use_xsave())
28688 return err;
28689
28690- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
28691+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
28692
28693 /*
28694 * Read the xstate_bv which we copied (directly from the cpu or
28695 * from the state in task struct) to the user buffers.
28696 */
28697- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28698+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28699
28700 /*
28701 * For legacy compatible, we always set FP/SSE bits in the bit
28702@@ -193,7 +193,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
28703 */
28704 xstate_bv |= XSTATE_FPSSE;
28705
28706- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
28707+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
28708
28709 return err;
28710 }
28711@@ -202,6 +202,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
28712 {
28713 int err;
28714
28715+ buf = (struct xsave_struct __user *)____m(buf);
28716 if (use_xsave())
28717 err = xsave_user(buf);
28718 else if (use_fxsr())
28719@@ -312,6 +313,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
28720 */
28721 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
28722 {
28723+ buf = (void __user *)____m(buf);
28724 if (use_xsave()) {
28725 if ((unsigned long)buf % 64 || fx_only) {
28726 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
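Note on the xsave.c hunks above: these are sparse-annotation fixes; pointers handed to __put_user()/__get_user() must carry __user, and ____m() re-tags a buffer that crossed the kernel/user boundary. The annotations expand to nothing in a normal build and only matter under sparse, as in this sketch (put_user_u32 and the constant are illustrative stand-ins):

    /* Under sparse (__CHECKER__) these mark a separate address space;
     * in a normal compile they vanish. */
    #ifdef __CHECKER__
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif

    static int put_user_u32(unsigned int val, unsigned int __user *p)
    {
        (void)val; (void)p;      /* would be __put_user() in the kernel */
        return 0;
    }

    static int write_magic(void *buf, unsigned long xstate_size)
    {
        /* Mirrors: __put_user(..., (__u32 __user *)(buf + xstate_size)) */
        return put_user_u32(0u /* FP_XSTATE_MAGIC2 in the real code */,
                            (unsigned int __force __user *)((char *)buf + xstate_size));
    }

    int main(void) { return write_magic((char[8]){0}, 4); }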
28727diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
28728index 8a80737..bac4961 100644
28729--- a/arch/x86/kvm/cpuid.c
28730+++ b/arch/x86/kvm/cpuid.c
28731@@ -182,15 +182,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
28732 struct kvm_cpuid2 *cpuid,
28733 struct kvm_cpuid_entry2 __user *entries)
28734 {
28735- int r;
28736+ int r, i;
28737
28738 r = -E2BIG;
28739 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
28740 goto out;
28741 r = -EFAULT;
28742- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
28743- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28744+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
28745 goto out;
28746+ for (i = 0; i < cpuid->nent; ++i) {
28747+ struct kvm_cpuid_entry2 cpuid_entry;
28748+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
28749+ goto out;
28750+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
28751+ }
28752 vcpu->arch.cpuid_nent = cpuid->nent;
28753 kvm_apic_set_version(vcpu);
28754 kvm_x86_ops->cpuid_update(vcpu);
28755@@ -203,15 +208,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
28756 struct kvm_cpuid2 *cpuid,
28757 struct kvm_cpuid_entry2 __user *entries)
28758 {
28759- int r;
28760+ int r, i;
28761
28762 r = -E2BIG;
28763 if (cpuid->nent < vcpu->arch.cpuid_nent)
28764 goto out;
28765 r = -EFAULT;
28766- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
28767- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28768+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
28769 goto out;
28770+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
28771+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
28772+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
28773+ goto out;
28774+ }
28775 return 0;
28776
28777 out:
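Note on the cpuid.c hunks above: instead of one bulk copy_from_user()/copy_to_user() whose size is derived from a guest-controlled count, the patch validates the whole range with access_ok() and then moves one fixed-size entry per iteration through a stack temporary, bounding every write. A standalone analogue of the copy-in loop; names and the memcpy stand in for access_ok()/__copy_from_user():

    #include <errno.h>
    #include <stddef.h>
    #include <string.h>

    struct cpuid_entry { unsigned int fn, idx, eax, ebx, ecx, edx; };

    #define MAX_ENTRIES 80   /* stands in for KVM_MAX_CPUID_ENTRIES */

    /* src/n come from an untrusted caller; dst has room for MAX_ENTRIES. */
    static int copy_entries_in(struct cpuid_entry *dst,
                               const struct cpuid_entry *src, size_t n)
    {
        size_t i;

        if (n > MAX_ENTRIES)
            return -E2BIG;
        for (i = 0; i < n; i++) {
            struct cpuid_entry tmp;
            /* in the kernel: __copy_from_user(&tmp, src + i, sizeof(tmp)) */
            memcpy(&tmp, src + i, sizeof(tmp));
            dst[i] = tmp;        /* one bounded element per iteration */
        }
        return 0;
    }

    int main(void)
    {
        struct cpuid_entry src[2] = {{0}}, dst[MAX_ENTRIES];
        return copy_entries_in(dst, src, 2);
    }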
28778diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
28779index b24c2d8..e1e4e259 100644
28780--- a/arch/x86/kvm/emulate.c
28781+++ b/arch/x86/kvm/emulate.c
28782@@ -3503,7 +3503,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
28783 int cr = ctxt->modrm_reg;
28784 u64 efer = 0;
28785
28786- static u64 cr_reserved_bits[] = {
28787+ static const u64 cr_reserved_bits[] = {
28788 0xffffffff00000000ULL,
28789 0, 0, 0, /* CR3 checked later */
28790 CR4_RESERVED_BITS,
28791diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
28792index d52dcf0..cec7e84 100644
28793--- a/arch/x86/kvm/lapic.c
28794+++ b/arch/x86/kvm/lapic.c
28795@@ -55,7 +55,7 @@
28796 #define APIC_BUS_CYCLE_NS 1
28797
28798 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
28799-#define apic_debug(fmt, arg...)
28800+#define apic_debug(fmt, arg...) do {} while (0)
28801
28802 #define APIC_LVT_NUM 6
28803 /* 14 is the version for Xeon and Pentium 8.4.8*/
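Note on the lapic.c hunk above: replacing the empty apic_debug() expansion with `do {} while (0)` keeps the disabled macro a single real statement. A tiny demonstration of why the empty form is a hazard:

    #include <stdio.h>

    #define LOG_EMPTY(fmt, ...)                 /* expands to nothing */
    #define LOG_SAFE(fmt, ...)  do {} while (0) /* always one statement */

    int main(void)
    {
        int err = 1;

        /* With LOG_EMPTY this if-body reduces to a bare ";", which trips
         * GCC's -Wempty-body (part of -Wextra); LOG_SAFE stays a proper
         * statement and keeps if/else nesting unambiguous. */
        if (err)
            LOG_SAFE("oops %d", err);
        else
            puts("ok");
        return 0;
    }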
28804diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
28805index fd49c86..77e1aa0 100644
28806--- a/arch/x86/kvm/paging_tmpl.h
28807+++ b/arch/x86/kvm/paging_tmpl.h
28808@@ -343,7 +343,7 @@ retry_walk:
28809 if (unlikely(kvm_is_error_hva(host_addr)))
28810 goto error;
28811
28812- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
28813+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
28814 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
28815 goto error;
28816 walker->ptep_user[walker->level - 1] = ptep_user;
28817diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
28818index 41dd038..de331cf 100644
28819--- a/arch/x86/kvm/svm.c
28820+++ b/arch/x86/kvm/svm.c
28821@@ -3568,7 +3568,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
28822 int cpu = raw_smp_processor_id();
28823
28824 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
28825+
28826+ pax_open_kernel();
28827 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
28828+ pax_close_kernel();
28829+
28830 load_TR_desc();
28831 }
28832
28833@@ -3969,6 +3973,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
28834 #endif
28835 #endif
28836
28837+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28838+ __set_fs(current_thread_info()->addr_limit);
28839+#endif
28840+
28841 reload_tss(vcpu);
28842
28843 local_irq_disable();
28844diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
28845index d4c58d8..eaf2568 100644
28846--- a/arch/x86/kvm/vmx.c
28847+++ b/arch/x86/kvm/vmx.c
28848@@ -1380,12 +1380,12 @@ static void vmcs_write64(unsigned long field, u64 value)
28849 #endif
28850 }
28851
28852-static void vmcs_clear_bits(unsigned long field, u32 mask)
28853+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
28854 {
28855 vmcs_writel(field, vmcs_readl(field) & ~mask);
28856 }
28857
28858-static void vmcs_set_bits(unsigned long field, u32 mask)
28859+static void vmcs_set_bits(unsigned long field, unsigned long mask)
28860 {
28861 vmcs_writel(field, vmcs_readl(field) | mask);
28862 }
28863@@ -1645,7 +1645,11 @@ static void reload_tss(void)
28864 struct desc_struct *descs;
28865
28866 descs = (void *)gdt->address;
28867+
28868+ pax_open_kernel();
28869 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
28870+ pax_close_kernel();
28871+
28872 load_TR_desc();
28873 }
28874
28875@@ -1881,6 +1885,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
28876 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
28877 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
28878
28879+#ifdef CONFIG_PAX_PER_CPU_PGD
28880+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28881+#endif
28882+
28883 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
28884 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
28885 vmx->loaded_vmcs->cpu = cpu;
28886@@ -2170,7 +2178,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
28887 * reads and returns guest's timestamp counter "register"
28888 * guest_tsc = host_tsc + tsc_offset -- 21.3
28889 */
28890-static u64 guest_read_tsc(void)
28891+static u64 __intentional_overflow(-1) guest_read_tsc(void)
28892 {
28893 u64 host_tsc, tsc_offset;
28894
28895@@ -4252,7 +4260,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28896 unsigned long cr4;
28897
28898 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
28899+
28900+#ifndef CONFIG_PAX_PER_CPU_PGD
28901 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
28902+#endif
28903
28904 /* Save the most likely value for this task's CR4 in the VMCS. */
28905 cr4 = read_cr4();
28906@@ -4279,7 +4290,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
28907 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
28908 vmx->host_idt_base = dt.address;
28909
28910- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
28911+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
28912
28913 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
28914 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
28915@@ -5876,11 +5887,16 @@ static __init int hardware_setup(void)
28916 * page upon invalidation. No need to do anything if the
28917 * processor does not have the APIC_ACCESS_ADDR VMCS field.
28918 */
28919- kvm_x86_ops->set_apic_access_page_addr = NULL;
28920+ pax_open_kernel();
28921+ *(void **)&kvm_x86_ops->set_apic_access_page_addr = NULL;
28922+ pax_close_kernel();
28923 }
28924
28925- if (!cpu_has_vmx_tpr_shadow())
28926- kvm_x86_ops->update_cr8_intercept = NULL;
28927+ if (!cpu_has_vmx_tpr_shadow()) {
28928+ pax_open_kernel();
28929+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28930+ pax_close_kernel();
28931+ }
28932
28933 if (enable_ept && !cpu_has_vmx_ept_2m_page())
28934 kvm_disable_largepages();
28935@@ -5891,13 +5907,15 @@ static __init int hardware_setup(void)
28936 if (!cpu_has_vmx_apicv())
28937 enable_apicv = 0;
28938
28939+ pax_open_kernel();
28940 if (enable_apicv)
28941- kvm_x86_ops->update_cr8_intercept = NULL;
28942+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
28943 else {
28944- kvm_x86_ops->hwapic_irr_update = NULL;
28945- kvm_x86_ops->deliver_posted_interrupt = NULL;
28946- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28947+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
28948+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
28949+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
28950 }
28951+ pax_close_kernel();
28952
28953 if (nested)
28954 nested_vmx_setup_ctls_msrs();
28955@@ -7846,6 +7864,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28956 "jmp 2f \n\t"
28957 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
28958 "2: "
28959+
28960+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28961+ "ljmp %[cs],$3f\n\t"
28962+ "3: "
28963+#endif
28964+
28965 /* Save guest registers, load host registers, keep flags */
28966 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
28967 "pop %0 \n\t"
28968@@ -7898,6 +7922,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28969 #endif
28970 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
28971 [wordsize]"i"(sizeof(ulong))
28972+
28973+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28974+ ,[cs]"i"(__KERNEL_CS)
28975+#endif
28976+
28977 : "cc", "memory"
28978 #ifdef CONFIG_X86_64
28979 , "rax", "rbx", "rdi", "rsi"
28980@@ -7911,7 +7940,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28981 if (debugctlmsr)
28982 update_debugctlmsr(debugctlmsr);
28983
28984-#ifndef CONFIG_X86_64
28985+#ifdef CONFIG_X86_32
28986 /*
28987 * The sysexit path does not restore ds/es, so we must set them to
28988 * a reasonable value ourselves.
28989@@ -7920,8 +7949,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
28990 * may be executed in interrupt context, which saves and restore segments
28991 * around it, nullifying its effect.
28992 */
28993- loadsegment(ds, __USER_DS);
28994- loadsegment(es, __USER_DS);
28995+ loadsegment(ds, __KERNEL_DS);
28996+ loadsegment(es, __KERNEL_DS);
28997+ loadsegment(ss, __KERNEL_DS);
28998+
28999+#ifdef CONFIG_PAX_KERNEXEC
29000+ loadsegment(fs, __KERNEL_PERCPU);
29001+#endif
29002+
29003+#ifdef CONFIG_PAX_MEMORY_UDEREF
29004+ __set_fs(current_thread_info()->addr_limit);
29005+#endif
29006+
29007 #endif
29008
29009 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
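Note on the vmcs_set_bits()/vmcs_clear_bits() change in the vmx.c hunks above: both helpers operate on a natural-width field read via vmcs_readl(), so a u32 mask silently truncates and can never touch bits 32-63 on 64-bit; widening the parameter to unsigned long fixes that. The hazard in isolation:

    #include <stdio.h>

    static unsigned long field = 0xffffffff00000000ul;

    /* Buggy: a u32 mask can never clear the upper half of the field. */
    static void clear_bits_u32(unsigned int mask)    { field &= ~(unsigned long)mask; }
    /* Fixed: mask width matches the field, as in the patched vmx.c. */
    static void clear_bits_ulong(unsigned long mask) { field &= ~mask; }

    int main(void)
    {
        clear_bits_u32(1ul << 33);     /* argument truncates to 0: no effect */
        printf("after u32 clear:   %#lx\n", field);
        clear_bits_ulong(1ul << 33);   /* actually clears bit 33 */
        printf("after ulong clear: %#lx\n", field);
        return 0;
    }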
29010diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
29011index 64d76c1..e20a4c1 100644
29012--- a/arch/x86/kvm/x86.c
29013+++ b/arch/x86/kvm/x86.c
29014@@ -1882,8 +1882,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
29015 {
29016 struct kvm *kvm = vcpu->kvm;
29017 int lm = is_long_mode(vcpu);
29018- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
29019- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
29020+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
29021+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
29022 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
29023 : kvm->arch.xen_hvm_config.blob_size_32;
29024 u32 page_num = data & ~PAGE_MASK;
29025@@ -2809,6 +2809,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
29026 if (n < msr_list.nmsrs)
29027 goto out;
29028 r = -EFAULT;
29029+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
29030+ goto out;
29031 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
29032 num_msrs_to_save * sizeof(u32)))
29033 goto out;
29034@@ -5745,7 +5747,7 @@ static struct notifier_block pvclock_gtod_notifier = {
29035 };
29036 #endif
29037
29038-int kvm_arch_init(void *opaque)
29039+int kvm_arch_init(const void *opaque)
29040 {
29041 int r;
29042 struct kvm_x86_ops *ops = opaque;
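Note on the kvm_arch_dev_ioctl() hunk above: before copying the MSR index list out, the patch re-checks that num_msrs_to_save has not grown past the backing array, so the copy length can never exceed the object even if the counter were corrupted elsewhere. The same clamp-before-copy shape in a standalone sketch (memcpy stands in for copy_to_user):

    #include <errno.h>
    #include <stddef.h>
    #include <string.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static unsigned int msrs_to_save[16];
    static unsigned int num_msrs_to_save;   /* updated elsewhere at runtime */

    static int export_msr_list(unsigned int *dst, size_t dst_len)
    {
        /* Mirrors the added guard: count must fit the backing array. */
        if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
            return -EFAULT;
        if (dst_len < num_msrs_to_save)
            return -E2BIG;
        memcpy(dst, msrs_to_save, num_msrs_to_save * sizeof(*dst));
        return 0;
    }

    int main(void)
    {
        unsigned int out[16];
        num_msrs_to_save = 4;
        return export_msr_list(out, ARRAY_SIZE(out));
    }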
29043diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
29044index c1c1544..f90c9d5 100644
29045--- a/arch/x86/lguest/boot.c
29046+++ b/arch/x86/lguest/boot.c
29047@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
29048 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
29049 * Launcher to reboot us.
29050 */
29051-static void lguest_restart(char *reason)
29052+static __noreturn void lguest_restart(char *reason)
29053 {
29054 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
29055+ BUG();
29056 }
29057
29058 /*G:050
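Note on the lguest hunk above: once lguest_restart() is declared __noreturn, the compiler is free to drop everything after a call to it, so the function must be made to genuinely never return - hence the BUG() after the hypercall. The same contract in standard C, with abort() playing the role of BUG() if the shutdown call ever came back:

    #include <stdio.h>
    #include <stdlib.h>

    static _Noreturn void restart(const char *reason)
    {
        fprintf(stderr, "restart: %s\n", reason);
        /* hcall(LHCALL_SHUTDOWN, ...) in the real code */
        abort();
    }

    int main(void)
    {
        restart("demo");
        /* unreachable: the compiler may elide anything placed here */
    }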
29059diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
29060index 00933d5..3a64af9 100644
29061--- a/arch/x86/lib/atomic64_386_32.S
29062+++ b/arch/x86/lib/atomic64_386_32.S
29063@@ -48,6 +48,10 @@ BEGIN(read)
29064 movl (v), %eax
29065 movl 4(v), %edx
29066 RET_ENDP
29067+BEGIN(read_unchecked)
29068+ movl (v), %eax
29069+ movl 4(v), %edx
29070+RET_ENDP
29071 #undef v
29072
29073 #define v %esi
29074@@ -55,6 +59,10 @@ BEGIN(set)
29075 movl %ebx, (v)
29076 movl %ecx, 4(v)
29077 RET_ENDP
29078+BEGIN(set_unchecked)
29079+ movl %ebx, (v)
29080+ movl %ecx, 4(v)
29081+RET_ENDP
29082 #undef v
29083
29084 #define v %esi
29085@@ -70,6 +78,20 @@ RET_ENDP
29086 BEGIN(add)
29087 addl %eax, (v)
29088 adcl %edx, 4(v)
29089+
29090+#ifdef CONFIG_PAX_REFCOUNT
29091+ jno 0f
29092+ subl %eax, (v)
29093+ sbbl %edx, 4(v)
29094+ int $4
29095+0:
29096+ _ASM_EXTABLE(0b, 0b)
29097+#endif
29098+
29099+RET_ENDP
29100+BEGIN(add_unchecked)
29101+ addl %eax, (v)
29102+ adcl %edx, 4(v)
29103 RET_ENDP
29104 #undef v
29105
29106@@ -77,6 +99,24 @@ RET_ENDP
29107 BEGIN(add_return)
29108 addl (v), %eax
29109 adcl 4(v), %edx
29110+
29111+#ifdef CONFIG_PAX_REFCOUNT
29112+ into
29113+1234:
29114+ _ASM_EXTABLE(1234b, 2f)
29115+#endif
29116+
29117+ movl %eax, (v)
29118+ movl %edx, 4(v)
29119+
29120+#ifdef CONFIG_PAX_REFCOUNT
29121+2:
29122+#endif
29123+
29124+RET_ENDP
29125+BEGIN(add_return_unchecked)
29126+ addl (v), %eax
29127+ adcl 4(v), %edx
29128 movl %eax, (v)
29129 movl %edx, 4(v)
29130 RET_ENDP
29131@@ -86,6 +126,20 @@ RET_ENDP
29132 BEGIN(sub)
29133 subl %eax, (v)
29134 sbbl %edx, 4(v)
29135+
29136+#ifdef CONFIG_PAX_REFCOUNT
29137+ jno 0f
29138+ addl %eax, (v)
29139+ adcl %edx, 4(v)
29140+ int $4
29141+0:
29142+ _ASM_EXTABLE(0b, 0b)
29143+#endif
29144+
29145+RET_ENDP
29146+BEGIN(sub_unchecked)
29147+ subl %eax, (v)
29148+ sbbl %edx, 4(v)
29149 RET_ENDP
29150 #undef v
29151
29152@@ -96,6 +150,27 @@ BEGIN(sub_return)
29153 sbbl $0, %edx
29154 addl (v), %eax
29155 adcl 4(v), %edx
29156+
29157+#ifdef CONFIG_PAX_REFCOUNT
29158+ into
29159+1234:
29160+ _ASM_EXTABLE(1234b, 2f)
29161+#endif
29162+
29163+ movl %eax, (v)
29164+ movl %edx, 4(v)
29165+
29166+#ifdef CONFIG_PAX_REFCOUNT
29167+2:
29168+#endif
29169+
29170+RET_ENDP
29171+BEGIN(sub_return_unchecked)
29172+ negl %edx
29173+ negl %eax
29174+ sbbl $0, %edx
29175+ addl (v), %eax
29176+ adcl 4(v), %edx
29177 movl %eax, (v)
29178 movl %edx, 4(v)
29179 RET_ENDP
29180@@ -105,6 +180,20 @@ RET_ENDP
29181 BEGIN(inc)
29182 addl $1, (v)
29183 adcl $0, 4(v)
29184+
29185+#ifdef CONFIG_PAX_REFCOUNT
29186+ jno 0f
29187+ subl $1, (v)
29188+ sbbl $0, 4(v)
29189+ int $4
29190+0:
29191+ _ASM_EXTABLE(0b, 0b)
29192+#endif
29193+
29194+RET_ENDP
29195+BEGIN(inc_unchecked)
29196+ addl $1, (v)
29197+ adcl $0, 4(v)
29198 RET_ENDP
29199 #undef v
29200
29201@@ -114,6 +203,26 @@ BEGIN(inc_return)
29202 movl 4(v), %edx
29203 addl $1, %eax
29204 adcl $0, %edx
29205+
29206+#ifdef CONFIG_PAX_REFCOUNT
29207+ into
29208+1234:
29209+ _ASM_EXTABLE(1234b, 2f)
29210+#endif
29211+
29212+ movl %eax, (v)
29213+ movl %edx, 4(v)
29214+
29215+#ifdef CONFIG_PAX_REFCOUNT
29216+2:
29217+#endif
29218+
29219+RET_ENDP
29220+BEGIN(inc_return_unchecked)
29221+ movl (v), %eax
29222+ movl 4(v), %edx
29223+ addl $1, %eax
29224+ adcl $0, %edx
29225 movl %eax, (v)
29226 movl %edx, 4(v)
29227 RET_ENDP
29228@@ -123,6 +232,20 @@ RET_ENDP
29229 BEGIN(dec)
29230 subl $1, (v)
29231 sbbl $0, 4(v)
29232+
29233+#ifdef CONFIG_PAX_REFCOUNT
29234+ jno 0f
29235+ addl $1, (v)
29236+ adcl $0, 4(v)
29237+ int $4
29238+0:
29239+ _ASM_EXTABLE(0b, 0b)
29240+#endif
29241+
29242+RET_ENDP
29243+BEGIN(dec_unchecked)
29244+ subl $1, (v)
29245+ sbbl $0, 4(v)
29246 RET_ENDP
29247 #undef v
29248
29249@@ -132,6 +255,26 @@ BEGIN(dec_return)
29250 movl 4(v), %edx
29251 subl $1, %eax
29252 sbbl $0, %edx
29253+
29254+#ifdef CONFIG_PAX_REFCOUNT
29255+ into
29256+1234:
29257+ _ASM_EXTABLE(1234b, 2f)
29258+#endif
29259+
29260+ movl %eax, (v)
29261+ movl %edx, 4(v)
29262+
29263+#ifdef CONFIG_PAX_REFCOUNT
29264+2:
29265+#endif
29266+
29267+RET_ENDP
29268+BEGIN(dec_return_unchecked)
29269+ movl (v), %eax
29270+ movl 4(v), %edx
29271+ subl $1, %eax
29272+ sbbl $0, %edx
29273 movl %eax, (v)
29274 movl %edx, 4(v)
29275 RET_ENDP
29276@@ -143,6 +286,13 @@ BEGIN(add_unless)
29277 adcl %edx, %edi
29278 addl (v), %eax
29279 adcl 4(v), %edx
29280+
29281+#ifdef CONFIG_PAX_REFCOUNT
29282+ into
29283+1234:
29284+ _ASM_EXTABLE(1234b, 2f)
29285+#endif
29286+
29287 cmpl %eax, %ecx
29288 je 3f
29289 1:
29290@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29291 1:
29292 addl $1, %eax
29293 adcl $0, %edx
29294+
29295+#ifdef CONFIG_PAX_REFCOUNT
29296+ into
29297+1234:
29298+ _ASM_EXTABLE(1234b, 2f)
29299+#endif
29300+
29301 movl %eax, (v)
29302 movl %edx, 4(v)
29303 movl $1, %eax
29304@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29305 movl 4(v), %edx
29306 subl $1, %eax
29307 sbbl $0, %edx
29308+
29309+#ifdef CONFIG_PAX_REFCOUNT
29310+ into
29311+1234:
29312+ _ASM_EXTABLE(1234b, 1f)
29313+#endif
29314+
29315 js 1f
29316 movl %eax, (v)
29317 movl %edx, 4(v)
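Note on the atomic64_386_32.S hunks above: every checked op follows the same recipe - perform the 64-bit add/sub, then trap (`int $4` after `jno`, or `into`) if the signed result overflowed, with an _ASM_EXTABLE entry routing the trap to the PaX refcount handler - while the new *_unchecked variants are the raw ops for counters that may legitimately wrap. A rough C analogue of the checked/unchecked split; the handler is a stand-in for the exception-table path:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void refcount_overflow_handler(void)
    {
        fprintf(stderr, "refcount overflow detected\n");
        abort();
    }

    /* Checked: trap on signed overflow, like atomic64_add with PAX_REFCOUNT. */
    static int64_t add_checked(int64_t v, int64_t delta)
    {
        int64_t r;
        if (__builtin_add_overflow(v, delta, &r))
            refcount_overflow_handler();
        return r;
    }

    /* Unchecked: plain two's-complement wrap, like atomic64_add_unchecked. */
    static int64_t add_unchecked(int64_t v, int64_t delta)
    {
        return (int64_t)((uint64_t)v + (uint64_t)delta);
    }

    int main(void)
    {
        printf("%lld\n", (long long)add_unchecked(INT64_MAX, 1)); /* wraps */
        add_checked(INT64_MAX, 1);                                /* traps */
        return 0;
    }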
29318diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29319index f5cc9eb..51fa319 100644
29320--- a/arch/x86/lib/atomic64_cx8_32.S
29321+++ b/arch/x86/lib/atomic64_cx8_32.S
29322@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
29323 CFI_STARTPROC
29324
29325 read64 %ecx
29326+ pax_force_retaddr
29327 ret
29328 CFI_ENDPROC
29329 ENDPROC(atomic64_read_cx8)
29330
29331+ENTRY(atomic64_read_unchecked_cx8)
29332+ CFI_STARTPROC
29333+
29334+ read64 %ecx
29335+ pax_force_retaddr
29336+ ret
29337+ CFI_ENDPROC
29338+ENDPROC(atomic64_read_unchecked_cx8)
29339+
29340 ENTRY(atomic64_set_cx8)
29341 CFI_STARTPROC
29342
29343@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
29344 cmpxchg8b (%esi)
29345 jne 1b
29346
29347+ pax_force_retaddr
29348 ret
29349 CFI_ENDPROC
29350 ENDPROC(atomic64_set_cx8)
29351
29352+ENTRY(atomic64_set_unchecked_cx8)
29353+ CFI_STARTPROC
29354+
29355+1:
29356+/* we don't need LOCK_PREFIX since aligned 64-bit writes
29357+ * are atomic on 586 and newer */
29358+ cmpxchg8b (%esi)
29359+ jne 1b
29360+
29361+ pax_force_retaddr
29362+ ret
29363+ CFI_ENDPROC
29364+ENDPROC(atomic64_set_unchecked_cx8)
29365+
29366 ENTRY(atomic64_xchg_cx8)
29367 CFI_STARTPROC
29368
29369@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
29370 cmpxchg8b (%esi)
29371 jne 1b
29372
29373+ pax_force_retaddr
29374 ret
29375 CFI_ENDPROC
29376 ENDPROC(atomic64_xchg_cx8)
29377
29378-.macro addsub_return func ins insc
29379-ENTRY(atomic64_\func\()_return_cx8)
29380+.macro addsub_return func ins insc unchecked=""
29381+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29382 CFI_STARTPROC
29383 SAVE ebp
29384 SAVE ebx
29385@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29386 movl %edx, %ecx
29387 \ins\()l %esi, %ebx
29388 \insc\()l %edi, %ecx
29389+
29390+.ifb \unchecked
29391+#ifdef CONFIG_PAX_REFCOUNT
29392+ into
29393+2:
29394+ _ASM_EXTABLE(2b, 3f)
29395+#endif
29396+.endif
29397+
29398 LOCK_PREFIX
29399 cmpxchg8b (%ebp)
29400 jne 1b
29401-
29402-10:
29403 movl %ebx, %eax
29404 movl %ecx, %edx
29405+
29406+.ifb \unchecked
29407+#ifdef CONFIG_PAX_REFCOUNT
29408+3:
29409+#endif
29410+.endif
29411+
29412 RESTORE edi
29413 RESTORE esi
29414 RESTORE ebx
29415 RESTORE ebp
29416+ pax_force_retaddr
29417 ret
29418 CFI_ENDPROC
29419-ENDPROC(atomic64_\func\()_return_cx8)
29420+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29421 .endm
29422
29423 addsub_return add add adc
29424 addsub_return sub sub sbb
29425+addsub_return add add adc _unchecked
29426+addsub_return sub sub sbb _unchecked
29427
29428-.macro incdec_return func ins insc
29429-ENTRY(atomic64_\func\()_return_cx8)
29430+.macro incdec_return func ins insc unchecked=""
29431+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29432 CFI_STARTPROC
29433 SAVE ebx
29434
29435@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
29436 movl %edx, %ecx
29437 \ins\()l $1, %ebx
29438 \insc\()l $0, %ecx
29439+
29440+.ifb \unchecked
29441+#ifdef CONFIG_PAX_REFCOUNT
29442+ into
29443+2:
29444+ _ASM_EXTABLE(2b, 3f)
29445+#endif
29446+.endif
29447+
29448 LOCK_PREFIX
29449 cmpxchg8b (%esi)
29450 jne 1b
29451
29452-10:
29453 movl %ebx, %eax
29454 movl %ecx, %edx
29455+
29456+.ifb \unchecked
29457+#ifdef CONFIG_PAX_REFCOUNT
29458+3:
29459+#endif
29460+.endif
29461+
29462 RESTORE ebx
29463+ pax_force_retaddr
29464 ret
29465 CFI_ENDPROC
29466-ENDPROC(atomic64_\func\()_return_cx8)
29467+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29468 .endm
29469
29470 incdec_return inc add adc
29471 incdec_return dec sub sbb
29472+incdec_return inc add adc _unchecked
29473+incdec_return dec sub sbb _unchecked
29474
29475 ENTRY(atomic64_dec_if_positive_cx8)
29476 CFI_STARTPROC
29477@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29478 movl %edx, %ecx
29479 subl $1, %ebx
29480 sbb $0, %ecx
29481+
29482+#ifdef CONFIG_PAX_REFCOUNT
29483+ into
29484+1234:
29485+ _ASM_EXTABLE(1234b, 2f)
29486+#endif
29487+
29488 js 2f
29489 LOCK_PREFIX
29490 cmpxchg8b (%esi)
29491@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
29492 movl %ebx, %eax
29493 movl %ecx, %edx
29494 RESTORE ebx
29495+ pax_force_retaddr
29496 ret
29497 CFI_ENDPROC
29498 ENDPROC(atomic64_dec_if_positive_cx8)
29499@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
29500 movl %edx, %ecx
29501 addl %ebp, %ebx
29502 adcl %edi, %ecx
29503+
29504+#ifdef CONFIG_PAX_REFCOUNT
29505+ into
29506+1234:
29507+ _ASM_EXTABLE(1234b, 3f)
29508+#endif
29509+
29510 LOCK_PREFIX
29511 cmpxchg8b (%esi)
29512 jne 1b
29513@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
29514 CFI_ADJUST_CFA_OFFSET -8
29515 RESTORE ebx
29516 RESTORE ebp
29517+ pax_force_retaddr
29518 ret
29519 4:
29520 cmpl %edx, 4(%esp)
29521@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
29522 xorl %ecx, %ecx
29523 addl $1, %ebx
29524 adcl %edx, %ecx
29525+
29526+#ifdef CONFIG_PAX_REFCOUNT
29527+ into
29528+1234:
29529+ _ASM_EXTABLE(1234b, 3f)
29530+#endif
29531+
29532 LOCK_PREFIX
29533 cmpxchg8b (%esi)
29534 jne 1b
29535@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
29536 movl $1, %eax
29537 3:
29538 RESTORE ebx
29539+ pax_force_retaddr
29540 ret
29541 CFI_ENDPROC
29542 ENDPROC(atomic64_inc_not_zero_cx8)
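Note on the cx8 file above: both variants come out of one assembler macro - an extra `unchecked` parameter is pasted into the symbol name and `.ifb \unchecked` includes the overflow check only in the checked expansion. The same generate-two-variants trick in C preprocessor form, illustrative only:

    #include <stdint.h>

    /* One template, two instantiations, mirroring `.macro ... unchecked=""`;
     * CHECK expands to the overflow test only in the checked variant. */
    #define DEFINE_ADD(suffix, CHECK)                            \
        static int64_t add_return##suffix(int64_t v, int64_t d)  \
        {                                                        \
            int64_t r = (int64_t)((uint64_t)v + (uint64_t)d);    \
            CHECK(v, d, r);                                      \
            return r;                                            \
        }

    /* Signed add overflows iff both operands differ in sign from the result. */
    #define OVERFLOW_TRAP(v, d, r) \
        do { if (((v) ^ (r)) < 0 && ((d) ^ (r)) < 0) __builtin_trap(); } while (0)
    #define NO_CHECK(v, d, r) do { } while (0)

    DEFINE_ADD(, OVERFLOW_TRAP)            /* add_return() - checked */
    DEFINE_ADD(_unchecked, NO_CHECK)       /* add_return_unchecked() */

    int main(void)
    {
        return (int)(add_return(1, 2) + add_return_unchecked(3, 4)) - 10;
    }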
29543diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
29544index e78b8eee..7e173a8 100644
29545--- a/arch/x86/lib/checksum_32.S
29546+++ b/arch/x86/lib/checksum_32.S
29547@@ -29,7 +29,8 @@
29548 #include <asm/dwarf2.h>
29549 #include <asm/errno.h>
29550 #include <asm/asm.h>
29551-
29552+#include <asm/segment.h>
29553+
29554 /*
29555 * computes a partial checksum, e.g. for TCP/UDP fragments
29556 */
29557@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
29558
29559 #define ARGBASE 16
29560 #define FP 12
29561-
29562-ENTRY(csum_partial_copy_generic)
29563+
29564+ENTRY(csum_partial_copy_generic_to_user)
29565 CFI_STARTPROC
29566+
29567+#ifdef CONFIG_PAX_MEMORY_UDEREF
29568+ pushl_cfi %gs
29569+ popl_cfi %es
29570+ jmp csum_partial_copy_generic
29571+#endif
29572+
29573+ENTRY(csum_partial_copy_generic_from_user)
29574+
29575+#ifdef CONFIG_PAX_MEMORY_UDEREF
29576+ pushl_cfi %gs
29577+ popl_cfi %ds
29578+#endif
29579+
29580+ENTRY(csum_partial_copy_generic)
29581 subl $4,%esp
29582 CFI_ADJUST_CFA_OFFSET 4
29583 pushl_cfi %edi
29584@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
29585 jmp 4f
29586 SRC(1: movw (%esi), %bx )
29587 addl $2, %esi
29588-DST( movw %bx, (%edi) )
29589+DST( movw %bx, %es:(%edi) )
29590 addl $2, %edi
29591 addw %bx, %ax
29592 adcl $0, %eax
29593@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
29594 SRC(1: movl (%esi), %ebx )
29595 SRC( movl 4(%esi), %edx )
29596 adcl %ebx, %eax
29597-DST( movl %ebx, (%edi) )
29598+DST( movl %ebx, %es:(%edi) )
29599 adcl %edx, %eax
29600-DST( movl %edx, 4(%edi) )
29601+DST( movl %edx, %es:4(%edi) )
29602
29603 SRC( movl 8(%esi), %ebx )
29604 SRC( movl 12(%esi), %edx )
29605 adcl %ebx, %eax
29606-DST( movl %ebx, 8(%edi) )
29607+DST( movl %ebx, %es:8(%edi) )
29608 adcl %edx, %eax
29609-DST( movl %edx, 12(%edi) )
29610+DST( movl %edx, %es:12(%edi) )
29611
29612 SRC( movl 16(%esi), %ebx )
29613 SRC( movl 20(%esi), %edx )
29614 adcl %ebx, %eax
29615-DST( movl %ebx, 16(%edi) )
29616+DST( movl %ebx, %es:16(%edi) )
29617 adcl %edx, %eax
29618-DST( movl %edx, 20(%edi) )
29619+DST( movl %edx, %es:20(%edi) )
29620
29621 SRC( movl 24(%esi), %ebx )
29622 SRC( movl 28(%esi), %edx )
29623 adcl %ebx, %eax
29624-DST( movl %ebx, 24(%edi) )
29625+DST( movl %ebx, %es:24(%edi) )
29626 adcl %edx, %eax
29627-DST( movl %edx, 28(%edi) )
29628+DST( movl %edx, %es:28(%edi) )
29629
29630 lea 32(%esi), %esi
29631 lea 32(%edi), %edi
29632@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
29633 shrl $2, %edx # This clears CF
29634 SRC(3: movl (%esi), %ebx )
29635 adcl %ebx, %eax
29636-DST( movl %ebx, (%edi) )
29637+DST( movl %ebx, %es:(%edi) )
29638 lea 4(%esi), %esi
29639 lea 4(%edi), %edi
29640 dec %edx
29641@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
29642 jb 5f
29643 SRC( movw (%esi), %cx )
29644 leal 2(%esi), %esi
29645-DST( movw %cx, (%edi) )
29646+DST( movw %cx, %es:(%edi) )
29647 leal 2(%edi), %edi
29648 je 6f
29649 shll $16,%ecx
29650 SRC(5: movb (%esi), %cl )
29651-DST( movb %cl, (%edi) )
29652+DST( movb %cl, %es:(%edi) )
29653 6: addl %ecx, %eax
29654 adcl $0, %eax
29655 7:
29656@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
29657
29658 6001:
29659 movl ARGBASE+20(%esp), %ebx # src_err_ptr
29660- movl $-EFAULT, (%ebx)
29661+ movl $-EFAULT, %ss:(%ebx)
29662
29663 # zero the complete destination - computing the rest
29664 # is too much work
29665@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
29666
29667 6002:
29668 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29669- movl $-EFAULT,(%ebx)
29670+ movl $-EFAULT,%ss:(%ebx)
29671 jmp 5000b
29672
29673 .previous
29674
29675+ pushl_cfi %ss
29676+ popl_cfi %ds
29677+ pushl_cfi %ss
29678+ popl_cfi %es
29679 popl_cfi %ebx
29680 CFI_RESTORE ebx
29681 popl_cfi %esi
29682@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
29683 popl_cfi %ecx # equivalent to addl $4,%esp
29684 ret
29685 CFI_ENDPROC
29686-ENDPROC(csum_partial_copy_generic)
29687+ENDPROC(csum_partial_copy_generic_to_user)
29688
29689 #else
29690
29691 /* Version for PentiumII/PPro */
29692
29693 #define ROUND1(x) \
29694+ nop; nop; nop; \
29695 SRC(movl x(%esi), %ebx ) ; \
29696 addl %ebx, %eax ; \
29697- DST(movl %ebx, x(%edi) ) ;
29698+ DST(movl %ebx, %es:x(%edi)) ;
29699
29700 #define ROUND(x) \
29701+ nop; nop; nop; \
29702 SRC(movl x(%esi), %ebx ) ; \
29703 adcl %ebx, %eax ; \
29704- DST(movl %ebx, x(%edi) ) ;
29705+ DST(movl %ebx, %es:x(%edi)) ;
29706
29707 #define ARGBASE 12
29708-
29709-ENTRY(csum_partial_copy_generic)
29710+
29711+ENTRY(csum_partial_copy_generic_to_user)
29712 CFI_STARTPROC
29713+
29714+#ifdef CONFIG_PAX_MEMORY_UDEREF
29715+ pushl_cfi %gs
29716+ popl_cfi %es
29717+ jmp csum_partial_copy_generic
29718+#endif
29719+
29720+ENTRY(csum_partial_copy_generic_from_user)
29721+
29722+#ifdef CONFIG_PAX_MEMORY_UDEREF
29723+ pushl_cfi %gs
29724+ popl_cfi %ds
29725+#endif
29726+
29727+ENTRY(csum_partial_copy_generic)
29728 pushl_cfi %ebx
29729 CFI_REL_OFFSET ebx, 0
29730 pushl_cfi %edi
29731@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
29732 subl %ebx, %edi
29733 lea -1(%esi),%edx
29734 andl $-32,%edx
29735- lea 3f(%ebx,%ebx), %ebx
29736+ lea 3f(%ebx,%ebx,2), %ebx
29737 testl %esi, %esi
29738 jmp *%ebx
29739 1: addl $64,%esi
29740@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
29741 jb 5f
29742 SRC( movw (%esi), %dx )
29743 leal 2(%esi), %esi
29744-DST( movw %dx, (%edi) )
29745+DST( movw %dx, %es:(%edi) )
29746 leal 2(%edi), %edi
29747 je 6f
29748 shll $16,%edx
29749 5:
29750 SRC( movb (%esi), %dl )
29751-DST( movb %dl, (%edi) )
29752+DST( movb %dl, %es:(%edi) )
29753 6: addl %edx, %eax
29754 adcl $0, %eax
29755 7:
29756 .section .fixup, "ax"
29757 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
29758- movl $-EFAULT, (%ebx)
29759+ movl $-EFAULT, %ss:(%ebx)
29760 # zero the complete destination (computing the rest is too much work)
29761 movl ARGBASE+8(%esp),%edi # dst
29762 movl ARGBASE+12(%esp),%ecx # len
29763@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
29764 rep; stosb
29765 jmp 7b
29766 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
29767- movl $-EFAULT, (%ebx)
29768+ movl $-EFAULT, %ss:(%ebx)
29769 jmp 7b
29770 .previous
29771
29772+#ifdef CONFIG_PAX_MEMORY_UDEREF
29773+ pushl_cfi %ss
29774+ popl_cfi %ds
29775+ pushl_cfi %ss
29776+ popl_cfi %es
29777+#endif
29778+
29779 popl_cfi %esi
29780 CFI_RESTORE esi
29781 popl_cfi %edi
29782@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
29783 CFI_RESTORE ebx
29784 ret
29785 CFI_ENDPROC
29786-ENDPROC(csum_partial_copy_generic)
29787+ENDPROC(csum_partial_copy_generic_to_user)
29788
29789 #undef ROUND
29790 #undef ROUND1
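Note on the checksum_32.S hunks above: under UDEREF the generic checksum-copy body is left alone; the patch adds thin entry points that first load the userland segment into %es (copy to user) or %ds (copy from user) and fall through into csum_partial_copy_generic, and the common exit path restores both from %ss. Segment loads have no C equivalent, so this sketch only shows the wrapper-and-fall-through shape, with open/close helpers standing in for the pushl %gs / popl %es|%ds sequences:

    static void open_user_dst(void) { /* movw user_seg, %es in the patch */ }
    static void open_user_src(void) { /* movw user_seg, %ds in the patch */ }
    static void close_user(void)    { /* restore %ds/%es from %ss */ }

    /* Toy body; the real routine computes the Internet checksum while copying. */
    static unsigned int csum_copy_generic(const char *src, char *dst, int len)
    {
        unsigned int sum = 0;
        while (len--)
            sum += (unsigned char)(*dst++ = *src++);
        return sum;
    }

    static unsigned int csum_copy_to_user(const char *src, char *dst, int len)
    {
        unsigned int sum;
        open_user_dst();
        sum = csum_copy_generic(src, dst, len);
        close_user();
        return sum;
    }

    static unsigned int csum_copy_from_user(const char *src, char *dst, int len)
    {
        unsigned int sum;
        open_user_src();
        sum = csum_copy_generic(src, dst, len);
        close_user();
        return sum;
    }

    int main(void)
    {
        char src[4] = "abc", dst[4];
        return (int)(csum_copy_to_user(src, dst, 4) - csum_copy_from_user(src, dst, 4));
    }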
29791diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
29792index f2145cf..cea889d 100644
29793--- a/arch/x86/lib/clear_page_64.S
29794+++ b/arch/x86/lib/clear_page_64.S
29795@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
29796 movl $4096/8,%ecx
29797 xorl %eax,%eax
29798 rep stosq
29799+ pax_force_retaddr
29800 ret
29801 CFI_ENDPROC
29802 ENDPROC(clear_page_c)
29803@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
29804 movl $4096,%ecx
29805 xorl %eax,%eax
29806 rep stosb
29807+ pax_force_retaddr
29808 ret
29809 CFI_ENDPROC
29810 ENDPROC(clear_page_c_e)
29811@@ -43,6 +45,7 @@ ENTRY(clear_page)
29812 leaq 64(%rdi),%rdi
29813 jnz .Lloop
29814 nop
29815+ pax_force_retaddr
29816 ret
29817 CFI_ENDPROC
29818 .Lclear_page_end:
29819@@ -58,7 +61,7 @@ ENDPROC(clear_page)
29820
29821 #include <asm/cpufeature.h>
29822
29823- .section .altinstr_replacement,"ax"
29824+ .section .altinstr_replacement,"a"
29825 1: .byte 0xeb /* jmp <disp8> */
29826 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
29827 2: .byte 0xeb /* jmp <disp8> */
29828diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
29829index 40a1725..5d12ac4 100644
29830--- a/arch/x86/lib/cmpxchg16b_emu.S
29831+++ b/arch/x86/lib/cmpxchg16b_emu.S
29832@@ -8,6 +8,7 @@
29833 #include <linux/linkage.h>
29834 #include <asm/dwarf2.h>
29835 #include <asm/percpu.h>
29836+#include <asm/alternative-asm.h>
29837
29838 .text
29839
29840@@ -46,12 +47,14 @@ CFI_STARTPROC
29841 CFI_REMEMBER_STATE
29842 popfq_cfi
29843 mov $1, %al
29844+ pax_force_retaddr
29845 ret
29846
29847 CFI_RESTORE_STATE
29848 .Lnot_same:
29849 popfq_cfi
29850 xor %al,%al
29851+ pax_force_retaddr
29852 ret
29853
29854 CFI_ENDPROC
29855diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
29856index 176cca6..e0d658e 100644
29857--- a/arch/x86/lib/copy_page_64.S
29858+++ b/arch/x86/lib/copy_page_64.S
29859@@ -9,6 +9,7 @@ copy_page_rep:
29860 CFI_STARTPROC
29861 movl $4096/8, %ecx
29862 rep movsq
29863+ pax_force_retaddr
29864 ret
29865 CFI_ENDPROC
29866 ENDPROC(copy_page_rep)
29867@@ -24,8 +25,8 @@ ENTRY(copy_page)
29868 CFI_ADJUST_CFA_OFFSET 2*8
29869 movq %rbx, (%rsp)
29870 CFI_REL_OFFSET rbx, 0
29871- movq %r12, 1*8(%rsp)
29872- CFI_REL_OFFSET r12, 1*8
29873+ movq %r13, 1*8(%rsp)
29874+ CFI_REL_OFFSET r13, 1*8
29875
29876 movl $(4096/64)-5, %ecx
29877 .p2align 4
29878@@ -38,7 +39,7 @@ ENTRY(copy_page)
29879 movq 0x8*4(%rsi), %r9
29880 movq 0x8*5(%rsi), %r10
29881 movq 0x8*6(%rsi), %r11
29882- movq 0x8*7(%rsi), %r12
29883+ movq 0x8*7(%rsi), %r13
29884
29885 prefetcht0 5*64(%rsi)
29886
29887@@ -49,7 +50,7 @@ ENTRY(copy_page)
29888 movq %r9, 0x8*4(%rdi)
29889 movq %r10, 0x8*5(%rdi)
29890 movq %r11, 0x8*6(%rdi)
29891- movq %r12, 0x8*7(%rdi)
29892+ movq %r13, 0x8*7(%rdi)
29893
29894 leaq 64 (%rsi), %rsi
29895 leaq 64 (%rdi), %rdi
29896@@ -68,7 +69,7 @@ ENTRY(copy_page)
29897 movq 0x8*4(%rsi), %r9
29898 movq 0x8*5(%rsi), %r10
29899 movq 0x8*6(%rsi), %r11
29900- movq 0x8*7(%rsi), %r12
29901+ movq 0x8*7(%rsi), %r13
29902
29903 movq %rax, 0x8*0(%rdi)
29904 movq %rbx, 0x8*1(%rdi)
29905@@ -77,7 +78,7 @@ ENTRY(copy_page)
29906 movq %r9, 0x8*4(%rdi)
29907 movq %r10, 0x8*5(%rdi)
29908 movq %r11, 0x8*6(%rdi)
29909- movq %r12, 0x8*7(%rdi)
29910+ movq %r13, 0x8*7(%rdi)
29911
29912 leaq 64(%rdi), %rdi
29913 leaq 64(%rsi), %rsi
29914@@ -85,10 +86,11 @@ ENTRY(copy_page)
29915
29916 movq (%rsp), %rbx
29917 CFI_RESTORE rbx
29918- movq 1*8(%rsp), %r12
29919- CFI_RESTORE r12
29920+ movq 1*8(%rsp), %r13
29921+ CFI_RESTORE r13
29922 addq $2*8, %rsp
29923 CFI_ADJUST_CFA_OFFSET -2*8
29924+ pax_force_retaddr
29925 ret
29926 .Lcopy_page_end:
29927 CFI_ENDPROC
29928@@ -99,7 +101,7 @@ ENDPROC(copy_page)
29929
29930 #include <asm/cpufeature.h>
29931
29932- .section .altinstr_replacement,"ax"
29933+ .section .altinstr_replacement,"a"
29934 1: .byte 0xeb /* jmp <disp8> */
29935 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
29936 2:
29937diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
29938index dee945d..a84067b 100644
29939--- a/arch/x86/lib/copy_user_64.S
29940+++ b/arch/x86/lib/copy_user_64.S
29941@@ -18,31 +18,7 @@
29942 #include <asm/alternative-asm.h>
29943 #include <asm/asm.h>
29944 #include <asm/smap.h>
29945-
29946-/*
29947- * By placing feature2 after feature1 in altinstructions section, we logically
29948- * implement:
29949- * If CPU has feature2, jmp to alt2 is used
29950- * else if CPU has feature1, jmp to alt1 is used
29951- * else jmp to orig is used.
29952- */
29953- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
29954-0:
29955- .byte 0xe9 /* 32bit jump */
29956- .long \orig-1f /* by default jump to orig */
29957-1:
29958- .section .altinstr_replacement,"ax"
29959-2: .byte 0xe9 /* near jump with 32bit immediate */
29960- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
29961-3: .byte 0xe9 /* near jump with 32bit immediate */
29962- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
29963- .previous
29964-
29965- .section .altinstructions,"a"
29966- altinstruction_entry 0b,2b,\feature1,5,5
29967- altinstruction_entry 0b,3b,\feature2,5,5
29968- .previous
29969- .endm
29970+#include <asm/pgtable.h>
29971
29972 .macro ALIGN_DESTINATION
29973 #ifdef FIX_ALIGNMENT
29974@@ -70,52 +46,6 @@
29975 #endif
29976 .endm
29977
29978-/* Standard copy_to_user with segment limit checking */
29979-ENTRY(_copy_to_user)
29980- CFI_STARTPROC
29981- GET_THREAD_INFO(%rax)
29982- movq %rdi,%rcx
29983- addq %rdx,%rcx
29984- jc bad_to_user
29985- cmpq TI_addr_limit(%rax),%rcx
29986- ja bad_to_user
29987- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
29988- copy_user_generic_unrolled,copy_user_generic_string, \
29989- copy_user_enhanced_fast_string
29990- CFI_ENDPROC
29991-ENDPROC(_copy_to_user)
29992-
29993-/* Standard copy_from_user with segment limit checking */
29994-ENTRY(_copy_from_user)
29995- CFI_STARTPROC
29996- GET_THREAD_INFO(%rax)
29997- movq %rsi,%rcx
29998- addq %rdx,%rcx
29999- jc bad_from_user
30000- cmpq TI_addr_limit(%rax),%rcx
30001- ja bad_from_user
30002- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
30003- copy_user_generic_unrolled,copy_user_generic_string, \
30004- copy_user_enhanced_fast_string
30005- CFI_ENDPROC
30006-ENDPROC(_copy_from_user)
30007-
30008- .section .fixup,"ax"
30009- /* must zero dest */
30010-ENTRY(bad_from_user)
30011-bad_from_user:
30012- CFI_STARTPROC
30013- movl %edx,%ecx
30014- xorl %eax,%eax
30015- rep
30016- stosb
30017-bad_to_user:
30018- movl %edx,%eax
30019- ret
30020- CFI_ENDPROC
30021-ENDPROC(bad_from_user)
30022- .previous
30023-
30024 /*
30025 * copy_user_generic_unrolled - memory copy with exception handling.
30026 * This version is for CPUs like P4 that don't have efficient micro
30027@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
30028 */
30029 ENTRY(copy_user_generic_unrolled)
30030 CFI_STARTPROC
30031+ ASM_PAX_OPEN_USERLAND
30032 ASM_STAC
30033 cmpl $8,%edx
30034 jb 20f /* less then 8 bytes, go to byte copy loop */
30035@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
30036 jnz 21b
30037 23: xor %eax,%eax
30038 ASM_CLAC
30039+ ASM_PAX_CLOSE_USERLAND
30040+ pax_force_retaddr
30041 ret
30042
30043 .section .fixup,"ax"
30044@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
30045 */
30046 ENTRY(copy_user_generic_string)
30047 CFI_STARTPROC
30048+ ASM_PAX_OPEN_USERLAND
30049 ASM_STAC
30050 cmpl $8,%edx
30051 jb 2f /* less than 8 bytes, go to byte copy loop */
30052@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
30053 movsb
30054 xorl %eax,%eax
30055 ASM_CLAC
30056+ ASM_PAX_CLOSE_USERLAND
30057+ pax_force_retaddr
30058 ret
30059
30060 .section .fixup,"ax"
30061@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
30062 */
30063 ENTRY(copy_user_enhanced_fast_string)
30064 CFI_STARTPROC
30065+ ASM_PAX_OPEN_USERLAND
30066 ASM_STAC
30067 movl %edx,%ecx
30068 1: rep
30069 movsb
30070 xorl %eax,%eax
30071 ASM_CLAC
30072+ ASM_PAX_CLOSE_USERLAND
30073+ pax_force_retaddr
30074 ret
30075
30076 .section .fixup,"ax"
30077diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
30078index 6a4f43c..c70fb52 100644
30079--- a/arch/x86/lib/copy_user_nocache_64.S
30080+++ b/arch/x86/lib/copy_user_nocache_64.S
30081@@ -8,6 +8,7 @@
30082
30083 #include <linux/linkage.h>
30084 #include <asm/dwarf2.h>
30085+#include <asm/alternative-asm.h>
30086
30087 #define FIX_ALIGNMENT 1
30088
30089@@ -16,6 +17,7 @@
30090 #include <asm/thread_info.h>
30091 #include <asm/asm.h>
30092 #include <asm/smap.h>
30093+#include <asm/pgtable.h>
30094
30095 .macro ALIGN_DESTINATION
30096 #ifdef FIX_ALIGNMENT
30097@@ -49,6 +51,16 @@
30098 */
30099 ENTRY(__copy_user_nocache)
30100 CFI_STARTPROC
30101+
30102+#ifdef CONFIG_PAX_MEMORY_UDEREF
30103+ mov pax_user_shadow_base,%rcx
30104+ cmp %rcx,%rsi
30105+ jae 1f
30106+ add %rcx,%rsi
30107+1:
30108+#endif
30109+
30110+ ASM_PAX_OPEN_USERLAND
30111 ASM_STAC
30112 cmpl $8,%edx
30113 jb 20f /* less then 8 bytes, go to byte copy loop */
30114@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
30115 jnz 21b
30116 23: xorl %eax,%eax
30117 ASM_CLAC
30118+ ASM_PAX_CLOSE_USERLAND
30119 sfence
30120+ pax_force_retaddr
30121 ret
30122
30123 .section .fixup,"ax"
30124diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
30125index 2419d5f..fe52d0e 100644
30126--- a/arch/x86/lib/csum-copy_64.S
30127+++ b/arch/x86/lib/csum-copy_64.S
30128@@ -9,6 +9,7 @@
30129 #include <asm/dwarf2.h>
30130 #include <asm/errno.h>
30131 #include <asm/asm.h>
30132+#include <asm/alternative-asm.h>
30133
30134 /*
30135 * Checksum copy with exception handling.
30136@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
30137 CFI_ADJUST_CFA_OFFSET 7*8
30138 movq %rbx, 2*8(%rsp)
30139 CFI_REL_OFFSET rbx, 2*8
30140- movq %r12, 3*8(%rsp)
30141- CFI_REL_OFFSET r12, 3*8
30142+ movq %r15, 3*8(%rsp)
30143+ CFI_REL_OFFSET r15, 3*8
30144 movq %r14, 4*8(%rsp)
30145 CFI_REL_OFFSET r14, 4*8
30146 movq %r13, 5*8(%rsp)
30147@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
30148 movl %edx, %ecx
30149
30150 xorl %r9d, %r9d
30151- movq %rcx, %r12
30152+ movq %rcx, %r15
30153
30154- shrq $6, %r12
30155+ shrq $6, %r15
30156 jz .Lhandle_tail /* < 64 */
30157
30158 clc
30159
30160 /* main loop. clear in 64 byte blocks */
30161 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
30162- /* r11: temp3, rdx: temp4, r12 loopcnt */
30163+ /* r11: temp3, rdx: temp4, r15 loopcnt */
30164 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
30165 .p2align 4
30166 .Lloop:
30167@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
30168 adcq %r14, %rax
30169 adcq %r13, %rax
30170
30171- decl %r12d
30172+ decl %r15d
30173
30174 dest
30175 movq %rbx, (%rsi)
30176@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
30177 .Lende:
30178 movq 2*8(%rsp), %rbx
30179 CFI_RESTORE rbx
30180- movq 3*8(%rsp), %r12
30181- CFI_RESTORE r12
30182+ movq 3*8(%rsp), %r15
30183+ CFI_RESTORE r15
30184 movq 4*8(%rsp), %r14
30185 CFI_RESTORE r14
30186 movq 5*8(%rsp), %r13
30187@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30188 CFI_RESTORE rbp
30189 addq $7*8, %rsp
30190 CFI_ADJUST_CFA_OFFSET -7*8
30191+ pax_force_retaddr
30192 ret
30193 CFI_RESTORE_STATE
30194
30195diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30196index 1318f75..44c30fd 100644
30197--- a/arch/x86/lib/csum-wrappers_64.c
30198+++ b/arch/x86/lib/csum-wrappers_64.c
30199@@ -52,10 +52,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30200 len -= 2;
30201 }
30202 }
30203+ pax_open_userland();
30204 stac();
30205- isum = csum_partial_copy_generic((__force const void *)src,
30206+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30207 dst, len, isum, errp, NULL);
30208 clac();
30209+ pax_close_userland();
30210 if (unlikely(*errp))
30211 goto out_err;
30212
30213@@ -109,10 +111,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30214 }
30215
30216 *errp = 0;
30217+ pax_open_userland();
30218 stac();
30219- ret = csum_partial_copy_generic(src, (void __force *)dst,
30220+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30221 len, isum, NULL, errp);
30222 clac();
30223+ pax_close_userland();
30224 return ret;
30225 }
30226 EXPORT_SYMBOL(csum_partial_copy_to_user);
30227diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30228index a451235..a74bfa3 100644
30229--- a/arch/x86/lib/getuser.S
30230+++ b/arch/x86/lib/getuser.S
30231@@ -33,17 +33,40 @@
30232 #include <asm/thread_info.h>
30233 #include <asm/asm.h>
30234 #include <asm/smap.h>
30235+#include <asm/segment.h>
30236+#include <asm/pgtable.h>
30237+#include <asm/alternative-asm.h>
30238+
30239+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30240+#define __copyuser_seg gs;
30241+#else
30242+#define __copyuser_seg
30243+#endif
30244
30245 .text
30246 ENTRY(__get_user_1)
30247 CFI_STARTPROC
30248+
30249+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30250 GET_THREAD_INFO(%_ASM_DX)
30251 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30252 jae bad_get_user
30253+
30254+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30255+ mov pax_user_shadow_base,%_ASM_DX
30256+ cmp %_ASM_DX,%_ASM_AX
30257+ jae 1234f
30258+ add %_ASM_DX,%_ASM_AX
30259+1234:
30260+#endif
30261+
30262+#endif
30263+
30264 ASM_STAC
30265-1: movzbl (%_ASM_AX),%edx
30266+1: __copyuser_seg movzbl (%_ASM_AX),%edx
30267 xor %eax,%eax
30268 ASM_CLAC
30269+ pax_force_retaddr
30270 ret
30271 CFI_ENDPROC
30272 ENDPROC(__get_user_1)
30273@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30274 ENTRY(__get_user_2)
30275 CFI_STARTPROC
30276 add $1,%_ASM_AX
30277+
30278+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30279 jc bad_get_user
30280 GET_THREAD_INFO(%_ASM_DX)
30281 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30282 jae bad_get_user
30283+
30284+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30285+ mov pax_user_shadow_base,%_ASM_DX
30286+ cmp %_ASM_DX,%_ASM_AX
30287+ jae 1234f
30288+ add %_ASM_DX,%_ASM_AX
30289+1234:
30290+#endif
30291+
30292+#endif
30293+
30294 ASM_STAC
30295-2: movzwl -1(%_ASM_AX),%edx
30296+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30297 xor %eax,%eax
30298 ASM_CLAC
30299+ pax_force_retaddr
30300 ret
30301 CFI_ENDPROC
30302 ENDPROC(__get_user_2)
30303@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30304 ENTRY(__get_user_4)
30305 CFI_STARTPROC
30306 add $3,%_ASM_AX
30307+
30308+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30309 jc bad_get_user
30310 GET_THREAD_INFO(%_ASM_DX)
30311 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30312 jae bad_get_user
30313+
30314+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30315+ mov pax_user_shadow_base,%_ASM_DX
30316+ cmp %_ASM_DX,%_ASM_AX
30317+ jae 1234f
30318+ add %_ASM_DX,%_ASM_AX
30319+1234:
30320+#endif
30321+
30322+#endif
30323+
30324 ASM_STAC
30325-3: movl -3(%_ASM_AX),%edx
30326+3: __copyuser_seg movl -3(%_ASM_AX),%edx
30327 xor %eax,%eax
30328 ASM_CLAC
30329+ pax_force_retaddr
30330 ret
30331 CFI_ENDPROC
30332 ENDPROC(__get_user_4)
30333@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30334 GET_THREAD_INFO(%_ASM_DX)
30335 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30336 jae bad_get_user
30337+
30338+#ifdef CONFIG_PAX_MEMORY_UDEREF
30339+ mov pax_user_shadow_base,%_ASM_DX
30340+ cmp %_ASM_DX,%_ASM_AX
30341+ jae 1234f
30342+ add %_ASM_DX,%_ASM_AX
30343+1234:
30344+#endif
30345+
30346 ASM_STAC
30347 4: movq -7(%_ASM_AX),%rdx
30348 xor %eax,%eax
30349 ASM_CLAC
30350+ pax_force_retaddr
30351 ret
30352 #else
30353 add $7,%_ASM_AX
30354@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30355 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30356 jae bad_get_user_8
30357 ASM_STAC
30358-4: movl -7(%_ASM_AX),%edx
30359-5: movl -3(%_ASM_AX),%ecx
30360+4: __copyuser_seg movl -7(%_ASM_AX),%edx
30361+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30362 xor %eax,%eax
30363 ASM_CLAC
30364+ pax_force_retaddr
30365 ret
30366 #endif
30367 CFI_ENDPROC
30368@@ -113,6 +175,7 @@ bad_get_user:
30369 xor %edx,%edx
30370 mov $(-EFAULT),%_ASM_AX
30371 ASM_CLAC
30372+ pax_force_retaddr
30373 ret
30374 CFI_ENDPROC
30375 END(bad_get_user)
30376@@ -124,6 +187,7 @@ bad_get_user_8:
30377 xor %ecx,%ecx
30378 mov $(-EFAULT),%_ASM_AX
30379 ASM_CLAC
30380+ pax_force_retaddr
30381 ret
30382 CFI_ENDPROC
30383 END(bad_get_user_8)
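
The __get_user_N changes above come in two flavors. On i386 with PAX_MEMORY_UDEREF, the software addr_limit check is dropped and the load instead carries a %gs segment override (__copyuser_seg expands to "gs;"), so the segment limit enforces the range check in hardware. On amd64, a userland address below pax_user_shadow_base is rebased into the non-executable shadow mapping before the load. A C rendering of the amd64 rebase (helper name invented for illustration):

static inline unsigned long getuser_rebase_sketch(unsigned long uaddr)
{
	/* mirrors "mov pax_user_shadow_base,%_ASM_DX; cmp; jae 1234f; add" */
	if (uaddr < pax_user_shadow_base)
		uaddr += pax_user_shadow_base;	/* redirect into the shadow area */
	return uaddr;
}
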
30384diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30385index 1313ae6..84f25ea 100644
30386--- a/arch/x86/lib/insn.c
30387+++ b/arch/x86/lib/insn.c
30388@@ -20,8 +20,10 @@
30389
30390 #ifdef __KERNEL__
30391 #include <linux/string.h>
30392+#include <asm/pgtable_types.h>
30393 #else
30394 #include <string.h>
30395+#define ktla_ktva(addr) addr
30396 #endif
30397 #include <asm/inat.h>
30398 #include <asm/insn.h>
30399@@ -53,9 +55,9 @@
30400 void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
30401 {
30402 memset(insn, 0, sizeof(*insn));
30403- insn->kaddr = kaddr;
30404- insn->end_kaddr = kaddr + buf_len;
30405- insn->next_byte = kaddr;
30406+ insn->kaddr = ktla_ktva(kaddr);
30407+ insn->end_kaddr = insn->kaddr + buf_len;
30408+ insn->next_byte = insn->kaddr;
30409 insn->x86_64 = x86_64 ? 1 : 0;
30410 insn->opnd_bytes = 4;
30411 if (x86_64)
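
The insn.c hunk routes the decode buffer pointer through ktla_ktva() once, inside insn_init(), so that when KERNEXEC on i386 keeps kernel text at a linear address distinct from its readable alias, the decoder reads from the accessible alias; userland builds get the identity fallback shown in the hunk. Callers are unaffected, as in this sketch (assuming the 3.19 insn API with the buf_len parameter added just above):

static int decoded_length_sketch(const void *kernel_text_addr)
{
	struct insn insn;

	/* kaddr is translated via ktla_ktva() inside insn_init(), per the hunk */
	insn_init(&insn, kernel_text_addr, MAX_INSN_SIZE, IS_ENABLED(CONFIG_X86_64));
	insn_get_length(&insn);
	return insn.length;
}
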
30412diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30413index 05a95e7..326f2fa 100644
30414--- a/arch/x86/lib/iomap_copy_64.S
30415+++ b/arch/x86/lib/iomap_copy_64.S
30416@@ -17,6 +17,7 @@
30417
30418 #include <linux/linkage.h>
30419 #include <asm/dwarf2.h>
30420+#include <asm/alternative-asm.h>
30421
30422 /*
30423 * override generic version in lib/iomap_copy.c
30424@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30425 CFI_STARTPROC
30426 movl %edx,%ecx
30427 rep movsd
30428+ pax_force_retaddr
30429 ret
30430 CFI_ENDPROC
30431 ENDPROC(__iowrite32_copy)
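
pax_force_retaddr, inserted before nearly every ret in this section (here and again in the memcpy/memmove/memset, rwsem, and thunk files below), comes from the KERNEXEC gcc-plugin support pulled in via alternative-asm.h: just before the return address is consumed, it forces the saved value back into kernel space, so a corrupted return address aimed at userland becomes non-canonical and faults instead of executing. A conceptual C model only; the real macro is assembly (btsq $63,(%rsp) in the BTS plugin method, an OR against a mask register in the OR method):

static inline void force_retaddr_sketch(unsigned long *ret_slot)
{
	/* amd64 kernel addresses already have bit 63 set; a userland
	 * target gains it, turns non-canonical, and faults on ret */
	*ret_slot |= 1UL << 63;
}
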
30432diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30433index 56313a3..0db417e 100644
30434--- a/arch/x86/lib/memcpy_64.S
30435+++ b/arch/x86/lib/memcpy_64.S
30436@@ -24,7 +24,7 @@
30437 * This gets patched over the unrolled variant (below) via the
30438 * alternative instructions framework:
30439 */
30440- .section .altinstr_replacement, "ax", @progbits
30441+ .section .altinstr_replacement, "a", @progbits
30442 .Lmemcpy_c:
30443 movq %rdi, %rax
30444 movq %rdx, %rcx
30445@@ -33,6 +33,7 @@
30446 rep movsq
30447 movl %edx, %ecx
30448 rep movsb
30449+ pax_force_retaddr
30450 ret
30451 .Lmemcpy_e:
30452 .previous
30453@@ -44,11 +45,12 @@
30454 * This gets patched over the unrolled variant (below) via the
30455 * alternative instructions framework:
30456 */
30457- .section .altinstr_replacement, "ax", @progbits
30458+ .section .altinstr_replacement, "a", @progbits
30459 .Lmemcpy_c_e:
30460 movq %rdi, %rax
30461 movq %rdx, %rcx
30462 rep movsb
30463+ pax_force_retaddr
30464 ret
30465 .Lmemcpy_e_e:
30466 .previous
30467@@ -136,6 +138,7 @@ ENTRY(memcpy)
30468 movq %r9, 1*8(%rdi)
30469 movq %r10, -2*8(%rdi, %rdx)
30470 movq %r11, -1*8(%rdi, %rdx)
30471+ pax_force_retaddr
30472 retq
30473 .p2align 4
30474 .Lless_16bytes:
30475@@ -148,6 +151,7 @@ ENTRY(memcpy)
30476 movq -1*8(%rsi, %rdx), %r9
30477 movq %r8, 0*8(%rdi)
30478 movq %r9, -1*8(%rdi, %rdx)
30479+ pax_force_retaddr
30480 retq
30481 .p2align 4
30482 .Lless_8bytes:
30483@@ -161,6 +165,7 @@ ENTRY(memcpy)
30484 movl -4(%rsi, %rdx), %r8d
30485 movl %ecx, (%rdi)
30486 movl %r8d, -4(%rdi, %rdx)
30487+ pax_force_retaddr
30488 retq
30489 .p2align 4
30490 .Lless_3bytes:
30491@@ -179,6 +184,7 @@ ENTRY(memcpy)
30492 movb %cl, (%rdi)
30493
30494 .Lend:
30495+ pax_force_retaddr
30496 retq
30497 CFI_ENDPROC
30498 ENDPROC(memcpy)
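
The "ax" -> "a" flag change on .altinstr_replacement (repeated in memmove_64.S and memset_64.S below) drops the executable bit from the section: under KERNEXEC the replacement templates never execute in place, because the alternatives patcher only copies their bytes over the live instruction site. A simplified sketch of that patching step, assuming the 3.19 struct alt_instr layout and omitting the NOP-padding and length handling of the real apply_alternatives():

static void apply_one_alternative_sketch(struct alt_instr *a)
{
	/* both offsets are self-relative s32s */
	u8 *site = (u8 *)&a->instr_offset + a->instr_offset;
	u8 *repl = (u8 *)&a->repl_offset + a->repl_offset;

	if (!boot_cpu_has(a->cpuid))
		return;
	/* a plain data copy -- the source section is never jumped into,
	 * which is why "a" (alloc, non-exec) suffices for it */
	text_poke_early(site, repl, a->replacementlen);
}
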
30499diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
30500index 65268a6..dd1de11 100644
30501--- a/arch/x86/lib/memmove_64.S
30502+++ b/arch/x86/lib/memmove_64.S
30503@@ -202,14 +202,16 @@ ENTRY(memmove)
30504 movb (%rsi), %r11b
30505 movb %r11b, (%rdi)
30506 13:
30507+ pax_force_retaddr
30508 retq
30509 CFI_ENDPROC
30510
30511- .section .altinstr_replacement,"ax"
30512+ .section .altinstr_replacement,"a"
30513 .Lmemmove_begin_forward_efs:
30514 /* Forward moving data. */
30515 movq %rdx, %rcx
30516 rep movsb
30517+ pax_force_retaddr
30518 retq
30519 .Lmemmove_end_forward_efs:
30520 .previous
30521diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
30522index 2dcb380..2eb79fe 100644
30523--- a/arch/x86/lib/memset_64.S
30524+++ b/arch/x86/lib/memset_64.S
30525@@ -16,7 +16,7 @@
30526 *
30527 * rax original destination
30528 */
30529- .section .altinstr_replacement, "ax", @progbits
30530+ .section .altinstr_replacement, "a", @progbits
30531 .Lmemset_c:
30532 movq %rdi,%r9
30533 movq %rdx,%rcx
30534@@ -30,6 +30,7 @@
30535 movl %edx,%ecx
30536 rep stosb
30537 movq %r9,%rax
30538+ pax_force_retaddr
30539 ret
30540 .Lmemset_e:
30541 .previous
30542@@ -45,13 +46,14 @@
30543 *
30544 * rax original destination
30545 */
30546- .section .altinstr_replacement, "ax", @progbits
30547+ .section .altinstr_replacement, "a", @progbits
30548 .Lmemset_c_e:
30549 movq %rdi,%r9
30550 movb %sil,%al
30551 movq %rdx,%rcx
30552 rep stosb
30553 movq %r9,%rax
30554+ pax_force_retaddr
30555 ret
30556 .Lmemset_e_e:
30557 .previous
30558@@ -118,6 +120,7 @@ ENTRY(__memset)
30559
30560 .Lende:
30561 movq %r10,%rax
30562+ pax_force_retaddr
30563 ret
30564
30565 CFI_RESTORE_STATE
30566diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
30567index c9f2d9b..e7fd2c0 100644
30568--- a/arch/x86/lib/mmx_32.c
30569+++ b/arch/x86/lib/mmx_32.c
30570@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30571 {
30572 void *p;
30573 int i;
30574+ unsigned long cr0;
30575
30576 if (unlikely(in_interrupt()))
30577 return __memcpy(to, from, len);
30578@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
30579 kernel_fpu_begin();
30580
30581 __asm__ __volatile__ (
30582- "1: prefetch (%0)\n" /* This set is 28 bytes */
30583- " prefetch 64(%0)\n"
30584- " prefetch 128(%0)\n"
30585- " prefetch 192(%0)\n"
30586- " prefetch 256(%0)\n"
30587+ "1: prefetch (%1)\n" /* This set is 28 bytes */
30588+ " prefetch 64(%1)\n"
30589+ " prefetch 128(%1)\n"
30590+ " prefetch 192(%1)\n"
30591+ " prefetch 256(%1)\n"
30592 "2: \n"
30593 ".section .fixup, \"ax\"\n"
30594- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30595+ "3: \n"
30596+
30597+#ifdef CONFIG_PAX_KERNEXEC
30598+ " movl %%cr0, %0\n"
30599+ " movl %0, %%eax\n"
30600+ " andl $0xFFFEFFFF, %%eax\n"
30601+ " movl %%eax, %%cr0\n"
30602+#endif
30603+
30604+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30605+
30606+#ifdef CONFIG_PAX_KERNEXEC
30607+ " movl %0, %%cr0\n"
30608+#endif
30609+
30610 " jmp 2b\n"
30611 ".previous\n"
30612 _ASM_EXTABLE(1b, 3b)
30613- : : "r" (from));
30614+ : "=&r" (cr0) : "r" (from) : "ax");
30615
30616 for ( ; i > 5; i--) {
30617 __asm__ __volatile__ (
30618- "1: prefetch 320(%0)\n"
30619- "2: movq (%0), %%mm0\n"
30620- " movq 8(%0), %%mm1\n"
30621- " movq 16(%0), %%mm2\n"
30622- " movq 24(%0), %%mm3\n"
30623- " movq %%mm0, (%1)\n"
30624- " movq %%mm1, 8(%1)\n"
30625- " movq %%mm2, 16(%1)\n"
30626- " movq %%mm3, 24(%1)\n"
30627- " movq 32(%0), %%mm0\n"
30628- " movq 40(%0), %%mm1\n"
30629- " movq 48(%0), %%mm2\n"
30630- " movq 56(%0), %%mm3\n"
30631- " movq %%mm0, 32(%1)\n"
30632- " movq %%mm1, 40(%1)\n"
30633- " movq %%mm2, 48(%1)\n"
30634- " movq %%mm3, 56(%1)\n"
30635+ "1: prefetch 320(%1)\n"
30636+ "2: movq (%1), %%mm0\n"
30637+ " movq 8(%1), %%mm1\n"
30638+ " movq 16(%1), %%mm2\n"
30639+ " movq 24(%1), %%mm3\n"
30640+ " movq %%mm0, (%2)\n"
30641+ " movq %%mm1, 8(%2)\n"
30642+ " movq %%mm2, 16(%2)\n"
30643+ " movq %%mm3, 24(%2)\n"
30644+ " movq 32(%1), %%mm0\n"
30645+ " movq 40(%1), %%mm1\n"
30646+ " movq 48(%1), %%mm2\n"
30647+ " movq 56(%1), %%mm3\n"
30648+ " movq %%mm0, 32(%2)\n"
30649+ " movq %%mm1, 40(%2)\n"
30650+ " movq %%mm2, 48(%2)\n"
30651+ " movq %%mm3, 56(%2)\n"
30652 ".section .fixup, \"ax\"\n"
30653- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30654+ "3:\n"
30655+
30656+#ifdef CONFIG_PAX_KERNEXEC
30657+ " movl %%cr0, %0\n"
30658+ " movl %0, %%eax\n"
30659+ " andl $0xFFFEFFFF, %%eax\n"
30660+ " movl %%eax, %%cr0\n"
30661+#endif
30662+
30663+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30664+
30665+#ifdef CONFIG_PAX_KERNEXEC
30666+ " movl %0, %%cr0\n"
30667+#endif
30668+
30669 " jmp 2b\n"
30670 ".previous\n"
30671 _ASM_EXTABLE(1b, 3b)
30672- : : "r" (from), "r" (to) : "memory");
30673+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30674
30675 from += 64;
30676 to += 64;
30677@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
30678 static void fast_copy_page(void *to, void *from)
30679 {
30680 int i;
30681+ unsigned long cr0;
30682
30683 kernel_fpu_begin();
30684
30685@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
30686 * but that is for later. -AV
30687 */
30688 __asm__ __volatile__(
30689- "1: prefetch (%0)\n"
30690- " prefetch 64(%0)\n"
30691- " prefetch 128(%0)\n"
30692- " prefetch 192(%0)\n"
30693- " prefetch 256(%0)\n"
30694+ "1: prefetch (%1)\n"
30695+ " prefetch 64(%1)\n"
30696+ " prefetch 128(%1)\n"
30697+ " prefetch 192(%1)\n"
30698+ " prefetch 256(%1)\n"
30699 "2: \n"
30700 ".section .fixup, \"ax\"\n"
30701- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30702+ "3: \n"
30703+
30704+#ifdef CONFIG_PAX_KERNEXEC
30705+ " movl %%cr0, %0\n"
30706+ " movl %0, %%eax\n"
30707+ " andl $0xFFFEFFFF, %%eax\n"
30708+ " movl %%eax, %%cr0\n"
30709+#endif
30710+
30711+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30712+
30713+#ifdef CONFIG_PAX_KERNEXEC
30714+ " movl %0, %%cr0\n"
30715+#endif
30716+
30717 " jmp 2b\n"
30718 ".previous\n"
30719- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30720+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30721
30722 for (i = 0; i < (4096-320)/64; i++) {
30723 __asm__ __volatile__ (
30724- "1: prefetch 320(%0)\n"
30725- "2: movq (%0), %%mm0\n"
30726- " movntq %%mm0, (%1)\n"
30727- " movq 8(%0), %%mm1\n"
30728- " movntq %%mm1, 8(%1)\n"
30729- " movq 16(%0), %%mm2\n"
30730- " movntq %%mm2, 16(%1)\n"
30731- " movq 24(%0), %%mm3\n"
30732- " movntq %%mm3, 24(%1)\n"
30733- " movq 32(%0), %%mm4\n"
30734- " movntq %%mm4, 32(%1)\n"
30735- " movq 40(%0), %%mm5\n"
30736- " movntq %%mm5, 40(%1)\n"
30737- " movq 48(%0), %%mm6\n"
30738- " movntq %%mm6, 48(%1)\n"
30739- " movq 56(%0), %%mm7\n"
30740- " movntq %%mm7, 56(%1)\n"
30741+ "1: prefetch 320(%1)\n"
30742+ "2: movq (%1), %%mm0\n"
30743+ " movntq %%mm0, (%2)\n"
30744+ " movq 8(%1), %%mm1\n"
30745+ " movntq %%mm1, 8(%2)\n"
30746+ " movq 16(%1), %%mm2\n"
30747+ " movntq %%mm2, 16(%2)\n"
30748+ " movq 24(%1), %%mm3\n"
30749+ " movntq %%mm3, 24(%2)\n"
30750+ " movq 32(%1), %%mm4\n"
30751+ " movntq %%mm4, 32(%2)\n"
30752+ " movq 40(%1), %%mm5\n"
30753+ " movntq %%mm5, 40(%2)\n"
30754+ " movq 48(%1), %%mm6\n"
30755+ " movntq %%mm6, 48(%2)\n"
30756+ " movq 56(%1), %%mm7\n"
30757+ " movntq %%mm7, 56(%2)\n"
30758 ".section .fixup, \"ax\"\n"
30759- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30760+ "3:\n"
30761+
30762+#ifdef CONFIG_PAX_KERNEXEC
30763+ " movl %%cr0, %0\n"
30764+ " movl %0, %%eax\n"
30765+ " andl $0xFFFEFFFF, %%eax\n"
30766+ " movl %%eax, %%cr0\n"
30767+#endif
30768+
30769+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30770+
30771+#ifdef CONFIG_PAX_KERNEXEC
30772+ " movl %0, %%cr0\n"
30773+#endif
30774+
30775 " jmp 2b\n"
30776 ".previous\n"
30777- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
30778+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30779
30780 from += 64;
30781 to += 64;
30782@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
30783 static void fast_copy_page(void *to, void *from)
30784 {
30785 int i;
30786+ unsigned long cr0;
30787
30788 kernel_fpu_begin();
30789
30790 __asm__ __volatile__ (
30791- "1: prefetch (%0)\n"
30792- " prefetch 64(%0)\n"
30793- " prefetch 128(%0)\n"
30794- " prefetch 192(%0)\n"
30795- " prefetch 256(%0)\n"
30796+ "1: prefetch (%1)\n"
30797+ " prefetch 64(%1)\n"
30798+ " prefetch 128(%1)\n"
30799+ " prefetch 192(%1)\n"
30800+ " prefetch 256(%1)\n"
30801 "2: \n"
30802 ".section .fixup, \"ax\"\n"
30803- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30804+ "3: \n"
30805+
30806+#ifdef CONFIG_PAX_KERNEXEC
30807+ " movl %%cr0, %0\n"
30808+ " movl %0, %%eax\n"
30809+ " andl $0xFFFEFFFF, %%eax\n"
30810+ " movl %%eax, %%cr0\n"
30811+#endif
30812+
30813+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
30814+
30815+#ifdef CONFIG_PAX_KERNEXEC
30816+ " movl %0, %%cr0\n"
30817+#endif
30818+
30819 " jmp 2b\n"
30820 ".previous\n"
30821- _ASM_EXTABLE(1b, 3b) : : "r" (from));
30822+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
30823
30824 for (i = 0; i < 4096/64; i++) {
30825 __asm__ __volatile__ (
30826- "1: prefetch 320(%0)\n"
30827- "2: movq (%0), %%mm0\n"
30828- " movq 8(%0), %%mm1\n"
30829- " movq 16(%0), %%mm2\n"
30830- " movq 24(%0), %%mm3\n"
30831- " movq %%mm0, (%1)\n"
30832- " movq %%mm1, 8(%1)\n"
30833- " movq %%mm2, 16(%1)\n"
30834- " movq %%mm3, 24(%1)\n"
30835- " movq 32(%0), %%mm0\n"
30836- " movq 40(%0), %%mm1\n"
30837- " movq 48(%0), %%mm2\n"
30838- " movq 56(%0), %%mm3\n"
30839- " movq %%mm0, 32(%1)\n"
30840- " movq %%mm1, 40(%1)\n"
30841- " movq %%mm2, 48(%1)\n"
30842- " movq %%mm3, 56(%1)\n"
30843+ "1: prefetch 320(%1)\n"
30844+ "2: movq (%1), %%mm0\n"
30845+ " movq 8(%1), %%mm1\n"
30846+ " movq 16(%1), %%mm2\n"
30847+ " movq 24(%1), %%mm3\n"
30848+ " movq %%mm0, (%2)\n"
30849+ " movq %%mm1, 8(%2)\n"
30850+ " movq %%mm2, 16(%2)\n"
30851+ " movq %%mm3, 24(%2)\n"
30852+ " movq 32(%1), %%mm0\n"
30853+ " movq 40(%1), %%mm1\n"
30854+ " movq 48(%1), %%mm2\n"
30855+ " movq 56(%1), %%mm3\n"
30856+ " movq %%mm0, 32(%2)\n"
30857+ " movq %%mm1, 40(%2)\n"
30858+ " movq %%mm2, 48(%2)\n"
30859+ " movq %%mm3, 56(%2)\n"
30860 ".section .fixup, \"ax\"\n"
30861- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30862+ "3:\n"
30863+
30864+#ifdef CONFIG_PAX_KERNEXEC
30865+ " movl %%cr0, %0\n"
30866+ " movl %0, %%eax\n"
30867+ " andl $0xFFFEFFFF, %%eax\n"
30868+ " movl %%eax, %%cr0\n"
30869+#endif
30870+
30871+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
30872+
30873+#ifdef CONFIG_PAX_KERNEXEC
30874+ " movl %0, %%cr0\n"
30875+#endif
30876+
30877 " jmp 2b\n"
30878 ".previous\n"
30879 _ASM_EXTABLE(1b, 3b)
30880- : : "r" (from), "r" (to) : "memory");
30881+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
30882
30883 from += 64;
30884 to += 64;
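
The mmx_32.c changes above all follow one pattern: each inline asm gains a "=&r" (cr0) output (shifting the old %0/%1 operands to %1/%2 and adding "ax" to the clobbers), and under PAX_KERNEXEC the .fixup code that rewrites the faulting prefetch into a short jmp first clears CR0.WP, since that rewrite is now a store into read-only kernel text. The same logic in plain C (names illustrative; the real code keeps this inside the fixup asm):

static inline void defuse_prefetch_sketch(void *insn_1b)
{
	unsigned long cr0 = read_cr0();

	write_cr0(cr0 & ~X86_CR0_WP);	/* the "andl $0xFFFEFFFF" in the asm */
	*(u16 *)insn_1b = 0x1AEB;	/* bytes EB 1A: "jmp .+26", skipping the prefetches */
	write_cr0(cr0);			/* restore write protection */
}
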
30885diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
30886index f6d13ee..d789440 100644
30887--- a/arch/x86/lib/msr-reg.S
30888+++ b/arch/x86/lib/msr-reg.S
30889@@ -3,6 +3,7 @@
30890 #include <asm/dwarf2.h>
30891 #include <asm/asm.h>
30892 #include <asm/msr.h>
30893+#include <asm/alternative-asm.h>
30894
30895 #ifdef CONFIG_X86_64
30896 /*
30897@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
30898 movl %edi, 28(%r10)
30899 popq_cfi %rbp
30900 popq_cfi %rbx
30901+ pax_force_retaddr
30902 ret
30903 3:
30904 CFI_RESTORE_STATE
30905diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
30906index fc6ba17..14ad9a5 100644
30907--- a/arch/x86/lib/putuser.S
30908+++ b/arch/x86/lib/putuser.S
30909@@ -16,7 +16,9 @@
30910 #include <asm/errno.h>
30911 #include <asm/asm.h>
30912 #include <asm/smap.h>
30913-
30914+#include <asm/segment.h>
30915+#include <asm/pgtable.h>
30916+#include <asm/alternative-asm.h>
30917
30918 /*
30919 * __put_user_X
30920@@ -30,57 +32,125 @@
30921 * as they get called from within inline assembly.
30922 */
30923
30924-#define ENTER CFI_STARTPROC ; \
30925- GET_THREAD_INFO(%_ASM_BX)
30926-#define EXIT ASM_CLAC ; \
30927- ret ; \
30928+#define ENTER CFI_STARTPROC
30929+#define EXIT ASM_CLAC ; \
30930+ pax_force_retaddr ; \
30931+ ret ; \
30932 CFI_ENDPROC
30933
30934+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30935+#define _DEST %_ASM_CX,%_ASM_BX
30936+#else
30937+#define _DEST %_ASM_CX
30938+#endif
30939+
30940+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30941+#define __copyuser_seg gs;
30942+#else
30943+#define __copyuser_seg
30944+#endif
30945+
30946 .text
30947 ENTRY(__put_user_1)
30948 ENTER
30949+
30950+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30951+ GET_THREAD_INFO(%_ASM_BX)
30952 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
30953 jae bad_put_user
30954+
30955+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30956+ mov pax_user_shadow_base,%_ASM_BX
30957+ cmp %_ASM_BX,%_ASM_CX
30958+ jb 1234f
30959+ xor %ebx,%ebx
30960+1234:
30961+#endif
30962+
30963+#endif
30964+
30965 ASM_STAC
30966-1: movb %al,(%_ASM_CX)
30967+1: __copyuser_seg movb %al,(_DEST)
30968 xor %eax,%eax
30969 EXIT
30970 ENDPROC(__put_user_1)
30971
30972 ENTRY(__put_user_2)
30973 ENTER
30974+
30975+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30976+ GET_THREAD_INFO(%_ASM_BX)
30977 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
30978 sub $1,%_ASM_BX
30979 cmp %_ASM_BX,%_ASM_CX
30980 jae bad_put_user
30981+
30982+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30983+ mov pax_user_shadow_base,%_ASM_BX
30984+ cmp %_ASM_BX,%_ASM_CX
30985+ jb 1234f
30986+ xor %ebx,%ebx
30987+1234:
30988+#endif
30989+
30990+#endif
30991+
30992 ASM_STAC
30993-2: movw %ax,(%_ASM_CX)
30994+2: __copyuser_seg movw %ax,(_DEST)
30995 xor %eax,%eax
30996 EXIT
30997 ENDPROC(__put_user_2)
30998
30999 ENTRY(__put_user_4)
31000 ENTER
31001+
31002+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31003+ GET_THREAD_INFO(%_ASM_BX)
31004 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31005 sub $3,%_ASM_BX
31006 cmp %_ASM_BX,%_ASM_CX
31007 jae bad_put_user
31008+
31009+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31010+ mov pax_user_shadow_base,%_ASM_BX
31011+ cmp %_ASM_BX,%_ASM_CX
31012+ jb 1234f
31013+ xor %ebx,%ebx
31014+1234:
31015+#endif
31016+
31017+#endif
31018+
31019 ASM_STAC
31020-3: movl %eax,(%_ASM_CX)
31021+3: __copyuser_seg movl %eax,(_DEST)
31022 xor %eax,%eax
31023 EXIT
31024 ENDPROC(__put_user_4)
31025
31026 ENTRY(__put_user_8)
31027 ENTER
31028+
31029+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31030+ GET_THREAD_INFO(%_ASM_BX)
31031 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31032 sub $7,%_ASM_BX
31033 cmp %_ASM_BX,%_ASM_CX
31034 jae bad_put_user
31035+
31036+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31037+ mov pax_user_shadow_base,%_ASM_BX
31038+ cmp %_ASM_BX,%_ASM_CX
31039+ jb 1234f
31040+ xor %ebx,%ebx
31041+1234:
31042+#endif
31043+
31044+#endif
31045+
31046 ASM_STAC
31047-4: mov %_ASM_AX,(%_ASM_CX)
31048+4: __copyuser_seg mov %_ASM_AX,(_DEST)
31049 #ifdef CONFIG_X86_32
31050-5: movl %edx,4(%_ASM_CX)
31051+5: __copyuser_seg movl %edx,4(_DEST)
31052 #endif
31053 xor %eax,%eax
31054 EXIT
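
__put_user_N gets the same two-flavor treatment as getuser.S, with one twist on amd64: rather than rewriting %_ASM_CX in place, the shadow base stays in %_ASM_BX and the store uses the two-register addressing mode _DEST = (%_ASM_CX,%_ASM_BX), with %ebx zeroed when the address already sits above the shadow base. In C terms (helper name invented):

static inline unsigned long putuser_dest_sketch(unsigned long uaddr)
{
	unsigned long index = pax_user_shadow_base;

	if (uaddr >= index)	/* "jb 1234f; xor %ebx,%ebx" */
		index = 0;	/* already a shadow-area address */
	return uaddr + index;	/* effective address of (_DEST) */
}
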
31055diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
31056index 5dff5f0..cadebf4 100644
31057--- a/arch/x86/lib/rwsem.S
31058+++ b/arch/x86/lib/rwsem.S
31059@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
31060 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31061 CFI_RESTORE __ASM_REG(dx)
31062 restore_common_regs
31063+ pax_force_retaddr
31064 ret
31065 CFI_ENDPROC
31066 ENDPROC(call_rwsem_down_read_failed)
31067@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
31068 movq %rax,%rdi
31069 call rwsem_down_write_failed
31070 restore_common_regs
31071+ pax_force_retaddr
31072 ret
31073 CFI_ENDPROC
31074 ENDPROC(call_rwsem_down_write_failed)
31075@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
31076 movq %rax,%rdi
31077 call rwsem_wake
31078 restore_common_regs
31079-1: ret
31080+1: pax_force_retaddr
31081+ ret
31082 CFI_ENDPROC
31083 ENDPROC(call_rwsem_wake)
31084
31085@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
31086 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31087 CFI_RESTORE __ASM_REG(dx)
31088 restore_common_regs
31089+ pax_force_retaddr
31090 ret
31091 CFI_ENDPROC
31092 ENDPROC(call_rwsem_downgrade_wake)
31093diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
31094index b30b5eb..2b57052 100644
31095--- a/arch/x86/lib/thunk_64.S
31096+++ b/arch/x86/lib/thunk_64.S
31097@@ -9,6 +9,7 @@
31098 #include <asm/dwarf2.h>
31099 #include <asm/calling.h>
31100 #include <asm/asm.h>
31101+#include <asm/alternative-asm.h>
31102
31103 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
31104 .macro THUNK name, func, put_ret_addr_in_rdi=0
31105@@ -16,11 +17,11 @@
31106 \name:
31107 CFI_STARTPROC
31108
31109- /* this one pushes 9 elems, the next one would be %rIP */
31110- SAVE_ARGS
31111+ /* this one pushes 15+1 elems, the next one would be %rIP */
31112+ SAVE_ARGS 8
31113
31114 .if \put_ret_addr_in_rdi
31115- movq_cfi_restore 9*8, rdi
31116+ movq_cfi_restore RIP, rdi
31117 .endif
31118
31119 call \func
31120@@ -47,9 +48,10 @@
31121
31122 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
31123 CFI_STARTPROC
31124- SAVE_ARGS
31125+ SAVE_ARGS 8
31126 restore:
31127- RESTORE_ARGS
31128+ RESTORE_ARGS 1,8
31129+ pax_force_retaddr
31130 ret
31131 CFI_ENDPROC
31132 _ASM_NOKPROBE(restore)
31133diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
31134index e2f5e21..4b22130 100644
31135--- a/arch/x86/lib/usercopy_32.c
31136+++ b/arch/x86/lib/usercopy_32.c
31137@@ -42,11 +42,13 @@ do { \
31138 int __d0; \
31139 might_fault(); \
31140 __asm__ __volatile__( \
31141+ __COPYUSER_SET_ES \
31142 ASM_STAC "\n" \
31143 "0: rep; stosl\n" \
31144 " movl %2,%0\n" \
31145 "1: rep; stosb\n" \
31146 "2: " ASM_CLAC "\n" \
31147+ __COPYUSER_RESTORE_ES \
31148 ".section .fixup,\"ax\"\n" \
31149 "3: lea 0(%2,%0,4),%0\n" \
31150 " jmp 2b\n" \
31151@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
31152
31153 #ifdef CONFIG_X86_INTEL_USERCOPY
31154 static unsigned long
31155-__copy_user_intel(void __user *to, const void *from, unsigned long size)
31156+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
31157 {
31158 int d0, d1;
31159 __asm__ __volatile__(
31160@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31161 " .align 2,0x90\n"
31162 "3: movl 0(%4), %%eax\n"
31163 "4: movl 4(%4), %%edx\n"
31164- "5: movl %%eax, 0(%3)\n"
31165- "6: movl %%edx, 4(%3)\n"
31166+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
31167+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
31168 "7: movl 8(%4), %%eax\n"
31169 "8: movl 12(%4),%%edx\n"
31170- "9: movl %%eax, 8(%3)\n"
31171- "10: movl %%edx, 12(%3)\n"
31172+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
31173+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
31174 "11: movl 16(%4), %%eax\n"
31175 "12: movl 20(%4), %%edx\n"
31176- "13: movl %%eax, 16(%3)\n"
31177- "14: movl %%edx, 20(%3)\n"
31178+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
31179+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31180 "15: movl 24(%4), %%eax\n"
31181 "16: movl 28(%4), %%edx\n"
31182- "17: movl %%eax, 24(%3)\n"
31183- "18: movl %%edx, 28(%3)\n"
31184+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31185+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31186 "19: movl 32(%4), %%eax\n"
31187 "20: movl 36(%4), %%edx\n"
31188- "21: movl %%eax, 32(%3)\n"
31189- "22: movl %%edx, 36(%3)\n"
31190+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31191+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31192 "23: movl 40(%4), %%eax\n"
31193 "24: movl 44(%4), %%edx\n"
31194- "25: movl %%eax, 40(%3)\n"
31195- "26: movl %%edx, 44(%3)\n"
31196+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31197+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31198 "27: movl 48(%4), %%eax\n"
31199 "28: movl 52(%4), %%edx\n"
31200- "29: movl %%eax, 48(%3)\n"
31201- "30: movl %%edx, 52(%3)\n"
31202+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31203+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31204 "31: movl 56(%4), %%eax\n"
31205 "32: movl 60(%4), %%edx\n"
31206- "33: movl %%eax, 56(%3)\n"
31207- "34: movl %%edx, 60(%3)\n"
31208+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31209+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31210 " addl $-64, %0\n"
31211 " addl $64, %4\n"
31212 " addl $64, %3\n"
31213@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31214 " shrl $2, %0\n"
31215 " andl $3, %%eax\n"
31216 " cld\n"
31217+ __COPYUSER_SET_ES
31218 "99: rep; movsl\n"
31219 "36: movl %%eax, %0\n"
31220 "37: rep; movsb\n"
31221 "100:\n"
31222+ __COPYUSER_RESTORE_ES
31223+ ".section .fixup,\"ax\"\n"
31224+ "101: lea 0(%%eax,%0,4),%0\n"
31225+ " jmp 100b\n"
31226+ ".previous\n"
31227+ _ASM_EXTABLE(1b,100b)
31228+ _ASM_EXTABLE(2b,100b)
31229+ _ASM_EXTABLE(3b,100b)
31230+ _ASM_EXTABLE(4b,100b)
31231+ _ASM_EXTABLE(5b,100b)
31232+ _ASM_EXTABLE(6b,100b)
31233+ _ASM_EXTABLE(7b,100b)
31234+ _ASM_EXTABLE(8b,100b)
31235+ _ASM_EXTABLE(9b,100b)
31236+ _ASM_EXTABLE(10b,100b)
31237+ _ASM_EXTABLE(11b,100b)
31238+ _ASM_EXTABLE(12b,100b)
31239+ _ASM_EXTABLE(13b,100b)
31240+ _ASM_EXTABLE(14b,100b)
31241+ _ASM_EXTABLE(15b,100b)
31242+ _ASM_EXTABLE(16b,100b)
31243+ _ASM_EXTABLE(17b,100b)
31244+ _ASM_EXTABLE(18b,100b)
31245+ _ASM_EXTABLE(19b,100b)
31246+ _ASM_EXTABLE(20b,100b)
31247+ _ASM_EXTABLE(21b,100b)
31248+ _ASM_EXTABLE(22b,100b)
31249+ _ASM_EXTABLE(23b,100b)
31250+ _ASM_EXTABLE(24b,100b)
31251+ _ASM_EXTABLE(25b,100b)
31252+ _ASM_EXTABLE(26b,100b)
31253+ _ASM_EXTABLE(27b,100b)
31254+ _ASM_EXTABLE(28b,100b)
31255+ _ASM_EXTABLE(29b,100b)
31256+ _ASM_EXTABLE(30b,100b)
31257+ _ASM_EXTABLE(31b,100b)
31258+ _ASM_EXTABLE(32b,100b)
31259+ _ASM_EXTABLE(33b,100b)
31260+ _ASM_EXTABLE(34b,100b)
31261+ _ASM_EXTABLE(35b,100b)
31262+ _ASM_EXTABLE(36b,100b)
31263+ _ASM_EXTABLE(37b,100b)
31264+ _ASM_EXTABLE(99b,101b)
31265+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
31266+ : "1"(to), "2"(from), "0"(size)
31267+ : "eax", "edx", "memory");
31268+ return size;
31269+}
31270+
31271+static unsigned long
31272+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31273+{
31274+ int d0, d1;
31275+ __asm__ __volatile__(
31276+ " .align 2,0x90\n"
31277+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31278+ " cmpl $67, %0\n"
31279+ " jbe 3f\n"
31280+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31281+ " .align 2,0x90\n"
31282+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31283+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31284+ "5: movl %%eax, 0(%3)\n"
31285+ "6: movl %%edx, 4(%3)\n"
31286+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31287+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31288+ "9: movl %%eax, 8(%3)\n"
31289+ "10: movl %%edx, 12(%3)\n"
31290+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31291+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31292+ "13: movl %%eax, 16(%3)\n"
31293+ "14: movl %%edx, 20(%3)\n"
31294+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31295+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31296+ "17: movl %%eax, 24(%3)\n"
31297+ "18: movl %%edx, 28(%3)\n"
31298+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31299+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31300+ "21: movl %%eax, 32(%3)\n"
31301+ "22: movl %%edx, 36(%3)\n"
31302+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31303+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31304+ "25: movl %%eax, 40(%3)\n"
31305+ "26: movl %%edx, 44(%3)\n"
31306+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31307+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31308+ "29: movl %%eax, 48(%3)\n"
31309+ "30: movl %%edx, 52(%3)\n"
31310+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31311+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31312+ "33: movl %%eax, 56(%3)\n"
31313+ "34: movl %%edx, 60(%3)\n"
31314+ " addl $-64, %0\n"
31315+ " addl $64, %4\n"
31316+ " addl $64, %3\n"
31317+ " cmpl $63, %0\n"
31318+ " ja 1b\n"
31319+ "35: movl %0, %%eax\n"
31320+ " shrl $2, %0\n"
31321+ " andl $3, %%eax\n"
31322+ " cld\n"
31323+ "99: rep; "__copyuser_seg" movsl\n"
31324+ "36: movl %%eax, %0\n"
31325+ "37: rep; "__copyuser_seg" movsb\n"
31326+ "100:\n"
31327 ".section .fixup,\"ax\"\n"
31328 "101: lea 0(%%eax,%0,4),%0\n"
31329 " jmp 100b\n"
31330@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31331 int d0, d1;
31332 __asm__ __volatile__(
31333 " .align 2,0x90\n"
31334- "0: movl 32(%4), %%eax\n"
31335+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31336 " cmpl $67, %0\n"
31337 " jbe 2f\n"
31338- "1: movl 64(%4), %%eax\n"
31339+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31340 " .align 2,0x90\n"
31341- "2: movl 0(%4), %%eax\n"
31342- "21: movl 4(%4), %%edx\n"
31343+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31344+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31345 " movl %%eax, 0(%3)\n"
31346 " movl %%edx, 4(%3)\n"
31347- "3: movl 8(%4), %%eax\n"
31348- "31: movl 12(%4),%%edx\n"
31349+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31350+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31351 " movl %%eax, 8(%3)\n"
31352 " movl %%edx, 12(%3)\n"
31353- "4: movl 16(%4), %%eax\n"
31354- "41: movl 20(%4), %%edx\n"
31355+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31356+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31357 " movl %%eax, 16(%3)\n"
31358 " movl %%edx, 20(%3)\n"
31359- "10: movl 24(%4), %%eax\n"
31360- "51: movl 28(%4), %%edx\n"
31361+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31362+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31363 " movl %%eax, 24(%3)\n"
31364 " movl %%edx, 28(%3)\n"
31365- "11: movl 32(%4), %%eax\n"
31366- "61: movl 36(%4), %%edx\n"
31367+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31368+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31369 " movl %%eax, 32(%3)\n"
31370 " movl %%edx, 36(%3)\n"
31371- "12: movl 40(%4), %%eax\n"
31372- "71: movl 44(%4), %%edx\n"
31373+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31374+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31375 " movl %%eax, 40(%3)\n"
31376 " movl %%edx, 44(%3)\n"
31377- "13: movl 48(%4), %%eax\n"
31378- "81: movl 52(%4), %%edx\n"
31379+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31380+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31381 " movl %%eax, 48(%3)\n"
31382 " movl %%edx, 52(%3)\n"
31383- "14: movl 56(%4), %%eax\n"
31384- "91: movl 60(%4), %%edx\n"
31385+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31386+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31387 " movl %%eax, 56(%3)\n"
31388 " movl %%edx, 60(%3)\n"
31389 " addl $-64, %0\n"
31390@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31391 " shrl $2, %0\n"
31392 " andl $3, %%eax\n"
31393 " cld\n"
31394- "6: rep; movsl\n"
31395+ "6: rep; "__copyuser_seg" movsl\n"
31396 " movl %%eax,%0\n"
31397- "7: rep; movsb\n"
31398+ "7: rep; "__copyuser_seg" movsb\n"
31399 "8:\n"
31400 ".section .fixup,\"ax\"\n"
31401 "9: lea 0(%%eax,%0,4),%0\n"
31402@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31403
31404 __asm__ __volatile__(
31405 " .align 2,0x90\n"
31406- "0: movl 32(%4), %%eax\n"
31407+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31408 " cmpl $67, %0\n"
31409 " jbe 2f\n"
31410- "1: movl 64(%4), %%eax\n"
31411+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31412 " .align 2,0x90\n"
31413- "2: movl 0(%4), %%eax\n"
31414- "21: movl 4(%4), %%edx\n"
31415+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31416+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31417 " movnti %%eax, 0(%3)\n"
31418 " movnti %%edx, 4(%3)\n"
31419- "3: movl 8(%4), %%eax\n"
31420- "31: movl 12(%4),%%edx\n"
31421+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31422+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31423 " movnti %%eax, 8(%3)\n"
31424 " movnti %%edx, 12(%3)\n"
31425- "4: movl 16(%4), %%eax\n"
31426- "41: movl 20(%4), %%edx\n"
31427+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31428+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31429 " movnti %%eax, 16(%3)\n"
31430 " movnti %%edx, 20(%3)\n"
31431- "10: movl 24(%4), %%eax\n"
31432- "51: movl 28(%4), %%edx\n"
31433+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31434+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31435 " movnti %%eax, 24(%3)\n"
31436 " movnti %%edx, 28(%3)\n"
31437- "11: movl 32(%4), %%eax\n"
31438- "61: movl 36(%4), %%edx\n"
31439+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31440+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31441 " movnti %%eax, 32(%3)\n"
31442 " movnti %%edx, 36(%3)\n"
31443- "12: movl 40(%4), %%eax\n"
31444- "71: movl 44(%4), %%edx\n"
31445+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31446+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31447 " movnti %%eax, 40(%3)\n"
31448 " movnti %%edx, 44(%3)\n"
31449- "13: movl 48(%4), %%eax\n"
31450- "81: movl 52(%4), %%edx\n"
31451+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31452+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31453 " movnti %%eax, 48(%3)\n"
31454 " movnti %%edx, 52(%3)\n"
31455- "14: movl 56(%4), %%eax\n"
31456- "91: movl 60(%4), %%edx\n"
31457+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31458+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31459 " movnti %%eax, 56(%3)\n"
31460 " movnti %%edx, 60(%3)\n"
31461 " addl $-64, %0\n"
31462@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31463 " shrl $2, %0\n"
31464 " andl $3, %%eax\n"
31465 " cld\n"
31466- "6: rep; movsl\n"
31467+ "6: rep; "__copyuser_seg" movsl\n"
31468 " movl %%eax,%0\n"
31469- "7: rep; movsb\n"
31470+ "7: rep; "__copyuser_seg" movsb\n"
31471 "8:\n"
31472 ".section .fixup,\"ax\"\n"
31473 "9: lea 0(%%eax,%0,4),%0\n"
31474@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
31475
31476 __asm__ __volatile__(
31477 " .align 2,0x90\n"
31478- "0: movl 32(%4), %%eax\n"
31479+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31480 " cmpl $67, %0\n"
31481 " jbe 2f\n"
31482- "1: movl 64(%4), %%eax\n"
31483+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31484 " .align 2,0x90\n"
31485- "2: movl 0(%4), %%eax\n"
31486- "21: movl 4(%4), %%edx\n"
31487+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31488+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31489 " movnti %%eax, 0(%3)\n"
31490 " movnti %%edx, 4(%3)\n"
31491- "3: movl 8(%4), %%eax\n"
31492- "31: movl 12(%4),%%edx\n"
31493+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31494+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31495 " movnti %%eax, 8(%3)\n"
31496 " movnti %%edx, 12(%3)\n"
31497- "4: movl 16(%4), %%eax\n"
31498- "41: movl 20(%4), %%edx\n"
31499+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31500+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31501 " movnti %%eax, 16(%3)\n"
31502 " movnti %%edx, 20(%3)\n"
31503- "10: movl 24(%4), %%eax\n"
31504- "51: movl 28(%4), %%edx\n"
31505+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31506+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31507 " movnti %%eax, 24(%3)\n"
31508 " movnti %%edx, 28(%3)\n"
31509- "11: movl 32(%4), %%eax\n"
31510- "61: movl 36(%4), %%edx\n"
31511+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31512+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31513 " movnti %%eax, 32(%3)\n"
31514 " movnti %%edx, 36(%3)\n"
31515- "12: movl 40(%4), %%eax\n"
31516- "71: movl 44(%4), %%edx\n"
31517+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31518+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31519 " movnti %%eax, 40(%3)\n"
31520 " movnti %%edx, 44(%3)\n"
31521- "13: movl 48(%4), %%eax\n"
31522- "81: movl 52(%4), %%edx\n"
31523+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31524+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31525 " movnti %%eax, 48(%3)\n"
31526 " movnti %%edx, 52(%3)\n"
31527- "14: movl 56(%4), %%eax\n"
31528- "91: movl 60(%4), %%edx\n"
31529+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31530+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31531 " movnti %%eax, 56(%3)\n"
31532 " movnti %%edx, 60(%3)\n"
31533 " addl $-64, %0\n"
31534@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
31535 " shrl $2, %0\n"
31536 " andl $3, %%eax\n"
31537 " cld\n"
31538- "6: rep; movsl\n"
31539+ "6: rep; "__copyuser_seg" movsl\n"
31540 " movl %%eax,%0\n"
31541- "7: rep; movsb\n"
31542+ "7: rep; "__copyuser_seg" movsb\n"
31543 "8:\n"
31544 ".section .fixup,\"ax\"\n"
31545 "9: lea 0(%%eax,%0,4),%0\n"
31546@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
31547 */
31548 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
31549 unsigned long size);
31550-unsigned long __copy_user_intel(void __user *to, const void *from,
31551+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
31552+ unsigned long size);
31553+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
31554 unsigned long size);
31555 unsigned long __copy_user_zeroing_intel_nocache(void *to,
31556 const void __user *from, unsigned long size);
31557 #endif /* CONFIG_X86_INTEL_USERCOPY */
31558
31559 /* Generic arbitrary sized copy. */
31560-#define __copy_user(to, from, size) \
31561+#define __copy_user(to, from, size, prefix, set, restore) \
31562 do { \
31563 int __d0, __d1, __d2; \
31564 __asm__ __volatile__( \
31565+ set \
31566 " cmp $7,%0\n" \
31567 " jbe 1f\n" \
31568 " movl %1,%0\n" \
31569 " negl %0\n" \
31570 " andl $7,%0\n" \
31571 " subl %0,%3\n" \
31572- "4: rep; movsb\n" \
31573+ "4: rep; "prefix"movsb\n" \
31574 " movl %3,%0\n" \
31575 " shrl $2,%0\n" \
31576 " andl $3,%3\n" \
31577 " .align 2,0x90\n" \
31578- "0: rep; movsl\n" \
31579+ "0: rep; "prefix"movsl\n" \
31580 " movl %3,%0\n" \
31581- "1: rep; movsb\n" \
31582+ "1: rep; "prefix"movsb\n" \
31583 "2:\n" \
31584+ restore \
31585 ".section .fixup,\"ax\"\n" \
31586 "5: addl %3,%0\n" \
31587 " jmp 2b\n" \
31588@@ -538,14 +650,14 @@ do { \
31589 " negl %0\n" \
31590 " andl $7,%0\n" \
31591 " subl %0,%3\n" \
31592- "4: rep; movsb\n" \
31593+ "4: rep; "__copyuser_seg"movsb\n" \
31594 " movl %3,%0\n" \
31595 " shrl $2,%0\n" \
31596 " andl $3,%3\n" \
31597 " .align 2,0x90\n" \
31598- "0: rep; movsl\n" \
31599+ "0: rep; "__copyuser_seg"movsl\n" \
31600 " movl %3,%0\n" \
31601- "1: rep; movsb\n" \
31602+ "1: rep; "__copyuser_seg"movsb\n" \
31603 "2:\n" \
31604 ".section .fixup,\"ax\"\n" \
31605 "5: addl %3,%0\n" \
31606@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
31607 {
31608 stac();
31609 if (movsl_is_ok(to, from, n))
31610- __copy_user(to, from, n);
31611+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
31612 else
31613- n = __copy_user_intel(to, from, n);
31614+ n = __generic_copy_to_user_intel(to, from, n);
31615 clac();
31616 return n;
31617 }
31618@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
31619 {
31620 stac();
31621 if (movsl_is_ok(to, from, n))
31622- __copy_user(to, from, n);
31623+ __copy_user(to, from, n, __copyuser_seg, "", "");
31624 else
31625- n = __copy_user_intel((void __user *)to,
31626- (const void *)from, n);
31627+ n = __generic_copy_from_user_intel(to, from, n);
31628 clac();
31629 return n;
31630 }
31631@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
31632 if (n > 64 && cpu_has_xmm2)
31633 n = __copy_user_intel_nocache(to, from, n);
31634 else
31635- __copy_user(to, from, n);
31636+ __copy_user(to, from, n, __copyuser_seg, "", "");
31637 #else
31638- __copy_user(to, from, n);
31639+ __copy_user(to, from, n, __copyuser_seg, "", "");
31640 #endif
31641 clac();
31642 return n;
31643 }
31644 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
31645
31646-/**
31647- * copy_to_user: - Copy a block of data into user space.
31648- * @to: Destination address, in user space.
31649- * @from: Source address, in kernel space.
31650- * @n: Number of bytes to copy.
31651- *
31652- * Context: User context only. This function may sleep.
31653- *
31654- * Copy data from kernel space to user space.
31655- *
31656- * Returns number of bytes that could not be copied.
31657- * On success, this will be zero.
31658- */
31659-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
31660+#ifdef CONFIG_PAX_MEMORY_UDEREF
31661+void __set_fs(mm_segment_t x)
31662 {
31663- if (access_ok(VERIFY_WRITE, to, n))
31664- n = __copy_to_user(to, from, n);
31665- return n;
31666+ switch (x.seg) {
31667+ case 0:
31668+ loadsegment(gs, 0);
31669+ break;
31670+ case TASK_SIZE_MAX:
31671+ loadsegment(gs, __USER_DS);
31672+ break;
31673+ case -1UL:
31674+ loadsegment(gs, __KERNEL_DS);
31675+ break;
31676+ default:
31677+ BUG();
31678+ }
31679 }
31680-EXPORT_SYMBOL(_copy_to_user);
31681+EXPORT_SYMBOL(__set_fs);
31682
31683-/**
31684- * copy_from_user: - Copy a block of data from user space.
31685- * @to: Destination address, in kernel space.
31686- * @from: Source address, in user space.
31687- * @n: Number of bytes to copy.
31688- *
31689- * Context: User context only. This function may sleep.
31690- *
31691- * Copy data from user space to kernel space.
31692- *
31693- * Returns number of bytes that could not be copied.
31694- * On success, this will be zero.
31695- *
31696- * If some data could not be copied, this function will pad the copied
31697- * data to the requested size using zero bytes.
31698- */
31699-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
31700+void set_fs(mm_segment_t x)
31701 {
31702- if (access_ok(VERIFY_READ, from, n))
31703- n = __copy_from_user(to, from, n);
31704- else
31705- memset(to, 0, n);
31706- return n;
31707+ current_thread_info()->addr_limit = x;
31708+ __set_fs(x);
31709 }
31710-EXPORT_SYMBOL(_copy_from_user);
31711+EXPORT_SYMBOL(set_fs);
31712+#endif
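
The usercopy_32.c rewrite above threads a segment-override prefix plus ES set/restore sequences through every copy loop, splits __copy_user_intel into direction-specific __generic_copy_{to,from}_user_intel variants, and, under PAX_MEMORY_UDEREF, replaces set_fs() with an out-of-line version that also reloads %gs, so the segment limit rather than a software check bounds userland accesses. Callers keep the usual save/restore discipline; a sketch of the classic pattern with the new side effect noted (assuming the 3.19-era vfs_read() signature):

static ssize_t kernel_read_sketch(struct file *file, void *buf, size_t count)
{
	mm_segment_t old_fs = get_fs();
	loff_t pos = 0;
	ssize_t ret;

	set_fs(KERNEL_DS);	/* now also loads __KERNEL_DS into %gs (seg == -1UL) */
	ret = vfs_read(file, (char __user *)buf, count, &pos);
	set_fs(old_fs);		/* restores the caller's segment on every path */
	return ret;
}
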
31713diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
31714index c905e89..01ab928 100644
31715--- a/arch/x86/lib/usercopy_64.c
31716+++ b/arch/x86/lib/usercopy_64.c
31717@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31718 might_fault();
31719 /* no memory constraint because it doesn't change any memory gcc knows
31720 about */
31721+ pax_open_userland();
31722 stac();
31723 asm volatile(
31724 " testq %[size8],%[size8]\n"
31725@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
31726 _ASM_EXTABLE(0b,3b)
31727 _ASM_EXTABLE(1b,2b)
31728 : [size8] "=&c"(size), [dst] "=&D" (__d0)
31729- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
31730+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
31731 [zero] "r" (0UL), [eight] "r" (8UL));
31732 clac();
31733+ pax_close_userland();
31734 return size;
31735 }
31736 EXPORT_SYMBOL(__clear_user);
31737@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
31738 }
31739 EXPORT_SYMBOL(clear_user);
31740
31741-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
31742+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
31743 {
31744- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
31745- return copy_user_generic((__force void *)to, (__force void *)from, len);
31746- }
31747- return len;
31748+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
31749+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
31750+ return len;
31751 }
31752 EXPORT_SYMBOL(copy_in_user);
31753
31754@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
31755 * it is not necessary to optimize tail handling.
31756 */
31757 __visible unsigned long
31758-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31759+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
31760 {
31761 char c;
31762 unsigned zero_len;
31763
31764+ clac();
31765+ pax_close_userland();
31766 for (; len; --len, to++) {
31767 if (__get_user_nocheck(c, from++, sizeof(char)))
31768 break;
31769@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
31770 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
31771 if (__put_user_nocheck(c, to++, sizeof(char)))
31772 break;
31773- clac();
31774 return len;
31775 }
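
In usercopy_64.c, __clear_user() gains the same open/close bracketing as the checksum helpers, copy_in_user() rebases both pointers through ____m(), and copy_user_handle_tail() now drops SMAP/UDEREF access at entry (clac() moves to the top, joined by pax_close_userland()) because it is entered from a faulting copy with the window still open; the per-byte __get_user_nocheck()/__put_user_nocheck() calls reopen it as needed. A minimal sketch of the copy_in_user() contract as patched:

unsigned long copy_in_user_sketch(void __user *to, const void __user *from,
				  unsigned long len)
{
	/* both ends are userland, so both need access_ok() */
	if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
		return copy_user_generic((void __force_kernel *)____m(to),
					 (void __force_kernel *)____m(from), len);
	return len;	/* nothing copied: every byte reported as failed */
}
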
31776diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
31777index ecfdc46..55b9309 100644
31778--- a/arch/x86/mm/Makefile
31779+++ b/arch/x86/mm/Makefile
31780@@ -32,3 +32,7 @@ obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
31781 obj-$(CONFIG_MEMTEST) += memtest.o
31782
31783 obj-$(CONFIG_X86_INTEL_MPX) += mpx.o
31784+
31785+quote:="
31786+obj-$(CONFIG_X86_64) += uderef_64.o
31787+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
31788diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
31789index 903ec1e..c4166b2 100644
31790--- a/arch/x86/mm/extable.c
31791+++ b/arch/x86/mm/extable.c
31792@@ -6,12 +6,24 @@
31793 static inline unsigned long
31794 ex_insn_addr(const struct exception_table_entry *x)
31795 {
31796- return (unsigned long)&x->insn + x->insn;
31797+ unsigned long reloc = 0;
31798+
31799+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31800+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31801+#endif
31802+
31803+ return (unsigned long)&x->insn + x->insn + reloc;
31804 }
31805 static inline unsigned long
31806 ex_fixup_addr(const struct exception_table_entry *x)
31807 {
31808- return (unsigned long)&x->fixup + x->fixup;
31809+ unsigned long reloc = 0;
31810+
31811+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31812+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31813+#endif
31814+
31815+ return (unsigned long)&x->fixup + x->fixup + reloc;
31816 }
31817
31818 int fixup_exception(struct pt_regs *regs)
31819@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
31820 unsigned long new_ip;
31821
31822 #ifdef CONFIG_PNPBIOS
31823- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
31824+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
31825 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
31826 extern u32 pnp_bios_is_utter_crap;
31827 pnp_bios_is_utter_crap = 1;
31828@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
31829 i += 4;
31830 p->fixup -= i;
31831 i += 4;
31832+
31833+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31834+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
31835+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31836+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
31837+#endif
31838+
31839 }
31840 }
31841
31842diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
31843index e3ff27a..f38f7c0 100644
31844--- a/arch/x86/mm/fault.c
31845+++ b/arch/x86/mm/fault.c
31846@@ -13,12 +13,19 @@
31847 #include <linux/hugetlb.h> /* hstate_index_to_shift */
31848 #include <linux/prefetch.h> /* prefetchw */
31849 #include <linux/context_tracking.h> /* exception_enter(), ... */
31850+#include <linux/unistd.h>
31851+#include <linux/compiler.h>
31852
31853 #include <asm/traps.h> /* dotraplinkage, ... */
31854 #include <asm/pgalloc.h> /* pgd_*(), ... */
31855 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
31856 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
31857 #include <asm/vsyscall.h> /* emulate_vsyscall */
31858+#include <asm/tlbflush.h>
31859+
31860+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31861+#include <asm/stacktrace.h>
31862+#endif
31863
31864 #define CREATE_TRACE_POINTS
31865 #include <asm/trace/exceptions.h>
31866@@ -59,7 +66,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
31867 int ret = 0;
31868
31869 /* kprobe_running() needs smp_processor_id() */
31870- if (kprobes_built_in() && !user_mode_vm(regs)) {
31871+ if (kprobes_built_in() && !user_mode(regs)) {
31872 preempt_disable();
31873 if (kprobe_running() && kprobe_fault_handler(regs, 14))
31874 ret = 1;
31875@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
31876 return !instr_lo || (instr_lo>>1) == 1;
31877 case 0x00:
31878 /* Prefetch instruction is 0x0F0D or 0x0F18 */
31879- if (probe_kernel_address(instr, opcode))
31880+ if (user_mode(regs)) {
31881+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31882+ return 0;
31883+ } else if (probe_kernel_address(instr, opcode))
31884 return 0;
31885
31886 *prefetch = (instr_lo == 0xF) &&
31887@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
31888 while (instr < max_instr) {
31889 unsigned char opcode;
31890
31891- if (probe_kernel_address(instr, opcode))
31892+ if (user_mode(regs)) {
31893+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
31894+ break;
31895+ } else if (probe_kernel_address(instr, opcode))
31896 break;
31897
31898 instr++;
31899@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
31900 force_sig_info(si_signo, &info, tsk);
31901 }
31902
31903+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31904+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
31905+#endif
31906+
31907+#ifdef CONFIG_PAX_EMUTRAMP
31908+static int pax_handle_fetch_fault(struct pt_regs *regs);
31909+#endif
31910+
31911+#ifdef CONFIG_PAX_PAGEEXEC
31912+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
31913+{
31914+ pgd_t *pgd;
31915+ pud_t *pud;
31916+ pmd_t *pmd;
31917+
31918+ pgd = pgd_offset(mm, address);
31919+ if (!pgd_present(*pgd))
31920+ return NULL;
31921+ pud = pud_offset(pgd, address);
31922+ if (!pud_present(*pud))
31923+ return NULL;
31924+ pmd = pmd_offset(pud, address);
31925+ if (!pmd_present(*pmd))
31926+ return NULL;
31927+ return pmd;
31928+}
31929+#endif
31930+
31931 DEFINE_SPINLOCK(pgd_lock);
31932 LIST_HEAD(pgd_list);
31933
31934@@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
31935 for (address = VMALLOC_START & PMD_MASK;
31936 address >= TASK_SIZE && address < FIXADDR_TOP;
31937 address += PMD_SIZE) {
31938+
31939+#ifdef CONFIG_PAX_PER_CPU_PGD
31940+ unsigned long cpu;
31941+#else
31942 struct page *page;
31943+#endif
31944
31945 spin_lock(&pgd_lock);
31946+
31947+#ifdef CONFIG_PAX_PER_CPU_PGD
31948+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31949+ pgd_t *pgd = get_cpu_pgd(cpu, user);
31950+ pmd_t *ret;
31951+
31952+ ret = vmalloc_sync_one(pgd, address);
31953+ if (!ret)
31954+ break;
31955+ pgd = get_cpu_pgd(cpu, kernel);
31956+#else
31957 list_for_each_entry(page, &pgd_list, lru) {
31958+ pgd_t *pgd;
31959 spinlock_t *pgt_lock;
31960 pmd_t *ret;
31961
31962@@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
31963 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31964
31965 spin_lock(pgt_lock);
31966- ret = vmalloc_sync_one(page_address(page), address);
31967+ pgd = page_address(page);
31968+#endif
31969+
31970+ ret = vmalloc_sync_one(pgd, address);
31971+
31972+#ifndef CONFIG_PAX_PER_CPU_PGD
31973 spin_unlock(pgt_lock);
31974+#endif
31975
31976 if (!ret)
31977 break;
31978@@ -281,6 +345,12 @@ static noinline int vmalloc_fault(unsigned long address)
31979 * an interrupt in the middle of a task switch..
31980 */
31981 pgd_paddr = read_cr3();
31982+
31983+#ifdef CONFIG_PAX_PER_CPU_PGD
31984+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
31985+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
31986+#endif
31987+
31988 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
31989 if (!pmd_k)
31990 return -1;
31991@@ -377,11 +447,25 @@ static noinline int vmalloc_fault(unsigned long address)
31992 * happen within a race in page table update. In the later
31993 * case just flush:
31994 */
31995- pgd = pgd_offset(current->active_mm, address);
31996+
31997 pgd_ref = pgd_offset_k(address);
31998 if (pgd_none(*pgd_ref))
31999 return -1;
32000
32001+#ifdef CONFIG_PAX_PER_CPU_PGD
32002+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
32003+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
32004+ if (pgd_none(*pgd)) {
32005+ set_pgd(pgd, *pgd_ref);
32006+ arch_flush_lazy_mmu_mode();
32007+ } else {
32008+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
32009+ }
32010+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
32011+#else
32012+ pgd = pgd_offset(current->active_mm, address);
32013+#endif
32014+
32015 if (pgd_none(*pgd)) {
32016 set_pgd(pgd, *pgd_ref);
32017 arch_flush_lazy_mmu_mode();
32018@@ -548,7 +632,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
32019 static int is_errata100(struct pt_regs *regs, unsigned long address)
32020 {
32021 #ifdef CONFIG_X86_64
32022- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
32023+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
32024 return 1;
32025 #endif
32026 return 0;
32027@@ -575,9 +659,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
32028 }
32029
32030 static const char nx_warning[] = KERN_CRIT
32031-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
32032+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
32033 static const char smep_warning[] = KERN_CRIT
32034-"unable to execute userspace code (SMEP?) (uid: %d)\n";
32035+"unable to execute userspace code (SMEP?) (uid: %d, task: %s, pid: %d)\n";
32036
32037 static void
32038 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32039@@ -586,7 +670,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32040 if (!oops_may_print())
32041 return;
32042
32043- if (error_code & PF_INSTR) {
32044+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
32045 unsigned int level;
32046 pgd_t *pgd;
32047 pte_t *pte;
32048@@ -597,13 +681,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32049 pte = lookup_address_in_pgd(pgd, address, &level);
32050
32051 if (pte && pte_present(*pte) && !pte_exec(*pte))
32052- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
32053+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
32054 if (pte && pte_present(*pte) && pte_exec(*pte) &&
32055 (pgd_flags(*pgd) & _PAGE_USER) &&
32056 (read_cr4() & X86_CR4_SMEP))
32057- printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
32058+ printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
32059 }
32060
32061+#ifdef CONFIG_PAX_KERNEXEC
32062+ if (init_mm.start_code <= address && address < init_mm.end_code) {
32063+ if (current->signal->curr_ip)
32064+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
32065+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
32066+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32067+ else
32068+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
32069+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32070+ }
32071+#endif
32072+
32073 printk(KERN_ALERT "BUG: unable to handle kernel ");
32074 if (address < PAGE_SIZE)
32075 printk(KERN_CONT "NULL pointer dereference");
32076@@ -782,6 +878,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
32077 return;
32078 }
32079 #endif
32080+
32081+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32082+ if (pax_is_fetch_fault(regs, error_code, address)) {
32083+
32084+#ifdef CONFIG_PAX_EMUTRAMP
32085+ switch (pax_handle_fetch_fault(regs)) {
32086+ case 2:
32087+ return;
32088+ }
32089+#endif
32090+
32091+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32092+ do_group_exit(SIGKILL);
32093+ }
32094+#endif
32095+
32096 /* Kernel addresses are always protection faults: */
32097 if (address >= TASK_SIZE)
32098 error_code |= PF_PROT;
32099@@ -864,7 +976,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
32100 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
32101 printk(KERN_ERR
32102 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
32103- tsk->comm, tsk->pid, address);
32104+ tsk->comm, task_pid_nr(tsk), address);
32105 code = BUS_MCEERR_AR;
32106 }
32107 #endif
32108@@ -916,6 +1028,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
32109 return 1;
32110 }
32111
32112+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32113+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
32114+{
32115+ pte_t *pte;
32116+ pmd_t *pmd;
32117+ spinlock_t *ptl;
32118+ unsigned char pte_mask;
32119+
32120+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
32121+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
32122+ return 0;
32123+
32124+ /* PaX: it's our fault, let's handle it if we can */
32125+
32126+ /* PaX: take a look at read faults before acquiring any locks */
32127+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
32128+ /* instruction fetch attempt from a protected page in user mode */
32129+ up_read(&mm->mmap_sem);
32130+
32131+#ifdef CONFIG_PAX_EMUTRAMP
32132+ switch (pax_handle_fetch_fault(regs)) {
32133+ case 2:
32134+ return 1;
32135+ }
32136+#endif
32137+
32138+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32139+ do_group_exit(SIGKILL);
32140+ }
32141+
32142+ pmd = pax_get_pmd(mm, address);
32143+ if (unlikely(!pmd))
32144+ return 0;
32145+
32146+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
32147+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
32148+ pte_unmap_unlock(pte, ptl);
32149+ return 0;
32150+ }
32151+
32152+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
32153+ /* write attempt to a protected page in user mode */
32154+ pte_unmap_unlock(pte, ptl);
32155+ return 0;
32156+ }
32157+
32158+#ifdef CONFIG_SMP
32159+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
32160+#else
32161+ if (likely(address > get_limit(regs->cs)))
32162+#endif
32163+ {
32164+ set_pte(pte, pte_mkread(*pte));
32165+ __flush_tlb_one(address);
32166+ pte_unmap_unlock(pte, ptl);
32167+ up_read(&mm->mmap_sem);
32168+ return 1;
32169+ }
32170+
32171+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
32172+
32173+ /*
32174+ * PaX: fill DTLB with user rights and retry
32175+ */
32176+ __asm__ __volatile__ (
32177+ "orb %2,(%1)\n"
32178+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
32179+/*
32180+ * PaX: let this uncommented 'invlpg' remind us of the behaviour of Intel's
32181+ * (and AMD's) TLBs. Namely, they do not cache PTEs that would raise *any*
32182+ * page fault when examined during a TLB load attempt. This is true not only
32183+ * for PTEs holding a non-present entry but also for present entries that will
32184+ * raise a page fault (such as those set up by PaX, or by the copy-on-write
32185+ * mechanism). In effect it means that we do *not* need to flush the TLBs
32186+ * for our target pages since their PTEs are simply not in the TLBs at all.
32187+ *
32188+ * The best thing about omitting it is that we gain around 15-20% speed in the
32189+ * fast path of the page fault handler and can get rid of tracing since we
32190+ * can no longer flush unintended entries.
32191+ */
32192+ "invlpg (%0)\n"
32193+#endif
32194+ __copyuser_seg"testb $0,(%0)\n"
32195+ "xorb %3,(%1)\n"
32196+ :
32197+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32198+ : "memory", "cc");
32199+ pte_unmap_unlock(pte, ptl);
32200+ up_read(&mm->mmap_sem);
32201+ return 1;
32202+}
32203+#endif
32204+
32205 /*
32206 * Handle a spurious fault caused by a stale TLB entry.
32207 *
32208@@ -1001,6 +1206,9 @@ int show_unhandled_signals = 1;
32209 static inline int
32210 access_error(unsigned long error_code, struct vm_area_struct *vma)
32211 {
32212+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32213+ return 1;
32214+
32215 if (error_code & PF_WRITE) {
32216 /* write, present and write, not present: */
32217 if (unlikely(!(vma->vm_flags & VM_WRITE)))
32218@@ -1035,7 +1243,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
32219 if (error_code & PF_USER)
32220 return false;
32221
32222- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
32223+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
32224 return false;
32225
32226 return true;
32227@@ -1063,6 +1271,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32228 tsk = current;
32229 mm = tsk->mm;
32230
32231+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32232+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32233+ if (!search_exception_tables(regs->ip)) {
32234+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32235+ bad_area_nosemaphore(regs, error_code, address);
32236+ return;
32237+ }
32238+ if (address < pax_user_shadow_base) {
32239+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32240+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
32241+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
32242+ } else
32243+ address -= pax_user_shadow_base;
32244+ }
32245+#endif
32246+
32247 /*
32248 * Detect and handle instructions that would cause a page fault for
32249 * both a tracked kernel page and a userspace page.
32250@@ -1140,7 +1364,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32251 * User-mode registers count as a user access even for any
32252 * potential system fault or CPU buglet:
32253 */
32254- if (user_mode_vm(regs)) {
32255+ if (user_mode(regs)) {
32256 local_irq_enable();
32257 error_code |= PF_USER;
32258 flags |= FAULT_FLAG_USER;
32259@@ -1187,6 +1411,11 @@ retry:
32260 might_sleep();
32261 }
32262
32263+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32264+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32265+ return;
32266+#endif
32267+
32268 vma = find_vma(mm, address);
32269 if (unlikely(!vma)) {
32270 bad_area(regs, error_code, address);
32271@@ -1198,18 +1427,24 @@ retry:
32272 bad_area(regs, error_code, address);
32273 return;
32274 }
32275- if (error_code & PF_USER) {
32276- /*
32277- * Accessing the stack below %sp is always a bug.
32278- * The large cushion allows instructions like enter
32279- * and pusha to work. ("enter $65535, $31" pushes
32280- * 32 pointers and then decrements %sp by 65535.)
32281- */
32282- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32283- bad_area(regs, error_code, address);
32284- return;
32285- }
32286+ /*
32287+ * Accessing the stack below %sp is always a bug.
32288+ * The large cushion allows instructions like enter
32289+ * and pusha to work. ("enter $65535, $31" pushes
32290+ * 32 pointers and then decrements %sp by 65535.)
32291+ */
32292+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32293+ bad_area(regs, error_code, address);
32294+ return;
32295 }
32296+
32297+#ifdef CONFIG_PAX_SEGMEXEC
32298+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32299+ bad_area(regs, error_code, address);
32300+ return;
32301+ }
32302+#endif
32303+
32304 if (unlikely(expand_stack(vma, address))) {
32305 bad_area(regs, error_code, address);
32306 return;
32307@@ -1329,3 +1564,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32308 }
32309 NOKPROBE_SYMBOL(trace_do_page_fault);
32310 #endif /* CONFIG_TRACING */
32311+
32312+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32313+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32314+{
32315+ struct mm_struct *mm = current->mm;
32316+ unsigned long ip = regs->ip;
32317+
32318+ if (v8086_mode(regs))
32319+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32320+
32321+#ifdef CONFIG_PAX_PAGEEXEC
32322+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32323+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32324+ return true;
32325+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32326+ return true;
32327+ return false;
32328+ }
32329+#endif
32330+
32331+#ifdef CONFIG_PAX_SEGMEXEC
32332+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32333+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32334+ return true;
32335+ return false;
32336+ }
32337+#endif
32338+
32339+ return false;
32340+}
32341+#endif
32342+
32343+#ifdef CONFIG_PAX_EMUTRAMP
32344+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32345+{
32346+ int err;
32347+
32348+ do { /* PaX: libffi trampoline emulation */
32349+ unsigned char mov, jmp;
32350+ unsigned int addr1, addr2;
32351+
32352+#ifdef CONFIG_X86_64
32353+ if ((regs->ip + 9) >> 32)
32354+ break;
32355+#endif
32356+
32357+ err = get_user(mov, (unsigned char __user *)regs->ip);
32358+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32359+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32360+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32361+
32362+ if (err)
32363+ break;
32364+
32365+ if (mov == 0xB8 && jmp == 0xE9) {
32366+ regs->ax = addr1;
32367+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32368+ return 2;
32369+ }
32370+ } while (0);
32371+
32372+ do { /* PaX: gcc trampoline emulation #1 */
32373+ unsigned char mov1, mov2;
32374+ unsigned short jmp;
32375+ unsigned int addr1, addr2;
32376+
32377+#ifdef CONFIG_X86_64
32378+ if ((regs->ip + 11) >> 32)
32379+ break;
32380+#endif
32381+
32382+ err = get_user(mov1, (unsigned char __user *)regs->ip);
32383+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32384+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32385+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32386+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32387+
32388+ if (err)
32389+ break;
32390+
32391+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32392+ regs->cx = addr1;
32393+ regs->ax = addr2;
32394+ regs->ip = addr2;
32395+ return 2;
32396+ }
32397+ } while (0);
32398+
32399+ do { /* PaX: gcc trampoline emulation #2 */
32400+ unsigned char mov, jmp;
32401+ unsigned int addr1, addr2;
32402+
32403+#ifdef CONFIG_X86_64
32404+ if ((regs->ip + 9) >> 32)
32405+ break;
32406+#endif
32407+
32408+ err = get_user(mov, (unsigned char __user *)regs->ip);
32409+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32410+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32411+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32412+
32413+ if (err)
32414+ break;
32415+
32416+ if (mov == 0xB9 && jmp == 0xE9) {
32417+ regs->cx = addr1;
32418+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32419+ return 2;
32420+ }
32421+ } while (0);
32422+
32423+ return 1; /* PaX in action */
32424+}
32425+
32426+#ifdef CONFIG_X86_64
32427+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
32428+{
32429+ int err;
32430+
32431+ do { /* PaX: libffi trampoline emulation */
32432+ unsigned short mov1, mov2, jmp1;
32433+ unsigned char stcclc, jmp2;
32434+ unsigned long addr1, addr2;
32435+
32436+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32437+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32438+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32439+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32440+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
32441+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
32442+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
32443+
32444+ if (err)
32445+ break;
32446+
32447+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32448+ regs->r11 = addr1;
32449+ regs->r10 = addr2;
32450+ if (stcclc == 0xF8)
32451+ regs->flags &= ~X86_EFLAGS_CF;
32452+ else
32453+ regs->flags |= X86_EFLAGS_CF;
32454+ regs->ip = addr1;
32455+ return 2;
32456+ }
32457+ } while (0);
32458+
32459+ do { /* PaX: gcc trampoline emulation #1 */
32460+ unsigned short mov1, mov2, jmp1;
32461+ unsigned char jmp2;
32462+ unsigned int addr1;
32463+ unsigned long addr2;
32464+
32465+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32466+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
32467+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
32468+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
32469+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
32470+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
32471+
32472+ if (err)
32473+ break;
32474+
32475+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32476+ regs->r11 = addr1;
32477+ regs->r10 = addr2;
32478+ regs->ip = addr1;
32479+ return 2;
32480+ }
32481+ } while (0);
32482+
32483+ do { /* PaX: gcc trampoline emulation #2 */
32484+ unsigned short mov1, mov2, jmp1;
32485+ unsigned char jmp2;
32486+ unsigned long addr1, addr2;
32487+
32488+ err = get_user(mov1, (unsigned short __user *)regs->ip);
32489+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
32490+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
32491+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
32492+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
32493+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
32494+
32495+ if (err)
32496+ break;
32497+
32498+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
32499+ regs->r11 = addr1;
32500+ regs->r10 = addr2;
32501+ regs->ip = addr1;
32502+ return 2;
32503+ }
32504+ } while (0);
32505+
32506+ return 1; /* PaX in action */
32507+}
32508+#endif
32509+
32510+/*
32511+ * PaX: decide what to do with offenders (regs->ip = fault address)
32512+ *
32513+ * returns 1 when the task should be killed
32514+ *         2 when a gcc trampoline was detected
32515+ */
32516+static int pax_handle_fetch_fault(struct pt_regs *regs)
32517+{
32518+ if (v8086_mode(regs))
32519+ return 1;
32520+
32521+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
32522+ return 1;
32523+
32524+#ifdef CONFIG_X86_32
32525+ return pax_handle_fetch_fault_32(regs);
32526+#else
32527+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
32528+ return pax_handle_fetch_fault_32(regs);
32529+ else
32530+ return pax_handle_fetch_fault_64(regs);
32531+#endif
32532+}
32533+#endif
32534+
32535+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32536+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
32537+{
32538+ long i;
32539+
32540+ printk(KERN_ERR "PAX: bytes at PC: ");
32541+ for (i = 0; i < 20; i++) {
32542+ unsigned char c;
32543+ if (get_user(c, (unsigned char __force_user *)pc+i))
32544+ printk(KERN_CONT "?? ");
32545+ else
32546+ printk(KERN_CONT "%02x ", c);
32547+ }
32548+ printk("\n");
32549+
32550+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
32551+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
32552+ unsigned long c;
32553+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
32554+#ifdef CONFIG_X86_32
32555+ printk(KERN_CONT "???????? ");
32556+#else
32557+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
32558+ printk(KERN_CONT "???????? ???????? ");
32559+ else
32560+ printk(KERN_CONT "???????????????? ");
32561+#endif
32562+ } else {
32563+#ifdef CONFIG_X86_64
32564+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
32565+ printk(KERN_CONT "%08x ", (unsigned int)c);
32566+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
32567+ } else
32568+#endif
32569+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
32570+ }
32571+ }
32572+ printk("\n");
32573+}
32574+#endif
32575+
32576+/**
32577+ * probe_kernel_write(): safely attempt to write to a location
32578+ * @dst: address to write to
32579+ * @src: pointer to the data that shall be written
32580+ * @size: size of the data chunk
32581+ *
32582+ * Safely write to address @dst from the buffer at @src. If a kernel fault
32583+ * happens, handle that and return -EFAULT.
32584+ */
32585+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
32586+{
32587+ long ret;
32588+ mm_segment_t old_fs = get_fs();
32589+
32590+ set_fs(KERNEL_DS);
32591+ pagefault_disable();
32592+ pax_open_kernel();
32593+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
32594+ pax_close_kernel();
32595+ pagefault_enable();
32596+ set_fs(old_fs);
32597+
32598+ return ret ? -EFAULT : 0;
32599+}
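
The EMUTRAMP handlers above recognize trampolines purely by their byte pattern before rewriting register state: 0xB8 is mov $imm32,%eax and 0xE9 is jmp rel32. A minimal userspace sketch of that matching step, run against a made-up buffer instead of a faulting regs->ip (the get_user() error handling and the register updates are omitted; looks_like_trampoline is a name invented for this sketch):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* returns 1 if buf[0..9] matches the libffi-style pattern checked above */
    static int looks_like_trampoline(const unsigned char *buf)
    {
        unsigned char mov = buf[0];
        unsigned char jmp = buf[5];
        uint32_t addr1, addr2;

        memcpy(&addr1, buf + 1, sizeof(addr1)); /* immediate of the mov */
        memcpy(&addr2, buf + 6, sizeof(addr2)); /* displacement of the jmp */

        if (mov == 0xB8 && jmp == 0xE9) {
            printf("mov $0x%x,%%eax; jmp .%+d\n", addr1, (int32_t)addr2);
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        /* mov $0xdeadbeef,%eax; jmp -16 (both values little-endian) */
        unsigned char tramp[10] = { 0xB8, 0xef, 0xbe, 0xad, 0xde,
                                    0xE9, 0xf0, 0xff, 0xff, 0xff };
        return looks_like_trampoline(tramp) ? 0 : 1;
    }

On a hit the real handler sets regs->ax and regs->ip itself and returns 2 so the fault is swallowed; anything unrecognized returns 1 and the task is killed.
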
32600diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
32601index 224b142..c2c9423 100644
32602--- a/arch/x86/mm/gup.c
32603+++ b/arch/x86/mm/gup.c
32604@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
32605 addr = start;
32606 len = (unsigned long) nr_pages << PAGE_SHIFT;
32607 end = start + len;
32608- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
32609+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32610 (void __user *)start, len)))
32611 return 0;
32612
32613@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
32614 goto slow_irqon;
32615 #endif
32616
32617+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
32618+ (void __user *)start, len)))
32619+ return 0;
32620+
32621 /*
32622 * XXX: batch / limit 'nr', to avoid large irq off latency
32623 * needs some instrumenting to determine the common sizes used by
32624diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
32625index 4500142..53a363c 100644
32626--- a/arch/x86/mm/highmem_32.c
32627+++ b/arch/x86/mm/highmem_32.c
32628@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
32629 idx = type + KM_TYPE_NR*smp_processor_id();
32630 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
32631 BUG_ON(!pte_none(*(kmap_pte-idx)));
32632+
32633+ pax_open_kernel();
32634 set_pte(kmap_pte-idx, mk_pte(page, prot));
32635+ pax_close_kernel();
32636+
32637 arch_flush_lazy_mmu_mode();
32638
32639 return (void *)vaddr;
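
pax_open_kernel()/pax_close_kernel() above bracket the one set_pte() that must land while kernel page tables stay write-protected the rest of the time (under KERNEXEC this typically means toggling the CR0 write-protect bit). A rough userspace analogue of that bracketing, using mprotect() on an ordinary read-only page purely for illustration:

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        /* a normally read-only page, standing in for write-protected PTEs */
        unsigned char *p = mmap(NULL, pagesz, PROT_READ,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        mprotect(p, pagesz, PROT_READ | PROT_WRITE); /* "pax_open_kernel()" */
        p[0] = 0x42;                                 /* the one permitted store */
        mprotect(p, pagesz, PROT_READ);              /* "pax_close_kernel()" */

        printf("%#x\n", p[0]);
        return 0;
    }

Keeping the window this narrow is the point: any stray write outside the bracket still faults.
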
32640diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
32641index 006cc91..bf05a83 100644
32642--- a/arch/x86/mm/hugetlbpage.c
32643+++ b/arch/x86/mm/hugetlbpage.c
32644@@ -86,23 +86,24 @@ int pud_huge(pud_t pud)
32645 #ifdef CONFIG_HUGETLB_PAGE
32646 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
32647 unsigned long addr, unsigned long len,
32648- unsigned long pgoff, unsigned long flags)
32649+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32650 {
32651 struct hstate *h = hstate_file(file);
32652 struct vm_unmapped_area_info info;
32653-
32654+
32655 info.flags = 0;
32656 info.length = len;
32657 info.low_limit = current->mm->mmap_legacy_base;
32658 info.high_limit = TASK_SIZE;
32659 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32660 info.align_offset = 0;
32661+ info.threadstack_offset = offset;
32662 return vm_unmapped_area(&info);
32663 }
32664
32665 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32666 unsigned long addr0, unsigned long len,
32667- unsigned long pgoff, unsigned long flags)
32668+ unsigned long pgoff, unsigned long flags, unsigned long offset)
32669 {
32670 struct hstate *h = hstate_file(file);
32671 struct vm_unmapped_area_info info;
32672@@ -114,6 +115,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32673 info.high_limit = current->mm->mmap_base;
32674 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
32675 info.align_offset = 0;
32676+ info.threadstack_offset = offset;
32677 addr = vm_unmapped_area(&info);
32678
32679 /*
32680@@ -126,6 +128,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
32681 VM_BUG_ON(addr != -ENOMEM);
32682 info.flags = 0;
32683 info.low_limit = TASK_UNMAPPED_BASE;
32684+
32685+#ifdef CONFIG_PAX_RANDMMAP
32686+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
32687+ info.low_limit += current->mm->delta_mmap;
32688+#endif
32689+
32690 info.high_limit = TASK_SIZE;
32691 addr = vm_unmapped_area(&info);
32692 }
32693@@ -140,10 +148,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32694 struct hstate *h = hstate_file(file);
32695 struct mm_struct *mm = current->mm;
32696 struct vm_area_struct *vma;
32697+ unsigned long pax_task_size = TASK_SIZE;
32698+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
32699
32700 if (len & ~huge_page_mask(h))
32701 return -EINVAL;
32702- if (len > TASK_SIZE)
32703+
32704+#ifdef CONFIG_PAX_SEGMEXEC
32705+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
32706+ pax_task_size = SEGMEXEC_TASK_SIZE;
32707+#endif
32708+
32709+ pax_task_size -= PAGE_SIZE;
32710+
32711+ if (len > pax_task_size)
32712 return -ENOMEM;
32713
32714 if (flags & MAP_FIXED) {
32715@@ -152,19 +170,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
32716 return addr;
32717 }
32718
32719+#ifdef CONFIG_PAX_RANDMMAP
32720+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
32721+#endif
32722+
32723 if (addr) {
32724 addr = ALIGN(addr, huge_page_size(h));
32725 vma = find_vma(mm, addr);
32726- if (TASK_SIZE - len >= addr &&
32727- (!vma || addr + len <= vma->vm_start))
32728+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
32729 return addr;
32730 }
32731 if (mm->get_unmapped_area == arch_get_unmapped_area)
32732 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
32733- pgoff, flags);
32734+ pgoff, flags, offset);
32735 else
32736 return hugetlb_get_unmapped_area_topdown(file, addr, len,
32737- pgoff, flags);
32738+ pgoff, flags, offset);
32739 }
32740 #endif /* CONFIG_HUGETLB_PAGE */
32741
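
Both hugetlb helpers above only fill in a vm_unmapped_area_info (the patch adds the threadstack_offset field and the RANDMMAP shift of low_limit) and delegate the search to vm_unmapped_area(). A toy version of the bottom-up policy over a hypothetical sorted VMA list, with alignment handling left out:

    #include <stdio.h>

    struct vma { unsigned long start, end; };

    /* lowest addr >= low_limit where [addr, addr+len) misses every vma;
       (unsigned long)-1 stands in for the kernel's -ENOMEM */
    static unsigned long find_gap_bottomup(const struct vma *v, int n,
                                           unsigned long len,
                                           unsigned long low_limit,
                                           unsigned long high_limit)
    {
        unsigned long addr = low_limit;
        int i;

        for (i = 0; i < n; i++) {       /* v[] sorted by start */
            if (addr + len <= v[i].start)
                break;                  /* the gap before this vma fits */
            if (v[i].end > addr)
                addr = v[i].end;        /* skip past it */
        }
        return addr + len <= high_limit ? addr : (unsigned long)-1;
    }

    int main(void)
    {
        struct vma map[] = { { 0x10000, 0x20000 }, { 0x28000, 0x30000 } };
        /* finds the 0x8000-byte hole at 0x20000 */
        printf("%#lx\n", find_gap_bottomup(map, 2, 0x8000, 0x10000, 0x40000));
        return 0;
    }
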
32742diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
32743index 079c3b6..7069023 100644
32744--- a/arch/x86/mm/init.c
32745+++ b/arch/x86/mm/init.c
32746@@ -4,6 +4,7 @@
32747 #include <linux/swap.h>
32748 #include <linux/memblock.h>
32749 #include <linux/bootmem.h> /* for max_low_pfn */
32750+#include <linux/tboot.h>
32751
32752 #include <asm/cacheflush.h>
32753 #include <asm/e820.h>
32754@@ -17,6 +18,8 @@
32755 #include <asm/proto.h>
32756 #include <asm/dma.h> /* for MAX_DMA_PFN */
32757 #include <asm/microcode.h>
32758+#include <asm/desc.h>
32759+#include <asm/bios_ebda.h>
32760
32761 /*
32762 * We need to define the tracepoints somewhere, and tlb.c
32763@@ -596,7 +599,18 @@ void __init init_mem_mapping(void)
32764 early_ioremap_page_table_range_init();
32765 #endif
32766
32767+#ifdef CONFIG_PAX_PER_CPU_PGD
32768+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
32769+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32770+ KERNEL_PGD_PTRS);
32771+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
32772+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
32773+ KERNEL_PGD_PTRS);
32774+ load_cr3(get_cpu_pgd(0, kernel));
32775+#else
32776 load_cr3(swapper_pg_dir);
32777+#endif
32778+
32779 __flush_tlb_all();
32780
32781 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
32782@@ -612,10 +626,40 @@ void __init init_mem_mapping(void)
32783 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
32784 * mmio resources as well as potential bios/acpi data regions.
32785 */
32786+
32787+#ifdef CONFIG_GRKERNSEC_KMEM
32788+static unsigned int ebda_start __read_only;
32789+static unsigned int ebda_end __read_only;
32790+#endif
32791+
32792 int devmem_is_allowed(unsigned long pagenr)
32793 {
32794- if (pagenr < 256)
32795+#ifdef CONFIG_GRKERNSEC_KMEM
32796+ /* allow BDA */
32797+ if (!pagenr)
32798 return 1;
32799+ /* allow EBDA */
32800+ if (pagenr >= ebda_start && pagenr < ebda_end)
32801+ return 1;
32802+ /* if tboot is in use, allow access to its hardcoded serial log range */
32803+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
32804+ return 1;
32805+#else
32806+ if (!pagenr)
32807+ return 1;
32808+#ifdef CONFIG_VM86
32809+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
32810+ return 1;
32811+#endif
32812+#endif
32813+
32814+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
32815+ return 1;
32816+#ifdef CONFIG_GRKERNSEC_KMEM
32817+ /* throw out everything else below 1MB */
32818+ if (pagenr <= 256)
32819+ return 0;
32820+#endif
32821 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
32822 return 0;
32823 if (!page_is_ram(pagenr))
32824@@ -661,8 +705,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
32825 #endif
32826 }
32827
32828+#ifdef CONFIG_GRKERNSEC_KMEM
32829+static inline void gr_init_ebda(void)
32830+{
32831+ unsigned int ebda_addr;
32832+ unsigned int ebda_size = 0;
32833+
32834+ ebda_addr = get_bios_ebda();
32835+ if (ebda_addr) {
32836+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
32837+ ebda_size <<= 10;
32838+ }
32839+ if (ebda_addr && ebda_size) {
32840+ ebda_start = ebda_addr >> PAGE_SHIFT;
32841+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
32842+ } else {
32843+ ebda_start = 0x9f000 >> PAGE_SHIFT;
32844+ ebda_end = 0xa0000 >> PAGE_SHIFT;
32845+ }
32846+}
32847+#else
32848+static inline void gr_init_ebda(void) { }
32849+#endif
32850+
32851 void free_initmem(void)
32852 {
32853+#ifdef CONFIG_PAX_KERNEXEC
32854+#ifdef CONFIG_X86_32
32855+ /* PaX: limit KERNEL_CS to actual size */
32856+ unsigned long addr, limit;
32857+ struct desc_struct d;
32858+ int cpu;
32859+#else
32860+ pgd_t *pgd;
32861+ pud_t *pud;
32862+ pmd_t *pmd;
32863+ unsigned long addr, end;
32864+#endif
32865+#endif
32866+
32867+ gr_init_ebda();
32868+
32869+#ifdef CONFIG_PAX_KERNEXEC
32870+#ifdef CONFIG_X86_32
32871+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
32872+ limit = (limit - 1UL) >> PAGE_SHIFT;
32873+
32874+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
32875+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
32876+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
32877+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
32878+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
32879+ }
32880+
32881+ /* PaX: make KERNEL_CS read-only */
32882+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
32883+ if (!paravirt_enabled())
32884+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
32885+/*
32886+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
32887+ pgd = pgd_offset_k(addr);
32888+ pud = pud_offset(pgd, addr);
32889+ pmd = pmd_offset(pud, addr);
32890+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32891+ }
32892+*/
32893+#ifdef CONFIG_X86_PAE
32894+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
32895+/*
32896+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
32897+ pgd = pgd_offset_k(addr);
32898+ pud = pud_offset(pgd, addr);
32899+ pmd = pmd_offset(pud, addr);
32900+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32901+ }
32902+*/
32903+#endif
32904+
32905+#ifdef CONFIG_MODULES
32906+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
32907+#endif
32908+
32909+#else
32910+ /* PaX: make kernel code/rodata read-only, rest non-executable */
32911+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
32912+ pgd = pgd_offset_k(addr);
32913+ pud = pud_offset(pgd, addr);
32914+ pmd = pmd_offset(pud, addr);
32915+ if (!pmd_present(*pmd))
32916+ continue;
32917+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
32918+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32919+ else
32920+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
32921+ }
32922+
32923+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
32924+ end = addr + KERNEL_IMAGE_SIZE;
32925+ for (; addr < end; addr += PMD_SIZE) {
32926+ pgd = pgd_offset_k(addr);
32927+ pud = pud_offset(pgd, addr);
32928+ pmd = pmd_offset(pud, addr);
32929+ if (!pmd_present(*pmd))
32930+ continue;
32931+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
32932+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
32933+ }
32934+#endif
32935+
32936+ flush_tlb_all();
32937+#endif
32938+
32939 free_init_pages("unused kernel",
32940 (unsigned long)(&__init_begin),
32941 (unsigned long)(&__init_end));
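
gr_init_ebda() derives the EBDA page range from the BIOS-reported pointer and the size byte (in KiB) stored at the start of the EBDA, clamping the end at the 0xa0000 VGA hole and falling back to the last 4 KiB below it when the BDA lies. The same arithmetic on assumed example values:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        /* example: BIOS reports the EBDA at 0x9fc00, size byte says 1 KiB */
        unsigned long ebda_addr = 0x9fc00;
        unsigned long ebda_size = 1UL << 10;
        unsigned long start, end_addr;

        start = ebda_addr >> PAGE_SHIFT;
        end_addr = PAGE_ALIGN(ebda_addr + ebda_size);
        if (end_addr > 0xa0000)
            end_addr = 0xa0000;     /* clamp at the VGA hole, as the patch does */

        printf("EBDA pages: [%#lx, %#lx)\n", start, end_addr >> PAGE_SHIFT);
        return 0;                   /* prints [0x9f, 0xa0) */
    }
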
32942diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
32943index c8140e1..59257fc 100644
32944--- a/arch/x86/mm/init_32.c
32945+++ b/arch/x86/mm/init_32.c
32946@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
32947 bool __read_mostly __vmalloc_start_set = false;
32948
32949 /*
32950- * Creates a middle page table and puts a pointer to it in the
32951- * given global directory entry. This only returns the gd entry
32952- * in non-PAE compilation mode, since the middle layer is folded.
32953- */
32954-static pmd_t * __init one_md_table_init(pgd_t *pgd)
32955-{
32956- pud_t *pud;
32957- pmd_t *pmd_table;
32958-
32959-#ifdef CONFIG_X86_PAE
32960- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
32961- pmd_table = (pmd_t *)alloc_low_page();
32962- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
32963- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
32964- pud = pud_offset(pgd, 0);
32965- BUG_ON(pmd_table != pmd_offset(pud, 0));
32966-
32967- return pmd_table;
32968- }
32969-#endif
32970- pud = pud_offset(pgd, 0);
32971- pmd_table = pmd_offset(pud, 0);
32972-
32973- return pmd_table;
32974-}
32975-
32976-/*
32977 * Create a page table and place a pointer to it in a middle page
32978 * directory entry:
32979 */
32980@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
32981 pte_t *page_table = (pte_t *)alloc_low_page();
32982
32983 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
32984+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32985+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
32986+#else
32987 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
32988+#endif
32989 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
32990 }
32991
32992 return pte_offset_kernel(pmd, 0);
32993 }
32994
32995+static pmd_t * __init one_md_table_init(pgd_t *pgd)
32996+{
32997+ pud_t *pud;
32998+ pmd_t *pmd_table;
32999+
33000+ pud = pud_offset(pgd, 0);
33001+ pmd_table = pmd_offset(pud, 0);
33002+
33003+ return pmd_table;
33004+}
33005+
33006 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
33007 {
33008 int pgd_idx = pgd_index(vaddr);
33009@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33010 int pgd_idx, pmd_idx;
33011 unsigned long vaddr;
33012 pgd_t *pgd;
33013+ pud_t *pud;
33014 pmd_t *pmd;
33015 pte_t *pte = NULL;
33016 unsigned long count = page_table_range_init_count(start, end);
33017@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33018 pgd = pgd_base + pgd_idx;
33019
33020 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
33021- pmd = one_md_table_init(pgd);
33022- pmd = pmd + pmd_index(vaddr);
33023+ pud = pud_offset(pgd, vaddr);
33024+ pmd = pmd_offset(pud, vaddr);
33025+
33026+#ifdef CONFIG_X86_PAE
33027+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33028+#endif
33029+
33030 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
33031 pmd++, pmd_idx++) {
33032 pte = page_table_kmap_check(one_page_table_init(pmd),
33033@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33034 }
33035 }
33036
33037-static inline int is_kernel_text(unsigned long addr)
33038+static inline int is_kernel_text(unsigned long start, unsigned long end)
33039 {
33040- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
33041- return 1;
33042- return 0;
33043+ if ((start >= ktla_ktva((unsigned long)_etext) ||
33044+ end <= ktla_ktva((unsigned long)_stext)) &&
33045+ (start >= ktla_ktva((unsigned long)_einittext) ||
33046+ end <= ktla_ktva((unsigned long)_sinittext)) &&
33047+
33048+#ifdef CONFIG_ACPI_SLEEP
33049+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
33050+#endif
33051+
33052+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
33053+ return 0;
33054+ return 1;
33055 }
33056
33057 /*
33058@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
33059 unsigned long last_map_addr = end;
33060 unsigned long start_pfn, end_pfn;
33061 pgd_t *pgd_base = swapper_pg_dir;
33062- int pgd_idx, pmd_idx, pte_ofs;
33063+ unsigned int pgd_idx, pmd_idx, pte_ofs;
33064 unsigned long pfn;
33065 pgd_t *pgd;
33066+ pud_t *pud;
33067 pmd_t *pmd;
33068 pte_t *pte;
33069 unsigned pages_2m, pages_4k;
33070@@ -291,8 +295,13 @@ repeat:
33071 pfn = start_pfn;
33072 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33073 pgd = pgd_base + pgd_idx;
33074- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
33075- pmd = one_md_table_init(pgd);
33076+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
33077+ pud = pud_offset(pgd, 0);
33078+ pmd = pmd_offset(pud, 0);
33079+
33080+#ifdef CONFIG_X86_PAE
33081+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33082+#endif
33083
33084 if (pfn >= end_pfn)
33085 continue;
33086@@ -304,14 +313,13 @@ repeat:
33087 #endif
33088 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
33089 pmd++, pmd_idx++) {
33090- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
33091+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
33092
33093 /*
33094 * Map with big pages if possible, otherwise
33095 * create normal page tables:
33096 */
33097 if (use_pse) {
33098- unsigned int addr2;
33099 pgprot_t prot = PAGE_KERNEL_LARGE;
33100 /*
33101 * first pass will use the same initial
33102@@ -322,11 +330,7 @@ repeat:
33103 _PAGE_PSE);
33104
33105 pfn &= PMD_MASK >> PAGE_SHIFT;
33106- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
33107- PAGE_OFFSET + PAGE_SIZE-1;
33108-
33109- if (is_kernel_text(addr) ||
33110- is_kernel_text(addr2))
33111+ if (is_kernel_text(address, address + PMD_SIZE))
33112 prot = PAGE_KERNEL_LARGE_EXEC;
33113
33114 pages_2m++;
33115@@ -343,7 +347,7 @@ repeat:
33116 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33117 pte += pte_ofs;
33118 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
33119- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
33120+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
33121 pgprot_t prot = PAGE_KERNEL;
33122 /*
33123 * first pass will use the same initial
33124@@ -351,7 +355,7 @@ repeat:
33125 */
33126 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
33127
33128- if (is_kernel_text(addr))
33129+ if (is_kernel_text(address, address + PAGE_SIZE))
33130 prot = PAGE_KERNEL_EXEC;
33131
33132 pages_4k++;
33133@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
33134
33135 pud = pud_offset(pgd, va);
33136 pmd = pmd_offset(pud, va);
33137- if (!pmd_present(*pmd))
33138+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
33139 break;
33140
33141 /* should not be large page here */
33142@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
33143
33144 static void __init pagetable_init(void)
33145 {
33146- pgd_t *pgd_base = swapper_pg_dir;
33147-
33148- permanent_kmaps_init(pgd_base);
33149+ permanent_kmaps_init(swapper_pg_dir);
33150 }
33151
33152-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
33153+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL);
33154 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33155
33156 /* user-defined highmem size */
33157@@ -787,10 +789,10 @@ void __init mem_init(void)
33158 ((unsigned long)&__init_end -
33159 (unsigned long)&__init_begin) >> 10,
33160
33161- (unsigned long)&_etext, (unsigned long)&_edata,
33162- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
33163+ (unsigned long)&_sdata, (unsigned long)&_edata,
33164+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
33165
33166- (unsigned long)&_text, (unsigned long)&_etext,
33167+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
33168 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
33169
33170 /*
33171@@ -884,6 +886,7 @@ void set_kernel_text_rw(void)
33172 if (!kernel_set_to_readonly)
33173 return;
33174
33175+ start = ktla_ktva(start);
33176 pr_debug("Set kernel text: %lx - %lx for read write\n",
33177 start, start+size);
33178
33179@@ -898,6 +901,7 @@ void set_kernel_text_ro(void)
33180 if (!kernel_set_to_readonly)
33181 return;
33182
33183+ start = ktla_ktva(start);
33184 pr_debug("Set kernel text: %lx - %lx for read only\n",
33185 start, start+size);
33186
33187@@ -926,6 +930,7 @@ void mark_rodata_ro(void)
33188 unsigned long start = PFN_ALIGN(_text);
33189 unsigned long size = PFN_ALIGN(_etext) - start;
33190
33191+ start = ktla_ktva(start);
33192 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33193 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33194 size >> 10);
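
The rewritten is_kernel_text() treats its argument as a half-open range and returns 0 only when [start, end) falls entirely outside every protected region (kernel text, init text, the ACPI wakeup page, the low BIOS area); each clause is the standard interval non-overlap test, negated. The test in isolation, with assumed bounds:

    #include <stdbool.h>
    #include <stdio.h>

    /* [start, end) overlaps [lo, hi) unless it lies entirely on either side */
    static bool ranges_overlap(unsigned long start, unsigned long end,
                               unsigned long lo, unsigned long hi)
    {
        return !(start >= hi || end <= lo);
    }

    int main(void)
    {
        /* assumed kernel text segment, for illustration only */
        unsigned long text_lo = 0xc1000000, text_hi = 0xc1400000;

        printf("%d\n", ranges_overlap(0xc13ff000, 0xc1401000,
                                      text_lo, text_hi)); /* 1 */
        printf("%d\n", ranges_overlap(0xc1400000, 0xc1600000,
                                      text_lo, text_hi)); /* 0 */
        return 0;
    }

Checking the whole PMD or page range at once is what lets the callers drop the old two-point probe of addr and addr2.
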
33195diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33196index 30eb05a..ae671ac 100644
33197--- a/arch/x86/mm/init_64.c
33198+++ b/arch/x86/mm/init_64.c
33199@@ -150,7 +150,7 @@ early_param("gbpages", parse_direct_gbpages_on);
33200 * around without checking the pgd every time.
33201 */
33202
33203-pteval_t __supported_pte_mask __read_mostly = ~0;
33204+pteval_t __supported_pte_mask __read_only = ~_PAGE_NX;
33205 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33206
33207 int force_personality32;
33208@@ -183,7 +183,12 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33209
33210 for (address = start; address <= end; address += PGDIR_SIZE) {
33211 const pgd_t *pgd_ref = pgd_offset_k(address);
33212+
33213+#ifdef CONFIG_PAX_PER_CPU_PGD
33214+ unsigned long cpu;
33215+#else
33216 struct page *page;
33217+#endif
33218
33219 /*
33220 * When it is called after memory hot remove, pgd_none()
33221@@ -194,6 +199,25 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33222 continue;
33223
33224 spin_lock(&pgd_lock);
33225+
33226+#ifdef CONFIG_PAX_PER_CPU_PGD
33227+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33228+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33229+
33230+ if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33231+ BUG_ON(pgd_page_vaddr(*pgd)
33232+ != pgd_page_vaddr(*pgd_ref));
33233+
33234+ if (removed) {
33235+ if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
33236+ pgd_clear(pgd);
33237+ } else {
33238+ if (pgd_none(*pgd))
33239+ set_pgd(pgd, *pgd_ref);
33240+ }
33241+
33242+ pgd = pgd_offset_cpu(cpu, kernel, address);
33243+#else
33244 list_for_each_entry(page, &pgd_list, lru) {
33245 pgd_t *pgd;
33246 spinlock_t *pgt_lock;
33247@@ -202,6 +226,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33248 /* the pgt_lock only for Xen */
33249 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33250 spin_lock(pgt_lock);
33251+#endif
33252
33253 if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
33254 BUG_ON(pgd_page_vaddr(*pgd)
33255@@ -215,7 +240,10 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
33256 set_pgd(pgd, *pgd_ref);
33257 }
33258
33259+#ifndef CONFIG_PAX_PER_CPU_PGD
33260 spin_unlock(pgt_lock);
33261+#endif
33262+
33263 }
33264 spin_unlock(&pgd_lock);
33265 }
33266@@ -248,7 +276,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33267 {
33268 if (pgd_none(*pgd)) {
33269 pud_t *pud = (pud_t *)spp_getpage();
33270- pgd_populate(&init_mm, pgd, pud);
33271+ pgd_populate_kernel(&init_mm, pgd, pud);
33272 if (pud != pud_offset(pgd, 0))
33273 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33274 pud, pud_offset(pgd, 0));
33275@@ -260,7 +288,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33276 {
33277 if (pud_none(*pud)) {
33278 pmd_t *pmd = (pmd_t *) spp_getpage();
33279- pud_populate(&init_mm, pud, pmd);
33280+ pud_populate_kernel(&init_mm, pud, pmd);
33281 if (pmd != pmd_offset(pud, 0))
33282 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33283 pmd, pmd_offset(pud, 0));
33284@@ -289,7 +317,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33285 pmd = fill_pmd(pud, vaddr);
33286 pte = fill_pte(pmd, vaddr);
33287
33288+ pax_open_kernel();
33289 set_pte(pte, new_pte);
33290+ pax_close_kernel();
33291
33292 /*
33293 * It's enough to flush this one mapping.
33294@@ -351,14 +381,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33295 pgd = pgd_offset_k((unsigned long)__va(phys));
33296 if (pgd_none(*pgd)) {
33297 pud = (pud_t *) spp_getpage();
33298- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33299- _PAGE_USER));
33300+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33301 }
33302 pud = pud_offset(pgd, (unsigned long)__va(phys));
33303 if (pud_none(*pud)) {
33304 pmd = (pmd_t *) spp_getpage();
33305- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33306- _PAGE_USER));
33307+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33308 }
33309 pmd = pmd_offset(pud, phys);
33310 BUG_ON(!pmd_none(*pmd));
33311@@ -599,7 +627,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33312 prot);
33313
33314 spin_lock(&init_mm.page_table_lock);
33315- pud_populate(&init_mm, pud, pmd);
33316+ pud_populate_kernel(&init_mm, pud, pmd);
33317 spin_unlock(&init_mm.page_table_lock);
33318 }
33319 __flush_tlb_all();
33320@@ -640,7 +668,7 @@ kernel_physical_mapping_init(unsigned long start,
33321 page_size_mask);
33322
33323 spin_lock(&init_mm.page_table_lock);
33324- pgd_populate(&init_mm, pgd, pud);
33325+ pgd_populate_kernel(&init_mm, pgd, pud);
33326 spin_unlock(&init_mm.page_table_lock);
33327 pgd_changed = true;
33328 }
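
Under PAX_PER_CPU_PGD the pgd_list walk above is replaced by a loop over every CPU's private PGD pair, copying a freshly populated reference entry in (or clearing it on removal). A stripped-down model of that propagation, with pgd_t reduced to an integer and the locking and kernel/user pair omitted:

    #include <stdio.h>

    #define NR_CPUS 4
    typedef unsigned long pgd_t;    /* stand-in: 0 means "none" */

    static pgd_t pgd_ref;           /* init_mm's entry for some address */
    static pgd_t cpu_pgd[NR_CPUS];  /* per-CPU copies of that entry */

    static void sync_one_entry(int removed)
    {
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
            if (removed) {
                if (!pgd_ref && cpu_pgd[cpu])
                    cpu_pgd[cpu] = 0;           /* pgd_clear() */
            } else {
                if (!cpu_pgd[cpu])
                    cpu_pgd[cpu] = pgd_ref;     /* set_pgd() */
            }
        }
    }

    int main(void)
    {
        pgd_ref = 0x1234;
        sync_one_entry(0);
        printf("cpu0=%#lx cpu3=%#lx\n", cpu_pgd[0], cpu_pgd[3]);
        return 0;
    }
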
33329diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33330index 9ca35fc..4b2b7b7 100644
33331--- a/arch/x86/mm/iomap_32.c
33332+++ b/arch/x86/mm/iomap_32.c
33333@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33334 type = kmap_atomic_idx_push();
33335 idx = type + KM_TYPE_NR * smp_processor_id();
33336 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33337+
33338+ pax_open_kernel();
33339 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33340+ pax_close_kernel();
33341+
33342 arch_flush_lazy_mmu_mode();
33343
33344 return (void *)vaddr;
33345diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33346index fdf617c..b9e85bc 100644
33347--- a/arch/x86/mm/ioremap.c
33348+++ b/arch/x86/mm/ioremap.c
33349@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
33350 unsigned long i;
33351
33352 for (i = 0; i < nr_pages; ++i)
33353- if (pfn_valid(start_pfn + i) &&
33354- !PageReserved(pfn_to_page(start_pfn + i)))
33355+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
33356+ !PageReserved(pfn_to_page(start_pfn + i))))
33357 return 1;
33358
33359 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
33360@@ -283,7 +283,7 @@ EXPORT_SYMBOL(ioremap_prot);
33361 *
33362 * Caller must ensure there is only one unmapping for the same pointer.
33363 */
33364-void iounmap(volatile void __iomem *addr)
33365+void iounmap(const volatile void __iomem *addr)
33366 {
33367 struct vm_struct *p, *o;
33368
33369@@ -332,30 +332,29 @@ EXPORT_SYMBOL(iounmap);
33370 */
33371 void *xlate_dev_mem_ptr(phys_addr_t phys)
33372 {
33373- void *addr;
33374- unsigned long start = phys & PAGE_MASK;
33375-
33376 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33377- if (page_is_ram(start >> PAGE_SHIFT))
33378+ if (page_is_ram(phys >> PAGE_SHIFT))
33379+#ifdef CONFIG_HIGHMEM
33380+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33381+#endif
33382 return __va(phys);
33383
33384- addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
33385- if (addr)
33386- addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
33387-
33388- return addr;
33389+ return (void __force *)ioremap_cache(phys, PAGE_SIZE);
33390 }
33391
33392 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
33393 {
33394 if (page_is_ram(phys >> PAGE_SHIFT))
33395+#ifdef CONFIG_HIGHMEM
33396+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33397+#endif
33398 return;
33399
33400 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33401 return;
33402 }
33403
33404-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33405+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33406
33407 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33408 {
33409@@ -391,8 +390,7 @@ void __init early_ioremap_init(void)
33410 early_ioremap_setup();
33411
33412 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33413- memset(bm_pte, 0, sizeof(bm_pte));
33414- pmd_populate_kernel(&init_mm, pmd, bm_pte);
33415+ pmd_populate_user(&init_mm, pmd, bm_pte);
33416
33417 /*
33418 * The boot-ioremap range spans multiple pmds, for which
33419diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33420index b4f2e7e..96c9c3e 100644
33421--- a/arch/x86/mm/kmemcheck/kmemcheck.c
33422+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33423@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33424 * memory (e.g. tracked pages)? For now, we need this to avoid
33425 * invoking kmemcheck for PnP BIOS calls.
33426 */
33427- if (regs->flags & X86_VM_MASK)
33428+ if (v8086_mode(regs))
33429 return false;
33430- if (regs->cs != __KERNEL_CS)
33431+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
33432 return false;
33433
33434 pte = kmemcheck_pte_lookup(address);
33435diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
33436index df4552b..12c129c 100644
33437--- a/arch/x86/mm/mmap.c
33438+++ b/arch/x86/mm/mmap.c
33439@@ -52,7 +52,7 @@ static unsigned long stack_maxrandom_size(void)
33440 * Leave an at least ~128 MB hole with possible stack randomization.
33441 */
33442 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
33443-#define MAX_GAP (TASK_SIZE/6*5)
33444+#define MAX_GAP (pax_task_size/6*5)
33445
33446 static int mmap_is_legacy(void)
33447 {
33448@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
33449 return rnd << PAGE_SHIFT;
33450 }
33451
33452-static unsigned long mmap_base(void)
33453+static unsigned long mmap_base(struct mm_struct *mm)
33454 {
33455 unsigned long gap = rlimit(RLIMIT_STACK);
33456+ unsigned long pax_task_size = TASK_SIZE;
33457+
33458+#ifdef CONFIG_PAX_SEGMEXEC
33459+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33460+ pax_task_size = SEGMEXEC_TASK_SIZE;
33461+#endif
33462
33463 if (gap < MIN_GAP)
33464 gap = MIN_GAP;
33465 else if (gap > MAX_GAP)
33466 gap = MAX_GAP;
33467
33468- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
33469+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
33470 }
33471
33472 /*
33473 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
33474 * does, but not when emulating X86_32
33475 */
33476-static unsigned long mmap_legacy_base(void)
33477+static unsigned long mmap_legacy_base(struct mm_struct *mm)
33478 {
33479- if (mmap_is_ia32())
33480+ if (mmap_is_ia32()) {
33481+
33482+#ifdef CONFIG_PAX_SEGMEXEC
33483+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33484+ return SEGMEXEC_TASK_UNMAPPED_BASE;
33485+ else
33486+#endif
33487+
33488 return TASK_UNMAPPED_BASE;
33489- else
33490+ } else
33491 return TASK_UNMAPPED_BASE + mmap_rnd();
33492 }
33493
33494@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
33495 */
33496 void arch_pick_mmap_layout(struct mm_struct *mm)
33497 {
33498- mm->mmap_legacy_base = mmap_legacy_base();
33499- mm->mmap_base = mmap_base();
33500+ mm->mmap_legacy_base = mmap_legacy_base(mm);
33501+ mm->mmap_base = mmap_base(mm);
33502+
33503+#ifdef CONFIG_PAX_RANDMMAP
33504+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
33505+ mm->mmap_legacy_base += mm->delta_mmap;
33506+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
33507+ }
33508+#endif
33509
33510 if (mmap_is_legacy()) {
33511 mm->mmap_base = mm->mmap_legacy_base;
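
mmap_base() clamps the stack gap between MIN_GAP and MAX_GAP and subtracts the randomization delta from the task size; the patch merely swaps TASK_SIZE for a pax_task_size that shrinks under SEGMEXEC. The arithmetic with assumed example numbers:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long task_size = 3UL << 30;       /* 3 GiB, a 32-bit split */
        unsigned long gap = 8UL << 20;             /* rlimit(RLIMIT_STACK) = 8 MiB */
        unsigned long min_gap = 128UL << 20;       /* MIN_GAP, randomization term omitted */
        unsigned long max_gap = task_size / 6 * 5; /* MAX_GAP */
        unsigned long rnd = 0x1a3UL << 12;         /* a pretend mmap_rnd() result */

        if (gap < min_gap)
            gap = min_gap;
        else if (gap > max_gap)
            gap = max_gap;

        /* 0xc0000000 - 0x08000000 - 0x001a3000 = 0xb7e5d000 */
        printf("mmap_base = %#lx\n", PAGE_ALIGN(task_size - gap - rnd));
        return 0;
    }
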
33512diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
33513index 0057a7a..95c7edd 100644
33514--- a/arch/x86/mm/mmio-mod.c
33515+++ b/arch/x86/mm/mmio-mod.c
33516@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
33517 break;
33518 default:
33519 {
33520- unsigned char *ip = (unsigned char *)instptr;
33521+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
33522 my_trace->opcode = MMIO_UNKNOWN_OP;
33523 my_trace->width = 0;
33524 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
33525@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
33526 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33527 void __iomem *addr)
33528 {
33529- static atomic_t next_id;
33530+ static atomic_unchecked_t next_id;
33531 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
33532 /* These are page-unaligned. */
33533 struct mmiotrace_map map = {
33534@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
33535 .private = trace
33536 },
33537 .phys = offset,
33538- .id = atomic_inc_return(&next_id)
33539+ .id = atomic_inc_return_unchecked(&next_id)
33540 };
33541 map.map_id = trace->id;
33542
33543@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
33544 ioremap_trace_core(offset, size, addr);
33545 }
33546
33547-static void iounmap_trace_core(volatile void __iomem *addr)
33548+static void iounmap_trace_core(const volatile void __iomem *addr)
33549 {
33550 struct mmiotrace_map map = {
33551 .phys = 0,
33552@@ -328,7 +328,7 @@ not_enabled:
33553 }
33554 }
33555
33556-void mmiotrace_iounmap(volatile void __iomem *addr)
33557+void mmiotrace_iounmap(const volatile void __iomem *addr)
33558 {
33559 might_sleep();
33560 if (is_enabled()) /* recheck and proper locking in *_core() */
33561diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
33562index 1a88370..3f598b5 100644
33563--- a/arch/x86/mm/numa.c
33564+++ b/arch/x86/mm/numa.c
33565@@ -499,7 +499,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
33566 }
33567 }
33568
33569-static int __init numa_register_memblks(struct numa_meminfo *mi)
33570+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
33571 {
33572 unsigned long uninitialized_var(pfn_align);
33573 int i, nid;
33574diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
33575index 536ea2f..f42c293 100644
33576--- a/arch/x86/mm/pageattr.c
33577+++ b/arch/x86/mm/pageattr.c
33578@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33579 */
33580 #ifdef CONFIG_PCI_BIOS
33581 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
33582- pgprot_val(forbidden) |= _PAGE_NX;
33583+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33584 #endif
33585
33586 /*
33587@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33588 * Does not cover __inittext since that is gone later on. On
33589 * 64bit we do not enforce !NX on the low mapping
33590 */
33591- if (within(address, (unsigned long)_text, (unsigned long)_etext))
33592- pgprot_val(forbidden) |= _PAGE_NX;
33593+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
33594+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33595
33596+#ifdef CONFIG_DEBUG_RODATA
33597 /*
33598 * The .rodata section needs to be read-only. Using the pfn
33599 * catches all aliases.
33600@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33601 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
33602 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
33603 pgprot_val(forbidden) |= _PAGE_RW;
33604+#endif
33605
33606 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
33607 /*
33608@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
33609 }
33610 #endif
33611
33612+#ifdef CONFIG_PAX_KERNEXEC
33613+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
33614+ pgprot_val(forbidden) |= _PAGE_RW;
33615+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
33616+ }
33617+#endif
33618+
33619 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
33620
33621 return prot;
33622@@ -440,23 +449,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
33623 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
33624 {
33625 /* change init_mm */
33626+ pax_open_kernel();
33627 set_pte_atomic(kpte, pte);
33628+
33629 #ifdef CONFIG_X86_32
33630 if (!SHARED_KERNEL_PMD) {
33631+
33632+#ifdef CONFIG_PAX_PER_CPU_PGD
33633+ unsigned long cpu;
33634+#else
33635 struct page *page;
33636+#endif
33637
33638+#ifdef CONFIG_PAX_PER_CPU_PGD
33639+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33640+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
33641+#else
33642 list_for_each_entry(page, &pgd_list, lru) {
33643- pgd_t *pgd;
33644+ pgd_t *pgd = (pgd_t *)page_address(page);
33645+#endif
33646+
33647 pud_t *pud;
33648 pmd_t *pmd;
33649
33650- pgd = (pgd_t *)page_address(page) + pgd_index(address);
33651+ pgd += pgd_index(address);
33652 pud = pud_offset(pgd, address);
33653 pmd = pmd_offset(pud, address);
33654 set_pte_atomic((pte_t *)pmd, pte);
33655 }
33656 }
33657 #endif
33658+ pax_close_kernel();
33659 }
33660
33661 static int
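
static_protections() accumulates a 'forbidden' bitmask from within() range checks and strips those bits from the requested protection; the KERNEXEC hunk adds the kernel image (text through _sdata) to the ranges that may become neither writable nor non-executable. A compressed sketch of that pattern, with stand-in bit positions:

    #include <stdio.h>

    typedef unsigned long pgprotval_t;
    #define _PAGE_RW (1UL << 1)     /* stand-in bits, not the real layout */
    #define _PAGE_NX (1UL << 2)

    static int within(unsigned long x, unsigned long lo, unsigned long hi)
    {
        return lo <= x && x < hi;
    }

    static pgprotval_t protect(pgprotval_t prot, unsigned long addr,
                               unsigned long text_lo, unsigned long text_hi)
    {
        pgprotval_t forbidden = 0;

        /* inside kernel text: neither writable nor NX may survive */
        if (within(addr, text_lo, text_hi))
            forbidden |= _PAGE_RW | _PAGE_NX;

        return prot & ~forbidden;
    }

    int main(void)
    {
        pgprotval_t prot = _PAGE_RW | _PAGE_NX;
        printf("%#lx\n", protect(prot, 0xc0100000UL,
                                 0xc0000000UL, 0xc0400000UL)); /* -> 0 */
        return 0;
    }
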
33662diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
33663index 7ac6869..c0ba541 100644
33664--- a/arch/x86/mm/pat.c
33665+++ b/arch/x86/mm/pat.c
33666@@ -89,7 +89,7 @@ static inline enum page_cache_mode get_page_memtype(struct page *pg)
33667 unsigned long pg_flags = pg->flags & _PGMT_MASK;
33668
33669 if (pg_flags == _PGMT_DEFAULT)
33670- return -1;
33671+ return _PAGE_CACHE_MODE_NUM;
33672 else if (pg_flags == _PGMT_WC)
33673 return _PAGE_CACHE_MODE_WC;
33674 else if (pg_flags == _PGMT_UC_MINUS)
33675@@ -346,7 +346,7 @@ static int reserve_ram_pages_type(u64 start, u64 end,
33676
33677 page = pfn_to_page(pfn);
33678 type = get_page_memtype(page);
33679- if (type != -1) {
33680+ if (type != _PAGE_CACHE_MODE_NUM) {
33681 pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
33682 start, end - 1, type, req_type);
33683 if (new_type)
33684@@ -498,7 +498,7 @@ int free_memtype(u64 start, u64 end)
33685
33686 if (!entry) {
33687 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
33688- current->comm, current->pid, start, end - 1);
33689+ current->comm, task_pid_nr(current), start, end - 1);
33690 return -EINVAL;
33691 }
33692
33693@@ -532,10 +532,10 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
33694 page = pfn_to_page(paddr >> PAGE_SHIFT);
33695 rettype = get_page_memtype(page);
33696 /*
33697- * -1 from get_page_memtype() implies RAM page is in its
33698+ * _PAGE_CACHE_MODE_NUM from get_page_memtype() implies RAM page is in its
33699 * default state and not reserved, and hence of type WB
33700 */
33701- if (rettype == -1)
33702+ if (rettype == _PAGE_CACHE_MODE_NUM)
33703 rettype = _PAGE_CACHE_MODE_WB;
33704
33705 return rettype;
33706@@ -628,8 +628,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33707
33708 while (cursor < to) {
33709 if (!devmem_is_allowed(pfn)) {
33710- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
33711- current->comm, from, to - 1);
33712+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
33713+ current->comm, from, to - 1, cursor);
33714 return 0;
33715 }
33716 cursor += PAGE_SIZE;
33717@@ -700,7 +700,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
33718 if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
33719 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
33720 "for [mem %#010Lx-%#010Lx]\n",
33721- current->comm, current->pid,
33722+ current->comm, task_pid_nr(current),
33723 cattr_name(pcm),
33724 base, (unsigned long long)(base + size-1));
33725 return -EINVAL;
33726@@ -735,7 +735,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33727 pcm = lookup_memtype(paddr);
33728 if (want_pcm != pcm) {
33729 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
33730- current->comm, current->pid,
33731+ current->comm, task_pid_nr(current),
33732 cattr_name(want_pcm),
33733 (unsigned long long)paddr,
33734 (unsigned long long)(paddr + size - 1),
33735@@ -757,7 +757,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
33736 free_memtype(paddr, paddr + size);
33737 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
33738 " for [mem %#010Lx-%#010Lx], got %s\n",
33739- current->comm, current->pid,
33740+ current->comm, task_pid_nr(current),
33741 cattr_name(want_pcm),
33742 (unsigned long long)paddr,
33743 (unsigned long long)(paddr + size - 1),
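The pat.c hunks make two recurring substitutions: the in-band -1 sentinel returned by get_page_memtype() becomes _PAGE_CACHE_MODE_NUM, a value one past the last valid enum page_cache_mode, and raw current->pid reads go through the task_pid_nr() accessor (the same accessor change appears in pat_rbtree.c below). Why keeping the "not reserved" answer inside the enum's range is the cleaner contract, shown as a runnable standalone demo (enum abbreviated):

#include <stdio.h>

enum page_cache_mode {			/* mirrors the kernel enum, abbreviated */
	_PAGE_CACHE_MODE_WB,
	_PAGE_CACHE_MODE_WC,
	_PAGE_CACHE_MODE_UC_MINUS,
	_PAGE_CACHE_MODE_NUM		/* one past the last valid mode */
};

static enum page_cache_mode lookup(int reserved)
{
	/* out-of-band answer without leaving the enum's value range */
	return reserved ? _PAGE_CACHE_MODE_WC : _PAGE_CACHE_MODE_NUM;
}

int main(void)
{
	if (lookup(0) == _PAGE_CACHE_MODE_NUM)
		puts("page not reserved, treat as WB");
	return 0;
}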
33744diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
33745index 6582adc..fcc5d0b 100644
33746--- a/arch/x86/mm/pat_rbtree.c
33747+++ b/arch/x86/mm/pat_rbtree.c
33748@@ -161,7 +161,7 @@ success:
33749
33750 failure:
33751 printk(KERN_INFO "%s:%d conflicting memory types "
33752- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
33753+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
33754 end, cattr_name(found_type), cattr_name(match->type));
33755 return -EBUSY;
33756 }
33757diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
33758index 9f0614d..92ae64a 100644
33759--- a/arch/x86/mm/pf_in.c
33760+++ b/arch/x86/mm/pf_in.c
33761@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
33762 int i;
33763 enum reason_type rv = OTHERS;
33764
33765- p = (unsigned char *)ins_addr;
33766+ p = (unsigned char *)ktla_ktva(ins_addr);
33767 p += skip_prefix(p, &prf);
33768 p += get_opcode(p, &opcode);
33769
33770@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
33771 struct prefix_bits prf;
33772 int i;
33773
33774- p = (unsigned char *)ins_addr;
33775+ p = (unsigned char *)ktla_ktva(ins_addr);
33776 p += skip_prefix(p, &prf);
33777 p += get_opcode(p, &opcode);
33778
33779@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
33780 struct prefix_bits prf;
33781 int i;
33782
33783- p = (unsigned char *)ins_addr;
33784+ p = (unsigned char *)ktla_ktva(ins_addr);
33785 p += skip_prefix(p, &prf);
33786 p += get_opcode(p, &opcode);
33787
33788@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
33789 struct prefix_bits prf;
33790 int i;
33791
33792- p = (unsigned char *)ins_addr;
33793+ p = (unsigned char *)ktla_ktva(ins_addr);
33794 p += skip_prefix(p, &prf);
33795 p += get_opcode(p, &opcode);
33796 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
33797@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
33798 struct prefix_bits prf;
33799 int i;
33800
33801- p = (unsigned char *)ins_addr;
33802+ p = (unsigned char *)ktla_ktva(ins_addr);
33803 p += skip_prefix(p, &prf);
33804 p += get_opcode(p, &opcode);
33805 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
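Every instruction-decode helper in pf_in.c now reads opcode bytes through ktla_ktva() instead of dereferencing the faulting address directly: under KERNEXEC on i386 the kernel text mapping is shifted, so the linear address the fault reports is not the address through which the bytes can be read. The idea sketched with a hypothetical layout; DEMO_TEXT_SHIFT is a placeholder, not the patch's actual value:

/*
 * "kernel text linear address -> kernel virtual address", sketched.
 * With KERNEXEC the text mapping is rebased by a constant; without it
 * the macro is the identity.
 */
#ifdef CONFIG_PAX_KERNEXEC
#define DEMO_TEXT_SHIFT	0x01000000UL		/* hypothetical */
#define demo_ktla_ktva(addr)	((addr) + DEMO_TEXT_SHIFT)
#else
#define demo_ktla_ktva(addr)	(addr)
#endif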
33806diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
33807index 6fb6927..634b0f7 100644
33808--- a/arch/x86/mm/pgtable.c
33809+++ b/arch/x86/mm/pgtable.c
33810@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd)
33811 list_del(&page->lru);
33812 }
33813
33814-#define UNSHARED_PTRS_PER_PGD \
33815- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33816+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33817+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
33818
33819+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
33820+{
33821+ unsigned int count = USER_PGD_PTRS;
33822
33823+ if (!pax_user_shadow_base)
33824+ return;
33825+
33826+ while (count--)
33827+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
33828+}
33829+#endif
33830+
33831+#ifdef CONFIG_PAX_PER_CPU_PGD
33832+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
33833+{
33834+ unsigned int count = USER_PGD_PTRS;
33835+
33836+ while (count--) {
33837+ pgd_t pgd;
33838+
33839+#ifdef CONFIG_X86_64
33840+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
33841+#else
33842+ pgd = *src++;
33843+#endif
33844+
33845+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
33846+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
33847+#endif
33848+
33849+ *dst++ = pgd;
33850+ }
33851+
33852+}
33853+#endif
33854+
33855+#ifdef CONFIG_X86_64
33856+#define pxd_t pud_t
33857+#define pyd_t pgd_t
33858+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
33859+#define pgtable_pxd_page_ctor(page) true
33860+#define pgtable_pxd_page_dtor(page) do {} while (0)
33861+#define pxd_free(mm, pud) pud_free((mm), (pud))
33862+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
33863+#define pyd_offset(mm, address) pgd_offset((mm), (address))
33864+#define PYD_SIZE PGDIR_SIZE
33865+#else
33866+#define pxd_t pmd_t
33867+#define pyd_t pud_t
33868+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
33869+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
33870+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
33871+#define pxd_free(mm, pud) pmd_free((mm), (pud))
33872+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
33873+#define pyd_offset(mm, address) pud_offset((mm), (address))
33874+#define PYD_SIZE PUD_SIZE
33875+#endif
33876+
33877+#ifdef CONFIG_PAX_PER_CPU_PGD
33878+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
33879+static inline void pgd_dtor(pgd_t *pgd) {}
33880+#else
33881 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
33882 {
33883 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
33884@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
33885 pgd_list_del(pgd);
33886 spin_unlock(&pgd_lock);
33887 }
33888+#endif
33889
33890 /*
33891 * List of all pgd's needed for non-PAE so it can invalidate entries
33892@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
33893 * -- nyc
33894 */
33895
33896-#ifdef CONFIG_X86_PAE
33897+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
33898 /*
33899 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
33900 * updating the top-level pagetable entries to guarantee the
33901@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
33902 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
33903 * and initialize the kernel pmds here.
33904 */
33905-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
33906+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
33907
33908 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33909 {
33910@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
33911 */
33912 flush_tlb_mm(mm);
33913 }
33914+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
33915+#define PREALLOCATED_PXDS USER_PGD_PTRS
33916 #else /* !CONFIG_X86_PAE */
33917
33918 /* No need to prepopulate any pagetable entries in non-PAE modes. */
33919-#define PREALLOCATED_PMDS 0
33920+#define PREALLOCATED_PXDS 0
33921
33922 #endif /* CONFIG_X86_PAE */
33923
33924-static void free_pmds(pmd_t *pmds[])
33925+static void free_pxds(pxd_t *pxds[])
33926 {
33927 int i;
33928
33929- for(i = 0; i < PREALLOCATED_PMDS; i++)
33930- if (pmds[i]) {
33931- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
33932- free_page((unsigned long)pmds[i]);
33933+ for(i = 0; i < PREALLOCATED_PXDS; i++)
33934+ if (pxds[i]) {
33935+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
33936+ free_page((unsigned long)pxds[i]);
33937 }
33938 }
33939
33940-static int preallocate_pmds(pmd_t *pmds[])
33941+static int preallocate_pxds(pxd_t *pxds[])
33942 {
33943 int i;
33944 bool failed = false;
33945
33946- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33947- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
33948- if (!pmd)
33949+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33950+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
33951+ if (!pxd)
33952 failed = true;
33953- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
33954- free_page((unsigned long)pmd);
33955- pmd = NULL;
33956+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
33957+ free_page((unsigned long)pxd);
33958+ pxd = NULL;
33959 failed = true;
33960 }
33961- pmds[i] = pmd;
33962+ pxds[i] = pxd;
33963 }
33964
33965 if (failed) {
33966- free_pmds(pmds);
33967+ free_pxds(pxds);
33968 return -ENOMEM;
33969 }
33970
33971@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[])
33972 * preallocate which never got a corresponding vma will need to be
33973 * freed manually.
33974 */
33975-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
33976+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
33977 {
33978 int i;
33979
33980- for(i = 0; i < PREALLOCATED_PMDS; i++) {
33981+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
33982 pgd_t pgd = pgdp[i];
33983
33984 if (pgd_val(pgd) != 0) {
33985- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
33986+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
33987
33988- pgdp[i] = native_make_pgd(0);
33989+ set_pgd(pgdp + i, native_make_pgd(0));
33990
33991- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
33992- pmd_free(mm, pmd);
33993+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
33994+ pxd_free(mm, pxd);
33995 }
33996 }
33997 }
33998
33999-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
34000+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
34001 {
34002- pud_t *pud;
34003+ pyd_t *pyd;
34004 int i;
34005
34006- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
34007+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
34008 return;
34009
34010- pud = pud_offset(pgd, 0);
34011-
34012- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
34013- pmd_t *pmd = pmds[i];
34014+#ifdef CONFIG_X86_64
34015+ pyd = pyd_offset(mm, 0L);
34016+#else
34017+ pyd = pyd_offset(pgd, 0L);
34018+#endif
34019
34020+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
34021+ pxd_t *pxd = pxds[i];
34022 if (i >= KERNEL_PGD_BOUNDARY)
34023- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
34024- sizeof(pmd_t) * PTRS_PER_PMD);
34025+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
34026+ sizeof(pxd_t) * PTRS_PER_PMD);
34027
34028- pud_populate(mm, pud, pmd);
34029+ pyd_populate(mm, pyd, pxd);
34030 }
34031 }
34032
34033 pgd_t *pgd_alloc(struct mm_struct *mm)
34034 {
34035 pgd_t *pgd;
34036- pmd_t *pmds[PREALLOCATED_PMDS];
34037+ pxd_t *pxds[PREALLOCATED_PXDS];
34038
34039 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
34040
34041@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34042
34043 mm->pgd = pgd;
34044
34045- if (preallocate_pmds(pmds) != 0)
34046+ if (preallocate_pxds(pxds) != 0)
34047 goto out_free_pgd;
34048
34049 if (paravirt_pgd_alloc(mm) != 0)
34050- goto out_free_pmds;
34051+ goto out_free_pxds;
34052
34053 /*
34054 * Make sure that pre-populating the pmds is atomic with
34055@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34056 spin_lock(&pgd_lock);
34057
34058 pgd_ctor(mm, pgd);
34059- pgd_prepopulate_pmd(mm, pgd, pmds);
34060+ pgd_prepopulate_pxd(mm, pgd, pxds);
34061
34062 spin_unlock(&pgd_lock);
34063
34064 return pgd;
34065
34066-out_free_pmds:
34067- free_pmds(pmds);
34068+out_free_pxds:
34069+ free_pxds(pxds);
34070 out_free_pgd:
34071 free_page((unsigned long)pgd);
34072 out:
34073@@ -313,7 +380,7 @@ out:
34074
34075 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
34076 {
34077- pgd_mop_up_pmds(mm, pgd);
34078+ pgd_mop_up_pxds(mm, pgd);
34079 pgd_dtor(pgd);
34080 paravirt_pgd_free(mm, pgd);
34081 free_page((unsigned long)pgd);
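The pxd_t/pyd_t macro layer introduced above lets a single preallocation/teardown body serve both the 32-bit PAE case (preallocated PMDs hung off PUDs) and the 64-bit per-CPU-PGD case (preallocated PUDs hung off the PGD). A standalone, compile-time illustration of the aliasing trick; build with -DDEMO_64 for the pud_t flavour:

#include <stdio.h>

typedef struct { unsigned long v; } pmd_t;
typedef struct { unsigned long v; } pud_t;

#ifdef DEMO_64
#define pxd_t pud_t
#else
#define pxd_t pmd_t
#endif

static void free_pxds(pxd_t *pxds[], int n)
{
	/* one body, two concrete page-table types, chosen at compile time */
	for (int i = 0; i < n; i++)
		if (pxds[i])
			printf("freeing pxd %d (val %lu)\n", i, pxds[i]->v);
}

int main(void)
{
	pxd_t a = { 42 }, *v[1] = { &a };

	free_pxds(v, 1);
	return 0;
}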
34082diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
34083index 75cc097..79a097f 100644
34084--- a/arch/x86/mm/pgtable_32.c
34085+++ b/arch/x86/mm/pgtable_32.c
34086@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
34087 return;
34088 }
34089 pte = pte_offset_kernel(pmd, vaddr);
34090+
34091+ pax_open_kernel();
34092 if (pte_val(pteval))
34093 set_pte_at(&init_mm, vaddr, pte, pteval);
34094 else
34095 pte_clear(&init_mm, vaddr, pte);
34096+ pax_close_kernel();
34097
34098 /*
34099 * It's enough to flush this one mapping.
34100diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
34101index e666cbb..61788c45 100644
34102--- a/arch/x86/mm/physaddr.c
34103+++ b/arch/x86/mm/physaddr.c
34104@@ -10,7 +10,7 @@
34105 #ifdef CONFIG_X86_64
34106
34107 #ifdef CONFIG_DEBUG_VIRTUAL
34108-unsigned long __phys_addr(unsigned long x)
34109+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34110 {
34111 unsigned long y = x - __START_KERNEL_map;
34112
34113@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
34114 #else
34115
34116 #ifdef CONFIG_DEBUG_VIRTUAL
34117-unsigned long __phys_addr(unsigned long x)
34118+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34119 {
34120 unsigned long phys_addr = x - PAGE_OFFSET;
34121 /* VMALLOC_* aren't constants */
34122diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
34123index 90555bf..f5f1828 100644
34124--- a/arch/x86/mm/setup_nx.c
34125+++ b/arch/x86/mm/setup_nx.c
34126@@ -5,8 +5,10 @@
34127 #include <asm/pgtable.h>
34128 #include <asm/proto.h>
34129
34130+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34131 static int disable_nx;
34132
34133+#ifndef CONFIG_PAX_PAGEEXEC
34134 /*
34135 * noexec = on|off
34136 *
34137@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
34138 return 0;
34139 }
34140 early_param("noexec", noexec_setup);
34141+#endif
34142+
34143+#endif
34144
34145 void x86_configure_nx(void)
34146 {
34147+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34148 if (cpu_has_nx && !disable_nx)
34149 __supported_pte_mask |= _PAGE_NX;
34150 else
34151+#endif
34152 __supported_pte_mask &= ~_PAGE_NX;
34153 }
34154
34155diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
34156index ee61c36..e6fedeb 100644
34157--- a/arch/x86/mm/tlb.c
34158+++ b/arch/x86/mm/tlb.c
34159@@ -48,7 +48,11 @@ void leave_mm(int cpu)
34160 BUG();
34161 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
34162 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
34163+
34164+#ifndef CONFIG_PAX_PER_CPU_PGD
34165 load_cr3(swapper_pg_dir);
34166+#endif
34167+
34168 /*
34169 * This gets called in the idle path where RCU
34170 * functions differently. Tracing normally
34171diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
34172new file mode 100644
34173index 0000000..dace51c
34174--- /dev/null
34175+++ b/arch/x86/mm/uderef_64.c
34176@@ -0,0 +1,37 @@
34177+#include <linux/mm.h>
34178+#include <asm/pgtable.h>
34179+#include <asm/uaccess.h>
34180+
34181+#ifdef CONFIG_PAX_MEMORY_UDEREF
34182+/* PaX: due to the special call convention these functions must
34183+ * - remain leaf functions under all configurations,
34184+ * - never be called directly, only dereferenced from the wrappers.
34185+ */
34186+void __pax_open_userland(void)
34187+{
34188+ unsigned int cpu;
34189+
34190+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34191+ return;
34192+
34193+ cpu = raw_get_cpu();
34194+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34195+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34196+ raw_put_cpu_no_resched();
34197+}
34198+EXPORT_SYMBOL(__pax_open_userland);
34199+
34200+void __pax_close_userland(void)
34201+{
34202+ unsigned int cpu;
34203+
34204+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34205+ return;
34206+
34207+ cpu = raw_get_cpu();
34208+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34209+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34210+ raw_put_cpu_no_resched();
34211+}
34212+EXPORT_SYMBOL(__pax_close_userland);
34213+#endif
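The two leaf functions above implement UDEREF's userland window on x86-64: CR3 is switched between the per-CPU kernel and user PGDs, and PCID_NOFLUSH keeps the switch from flushing the TLB. Callers are expected to bracket each legitimate userland access; a hedged caller-side sketch, assuming pax_open_userland()/pax_close_userland() wrapper macros that reach the functions above:

static inline int demo_get_user_byte(unsigned char *dst,
				     const unsigned char __user *src)
{
	int ret;

	pax_open_userland();		/* map the user half of the PGD */
	ret = __get_user(*dst, src);	/* ordinary uaccess in the window */
	pax_close_userland();		/* unmap it again */
	return ret;
}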
34214diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34215index 6440221..f84b5c7 100644
34216--- a/arch/x86/net/bpf_jit.S
34217+++ b/arch/x86/net/bpf_jit.S
34218@@ -9,6 +9,7 @@
34219 */
34220 #include <linux/linkage.h>
34221 #include <asm/dwarf2.h>
34222+#include <asm/alternative-asm.h>
34223
34224 /*
34225 * Calling convention :
34226@@ -38,6 +39,7 @@ sk_load_word_positive_offset:
34227 jle bpf_slow_path_word
34228 mov (SKBDATA,%rsi),%eax
34229 bswap %eax /* ntohl() */
34230+ pax_force_retaddr
34231 ret
34232
34233 sk_load_half:
34234@@ -55,6 +57,7 @@ sk_load_half_positive_offset:
34235 jle bpf_slow_path_half
34236 movzwl (SKBDATA,%rsi),%eax
34237 rol $8,%ax # ntohs()
34238+ pax_force_retaddr
34239 ret
34240
34241 sk_load_byte:
34242@@ -69,6 +72,7 @@ sk_load_byte_positive_offset:
34243 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34244 jle bpf_slow_path_byte
34245 movzbl (SKBDATA,%rsi),%eax
34246+ pax_force_retaddr
34247 ret
34248
34249 /* rsi contains offset and can be scratched */
34250@@ -90,6 +94,7 @@ bpf_slow_path_word:
34251 js bpf_error
34252 mov - MAX_BPF_STACK + 32(%rbp),%eax
34253 bswap %eax
34254+ pax_force_retaddr
34255 ret
34256
34257 bpf_slow_path_half:
34258@@ -98,12 +103,14 @@ bpf_slow_path_half:
34259 mov - MAX_BPF_STACK + 32(%rbp),%ax
34260 rol $8,%ax
34261 movzwl %ax,%eax
34262+ pax_force_retaddr
34263 ret
34264
34265 bpf_slow_path_byte:
34266 bpf_slow_path_common(1)
34267 js bpf_error
34268 movzbl - MAX_BPF_STACK + 32(%rbp),%eax
34269+ pax_force_retaddr
34270 ret
34271
34272 #define sk_negative_common(SIZE) \
34273@@ -126,6 +133,7 @@ sk_load_word_negative_offset:
34274 sk_negative_common(4)
34275 mov (%rax), %eax
34276 bswap %eax
34277+ pax_force_retaddr
34278 ret
34279
34280 bpf_slow_path_half_neg:
34281@@ -137,6 +145,7 @@ sk_load_half_negative_offset:
34282 mov (%rax),%ax
34283 rol $8,%ax
34284 movzwl %ax,%eax
34285+ pax_force_retaddr
34286 ret
34287
34288 bpf_slow_path_byte_neg:
34289@@ -146,6 +155,7 @@ sk_load_byte_negative_offset:
34290 .globl sk_load_byte_negative_offset
34291 sk_negative_common(1)
34292 movzbl (%rax), %eax
34293+ pax_force_retaddr
34294 ret
34295
34296 bpf_error:
34297@@ -156,4 +166,5 @@ bpf_error:
34298 mov - MAX_BPF_STACK + 16(%rbp),%r14
34299 mov - MAX_BPF_STACK + 24(%rbp),%r15
34300 leaveq
34301+ pax_force_retaddr
34302 ret
34303diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34304index 9875143..00f6656 100644
34305--- a/arch/x86/net/bpf_jit_comp.c
34306+++ b/arch/x86/net/bpf_jit_comp.c
34307@@ -13,7 +13,11 @@
34308 #include <linux/if_vlan.h>
34309 #include <asm/cacheflush.h>
34310
34311+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
34312+int bpf_jit_enable __read_only;
34313+#else
34314 int bpf_jit_enable __read_mostly;
34315+#endif
34316
34317 /*
34318 * assembly code in arch/x86/net/bpf_jit.S
34319@@ -174,7 +178,9 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
34320 static void jit_fill_hole(void *area, unsigned int size)
34321 {
34322 /* fill whole space with int3 instructions */
34323+ pax_open_kernel();
34324 memset(area, 0xcc, size);
34325+ pax_close_kernel();
34326 }
34327
34328 struct jit_context {
34329@@ -896,7 +902,9 @@ common_load:
34330 pr_err("bpf_jit_compile fatal error\n");
34331 return -EFAULT;
34332 }
34333+ pax_open_kernel();
34334 memcpy(image + proglen, temp, ilen);
34335+ pax_close_kernel();
34336 }
34337 proglen += ilen;
34338 addrs[i] = proglen;
34339@@ -968,7 +976,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
34340
34341 if (image) {
34342 bpf_flush_icache(header, image + proglen);
34343- set_memory_ro((unsigned long)header, header->pages);
34344 prog->bpf_func = (void *)image;
34345 prog->jited = true;
34346 }
34347@@ -981,12 +988,8 @@ void bpf_jit_free(struct bpf_prog *fp)
34348 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
34349 struct bpf_binary_header *header = (void *)addr;
34350
34351- if (!fp->jited)
34352- goto free_filter;
34353+ if (fp->jited)
34354+ bpf_jit_binary_free(header);
34355
34356- set_memory_rw(addr, header->pages);
34357- bpf_jit_binary_free(header);
34358-
34359-free_filter:
34360 bpf_prog_unlock_free(fp);
34361 }
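The JIT hunks enforce W^X on the generated code: bpf_jit_enable becomes __read_only under GRKERNSEC_BPF_HARDEN, and every write into the JIT image (the int3 fill, the instruction copy) is wrapped in pax_open_kernel()/pax_close_kernel(), which in turn lets the explicit set_memory_ro()/set_memory_rw() calls go away. A runnable userspace analogue of the same discipline, with mprotect() standing in for the kernel helpers (x86-64 Linux, error handling trimmed):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* x86-64: mov eax, 42 ; ret */
	static const unsigned char code[] = { 0xb8, 42, 0, 0, 0, 0xc3 };
	unsigned char *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	memcpy(buf, code, sizeof(code));		/* "open": writable */
	mprotect(buf, 4096, PROT_READ | PROT_EXEC);	/* "close": RX only */

	int (*fn)(void) = (int (*)(void))buf;
	printf("jitted function returned %d\n", fn());
	return 0;
}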
34362diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
34363index 5d04be5..2beeaa2 100644
34364--- a/arch/x86/oprofile/backtrace.c
34365+++ b/arch/x86/oprofile/backtrace.c
34366@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
34367 struct stack_frame_ia32 *fp;
34368 unsigned long bytes;
34369
34370- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34371+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34372 if (bytes != 0)
34373 return NULL;
34374
34375- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
34376+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
34377
34378 oprofile_add_trace(bufhead[0].return_address);
34379
34380@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
34381 struct stack_frame bufhead[2];
34382 unsigned long bytes;
34383
34384- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34385+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34386 if (bytes != 0)
34387 return NULL;
34388
34389@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
34390 {
34391 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
34392
34393- if (!user_mode_vm(regs)) {
34394+ if (!user_mode(regs)) {
34395 unsigned long stack = kernel_stack_pointer(regs);
34396 if (depth)
34397 dump_trace(NULL, regs, (unsigned long *)stack, 0,
34398diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
34399index 1d2e639..f6ef82a 100644
34400--- a/arch/x86/oprofile/nmi_int.c
34401+++ b/arch/x86/oprofile/nmi_int.c
34402@@ -23,6 +23,7 @@
34403 #include <asm/nmi.h>
34404 #include <asm/msr.h>
34405 #include <asm/apic.h>
34406+#include <asm/pgtable.h>
34407
34408 #include "op_counter.h"
34409 #include "op_x86_model.h"
34410@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
34411 if (ret)
34412 return ret;
34413
34414- if (!model->num_virt_counters)
34415- model->num_virt_counters = model->num_counters;
34416+ if (!model->num_virt_counters) {
34417+ pax_open_kernel();
34418+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
34419+ pax_close_kernel();
34420+ }
34421
34422 mux_init(ops);
34423
34424diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
34425index 50d86c0..7985318 100644
34426--- a/arch/x86/oprofile/op_model_amd.c
34427+++ b/arch/x86/oprofile/op_model_amd.c
34428@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
34429 num_counters = AMD64_NUM_COUNTERS;
34430 }
34431
34432- op_amd_spec.num_counters = num_counters;
34433- op_amd_spec.num_controls = num_counters;
34434- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34435+ pax_open_kernel();
34436+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
34437+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
34438+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
34439+ pax_close_kernel();
34440
34441 return 0;
34442 }
34443diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
34444index d90528e..0127e2b 100644
34445--- a/arch/x86/oprofile/op_model_ppro.c
34446+++ b/arch/x86/oprofile/op_model_ppro.c
34447@@ -19,6 +19,7 @@
34448 #include <asm/msr.h>
34449 #include <asm/apic.h>
34450 #include <asm/nmi.h>
34451+#include <asm/pgtable.h>
34452
34453 #include "op_x86_model.h"
34454 #include "op_counter.h"
34455@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
34456
34457 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
34458
34459- op_arch_perfmon_spec.num_counters = num_counters;
34460- op_arch_perfmon_spec.num_controls = num_counters;
34461+ pax_open_kernel();
34462+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
34463+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
34464+ pax_close_kernel();
34465 }
34466
34467 static int arch_perfmon_init(struct oprofile_operations *ignore)
34468diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
34469index 71e8a67..6a313bb 100644
34470--- a/arch/x86/oprofile/op_x86_model.h
34471+++ b/arch/x86/oprofile/op_x86_model.h
34472@@ -52,7 +52,7 @@ struct op_x86_model_spec {
34473 void (*switch_ctrl)(struct op_x86_model_spec const *model,
34474 struct op_msrs const * const msrs);
34475 #endif
34476-};
34477+} __do_const;
34478
34479 struct op_counter_config;
34480
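__do_const here (and on struct irq_router_handler below) tells the constify GCC plugin that a structure holds only function pointers fixed at build time, so every instance can live in read-only memory; the __initconst/const conversions in the PCI router and DMI tables serve the same goal for init-time data. The shape of a constifiable ops structure, standalone and runnable:

#include <stdio.h>

struct demo_ops {
	void (*init)(void);
	void (*shutdown)(void);
};

static void demo_init(void)	{ puts("init"); }
static void demo_shutdown(void)	{ puts("shutdown"); }

static const struct demo_ops default_ops = {	/* const: lands in .rodata */
	.init		= demo_init,
	.shutdown	= demo_shutdown,
};

int main(void)
{
	default_ops.init();
	default_ops.shutdown();
	return 0;
}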
34481diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
34482index 44b9271..4c5a988 100644
34483--- a/arch/x86/pci/intel_mid_pci.c
34484+++ b/arch/x86/pci/intel_mid_pci.c
34485@@ -258,7 +258,7 @@ int __init intel_mid_pci_init(void)
34486 pci_mmcfg_late_init();
34487 pcibios_enable_irq = intel_mid_pci_irq_enable;
34488 pcibios_disable_irq = intel_mid_pci_irq_disable;
34489- pci_root_ops = intel_mid_pci_ops;
34490+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
34491 pci_soc_mode = 1;
34492 /* Continue with standard init */
34493 return 1;
34494diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
34495index 5dc6ca5..25c03f5 100644
34496--- a/arch/x86/pci/irq.c
34497+++ b/arch/x86/pci/irq.c
34498@@ -51,7 +51,7 @@ struct irq_router {
34499 struct irq_router_handler {
34500 u16 vendor;
34501 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
34502-};
34503+} __do_const;
34504
34505 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
34506 void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
34507@@ -791,7 +791,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
34508 return 0;
34509 }
34510
34511-static __initdata struct irq_router_handler pirq_routers[] = {
34512+static __initconst const struct irq_router_handler pirq_routers[] = {
34513 { PCI_VENDOR_ID_INTEL, intel_router_probe },
34514 { PCI_VENDOR_ID_AL, ali_router_probe },
34515 { PCI_VENDOR_ID_ITE, ite_router_probe },
34516@@ -818,7 +818,7 @@ static struct pci_dev *pirq_router_dev;
34517 static void __init pirq_find_router(struct irq_router *r)
34518 {
34519 struct irq_routing_table *rt = pirq_table;
34520- struct irq_router_handler *h;
34521+ const struct irq_router_handler *h;
34522
34523 #ifdef CONFIG_PCI_BIOS
34524 if (!rt->signature) {
34525@@ -1091,7 +1091,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
34526 return 0;
34527 }
34528
34529-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
34530+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
34531 {
34532 .callback = fix_broken_hp_bios_irq9,
34533 .ident = "HP Pavilion N5400 Series Laptop",
34534diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
34535index 9b83b90..4112152 100644
34536--- a/arch/x86/pci/pcbios.c
34537+++ b/arch/x86/pci/pcbios.c
34538@@ -79,7 +79,7 @@ union bios32 {
34539 static struct {
34540 unsigned long address;
34541 unsigned short segment;
34542-} bios32_indirect __initdata = { 0, __KERNEL_CS };
34543+} bios32_indirect __initconst = { 0, __PCIBIOS_CS };
34544
34545 /*
34546 * Returns the entry point for the given service, NULL on error
34547@@ -92,37 +92,80 @@ static unsigned long __init bios32_service(unsigned long service)
34548 unsigned long length; /* %ecx */
34549 unsigned long entry; /* %edx */
34550 unsigned long flags;
34551+ struct desc_struct d, *gdt;
34552
34553 local_irq_save(flags);
34554- __asm__("lcall *(%%edi); cld"
34555+
34556+ gdt = get_cpu_gdt_table(smp_processor_id());
34557+
34558+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
34559+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34560+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
34561+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34562+
34563+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
34564 : "=a" (return_code),
34565 "=b" (address),
34566 "=c" (length),
34567 "=d" (entry)
34568 : "0" (service),
34569 "1" (0),
34570- "D" (&bios32_indirect));
34571+ "D" (&bios32_indirect),
34572+ "r"(__PCIBIOS_DS)
34573+ : "memory");
34574+
34575+ pax_open_kernel();
34576+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
34577+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
34578+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
34579+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
34580+ pax_close_kernel();
34581+
34582 local_irq_restore(flags);
34583
34584 switch (return_code) {
34585- case 0:
34586- return address + entry;
34587- case 0x80: /* Not present */
34588- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34589- return 0;
34590- default: /* Shouldn't happen */
34591- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34592- service, return_code);
34593+ case 0: {
34594+ int cpu;
34595+ unsigned char flags;
34596+
34597+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
34598+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
34599+ printk(KERN_WARNING "bios32_service: not valid\n");
34600 return 0;
34601+ }
34602+ address = address + PAGE_OFFSET;
34603+ length += 16UL; /* some BIOSs underreport this... */
34604+ flags = 4;
34605+ if (length >= 64*1024*1024) {
34606+ length >>= PAGE_SHIFT;
34607+ flags |= 8;
34608+ }
34609+
34610+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
34611+ gdt = get_cpu_gdt_table(cpu);
34612+ pack_descriptor(&d, address, length, 0x9b, flags);
34613+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
34614+ pack_descriptor(&d, address, length, 0x93, flags);
34615+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
34616+ }
34617+ return entry;
34618+ }
34619+ case 0x80: /* Not present */
34620+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
34621+ return 0;
34622+ default: /* Shouldn't happen */
34623+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
34624+ service, return_code);
34625+ return 0;
34626 }
34627 }
34628
34629 static struct {
34630 unsigned long address;
34631 unsigned short segment;
34632-} pci_indirect = { 0, __KERNEL_CS };
34633+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
34634
34635-static int pci_bios_present;
34636+static int pci_bios_present __read_only;
34637
34638 static int __init check_pcibios(void)
34639 {
34640@@ -131,11 +174,13 @@ static int __init check_pcibios(void)
34641 unsigned long flags, pcibios_entry;
34642
34643 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
34644- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
34645+ pci_indirect.address = pcibios_entry;
34646
34647 local_irq_save(flags);
34648- __asm__(
34649- "lcall *(%%edi); cld\n\t"
34650+ __asm__("movw %w6, %%ds\n\t"
34651+ "lcall *%%ss:(%%edi); cld\n\t"
34652+ "push %%ss\n\t"
34653+ "pop %%ds\n\t"
34654 "jc 1f\n\t"
34655 "xor %%ah, %%ah\n"
34656 "1:"
34657@@ -144,7 +189,8 @@ static int __init check_pcibios(void)
34658 "=b" (ebx),
34659 "=c" (ecx)
34660 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
34661- "D" (&pci_indirect)
34662+ "D" (&pci_indirect),
34663+ "r" (__PCIBIOS_DS)
34664 : "memory");
34665 local_irq_restore(flags);
34666
34667@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34668
34669 switch (len) {
34670 case 1:
34671- __asm__("lcall *(%%esi); cld\n\t"
34672+ __asm__("movw %w6, %%ds\n\t"
34673+ "lcall *%%ss:(%%esi); cld\n\t"
34674+ "push %%ss\n\t"
34675+ "pop %%ds\n\t"
34676 "jc 1f\n\t"
34677 "xor %%ah, %%ah\n"
34678 "1:"
34679@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34680 : "1" (PCIBIOS_READ_CONFIG_BYTE),
34681 "b" (bx),
34682 "D" ((long)reg),
34683- "S" (&pci_indirect));
34684+ "S" (&pci_indirect),
34685+ "r" (__PCIBIOS_DS));
34686 /*
34687 * Zero-extend the result beyond 8 bits, do not trust the
34688 * BIOS having done it:
34689@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34690 *value &= 0xff;
34691 break;
34692 case 2:
34693- __asm__("lcall *(%%esi); cld\n\t"
34694+ __asm__("movw %w6, %%ds\n\t"
34695+ "lcall *%%ss:(%%esi); cld\n\t"
34696+ "push %%ss\n\t"
34697+ "pop %%ds\n\t"
34698 "jc 1f\n\t"
34699 "xor %%ah, %%ah\n"
34700 "1:"
34701@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34702 : "1" (PCIBIOS_READ_CONFIG_WORD),
34703 "b" (bx),
34704 "D" ((long)reg),
34705- "S" (&pci_indirect));
34706+ "S" (&pci_indirect),
34707+ "r" (__PCIBIOS_DS));
34708 /*
34709 * Zero-extend the result beyond 16 bits, do not trust the
34710 * BIOS having done it:
34711@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34712 *value &= 0xffff;
34713 break;
34714 case 4:
34715- __asm__("lcall *(%%esi); cld\n\t"
34716+ __asm__("movw %w6, %%ds\n\t"
34717+ "lcall *%%ss:(%%esi); cld\n\t"
34718+ "push %%ss\n\t"
34719+ "pop %%ds\n\t"
34720 "jc 1f\n\t"
34721 "xor %%ah, %%ah\n"
34722 "1:"
34723@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
34724 : "1" (PCIBIOS_READ_CONFIG_DWORD),
34725 "b" (bx),
34726 "D" ((long)reg),
34727- "S" (&pci_indirect));
34728+ "S" (&pci_indirect),
34729+ "r" (__PCIBIOS_DS));
34730 break;
34731 }
34732
34733@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34734
34735 switch (len) {
34736 case 1:
34737- __asm__("lcall *(%%esi); cld\n\t"
34738+ __asm__("movw %w6, %%ds\n\t"
34739+ "lcall *%%ss:(%%esi); cld\n\t"
34740+ "push %%ss\n\t"
34741+ "pop %%ds\n\t"
34742 "jc 1f\n\t"
34743 "xor %%ah, %%ah\n"
34744 "1:"
34745@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34746 "c" (value),
34747 "b" (bx),
34748 "D" ((long)reg),
34749- "S" (&pci_indirect));
34750+ "S" (&pci_indirect),
34751+ "r" (__PCIBIOS_DS));
34752 break;
34753 case 2:
34754- __asm__("lcall *(%%esi); cld\n\t"
34755+ __asm__("movw %w6, %%ds\n\t"
34756+ "lcall *%%ss:(%%esi); cld\n\t"
34757+ "push %%ss\n\t"
34758+ "pop %%ds\n\t"
34759 "jc 1f\n\t"
34760 "xor %%ah, %%ah\n"
34761 "1:"
34762@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34763 "c" (value),
34764 "b" (bx),
34765 "D" ((long)reg),
34766- "S" (&pci_indirect));
34767+ "S" (&pci_indirect),
34768+ "r" (__PCIBIOS_DS));
34769 break;
34770 case 4:
34771- __asm__("lcall *(%%esi); cld\n\t"
34772+ __asm__("movw %w6, %%ds\n\t"
34773+ "lcall *%%ss:(%%esi); cld\n\t"
34774+ "push %%ss\n\t"
34775+ "pop %%ds\n\t"
34776 "jc 1f\n\t"
34777 "xor %%ah, %%ah\n"
34778 "1:"
34779@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
34780 "c" (value),
34781 "b" (bx),
34782 "D" ((long)reg),
34783- "S" (&pci_indirect));
34784+ "S" (&pci_indirect),
34785+ "r" (__PCIBIOS_DS));
34786 break;
34787 }
34788
34789@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34790
34791 DBG("PCI: Fetching IRQ routing table... ");
34792 __asm__("push %%es\n\t"
34793+ "movw %w8, %%ds\n\t"
34794 "push %%ds\n\t"
34795 "pop %%es\n\t"
34796- "lcall *(%%esi); cld\n\t"
34797+ "lcall *%%ss:(%%esi); cld\n\t"
34798 "pop %%es\n\t"
34799+ "push %%ss\n\t"
34800+ "pop %%ds\n"
34801 "jc 1f\n\t"
34802 "xor %%ah, %%ah\n"
34803 "1:"
34804@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
34805 "1" (0),
34806 "D" ((long) &opt),
34807 "S" (&pci_indirect),
34808- "m" (opt)
34809+ "m" (opt),
34810+ "r" (__PCIBIOS_DS)
34811 : "memory");
34812 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
34813 if (ret & 0xff00)
34814@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34815 {
34816 int ret;
34817
34818- __asm__("lcall *(%%esi); cld\n\t"
34819+ __asm__("movw %w5, %%ds\n\t"
34820+ "lcall *%%ss:(%%esi); cld\n\t"
34821+ "push %%ss\n\t"
34822+ "pop %%ds\n"
34823 "jc 1f\n\t"
34824 "xor %%ah, %%ah\n"
34825 "1:"
34826@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
34827 : "0" (PCIBIOS_SET_PCI_HW_INT),
34828 "b" ((dev->bus->number << 8) | dev->devfn),
34829 "c" ((irq << 8) | (pin + 10)),
34830- "S" (&pci_indirect));
34831+ "S" (&pci_indirect),
34832+ "r" (__PCIBIOS_DS));
34833 return !(ret & 0xff00);
34834 }
34835 EXPORT_SYMBOL(pcibios_set_irq_routing);
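The pcbios.c rework stops calling the 32-bit BIOS through the flat __KERNEL_CS/__KERNEL_DS: dedicated __PCIBIOS_CS/__PCIBIOS_DS descriptors are packed to cover just the BIOS service area, %ds is loaded with the BIOS data selector around each lcall and restored from %ss afterwards, and bios32_service() now validates the returned base/length before installing the descriptors on every CPU. For reference, an illustrative reconstruction of the bit layout the pack_descriptor(&d, base, limit, 0x9B, 0xC) calls fold their arguments into (layout per the Intel SDM; 0x9B/0x93 already include the Present bit; this is not the kernel's own source):

#include <stdint.h>
#include <stdio.h>

static void demo_pack_descriptor(uint32_t d[2], uint32_t base,
				 uint32_t limit, uint8_t type, uint8_t flags)
{
	d[0] = ((base & 0xffffu) << 16) | (limit & 0xffffu);
	d[1] = (base & 0xff000000u) | ((base & 0x00ff0000u) >> 16) |
	       (limit & 0x000f0000u) | ((uint32_t)type << 8) |
	       ((uint32_t)(flags & 0xfu) << 20);
}

int main(void)
{
	uint32_t d[2];

	demo_pack_descriptor(d, 0, 0xFFFFF, 0x9B, 0xC); /* 4G exec, 4K gran */
	printf("descriptor: %08x%08x\n", d[1], d[0]);
	return 0;
}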
34836diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
34837index 40e7cda..c7e6672 100644
34838--- a/arch/x86/platform/efi/efi_32.c
34839+++ b/arch/x86/platform/efi/efi_32.c
34840@@ -61,11 +61,22 @@ void __init efi_call_phys_prolog(void)
34841 {
34842 struct desc_ptr gdt_descr;
34843
34844+#ifdef CONFIG_PAX_KERNEXEC
34845+ struct desc_struct d;
34846+#endif
34847+
34848 local_irq_save(efi_rt_eflags);
34849
34850 load_cr3(initial_page_table);
34851 __flush_tlb_all();
34852
34853+#ifdef CONFIG_PAX_KERNEXEC
34854+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
34855+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34856+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
34857+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34858+#endif
34859+
34860 gdt_descr.address = __pa(get_cpu_gdt_table(0));
34861 gdt_descr.size = GDT_SIZE - 1;
34862 load_gdt(&gdt_descr);
34863@@ -75,11 +86,24 @@ void __init efi_call_phys_epilog(void)
34864 {
34865 struct desc_ptr gdt_descr;
34866
34867+#ifdef CONFIG_PAX_KERNEXEC
34868+ struct desc_struct d;
34869+
34870+ memset(&d, 0, sizeof d);
34871+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
34872+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
34873+#endif
34874+
34875 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
34876 gdt_descr.size = GDT_SIZE - 1;
34877 load_gdt(&gdt_descr);
34878
34879+#ifdef CONFIG_PAX_PER_CPU_PGD
34880+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34881+#else
34882 load_cr3(swapper_pg_dir);
34883+#endif
34884+
34885 __flush_tlb_all();
34886
34887 local_irq_restore(efi_rt_eflags);
34888diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
34889index 17e80d8..9fa6e41 100644
34890--- a/arch/x86/platform/efi/efi_64.c
34891+++ b/arch/x86/platform/efi/efi_64.c
34892@@ -98,6 +98,11 @@ void __init efi_call_phys_prolog(void)
34893 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
34894 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
34895 }
34896+
34897+#ifdef CONFIG_PAX_PER_CPU_PGD
34898+ load_cr3(swapper_pg_dir);
34899+#endif
34900+
34901 __flush_tlb_all();
34902 }
34903
34904@@ -115,6 +120,11 @@ void __init efi_call_phys_epilog(void)
34905 for (pgd = 0; pgd < n_pgds; pgd++)
34906 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
34907 kfree(save_pgd);
34908+
34909+#ifdef CONFIG_PAX_PER_CPU_PGD
34910+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
34911+#endif
34912+
34913 __flush_tlb_all();
34914 local_irq_restore(efi_flags);
34915 early_code_mapping_set_exec(0);
34916@@ -145,8 +155,23 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
34917 unsigned npages;
34918 pgd_t *pgd;
34919
34920- if (efi_enabled(EFI_OLD_MEMMAP))
34921+ if (efi_enabled(EFI_OLD_MEMMAP)) {
34922+ /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
34923+ * able to execute the EFI services.
34924+ */
34925+ if (__supported_pte_mask & _PAGE_NX) {
34926+ unsigned long addr = (unsigned long) __va(0);
34927+ pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);
34928+
34929+ pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
34930+#ifdef CONFIG_PAX_PER_CPU_PGD
34931+ set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
34932+#endif
34933+ set_pgd(pgd_offset_k(addr), pe);
34934+ }
34935+
34936 return 0;
34937+ }
34938
34939 efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
34940 pgd = __va(efi_scratch.efi_pgt);
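With efi=old_map the firmware is entered through the low identity mapping, so the hunk above clears _PAGE_NX in the covering PGD entry, in both the per-CPU and the global PGD, before the services run. The bit operation in isolation, as a runnable toy (on x86-64, NX is bit 63 of a page-table entry):

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_NX	(1ULL << 63)

int main(void)
{
	uint64_t pgd = 0x8000000001000067ULL;	/* NX + present + flags */

	printf("%016llx -> %016llx\n",
	       (unsigned long long)pgd,
	       (unsigned long long)(pgd & ~DEMO_PAGE_NX));	/* executable */
	return 0;
}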
34941diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
34942index 040192b..7d3300f 100644
34943--- a/arch/x86/platform/efi/efi_stub_32.S
34944+++ b/arch/x86/platform/efi/efi_stub_32.S
34945@@ -6,7 +6,9 @@
34946 */
34947
34948 #include <linux/linkage.h>
34949+#include <linux/init.h>
34950 #include <asm/page_types.h>
34951+#include <asm/segment.h>
34952
34953 /*
34954 * efi_call_phys(void *, ...) is a function with variable parameters.
34955@@ -20,7 +22,7 @@
34956 * service functions will comply with gcc calling convention, too.
34957 */
34958
34959-.text
34960+__INIT
34961 ENTRY(efi_call_phys)
34962 /*
34963 * 0. The function can only be called in Linux kernel. So CS has been
34964@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
34965 * The mapping of lower virtual memory has been created in prolog and
34966 * epilog.
34967 */
34968- movl $1f, %edx
34969- subl $__PAGE_OFFSET, %edx
34970- jmp *%edx
34971+#ifdef CONFIG_PAX_KERNEXEC
34972+ movl $(__KERNEXEC_EFI_DS), %edx
34973+ mov %edx, %ds
34974+ mov %edx, %es
34975+ mov %edx, %ss
34976+ addl $2f,(1f)
34977+ ljmp *(1f)
34978+
34979+__INITDATA
34980+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
34981+.previous
34982+
34983+2:
34984+ subl $2b,(1b)
34985+#else
34986+ jmp 1f-__PAGE_OFFSET
34987 1:
34988+#endif
34989
34990 /*
34991 * 2. Now on the top of stack is the return
34992@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
34993 * parameter 2, ..., param n. To make things easy, we save the return
34994 * address of efi_call_phys in a global variable.
34995 */
34996- popl %edx
34997- movl %edx, saved_return_addr
34998- /* get the function pointer into ECX*/
34999- popl %ecx
35000- movl %ecx, efi_rt_function_ptr
35001- movl $2f, %edx
35002- subl $__PAGE_OFFSET, %edx
35003- pushl %edx
35004+ popl (saved_return_addr)
35005+ popl (efi_rt_function_ptr)
35006
35007 /*
35008 * 3. Clear PG bit in %CR0.
35009@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
35010 /*
35011 * 5. Call the physical function.
35012 */
35013- jmp *%ecx
35014+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
35015
35016-2:
35017 /*
35018 * 6. After EFI runtime service returns, control will return to
35019 * following instruction. We'd better readjust stack pointer first.
35020@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
35021 movl %cr0, %edx
35022 orl $0x80000000, %edx
35023 movl %edx, %cr0
35024- jmp 1f
35025-1:
35026+
35027 /*
35028 * 8. Now restore the virtual mode from flat mode by
35029 * adding EIP with PAGE_OFFSET.
35030 */
35031- movl $1f, %edx
35032- jmp *%edx
35033+#ifdef CONFIG_PAX_KERNEXEC
35034+ movl $(__KERNEL_DS), %edx
35035+ mov %edx, %ds
35036+ mov %edx, %es
35037+ mov %edx, %ss
35038+ ljmp $(__KERNEL_CS),$1f
35039+#else
35040+ jmp 1f+__PAGE_OFFSET
35041+#endif
35042 1:
35043
35044 /*
35045 * 9. Balance the stack. And because EAX contain the return value,
35046 * we'd better not clobber it.
35047 */
35048- leal efi_rt_function_ptr, %edx
35049- movl (%edx), %ecx
35050- pushl %ecx
35051+ pushl (efi_rt_function_ptr)
35052
35053 /*
35054- * 10. Push the saved return address onto the stack and return.
35055+ * 10. Return to the saved return address.
35056 */
35057- leal saved_return_addr, %edx
35058- movl (%edx), %ecx
35059- pushl %ecx
35060- ret
35061+ jmpl *(saved_return_addr)
35062 ENDPROC(efi_call_phys)
35063 .previous
35064
35065-.data
35066+__INITDATA
35067 saved_return_addr:
35068 .long 0
35069 efi_rt_function_ptr:
35070diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
35071index 86d0f9e..6d499f4 100644
35072--- a/arch/x86/platform/efi/efi_stub_64.S
35073+++ b/arch/x86/platform/efi/efi_stub_64.S
35074@@ -11,6 +11,7 @@
35075 #include <asm/msr.h>
35076 #include <asm/processor-flags.h>
35077 #include <asm/page_types.h>
35078+#include <asm/alternative-asm.h>
35079
35080 #define SAVE_XMM \
35081 mov %rsp, %rax; \
35082@@ -88,6 +89,7 @@ ENTRY(efi_call)
35083 RESTORE_PGT
35084 addq $48, %rsp
35085 RESTORE_XMM
35086+ pax_force_retaddr 0, 1
35087 ret
35088 ENDPROC(efi_call)
35089
35090diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
35091index 1bbedc4..eb795b5 100644
35092--- a/arch/x86/platform/intel-mid/intel-mid.c
35093+++ b/arch/x86/platform/intel-mid/intel-mid.c
35094@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
35095 {
35096 };
35097
35098-static void intel_mid_reboot(void)
35099+static void __noreturn intel_mid_reboot(void)
35100 {
35101 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
35102+ BUG();
35103 }
35104
35105 static unsigned long __init intel_mid_calibrate_tsc(void)
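intel_mid_reboot() is annotated __noreturn and gains a trailing BUG(): the SCU IPC command can in principle return, and a __noreturn function that falls through invokes undefined behaviour in its caller. The contract in miniature, runnable in userspace:

#include <stdio.h>
#include <stdlib.h>

static void __attribute__((noreturn)) demo_machine_reboot(void)
{
	puts("asking firmware to reboot...");
	/* if the firmware ignores us, never return into the caller */
	abort();
}

int main(void)
{
	demo_machine_reboot();
	/* unreachable: the compiler may rely on that */
}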
35106diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35107index 3c1c386..59a68ed 100644
35108--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35109+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35110@@ -13,6 +13,6 @@
35111 /* For every CPU addition a new get_<cpuname>_ops interface needs
35112 * to be added.
35113 */
35114-extern void *get_penwell_ops(void);
35115-extern void *get_cloverview_ops(void);
35116-extern void *get_tangier_ops(void);
35117+extern const void *get_penwell_ops(void);
35118+extern const void *get_cloverview_ops(void);
35119+extern const void *get_tangier_ops(void);
35120diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
35121index 23381d2..8ddc10e 100644
35122--- a/arch/x86/platform/intel-mid/mfld.c
35123+++ b/arch/x86/platform/intel-mid/mfld.c
35124@@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void)
35125 pm_power_off = mfld_power_off;
35126 }
35127
35128-void *get_penwell_ops(void)
35129+const void *get_penwell_ops(void)
35130 {
35131 return &penwell_ops;
35132 }
35133
35134-void *get_cloverview_ops(void)
35135+const void *get_cloverview_ops(void)
35136 {
35137 return &penwell_ops;
35138 }
35139diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
35140index aaca917..66eadbc 100644
35141--- a/arch/x86/platform/intel-mid/mrfl.c
35142+++ b/arch/x86/platform/intel-mid/mrfl.c
35143@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
35144 .arch_setup = tangier_arch_setup,
35145 };
35146
35147-void *get_tangier_ops(void)
35148+const void *get_tangier_ops(void)
35149 {
35150 return &tangier_ops;
35151 }
35152diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
35153index d6ee929..3637cb5 100644
35154--- a/arch/x86/platform/olpc/olpc_dt.c
35155+++ b/arch/x86/platform/olpc/olpc_dt.c
35156@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
35157 return res;
35158 }
35159
35160-static struct of_pdt_ops prom_olpc_ops __initdata = {
35161+static struct of_pdt_ops prom_olpc_ops __initconst = {
35162 .nextprop = olpc_dt_nextprop,
35163 .getproplen = olpc_dt_getproplen,
35164 .getproperty = olpc_dt_getproperty,
35165diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
35166index 6ec7910..ecdbb11 100644
35167--- a/arch/x86/power/cpu.c
35168+++ b/arch/x86/power/cpu.c
35169@@ -137,11 +137,8 @@ static void do_fpu_end(void)
35170 static void fix_processor_context(void)
35171 {
35172 int cpu = smp_processor_id();
35173- struct tss_struct *t = &per_cpu(init_tss, cpu);
35174-#ifdef CONFIG_X86_64
35175- struct desc_struct *desc = get_cpu_gdt_table(cpu);
35176- tss_desc tss;
35177-#endif
35178+ struct tss_struct *t = init_tss + cpu;
35179+
35180 set_tss_desc(cpu, t); /*
35181 * This just modifies memory; should not be
35182 * necessary. But... This is necessary, because
35183@@ -150,10 +147,6 @@ static void fix_processor_context(void)
35184 */
35185
35186 #ifdef CONFIG_X86_64
35187- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
35188- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
35189- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
35190-
35191 syscall_init(); /* This sets MSR_*STAR and related */
35192 #endif
35193 load_TR_desc(); /* This does ltr */
35194diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
35195index bad628a..a102610 100644
35196--- a/arch/x86/realmode/init.c
35197+++ b/arch/x86/realmode/init.c
35198@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
35199 __va(real_mode_header->trampoline_header);
35200
35201 #ifdef CONFIG_X86_32
35202- trampoline_header->start = __pa_symbol(startup_32_smp);
35203+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
35204+
35205+#ifdef CONFIG_PAX_KERNEXEC
35206+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
35207+#endif
35208+
35209+ trampoline_header->boot_cs = __BOOT_CS;
35210 trampoline_header->gdt_limit = __BOOT_DS + 7;
35211 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
35212 #else
35213@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
35214 *trampoline_cr4_features = read_cr4();
35215
35216 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
35217- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
35218+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
35219 trampoline_pgd[511] = init_level4_pgt[511].pgd;
35220 #endif
35221 }
35222diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
35223index 7c0d7be..d24dc88 100644
35224--- a/arch/x86/realmode/rm/Makefile
35225+++ b/arch/x86/realmode/rm/Makefile
35226@@ -67,5 +67,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
35227
35228 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
35229 -I$(srctree)/arch/x86/boot
35230+ifdef CONSTIFY_PLUGIN
35231+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
35232+endif
35233 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
35234 GCOV_PROFILE := n
35235diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
35236index a28221d..93c40f1 100644
35237--- a/arch/x86/realmode/rm/header.S
35238+++ b/arch/x86/realmode/rm/header.S
35239@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
35240 #endif
35241 /* APM/BIOS reboot */
35242 .long pa_machine_real_restart_asm
35243-#ifdef CONFIG_X86_64
35244+#ifdef CONFIG_X86_32
35245+ .long __KERNEL_CS
35246+#else
35247 .long __KERNEL32_CS
35248 #endif
35249 END(real_mode_header)
35250diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
35251index 48ddd76..c26749f 100644
35252--- a/arch/x86/realmode/rm/trampoline_32.S
35253+++ b/arch/x86/realmode/rm/trampoline_32.S
35254@@ -24,6 +24,12 @@
35255 #include <asm/page_types.h>
35256 #include "realmode.h"
35257
35258+#ifdef CONFIG_PAX_KERNEXEC
35259+#define ta(X) (X)
35260+#else
35261+#define ta(X) (pa_ ## X)
35262+#endif
35263+
35264 .text
35265 .code16
35266
35267@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
35268
35269 cli # We should be safe anyway
35270
35271- movl tr_start, %eax # where we need to go
35272-
35273 movl $0xA5A5A5A5, trampoline_status
35274 # write marker for master knows we're running
35275
35276@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
35277 movw $1, %dx # protected mode (PE) bit
35278 lmsw %dx # into protected mode
35279
35280- ljmpl $__BOOT_CS, $pa_startup_32
35281+ ljmpl *(trampoline_header)
35282
35283 .section ".text32","ax"
35284 .code32
35285@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
35286 .balign 8
35287 GLOBAL(trampoline_header)
35288 tr_start: .space 4
35289- tr_gdt_pad: .space 2
35290+ tr_boot_cs: .space 2
35291 tr_gdt: .space 6
35292 END(trampoline_header)
35293
35294diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
35295index dac7b20..72dbaca 100644
35296--- a/arch/x86/realmode/rm/trampoline_64.S
35297+++ b/arch/x86/realmode/rm/trampoline_64.S
35298@@ -93,6 +93,7 @@ ENTRY(startup_32)
35299 movl %edx, %gs
35300
35301 movl pa_tr_cr4, %eax
35302+ andl $~X86_CR4_PCIDE, %eax
35303 movl %eax, %cr4 # Enable PAE mode
35304
35305 # Setup trampoline 4 level pagetables
35306@@ -106,7 +107,7 @@ ENTRY(startup_32)
35307 wrmsr
35308
35309 # Enable paging and in turn activate Long Mode
35310- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
35311+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
35312 movl %eax, %cr0
35313
35314 /*
35315diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
35316index 9e7e147..25a4158 100644
35317--- a/arch/x86/realmode/rm/wakeup_asm.S
35318+++ b/arch/x86/realmode/rm/wakeup_asm.S
35319@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
35320 lgdtl pmode_gdt
35321
35322 /* This really couldn't... */
35323- movl pmode_entry, %eax
35324 movl pmode_cr0, %ecx
35325 movl %ecx, %cr0
35326- ljmpl $__KERNEL_CS, $pa_startup_32
35327- /* -> jmp *%eax in trampoline_32.S */
35328+
35329+ ljmpl *pmode_entry
35330 #else
35331 jmp trampoline_start
35332 #endif
35333diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
35334index 604a37e..e49702a 100644
35335--- a/arch/x86/tools/Makefile
35336+++ b/arch/x86/tools/Makefile
35337@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
35338
35339 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
35340
35341-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
35342+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
35343 hostprogs-y += relocs
35344 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
35345 PHONY += relocs
35346diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
35347index 0c2fae8..88036b7 100644
35348--- a/arch/x86/tools/relocs.c
35349+++ b/arch/x86/tools/relocs.c
35350@@ -1,5 +1,7 @@
35351 /* This is included from relocs_32/64.c */
35352
35353+#include "../../../include/generated/autoconf.h"
35354+
35355 #define ElfW(type) _ElfW(ELF_BITS, type)
35356 #define _ElfW(bits, type) __ElfW(bits, type)
35357 #define __ElfW(bits, type) Elf##bits##_##type
35358@@ -11,6 +13,7 @@
35359 #define Elf_Sym ElfW(Sym)
35360
35361 static Elf_Ehdr ehdr;
35362+static Elf_Phdr *phdr;
35363
35364 struct relocs {
35365 uint32_t *offset;
35366@@ -386,9 +389,39 @@ static void read_ehdr(FILE *fp)
35367 }
35368 }
35369
35370+static void read_phdrs(FILE *fp)
35371+{
35372+ unsigned int i;
35373+
35374+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
35375+ if (!phdr) {
35376+ die("Unable to allocate %d program headers\n",
35377+ ehdr.e_phnum);
35378+ }
35379+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
35380+ die("Seek to %d failed: %s\n",
35381+ ehdr.e_phoff, strerror(errno));
35382+ }
35383+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
35384+ die("Cannot read ELF program headers: %s\n",
35385+ strerror(errno));
35386+ }
35387+ for(i = 0; i < ehdr.e_phnum; i++) {
35388+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
35389+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
35390+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
35391+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
35392+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
35393+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
35394+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
35395+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
35396+ }
35397+
35398+}
35399+
35400 static void read_shdrs(FILE *fp)
35401 {
35402- int i;
35403+ unsigned int i;
35404 Elf_Shdr shdr;
35405
35406 secs = calloc(ehdr.e_shnum, sizeof(struct section));
35407@@ -423,7 +456,7 @@ static void read_shdrs(FILE *fp)
35408
35409 static void read_strtabs(FILE *fp)
35410 {
35411- int i;
35412+ unsigned int i;
35413 for (i = 0; i < ehdr.e_shnum; i++) {
35414 struct section *sec = &secs[i];
35415 if (sec->shdr.sh_type != SHT_STRTAB) {
35416@@ -448,7 +481,7 @@ static void read_strtabs(FILE *fp)
35417
35418 static void read_symtabs(FILE *fp)
35419 {
35420- int i,j;
35421+ unsigned int i,j;
35422 for (i = 0; i < ehdr.e_shnum; i++) {
35423 struct section *sec = &secs[i];
35424 if (sec->shdr.sh_type != SHT_SYMTAB) {
35425@@ -479,9 +512,11 @@ static void read_symtabs(FILE *fp)
35426 }
35427
35428
35429-static void read_relocs(FILE *fp)
35430+static void read_relocs(FILE *fp, int use_real_mode)
35431 {
35432- int i,j;
35433+ unsigned int i,j;
35434+ uint32_t base;
35435+
35436 for (i = 0; i < ehdr.e_shnum; i++) {
35437 struct section *sec = &secs[i];
35438 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35439@@ -501,9 +536,22 @@ static void read_relocs(FILE *fp)
35440 die("Cannot read symbol table: %s\n",
35441 strerror(errno));
35442 }
35443+ base = 0;
35444+
35445+#ifdef CONFIG_X86_32
35446+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
35447+ if (phdr[j].p_type != PT_LOAD )
35448+ continue;
35449+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
35450+ continue;
35451+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
35452+ break;
35453+ }
35454+#endif
35455+
35456 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
35457 Elf_Rel *rel = &sec->reltab[j];
35458- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
35459+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
35460 rel->r_info = elf_xword_to_cpu(rel->r_info);
35461 #if (SHT_REL_TYPE == SHT_RELA)
35462 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
35463@@ -515,7 +563,7 @@ static void read_relocs(FILE *fp)
35464
35465 static void print_absolute_symbols(void)
35466 {
35467- int i;
35468+ unsigned int i;
35469 const char *format;
35470
35471 if (ELF_BITS == 64)
35472@@ -528,7 +576,7 @@ static void print_absolute_symbols(void)
35473 for (i = 0; i < ehdr.e_shnum; i++) {
35474 struct section *sec = &secs[i];
35475 char *sym_strtab;
35476- int j;
35477+ unsigned int j;
35478
35479 if (sec->shdr.sh_type != SHT_SYMTAB) {
35480 continue;
35481@@ -555,7 +603,7 @@ static void print_absolute_symbols(void)
35482
35483 static void print_absolute_relocs(void)
35484 {
35485- int i, printed = 0;
35486+ unsigned int i, printed = 0;
35487 const char *format;
35488
35489 if (ELF_BITS == 64)
35490@@ -568,7 +616,7 @@ static void print_absolute_relocs(void)
35491 struct section *sec_applies, *sec_symtab;
35492 char *sym_strtab;
35493 Elf_Sym *sh_symtab;
35494- int j;
35495+ unsigned int j;
35496 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35497 continue;
35498 }
35499@@ -645,13 +693,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
35500 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
35501 Elf_Sym *sym, const char *symname))
35502 {
35503- int i;
35504+ unsigned int i;
35505 /* Walk through the relocations */
35506 for (i = 0; i < ehdr.e_shnum; i++) {
35507 char *sym_strtab;
35508 Elf_Sym *sh_symtab;
35509 struct section *sec_applies, *sec_symtab;
35510- int j;
35511+ unsigned int j;
35512 struct section *sec = &secs[i];
35513
35514 if (sec->shdr.sh_type != SHT_REL_TYPE) {
35515@@ -830,6 +878,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35516 {
35517 unsigned r_type = ELF32_R_TYPE(rel->r_info);
35518 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
35519+ char *sym_strtab = sec->link->link->strtab;
35520+
35521+	/* Don't relocate actual per-cpu variables; they are absolute indices, not addresses */
35522+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
35523+ return 0;
35524+
35525+#ifdef CONFIG_PAX_KERNEXEC
35526+	/* Don't relocate actual code; it is relocated implicitly by the base address of KERNEL_CS */
35527+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
35528+ return 0;
35529+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
35530+ return 0;
35531+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
35532+ return 0;
35533+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
35534+ return 0;
35535+#endif
35536
35537 switch (r_type) {
35538 case R_386_NONE:
35539@@ -968,7 +1033,7 @@ static int write32_as_text(uint32_t v, FILE *f)
35540
35541 static void emit_relocs(int as_text, int use_real_mode)
35542 {
35543- int i;
35544+ unsigned int i;
35545 int (*write_reloc)(uint32_t, FILE *) = write32;
35546 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
35547 const char *symname);
35548@@ -1078,10 +1143,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
35549 {
35550 regex_init(use_real_mode);
35551 read_ehdr(fp);
35552+ read_phdrs(fp);
35553 read_shdrs(fp);
35554 read_strtabs(fp);
35555 read_symtabs(fp);
35556- read_relocs(fp);
35557+ read_relocs(fp, use_real_mode);
35558 if (ELF_BITS == 64)
35559 percpu_init();
35560 if (show_absolute_syms) {
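(The read_phdrs()/base logic added above boils down to: find the PT_LOAD segment whose file extent covers the relocation's section, and bias the relocation by where that segment actually lands, i.e. CONFIG_PAGE_OFFSET plus the physical/virtual delta. A minimal user-space sketch of that computation, with PAGE_OFFSET as a stand-in for the config value:)

#include <elf.h>
#include <stdint.h>

#define PAGE_OFFSET 0xc0000000UL	/* stand-in for CONFIG_PAGE_OFFSET */

/* Bias for a section at file offset 'off': the PT_LOAD segment that
 * contains it determines where the kernel will actually run. */
static uint32_t reloc_base(const Elf32_Phdr *ph, unsigned int n, uint32_t off)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (ph[i].p_type != PT_LOAD)
			continue;
		if (off < ph[i].p_offset ||
		    off >= ph[i].p_offset + ph[i].p_filesz)
			continue;
		return PAGE_OFFSET + ph[i].p_paddr - ph[i].p_vaddr;
	}
	return 0;	/* not in a loadable segment: leave offsets alone */
}
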
35561diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
35562index f40281e..92728c9 100644
35563--- a/arch/x86/um/mem_32.c
35564+++ b/arch/x86/um/mem_32.c
35565@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
35566 gate_vma.vm_start = FIXADDR_USER_START;
35567 gate_vma.vm_end = FIXADDR_USER_END;
35568 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
35569- gate_vma.vm_page_prot = __P101;
35570+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
35571
35572 return 0;
35573 }
35574diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
35575index 80ffa5b..a33bd15 100644
35576--- a/arch/x86/um/tls_32.c
35577+++ b/arch/x86/um/tls_32.c
35578@@ -260,7 +260,7 @@ out:
35579 if (unlikely(task == current &&
35580 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
35581 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
35582- "without flushed TLS.", current->pid);
35583+ "without flushed TLS.", task_pid_nr(current));
35584 }
35585
35586 return 0;
35587diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
35588index 5a4affe..9e2d522 100644
35589--- a/arch/x86/vdso/Makefile
35590+++ b/arch/x86/vdso/Makefile
35591@@ -174,7 +174,7 @@ quiet_cmd_vdso = VDSO $@
35592 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
35593 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
35594
35595-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35596+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
35597 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
35598 GCOV_PROFILE := n
35599
35600diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
35601index 0224987..c7d65a5 100644
35602--- a/arch/x86/vdso/vdso2c.h
35603+++ b/arch/x86/vdso/vdso2c.h
35604@@ -12,7 +12,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
35605 unsigned long load_size = -1; /* Work around bogus warning */
35606 unsigned long mapping_size;
35607 ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
35608- int i;
35609+ unsigned int i;
35610 unsigned long j;
35611 ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
35612 *alt_sec = NULL;
35613diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
35614index e904c27..b9eaa03 100644
35615--- a/arch/x86/vdso/vdso32-setup.c
35616+++ b/arch/x86/vdso/vdso32-setup.c
35617@@ -14,6 +14,7 @@
35618 #include <asm/cpufeature.h>
35619 #include <asm/processor.h>
35620 #include <asm/vdso.h>
35621+#include <asm/mman.h>
35622
35623 #ifdef CONFIG_COMPAT_VDSO
35624 #define VDSO_DEFAULT 0
35625diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
35626index 1c9f750..cfddb1a 100644
35627--- a/arch/x86/vdso/vma.c
35628+++ b/arch/x86/vdso/vma.c
35629@@ -19,10 +19,7 @@
35630 #include <asm/page.h>
35631 #include <asm/hpet.h>
35632 #include <asm/desc.h>
35633-
35634-#if defined(CONFIG_X86_64)
35635-unsigned int __read_mostly vdso64_enabled = 1;
35636-#endif
35637+#include <asm/mman.h>
35638
35639 void __init init_vdso_image(const struct vdso_image *image)
35640 {
35641@@ -101,6 +98,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35642 .pages = no_pages,
35643 };
35644
35645+#ifdef CONFIG_PAX_RANDMMAP
35646+ if (mm->pax_flags & MF_PAX_RANDMMAP)
35647+ calculate_addr = false;
35648+#endif
35649+
35650 if (calculate_addr) {
35651 addr = vdso_addr(current->mm->start_stack,
35652 image->size - image->sym_vvar_start);
35653@@ -111,14 +113,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35654 down_write(&mm->mmap_sem);
35655
35656 addr = get_unmapped_area(NULL, addr,
35657- image->size - image->sym_vvar_start, 0, 0);
35658+ image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
35659 if (IS_ERR_VALUE(addr)) {
35660 ret = addr;
35661 goto up_fail;
35662 }
35663
35664 text_start = addr - image->sym_vvar_start;
35665- current->mm->context.vdso = (void __user *)text_start;
35666+ mm->context.vdso = text_start;
35667
35668 /*
35669 * MAYWRITE to allow gdb to COW and set breakpoints
35670@@ -163,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
35671 hpet_address >> PAGE_SHIFT,
35672 PAGE_SIZE,
35673 pgprot_noncached(PAGE_READONLY));
35674-
35675- if (ret)
35676- goto up_fail;
35677 }
35678 #endif
35679
35680 up_fail:
35681 if (ret)
35682- current->mm->context.vdso = NULL;
35683+ current->mm->context.vdso = 0;
35684
35685 up_write(&mm->mmap_sem);
35686 return ret;
35687@@ -191,8 +190,8 @@ static int load_vdso32(void)
35688
35689 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
35690 current_thread_info()->sysenter_return =
35691- current->mm->context.vdso +
35692- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
35693+ (void __force_user *)(current->mm->context.vdso +
35694+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
35695
35696 return 0;
35697 }
35698@@ -201,9 +200,6 @@ static int load_vdso32(void)
35699 #ifdef CONFIG_X86_64
35700 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35701 {
35702- if (!vdso64_enabled)
35703- return 0;
35704-
35705 return map_vdso(&vdso_image_64, true);
35706 }
35707
35708@@ -212,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
35709 int uses_interp)
35710 {
35711 #ifdef CONFIG_X86_X32_ABI
35712- if (test_thread_flag(TIF_X32)) {
35713- if (!vdso64_enabled)
35714- return 0;
35715-
35716+ if (test_thread_flag(TIF_X32))
35717 return map_vdso(&vdso_image_x32, true);
35718- }
35719 #endif
35720
35721 return load_vdso32();
35722@@ -231,15 +223,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
35723 #endif
35724
35725 #ifdef CONFIG_X86_64
35726-static __init int vdso_setup(char *s)
35727-{
35728- vdso64_enabled = simple_strtoul(s, NULL, 0);
35729- return 0;
35730-}
35731-__setup("vdso=", vdso_setup);
35732-#endif
35733-
35734-#ifdef CONFIG_X86_64
35735 static void vgetcpu_cpu_init(void *arg)
35736 {
35737 int cpu = smp_processor_id();
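(One subtlety in the map_vdso() hunks above: image->sym_vvar_start is negative because the vvar area precedes the vdso text, so "image->size - image->sym_vvar_start" is the total mapping length and "addr - sym_vvar_start" moves forward to the text start. A toy calculation with invented numbers:)

#include <stdio.h>

int main(void)
{
	const long PAGE = 4096;
	long sym_vvar_start = -2 * PAGE;	/* vvar pages sit before the text */
	long image_size = 2 * PAGE;		/* size of the vdso text image    */
	unsigned long addr = 0x7f0000000000UL;	/* pretend mmap placement result  */

	long map_len = image_size - sym_vvar_start;	  /* 4 pages total   */
	unsigned long text_start = addr - sym_vvar_start; /* text after vvar */

	printf("map %ld bytes, text at %#lx\n", map_len, text_start);
	return 0;
}
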
35738diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
35739index e88fda8..76ce7ce 100644
35740--- a/arch/x86/xen/Kconfig
35741+++ b/arch/x86/xen/Kconfig
35742@@ -9,6 +9,7 @@ config XEN
35743 select XEN_HAVE_PVMMU
35744 depends on X86_64 || (X86_32 && X86_PAE)
35745 depends on X86_TSC
35746+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
35747 help
35748 This is the Linux Xen port. Enabling this will allow the
35749 kernel to boot in a paravirtualized environment under the
35750diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
35751index 78a881b..9994bbb 100644
35752--- a/arch/x86/xen/enlighten.c
35753+++ b/arch/x86/xen/enlighten.c
35754@@ -125,8 +125,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
35755
35756 struct shared_info xen_dummy_shared_info;
35757
35758-void *xen_initial_gdt;
35759-
35760 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
35761 __read_mostly int xen_have_vector_callback;
35762 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
35763@@ -544,8 +542,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
35764 {
35765 unsigned long va = dtr->address;
35766 unsigned int size = dtr->size + 1;
35767- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35768- unsigned long frames[pages];
35769+ unsigned long frames[65536 / PAGE_SIZE];
35770 int f;
35771
35772 /*
35773@@ -593,8 +590,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35774 {
35775 unsigned long va = dtr->address;
35776 unsigned int size = dtr->size + 1;
35777- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
35778- unsigned long frames[pages];
35779+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
35780 int f;
35781
35782 /*
35783@@ -602,7 +598,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
35784 * 8-byte entries, or 16 4k pages..
35785 */
35786
35787- BUG_ON(size > 65536);
35788+ BUG_ON(size > GDT_SIZE);
35789 BUG_ON(va & ~PAGE_MASK);
35790
35791 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
35792@@ -991,7 +987,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
35793 return 0;
35794 }
35795
35796-static void set_xen_basic_apic_ops(void)
35797+static void __init set_xen_basic_apic_ops(void)
35798 {
35799 apic->read = xen_apic_read;
35800 apic->write = xen_apic_write;
35801@@ -1291,30 +1287,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
35802 #endif
35803 };
35804
35805-static void xen_reboot(int reason)
35806+static __noreturn void xen_reboot(int reason)
35807 {
35808 struct sched_shutdown r = { .reason = reason };
35809
35810- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
35811- BUG();
35812+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
35813+ BUG();
35814 }
35815
35816-static void xen_restart(char *msg)
35817+static __noreturn void xen_restart(char *msg)
35818 {
35819 xen_reboot(SHUTDOWN_reboot);
35820 }
35821
35822-static void xen_emergency_restart(void)
35823+static __noreturn void xen_emergency_restart(void)
35824 {
35825 xen_reboot(SHUTDOWN_reboot);
35826 }
35827
35828-static void xen_machine_halt(void)
35829+static __noreturn void xen_machine_halt(void)
35830 {
35831 xen_reboot(SHUTDOWN_poweroff);
35832 }
35833
35834-static void xen_machine_power_off(void)
35835+static __noreturn void xen_machine_power_off(void)
35836 {
35837 if (pm_power_off)
35838 pm_power_off();
35839@@ -1467,8 +1463,11 @@ static void __ref xen_setup_gdt(int cpu)
35840 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
35841 pv_cpu_ops.load_gdt = xen_load_gdt_boot;
35842
35843- setup_stack_canary_segment(0);
35844- switch_to_new_gdt(0);
35845+ setup_stack_canary_segment(cpu);
35846+#ifdef CONFIG_X86_64
35847+ load_percpu_segment(cpu);
35848+#endif
35849+ switch_to_new_gdt(cpu);
35850
35851 pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
35852 pv_cpu_ops.load_gdt = xen_load_gdt;
35853@@ -1583,7 +1582,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
35854 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
35855
35856 /* Work out if we support NX */
35857- x86_configure_nx();
35858+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
35859+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
35860+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
35861+ unsigned l, h;
35862+
35863+ __supported_pte_mask |= _PAGE_NX;
35864+ rdmsr(MSR_EFER, l, h);
35865+ l |= EFER_NX;
35866+ wrmsr(MSR_EFER, l, h);
35867+ }
35868+#endif
35869
35870 /* Get mfn list */
35871 xen_build_dynamic_phys_to_machine();
35872@@ -1611,13 +1620,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
35873
35874 machine_ops = xen_machine_ops;
35875
35876- /*
35877- * The only reliable way to retain the initial address of the
35878- * percpu gdt_page is to remember it here, so we can go and
35879- * mark it RW later, when the initial percpu area is freed.
35880- */
35881- xen_initial_gdt = &per_cpu(gdt_page, 0);
35882-
35883 xen_smp_init();
35884
35885 #ifdef CONFIG_ACPI_NUMA
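(The open-coded NX setup that replaces x86_configure_nx() above performs the classic two-step check: confirm the extended CPUID range exists and that leaf 0x80000001 advertises NX, then set EFER.NXE. A reduced sketch of the detection half, with raw CPUID values passed in for illustration:)

#include <stdint.h>

#define MSR_EFER 0xc0000080u
#define EFER_NX  (1ull << 11)	/* EFER.NXE: enable no-execute pages */

static int cpu_has_nx(uint32_t ext_eax, uint32_t ext_edx)
{
	/* leaf 0x80000000 must report an 0x8000xxxx maximum leaf ... */
	if ((ext_eax & 0xffff0000) != 0x80000000)
		return 0;
	/* ... and leaf 0x80000001 EDX bit 20 is NX (X86_FEATURE_NX & 31) */
	return !!(ext_edx & (1u << 20));
}
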
35886diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
35887index 5c1f9ac..0e15f5c 100644
35888--- a/arch/x86/xen/mmu.c
35889+++ b/arch/x86/xen/mmu.c
35890@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
35891 return val;
35892 }
35893
35894-static pteval_t pte_pfn_to_mfn(pteval_t val)
35895+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
35896 {
35897 if (val & _PAGE_PRESENT) {
35898 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
35899@@ -1836,7 +1836,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35900 * L3_k[511] -> level2_fixmap_pgt */
35901 convert_pfn_mfn(level3_kernel_pgt);
35902
35903+ convert_pfn_mfn(level3_vmalloc_start_pgt);
35904+ convert_pfn_mfn(level3_vmalloc_end_pgt);
35905+ convert_pfn_mfn(level3_vmemmap_pgt);
35906 /* L3_k[511][506] -> level1_fixmap_pgt */
35907+ /* L3_k[511][507] -> level1_vsyscall_pgt */
35908 convert_pfn_mfn(level2_fixmap_pgt);
35909 }
35910 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
35911@@ -1861,11 +1865,16 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
35912 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
35913 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
35914 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
35915+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
35916+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
35917+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
35918 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
35919 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
35920+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
35921 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
35922 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
35923 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
35924+ set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO);
35925
35926 /* Pin down new L4 */
35927 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
35928@@ -2049,6 +2058,7 @@ static void __init xen_post_allocator_init(void)
35929 pv_mmu_ops.set_pud = xen_set_pud;
35930 #if PAGETABLE_LEVELS == 4
35931 pv_mmu_ops.set_pgd = xen_set_pgd;
35932+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
35933 #endif
35934
35935 /* This will work as long as patching hasn't happened yet
35936@@ -2127,6 +2137,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
35937 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
35938 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
35939 .set_pgd = xen_set_pgd_hyper,
35940+ .set_pgd_batched = xen_set_pgd_hyper,
35941
35942 .alloc_pud = xen_alloc_pmd_init,
35943 .release_pud = xen_release_pmd_init,
35944diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
35945index 4c071ae..00e7049 100644
35946--- a/arch/x86/xen/smp.c
35947+++ b/arch/x86/xen/smp.c
35948@@ -288,17 +288,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
35949
35950 if (xen_pv_domain()) {
35951 if (!xen_feature(XENFEAT_writable_page_tables))
35952- /* We've switched to the "real" per-cpu gdt, so make
35953- * sure the old memory can be recycled. */
35954- make_lowmem_page_readwrite(xen_initial_gdt);
35955-
35956 #ifdef CONFIG_X86_32
35957 /*
35958 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
35959 * expects __USER_DS
35960 */
35961- loadsegment(ds, __USER_DS);
35962- loadsegment(es, __USER_DS);
35963+ loadsegment(ds, __KERNEL_DS);
35964+ loadsegment(es, __KERNEL_DS);
35965 #endif
35966
35967 xen_filter_cpu_maps();
35968@@ -379,7 +375,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35969 #ifdef CONFIG_X86_32
35970 /* Note: PVH is not yet supported on x86_32. */
35971 ctxt->user_regs.fs = __KERNEL_PERCPU;
35972- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
35973+ savesegment(gs, ctxt->user_regs.gs);
35974 #endif
35975 memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
35976
35977@@ -387,8 +383,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
35978 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
35979 ctxt->flags = VGCF_IN_KERNEL;
35980 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
35981- ctxt->user_regs.ds = __USER_DS;
35982- ctxt->user_regs.es = __USER_DS;
35983+ ctxt->user_regs.ds = __KERNEL_DS;
35984+ ctxt->user_regs.es = __KERNEL_DS;
35985 ctxt->user_regs.ss = __KERNEL_DS;
35986
35987 xen_copy_trap_info(ctxt->trap_ctxt);
35988@@ -446,14 +442,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
35989 int rc;
35990
35991 per_cpu(current_task, cpu) = idle;
35992+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
35993 #ifdef CONFIG_X86_32
35994 irq_ctx_init(cpu);
35995 #else
35996 clear_tsk_thread_flag(idle, TIF_FORK);
35997 #endif
35998- per_cpu(kernel_stack, cpu) =
35999- (unsigned long)task_stack_page(idle) -
36000- KERNEL_STACK_OFFSET + THREAD_SIZE;
36001+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
36002
36003 xen_setup_runstate_info(cpu);
36004 xen_setup_timer(cpu);
36005@@ -732,7 +727,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
36006
36007 void __init xen_smp_init(void)
36008 {
36009- smp_ops = xen_smp_ops;
36010+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
36011 xen_fill_possible_map();
36012 }
36013
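(The memcpy() through a cast in xen_smp_init() above is the usual constification workaround in this patch set: smp_ops becomes const-qualified, so a plain structure assignment no longer compiles. A reduced sketch of the pattern, with invented names, assuming the object is still writable this early in boot:)

#include <string.h>

struct ops { void (*fn)(void); };

static const struct ops ops;	/* constified: direct assignment is an error */

static void install(const struct ops *src)
{
	/* 'ops = *src;' would be rejected against a const object, so the
	 * qualifier is stripped just for the copy */
	memcpy((void *)&ops, src, sizeof(ops));
}
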
36014diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
36015index fd92a64..1f72641 100644
36016--- a/arch/x86/xen/xen-asm_32.S
36017+++ b/arch/x86/xen/xen-asm_32.S
36018@@ -99,7 +99,7 @@ ENTRY(xen_iret)
36019 pushw %fs
36020 movl $(__KERNEL_PERCPU), %eax
36021 movl %eax, %fs
36022- movl %fs:xen_vcpu, %eax
36023+ mov PER_CPU_VAR(xen_vcpu), %eax
36024 POP_FS
36025 #else
36026 movl %ss:xen_vcpu, %eax
36027diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
36028index 674b2225..f1f5dc1 100644
36029--- a/arch/x86/xen/xen-head.S
36030+++ b/arch/x86/xen/xen-head.S
36031@@ -39,6 +39,17 @@ ENTRY(startup_xen)
36032 #ifdef CONFIG_X86_32
36033 mov %esi,xen_start_info
36034 mov $init_thread_union+THREAD_SIZE,%esp
36035+#ifdef CONFIG_SMP
36036+ movl $cpu_gdt_table,%edi
36037+ movl $__per_cpu_load,%eax
36038+ movw %ax,__KERNEL_PERCPU + 2(%edi)
36039+ rorl $16,%eax
36040+ movb %al,__KERNEL_PERCPU + 4(%edi)
36041+ movb %ah,__KERNEL_PERCPU + 7(%edi)
36042+ movl $__per_cpu_end - 1,%eax
36043+ subl $__per_cpu_start,%eax
36044+ movw %ax,__KERNEL_PERCPU + 0(%edi)
36045+#endif
36046 #else
36047 mov %rsi,xen_start_info
36048 mov $init_thread_union+THREAD_SIZE,%rsp
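(The assembly block added to startup_xen hand-patches the __KERNEL_PERCPU descriptor in cpu_gdt_table: base = __per_cpu_load, limit = per-cpu area size minus one. An x86 segment descriptor scatters those fields across its 8 bytes, which the movw/movb/rorl sequence implements; the same layout written in C:)

static void set_desc(unsigned char *d, unsigned int base, unsigned int limit)
{
	d[0] = limit & 0xff;		/* limit  7:0   (movw +0) */
	d[1] = (limit >> 8) & 0xff;	/* limit 15:8             */
	d[2] = base & 0xff;		/* base   7:0   (movw +2) */
	d[3] = (base >> 8) & 0xff;	/* base  15:8             */
	d[4] = (base >> 16) & 0xff;	/* base  23:16  (movb +4) */
	d[7] = (base >> 24) & 0xff;	/* base  31:24  (movb +7) */
}
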
36049diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
36050index 5686bd9..0c8b6ee 100644
36051--- a/arch/x86/xen/xen-ops.h
36052+++ b/arch/x86/xen/xen-ops.h
36053@@ -10,8 +10,6 @@
36054 extern const char xen_hypervisor_callback[];
36055 extern const char xen_failsafe_callback[];
36056
36057-extern void *xen_initial_gdt;
36058-
36059 struct trap_info;
36060 void xen_copy_trap_info(struct trap_info *traps);
36061
36062diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
36063index 525bd3d..ef888b1 100644
36064--- a/arch/xtensa/variants/dc232b/include/variant/core.h
36065+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
36066@@ -119,9 +119,9 @@
36067 ----------------------------------------------------------------------*/
36068
36069 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
36070-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
36071 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
36072 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
36073+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36074
36075 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
36076 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
36077diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
36078index 2f33760..835e50a 100644
36079--- a/arch/xtensa/variants/fsf/include/variant/core.h
36080+++ b/arch/xtensa/variants/fsf/include/variant/core.h
36081@@ -11,6 +11,7 @@
36082 #ifndef _XTENSA_CORE_H
36083 #define _XTENSA_CORE_H
36084
36085+#include <linux/const.h>
36086
36087 /****************************************************************************
36088 Parameters Useful for Any Code, USER or PRIVILEGED
36089@@ -112,9 +113,9 @@
36090 ----------------------------------------------------------------------*/
36091
36092 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
36093-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
36094 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
36095 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
36096+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36097
36098 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
36099 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
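(Both xtensa hunks redefine XCHAL_DCACHE_LINESIZE as (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) so the constant keeps its type in C yet stays usable from assembly, where a UL suffix would be a syntax error. _AC() from include/uapi/linux/const.h handles exactly that split:)

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler: drop the suffix */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* C: paste it on, e.g. 1UL   */
#endif

/* so XCHAL_DCACHE_LINESIZE expands to (1UL << 5) == 32 in C code */
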
36100diff --git a/block/bio.c b/block/bio.c
36101index 471d738..bd3da0d 100644
36102--- a/block/bio.c
36103+++ b/block/bio.c
36104@@ -1169,7 +1169,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
36105 /*
36106 * Overflow, abort
36107 */
36108- if (end < start)
36109+ if (end < start || end - start > INT_MAX - nr_pages)
36110 return ERR_PTR(-EINVAL);
36111
36112 nr_pages += end - start;
36113@@ -1303,7 +1303,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
36114 /*
36115 * Overflow, abort
36116 */
36117- if (end < start)
36118+ if (end < start || end - start > INT_MAX - nr_pages)
36119 return ERR_PTR(-EINVAL);
36120
36121 nr_pages += end - start;
36122@@ -1565,7 +1565,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
36123 const int read = bio_data_dir(bio) == READ;
36124 struct bio_map_data *bmd = bio->bi_private;
36125 int i;
36126- char *p = bmd->sgvecs[0].iov_base;
36127+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
36128
36129 bio_for_each_segment_all(bvec, bio, i) {
36130 char *addr = page_address(bvec->bv_page);
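(Both bio.c hunks harden the same idiom: nr_pages is an int accumulator, and "nr_pages += end - start" with user-influenced start/end can wrap past INT_MAX even when end >= start, so the added clause rejects that case up front. The shape of the check, isolated:)

#include <limits.h>

/* returns the new count, or -1 when the addition would overflow int */
static int add_pages_checked(int nr_pages, unsigned long start,
			     unsigned long end)
{
	if (end < start ||
	    end - start > (unsigned long)(INT_MAX - nr_pages))
		return -1;	/* caller maps this to -EINVAL */
	return nr_pages + (int)(end - start);
}
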
36131diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
36132index 0736729..2ec3b48 100644
36133--- a/block/blk-iopoll.c
36134+++ b/block/blk-iopoll.c
36135@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
36136 }
36137 EXPORT_SYMBOL(blk_iopoll_complete);
36138
36139-static void blk_iopoll_softirq(struct softirq_action *h)
36140+static __latent_entropy void blk_iopoll_softirq(void)
36141 {
36142 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
36143 int rearm = 0, budget = blk_iopoll_budget;
36144diff --git a/block/blk-map.c b/block/blk-map.c
36145index f890d43..97b0482 100644
36146--- a/block/blk-map.c
36147+++ b/block/blk-map.c
36148@@ -300,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
36149 if (!len || !kbuf)
36150 return -EINVAL;
36151
36152- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
36153+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
36154 if (do_copy)
36155 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
36156 else
36157diff --git a/block/blk-mq.c b/block/blk-mq.c
36158index 447f533..da01de2 100644
36159--- a/block/blk-mq.c
36160+++ b/block/blk-mq.c
36161@@ -1456,7 +1456,7 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
36162
36163 do {
36164 page = alloc_pages_node(set->numa_node,
36165- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
36166+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
36167 this_order);
36168 if (page)
36169 break;
36170@@ -1478,8 +1478,6 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
36171 left -= to_do * rq_size;
36172 for (j = 0; j < to_do; j++) {
36173 tags->rqs[i] = p;
36174- tags->rqs[i]->atomic_flags = 0;
36175- tags->rqs[i]->cmd_flags = 0;
36176 if (set->ops->init_request) {
36177 if (set->ops->init_request(set->driver_data,
36178 tags->rqs[i], hctx_idx, i,
36179diff --git a/block/blk-softirq.c b/block/blk-softirq.c
36180index 53b1737..08177d2e 100644
36181--- a/block/blk-softirq.c
36182+++ b/block/blk-softirq.c
36183@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
36184 * Softirq action handler - move entries to local list and loop over them
36185 * while passing them to the queue registered handler.
36186 */
36187-static void blk_done_softirq(struct softirq_action *h)
36188+static __latent_entropy void blk_done_softirq(void)
36189 {
36190 struct list_head *cpu_list, local_list;
36191
36192diff --git a/block/bsg.c b/block/bsg.c
36193index 276e869..6fe4c61 100644
36194--- a/block/bsg.c
36195+++ b/block/bsg.c
36196@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
36197 struct sg_io_v4 *hdr, struct bsg_device *bd,
36198 fmode_t has_write_perm)
36199 {
36200+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36201+ unsigned char *cmdptr;
36202+
36203 if (hdr->request_len > BLK_MAX_CDB) {
36204 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
36205 if (!rq->cmd)
36206 return -ENOMEM;
36207- }
36208+ cmdptr = rq->cmd;
36209+ } else
36210+ cmdptr = tmpcmd;
36211
36212- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
36213+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
36214 hdr->request_len))
36215 return -EFAULT;
36216
36217+ if (cmdptr != rq->cmd)
36218+ memcpy(rq->cmd, cmdptr, hdr->request_len);
36219+
36220 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
36221 if (blk_verify_command(rq->cmd, has_write_perm))
36222 return -EPERM;
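(The tmpcmd/cmdptr dance in blk_fill_sgv4_hdr_rq(), repeated in scsi_ioctl.c below, keeps copy_from_user() from writing directly into the command buffer embedded in struct request; my reading is that this is to satisfy the hardened-usercopy slab checks, since the kzalloc'd >BLK_MAX_CDB path still copies in place. A kernel-context skeleton of the short-command path:)

static int fill_cmd(unsigned char *dst, const void __user *src, size_t len)
{
	unsigned char tmp[BLK_MAX_CDB];	/* bounce buffer on the stack */

	if (len > sizeof(tmp))
		return -EINVAL;
	if (copy_from_user(tmp, src, len))
		return -EFAULT;
	memcpy(dst, tmp, len);	/* slab object written from kernel memory only */
	return 0;
}
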
36223diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
36224index f678c73..f35aa18 100644
36225--- a/block/compat_ioctl.c
36226+++ b/block/compat_ioctl.c
36227@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
36228 cgc = compat_alloc_user_space(sizeof(*cgc));
36229 cgc32 = compat_ptr(arg);
36230
36231- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
36232+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
36233 get_user(data, &cgc32->buffer) ||
36234 put_user(compat_ptr(data), &cgc->buffer) ||
36235 copy_in_user(&cgc->buflen, &cgc32->buflen,
36236@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
36237 err |= __get_user(f->spec1, &uf->spec1);
36238 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
36239 err |= __get_user(name, &uf->name);
36240- f->name = compat_ptr(name);
36241+ f->name = (void __force_kernel *)compat_ptr(name);
36242 if (err) {
36243 err = -EFAULT;
36244 goto out;
36245diff --git a/block/genhd.c b/block/genhd.c
36246index 0a536dc..b8f7aca 100644
36247--- a/block/genhd.c
36248+++ b/block/genhd.c
36249@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
36250
36251 /*
36252 * Register device numbers dev..(dev+range-1)
36253- * range must be nonzero
36254+ * Noop if @range is zero.
36255 * The hash chain is sorted on range, so that subranges can override.
36256 */
36257 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
36258 struct kobject *(*probe)(dev_t, int *, void *),
36259 int (*lock)(dev_t, void *), void *data)
36260 {
36261- kobj_map(bdev_map, devt, range, module, probe, lock, data);
36262+ if (range)
36263+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
36264 }
36265
36266 EXPORT_SYMBOL(blk_register_region);
36267
36268+/* undo blk_register_region(), noop if @range is zero */
36269 void blk_unregister_region(dev_t devt, unsigned long range)
36270 {
36271- kobj_unmap(bdev_map, devt, range);
36272+ if (range)
36273+ kobj_unmap(bdev_map, devt, range);
36274 }
36275
36276 EXPORT_SYMBOL(blk_unregister_region);
36277diff --git a/block/partitions/efi.c b/block/partitions/efi.c
36278index 56d08fd..2e07090 100644
36279--- a/block/partitions/efi.c
36280+++ b/block/partitions/efi.c
36281@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
36282 if (!gpt)
36283 return NULL;
36284
36285+ if (!le32_to_cpu(gpt->num_partition_entries))
36286+ return NULL;
36287+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
36288+ if (!pte)
36289+ return NULL;
36290+
36291 count = le32_to_cpu(gpt->num_partition_entries) *
36292 le32_to_cpu(gpt->sizeof_partition_entry);
36293- if (!count)
36294- return NULL;
36295- pte = kmalloc(count, GFP_KERNEL);
36296- if (!pte)
36297- return NULL;
36298-
36299 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
36300 (u8 *) pte, count) < count) {
36301 kfree(pte);
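(The efi.c change replaces kmalloc(a * b) with kcalloc(a, b) because both factors come straight from the on-disk GPT header, so a crafted disk can make the 32-bit product wrap and yield an undersized buffer. kcalloc() refuses overflowing products; the test it relies on is essentially:)

/* true when n * size would wrap a size_t (cf. kmalloc_array()) */
static inline int mul_would_overflow(size_t n, size_t size)
{
	return size != 0 && n > (size_t)-1 / size;
}
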
36302diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
36303index 28163fa..07190a06 100644
36304--- a/block/scsi_ioctl.c
36305+++ b/block/scsi_ioctl.c
36306@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
36307 return put_user(0, p);
36308 }
36309
36310-static int sg_get_timeout(struct request_queue *q)
36311+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
36312 {
36313 return jiffies_to_clock_t(q->sg_timeout);
36314 }
36315@@ -227,8 +227,20 @@ EXPORT_SYMBOL(blk_verify_command);
36316 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
36317 struct sg_io_hdr *hdr, fmode_t mode)
36318 {
36319- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
36320+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36321+ unsigned char *cmdptr;
36322+
36323+ if (rq->cmd != rq->__cmd)
36324+ cmdptr = rq->cmd;
36325+ else
36326+ cmdptr = tmpcmd;
36327+
36328+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
36329 return -EFAULT;
36330+
36331+ if (cmdptr != rq->cmd)
36332+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
36333+
36334 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
36335 return -EPERM;
36336
36337@@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36338 int err;
36339 unsigned int in_len, out_len, bytes, opcode, cmdlen;
36340 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
36341+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36342+ unsigned char *cmdptr;
36343
36344 if (!sic)
36345 return -EINVAL;
36346@@ -469,9 +483,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36347 */
36348 err = -EFAULT;
36349 rq->cmd_len = cmdlen;
36350- if (copy_from_user(rq->cmd, sic->data, cmdlen))
36351+
36352+ if (rq->cmd != rq->__cmd)
36353+ cmdptr = rq->cmd;
36354+ else
36355+ cmdptr = tmpcmd;
36356+
36357+ if (copy_from_user(cmdptr, sic->data, cmdlen))
36358 goto error;
36359
36360+ if (rq->cmd != cmdptr)
36361+ memcpy(rq->cmd, cmdptr, cmdlen);
36362+
36363 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
36364 goto error;
36365
36366diff --git a/crypto/cryptd.c b/crypto/cryptd.c
36367index 650afac1..f3307de 100644
36368--- a/crypto/cryptd.c
36369+++ b/crypto/cryptd.c
36370@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
36371
36372 struct cryptd_blkcipher_request_ctx {
36373 crypto_completion_t complete;
36374-};
36375+} __no_const;
36376
36377 struct cryptd_hash_ctx {
36378 struct crypto_shash *child;
36379@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
36380
36381 struct cryptd_aead_request_ctx {
36382 crypto_completion_t complete;
36383-};
36384+} __no_const;
36385
36386 static void cryptd_queue_worker(struct work_struct *work);
36387
36388diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
36389index c305d41..a96de79 100644
36390--- a/crypto/pcrypt.c
36391+++ b/crypto/pcrypt.c
36392@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
36393 int ret;
36394
36395 pinst->kobj.kset = pcrypt_kset;
36396- ret = kobject_add(&pinst->kobj, NULL, name);
36397+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
36398 if (!ret)
36399 kobject_uevent(&pinst->kobj, KOBJ_ADD);
36400
36401diff --git a/crypto/zlib.c b/crypto/zlib.c
36402index 0eefa9d..0fa3d29 100644
36403--- a/crypto/zlib.c
36404+++ b/crypto/zlib.c
36405@@ -95,10 +95,10 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params,
36406 zlib_comp_exit(ctx);
36407
36408 window_bits = tb[ZLIB_COMP_WINDOWBITS]
36409- ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS])
36410+ ? nla_get_s32(tb[ZLIB_COMP_WINDOWBITS])
36411 : MAX_WBITS;
36412 mem_level = tb[ZLIB_COMP_MEMLEVEL]
36413- ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL])
36414+ ? nla_get_s32(tb[ZLIB_COMP_MEMLEVEL])
36415 : DEF_MEM_LEVEL;
36416
36417 workspacesize = zlib_deflate_workspacesize(window_bits, mem_level);
36418diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
36419index 6921c7f..78e1af7 100644
36420--- a/drivers/acpi/acpica/hwxfsleep.c
36421+++ b/drivers/acpi/acpica/hwxfsleep.c
36422@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
36423 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
36424
36425 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
36426- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36427- acpi_hw_extended_sleep},
36428- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36429- acpi_hw_extended_wake_prep},
36430- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
36431+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
36432+ .extended_function = acpi_hw_extended_sleep},
36433+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
36434+ .extended_function = acpi_hw_extended_wake_prep},
36435+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
36436+ .extended_function = acpi_hw_extended_wake}
36437 };
36438
36439 /*
36440diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
36441index 16129c7..8b675cd 100644
36442--- a/drivers/acpi/apei/apei-internal.h
36443+++ b/drivers/acpi/apei/apei-internal.h
36444@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
36445 struct apei_exec_ins_type {
36446 u32 flags;
36447 apei_exec_ins_func_t run;
36448-};
36449+} __do_const;
36450
36451 struct apei_exec_context {
36452 u32 ip;
36453diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
36454index e82d097..0c855c1 100644
36455--- a/drivers/acpi/apei/ghes.c
36456+++ b/drivers/acpi/apei/ghes.c
36457@@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
36458 const struct acpi_hest_generic *generic,
36459 const struct acpi_hest_generic_status *estatus)
36460 {
36461- static atomic_t seqno;
36462+ static atomic_unchecked_t seqno;
36463 unsigned int curr_seqno;
36464 char pfx_seq[64];
36465
36466@@ -489,7 +489,7 @@ static void __ghes_print_estatus(const char *pfx,
36467 else
36468 pfx = KERN_ERR;
36469 }
36470- curr_seqno = atomic_inc_return(&seqno);
36471+ curr_seqno = atomic_inc_return_unchecked(&seqno);
36472 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
36473 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
36474 pfx_seq, generic->header.source_id);
36475diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
36476index a83e3c6..c3d617f 100644
36477--- a/drivers/acpi/bgrt.c
36478+++ b/drivers/acpi/bgrt.c
36479@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
36480 if (!bgrt_image)
36481 return -ENODEV;
36482
36483- bin_attr_image.private = bgrt_image;
36484- bin_attr_image.size = bgrt_image_size;
36485+ pax_open_kernel();
36486+ *(void **)&bin_attr_image.private = bgrt_image;
36487+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
36488+ pax_close_kernel();
36489
36490 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
36491 if (!bgrt_kobj)
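(bgrt_init() now writes the const-qualified bin_attr_image fields through casts inside a pax_open_kernel()/pax_close_kernel() pair. Conceptually that window lifts the CPU's kernel write protection so read-only data can be patched; a rough x86 sketch of the idea only, as the real PaX implementation also deals with preemption and related details:)

static inline void pax_open_kernel_sketch(void)
{
	write_cr0(read_cr0() & ~X86_CR0_WP);	/* ring 0 may write RO pages */
}

static inline void pax_close_kernel_sketch(void)
{
	write_cr0(read_cr0() | X86_CR0_WP);	/* restore write protection  */
}
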
36492diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
36493index 9b693d5..8953d54 100644
36494--- a/drivers/acpi/blacklist.c
36495+++ b/drivers/acpi/blacklist.c
36496@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
36497 u32 is_critical_error;
36498 };
36499
36500-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
36501+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
36502
36503 /*
36504 * POLICY: If *anything* doesn't work, put it on the blacklist.
36505@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
36506 return 0;
36507 }
36508
36509-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
36510+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
36511 {
36512 .callback = dmi_disable_osi_vista,
36513 .ident = "Fujitsu Siemens",
36514diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
36515index c68e724..e863008 100644
36516--- a/drivers/acpi/custom_method.c
36517+++ b/drivers/acpi/custom_method.c
36518@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
36519 struct acpi_table_header table;
36520 acpi_status status;
36521
36522+#ifdef CONFIG_GRKERNSEC_KMEM
36523+ return -EPERM;
36524+#endif
36525+
36526 if (!(*ppos)) {
36527 /* parse the table header to get the table length */
36528 if (count <= sizeof(struct acpi_table_header))
36529diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
36530index c0d44d3..5ad8f9a 100644
36531--- a/drivers/acpi/device_pm.c
36532+++ b/drivers/acpi/device_pm.c
36533@@ -1025,6 +1025,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
36534
36535 #endif /* CONFIG_PM_SLEEP */
36536
36537+static void acpi_dev_pm_detach(struct device *dev, bool power_off);
36538+
36539 static struct dev_pm_domain acpi_general_pm_domain = {
36540 .ops = {
36541 #ifdef CONFIG_PM
36542@@ -1043,6 +1045,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
36543 #endif
36544 #endif
36545 },
36546+ .detach = acpi_dev_pm_detach
36547 };
36548
36549 /**
36550@@ -1112,7 +1115,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
36551 acpi_device_wakeup(adev, ACPI_STATE_S0, false);
36552 }
36553
36554- dev->pm_domain->detach = acpi_dev_pm_detach;
36555 return 0;
36556 }
36557 EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
36558diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
36559index b27ab7a..275b1b6 100644
36560--- a/drivers/acpi/processor_idle.c
36561+++ b/drivers/acpi/processor_idle.c
36562@@ -952,7 +952,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
36563 {
36564 int i, count = CPUIDLE_DRIVER_STATE_START;
36565 struct acpi_processor_cx *cx;
36566- struct cpuidle_state *state;
36567+ cpuidle_state_no_const *state;
36568 struct cpuidle_driver *drv = &acpi_idle_driver;
36569
36570 if (!pr->flags.power_setup_done)
36571diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
36572index 13e577c..cef11ee 100644
36573--- a/drivers/acpi/sysfs.c
36574+++ b/drivers/acpi/sysfs.c
36575@@ -423,11 +423,11 @@ static u32 num_counters;
36576 static struct attribute **all_attrs;
36577 static u32 acpi_gpe_count;
36578
36579-static struct attribute_group interrupt_stats_attr_group = {
36580+static attribute_group_no_const interrupt_stats_attr_group = {
36581 .name = "interrupts",
36582 };
36583
36584-static struct kobj_attribute *counter_attrs;
36585+static kobj_attribute_no_const *counter_attrs;
36586
36587 static void delete_gpe_attr_array(void)
36588 {
36589diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
36590index 61a9c07..ea98fa1 100644
36591--- a/drivers/ata/libahci.c
36592+++ b/drivers/ata/libahci.c
36593@@ -1252,7 +1252,7 @@ int ahci_kick_engine(struct ata_port *ap)
36594 }
36595 EXPORT_SYMBOL_GPL(ahci_kick_engine);
36596
36597-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36598+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
36599 struct ata_taskfile *tf, int is_cmd, u16 flags,
36600 unsigned long timeout_msec)
36601 {
36602diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
36603index 00f2f74..efd8b7d 100644
36604--- a/drivers/ata/libata-core.c
36605+++ b/drivers/ata/libata-core.c
36606@@ -99,7 +99,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
36607 static void ata_dev_xfermask(struct ata_device *dev);
36608 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
36609
36610-atomic_t ata_print_id = ATOMIC_INIT(0);
36611+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
36612
36613 struct ata_force_param {
36614 const char *name;
36615@@ -4842,7 +4842,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
36616 struct ata_port *ap;
36617 unsigned int tag;
36618
36619- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36620+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36621 ap = qc->ap;
36622
36623 qc->flags = 0;
36624@@ -4858,7 +4858,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
36625 struct ata_port *ap;
36626 struct ata_link *link;
36627
36628- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36629+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
36630 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
36631 ap = qc->ap;
36632 link = qc->dev->link;
36633@@ -5962,6 +5962,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36634 return;
36635
36636 spin_lock(&lock);
36637+ pax_open_kernel();
36638
36639 for (cur = ops->inherits; cur; cur = cur->inherits) {
36640 void **inherit = (void **)cur;
36641@@ -5975,8 +5976,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
36642 if (IS_ERR(*pp))
36643 *pp = NULL;
36644
36645- ops->inherits = NULL;
36646+ *(struct ata_port_operations **)&ops->inherits = NULL;
36647
36648+ pax_close_kernel();
36649 spin_unlock(&lock);
36650 }
36651
36652@@ -6172,7 +6174,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
36653
36654 /* give ports names and add SCSI hosts */
36655 for (i = 0; i < host->n_ports; i++) {
36656- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
36657+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
36658 host->ports[i]->local_port_no = i + 1;
36659 }
36660
36661diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
36662index 6abd17a..9961bf7 100644
36663--- a/drivers/ata/libata-scsi.c
36664+++ b/drivers/ata/libata-scsi.c
36665@@ -4169,7 +4169,7 @@ int ata_sas_port_init(struct ata_port *ap)
36666
36667 if (rc)
36668 return rc;
36669- ap->print_id = atomic_inc_return(&ata_print_id);
36670+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
36671 return 0;
36672 }
36673 EXPORT_SYMBOL_GPL(ata_sas_port_init);
36674diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
36675index 5f4e0cc..ff2c347 100644
36676--- a/drivers/ata/libata.h
36677+++ b/drivers/ata/libata.h
36678@@ -53,7 +53,7 @@ enum {
36679 ATA_DNXFER_QUIET = (1 << 31),
36680 };
36681
36682-extern atomic_t ata_print_id;
36683+extern atomic_unchecked_t ata_print_id;
36684 extern int atapi_passthru16;
36685 extern int libata_fua;
36686 extern int libata_noacpi;
36687diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
36688index a9b0c82..207d97d 100644
36689--- a/drivers/ata/pata_arasan_cf.c
36690+++ b/drivers/ata/pata_arasan_cf.c
36691@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
36692 /* Handle platform specific quirks */
36693 if (quirk) {
36694 if (quirk & CF_BROKEN_PIO) {
36695- ap->ops->set_piomode = NULL;
36696+ pax_open_kernel();
36697+ *(void **)&ap->ops->set_piomode = NULL;
36698+ pax_close_kernel();
36699 ap->pio_mask = 0;
36700 }
36701 if (quirk & CF_BROKEN_MWDMA)
36702diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
36703index f9b983a..887b9d8 100644
36704--- a/drivers/atm/adummy.c
36705+++ b/drivers/atm/adummy.c
36706@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
36707 vcc->pop(vcc, skb);
36708 else
36709 dev_kfree_skb_any(skb);
36710- atomic_inc(&vcc->stats->tx);
36711+ atomic_inc_unchecked(&vcc->stats->tx);
36712
36713 return 0;
36714 }
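(This hunk and the long run of ATM driver hunks that follows all make the same substitution: under the REFCOUNT hardening, plain atomic_t operations trap on overflow to catch reference-count wraps, so counters that may legitimately wrap, like these per-VCC rx/tx statistics, move to an _unchecked variant with the old semantics. Conceptual shape only; the real version is per-arch assembly:)

typedef struct { int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* same as a pre-hardening atomic_inc(): wraps silently */
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}
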
36715diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
36716index f1a9198..f466a4a 100644
36717--- a/drivers/atm/ambassador.c
36718+++ b/drivers/atm/ambassador.c
36719@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
36720 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
36721
36722 // VC layer stats
36723- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36724+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36725
36726 // free the descriptor
36727 kfree (tx_descr);
36728@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36729 dump_skb ("<<<", vc, skb);
36730
36731 // VC layer stats
36732- atomic_inc(&atm_vcc->stats->rx);
36733+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36734 __net_timestamp(skb);
36735 // end of our responsibility
36736 atm_vcc->push (atm_vcc, skb);
36737@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
36738 } else {
36739 PRINTK (KERN_INFO, "dropped over-size frame");
36740 // should we count this?
36741- atomic_inc(&atm_vcc->stats->rx_drop);
36742+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36743 }
36744
36745 } else {
36746@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
36747 }
36748
36749 if (check_area (skb->data, skb->len)) {
36750- atomic_inc(&atm_vcc->stats->tx_err);
36751+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
36752 return -ENOMEM; // ?
36753 }
36754
36755diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
36756index 480fa6f..947067c 100644
36757--- a/drivers/atm/atmtcp.c
36758+++ b/drivers/atm/atmtcp.c
36759@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36760 if (vcc->pop) vcc->pop(vcc,skb);
36761 else dev_kfree_skb(skb);
36762 if (dev_data) return 0;
36763- atomic_inc(&vcc->stats->tx_err);
36764+ atomic_inc_unchecked(&vcc->stats->tx_err);
36765 return -ENOLINK;
36766 }
36767 size = skb->len+sizeof(struct atmtcp_hdr);
36768@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36769 if (!new_skb) {
36770 if (vcc->pop) vcc->pop(vcc,skb);
36771 else dev_kfree_skb(skb);
36772- atomic_inc(&vcc->stats->tx_err);
36773+ atomic_inc_unchecked(&vcc->stats->tx_err);
36774 return -ENOBUFS;
36775 }
36776 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
36777@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
36778 if (vcc->pop) vcc->pop(vcc,skb);
36779 else dev_kfree_skb(skb);
36780 out_vcc->push(out_vcc,new_skb);
36781- atomic_inc(&vcc->stats->tx);
36782- atomic_inc(&out_vcc->stats->rx);
36783+ atomic_inc_unchecked(&vcc->stats->tx);
36784+ atomic_inc_unchecked(&out_vcc->stats->rx);
36785 return 0;
36786 }
36787
36788@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36789 read_unlock(&vcc_sklist_lock);
36790 if (!out_vcc) {
36791 result = -EUNATCH;
36792- atomic_inc(&vcc->stats->tx_err);
36793+ atomic_inc_unchecked(&vcc->stats->tx_err);
36794 goto done;
36795 }
36796 skb_pull(skb,sizeof(struct atmtcp_hdr));
36797@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
36798 __net_timestamp(new_skb);
36799 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
36800 out_vcc->push(out_vcc,new_skb);
36801- atomic_inc(&vcc->stats->tx);
36802- atomic_inc(&out_vcc->stats->rx);
36803+ atomic_inc_unchecked(&vcc->stats->tx);
36804+ atomic_inc_unchecked(&out_vcc->stats->rx);
36805 done:
36806 if (vcc->pop) vcc->pop(vcc,skb);
36807 else dev_kfree_skb(skb);
36808diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
36809index c7fab3e..68d0965 100644
36810--- a/drivers/atm/eni.c
36811+++ b/drivers/atm/eni.c
36812@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
36813 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
36814 vcc->dev->number);
36815 length = 0;
36816- atomic_inc(&vcc->stats->rx_err);
36817+ atomic_inc_unchecked(&vcc->stats->rx_err);
36818 }
36819 else {
36820 length = ATM_CELL_SIZE-1; /* no HEC */
36821@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36822 size);
36823 }
36824 eff = length = 0;
36825- atomic_inc(&vcc->stats->rx_err);
36826+ atomic_inc_unchecked(&vcc->stats->rx_err);
36827 }
36828 else {
36829 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
36830@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
36831 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
36832 vcc->dev->number,vcc->vci,length,size << 2,descr);
36833 length = eff = 0;
36834- atomic_inc(&vcc->stats->rx_err);
36835+ atomic_inc_unchecked(&vcc->stats->rx_err);
36836 }
36837 }
36838 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
36839@@ -770,7 +770,7 @@ rx_dequeued++;
36840 vcc->push(vcc,skb);
36841 pushed++;
36842 }
36843- atomic_inc(&vcc->stats->rx);
36844+ atomic_inc_unchecked(&vcc->stats->rx);
36845 }
36846 wake_up(&eni_dev->rx_wait);
36847 }
36848@@ -1230,7 +1230,7 @@ static void dequeue_tx(struct atm_dev *dev)
36849 PCI_DMA_TODEVICE);
36850 if (vcc->pop) vcc->pop(vcc,skb);
36851 else dev_kfree_skb_irq(skb);
36852- atomic_inc(&vcc->stats->tx);
36853+ atomic_inc_unchecked(&vcc->stats->tx);
36854 wake_up(&eni_dev->tx_wait);
36855 dma_complete++;
36856 }
36857diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
36858index 82f2ae0..f205c02 100644
36859--- a/drivers/atm/firestream.c
36860+++ b/drivers/atm/firestream.c
36861@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
36862 }
36863 }
36864
36865- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
36866+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
36867
36868 fs_dprintk (FS_DEBUG_TXMEM, "i");
36869 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
36870@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36871 #endif
36872 skb_put (skb, qe->p1 & 0xffff);
36873 ATM_SKB(skb)->vcc = atm_vcc;
36874- atomic_inc(&atm_vcc->stats->rx);
36875+ atomic_inc_unchecked(&atm_vcc->stats->rx);
36876 __net_timestamp(skb);
36877 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
36878 atm_vcc->push (atm_vcc, skb);
36879@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
36880 kfree (pe);
36881 }
36882 if (atm_vcc)
36883- atomic_inc(&atm_vcc->stats->rx_drop);
36884+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36885 break;
36886 case 0x1f: /* Reassembly abort: no buffers. */
36887 /* Silently increment error counter. */
36888 if (atm_vcc)
36889- atomic_inc(&atm_vcc->stats->rx_drop);
36890+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
36891 break;
36892 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
36893 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
36894diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
36895index d5d9eaf..65c0d53 100644
36896--- a/drivers/atm/fore200e.c
36897+++ b/drivers/atm/fore200e.c
36898@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
36899 #endif
36900 /* check error condition */
36901 if (*entry->status & STATUS_ERROR)
36902- atomic_inc(&vcc->stats->tx_err);
36903+ atomic_inc_unchecked(&vcc->stats->tx_err);
36904 else
36905- atomic_inc(&vcc->stats->tx);
36906+ atomic_inc_unchecked(&vcc->stats->tx);
36907 }
36908 }
36909
36910@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36911 if (skb == NULL) {
36912 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
36913
36914- atomic_inc(&vcc->stats->rx_drop);
36915+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36916 return -ENOMEM;
36917 }
36918
36919@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
36920
36921 dev_kfree_skb_any(skb);
36922
36923- atomic_inc(&vcc->stats->rx_drop);
36924+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36925 return -ENOMEM;
36926 }
36927
36928 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36929
36930 vcc->push(vcc, skb);
36931- atomic_inc(&vcc->stats->rx);
36932+ atomic_inc_unchecked(&vcc->stats->rx);
36933
36934 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
36935
36936@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
36937 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
36938 fore200e->atm_dev->number,
36939 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
36940- atomic_inc(&vcc->stats->rx_err);
36941+ atomic_inc_unchecked(&vcc->stats->rx_err);
36942 }
36943 }
36944
36945@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
36946 goto retry_here;
36947 }
36948
36949- atomic_inc(&vcc->stats->tx_err);
36950+ atomic_inc_unchecked(&vcc->stats->tx_err);
36951
36952 fore200e->tx_sat++;
36953 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
36954diff --git a/drivers/atm/he.c b/drivers/atm/he.c
36955index c39702b..785b73b 100644
36956--- a/drivers/atm/he.c
36957+++ b/drivers/atm/he.c
36958@@ -1689,7 +1689,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36959
36960 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
36961 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
36962- atomic_inc(&vcc->stats->rx_drop);
36963+ atomic_inc_unchecked(&vcc->stats->rx_drop);
36964 goto return_host_buffers;
36965 }
36966
36967@@ -1716,7 +1716,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36968 RBRQ_LEN_ERR(he_dev->rbrq_head)
36969 ? "LEN_ERR" : "",
36970 vcc->vpi, vcc->vci);
36971- atomic_inc(&vcc->stats->rx_err);
36972+ atomic_inc_unchecked(&vcc->stats->rx_err);
36973 goto return_host_buffers;
36974 }
36975
36976@@ -1768,7 +1768,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
36977 vcc->push(vcc, skb);
36978 spin_lock(&he_dev->global_lock);
36979
36980- atomic_inc(&vcc->stats->rx);
36981+ atomic_inc_unchecked(&vcc->stats->rx);
36982
36983 return_host_buffers:
36984 ++pdus_assembled;
36985@@ -2094,7 +2094,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
36986 tpd->vcc->pop(tpd->vcc, tpd->skb);
36987 else
36988 dev_kfree_skb_any(tpd->skb);
36989- atomic_inc(&tpd->vcc->stats->tx_err);
36990+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
36991 }
36992 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
36993 return;
36994@@ -2506,7 +2506,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
36995 vcc->pop(vcc, skb);
36996 else
36997 dev_kfree_skb_any(skb);
36998- atomic_inc(&vcc->stats->tx_err);
36999+ atomic_inc_unchecked(&vcc->stats->tx_err);
37000 return -EINVAL;
37001 }
37002
37003@@ -2517,7 +2517,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37004 vcc->pop(vcc, skb);
37005 else
37006 dev_kfree_skb_any(skb);
37007- atomic_inc(&vcc->stats->tx_err);
37008+ atomic_inc_unchecked(&vcc->stats->tx_err);
37009 return -EINVAL;
37010 }
37011 #endif
37012@@ -2529,7 +2529,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37013 vcc->pop(vcc, skb);
37014 else
37015 dev_kfree_skb_any(skb);
37016- atomic_inc(&vcc->stats->tx_err);
37017+ atomic_inc_unchecked(&vcc->stats->tx_err);
37018 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37019 return -ENOMEM;
37020 }
37021@@ -2571,7 +2571,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37022 vcc->pop(vcc, skb);
37023 else
37024 dev_kfree_skb_any(skb);
37025- atomic_inc(&vcc->stats->tx_err);
37026+ atomic_inc_unchecked(&vcc->stats->tx_err);
37027 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37028 return -ENOMEM;
37029 }
37030@@ -2602,7 +2602,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37031 __enqueue_tpd(he_dev, tpd, cid);
37032 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37033
37034- atomic_inc(&vcc->stats->tx);
37035+ atomic_inc_unchecked(&vcc->stats->tx);
37036
37037 return 0;
37038 }
37039diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
37040index 1dc0519..1aadaf7 100644
37041--- a/drivers/atm/horizon.c
37042+++ b/drivers/atm/horizon.c
37043@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
37044 {
37045 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
37046 // VC layer stats
37047- atomic_inc(&vcc->stats->rx);
37048+ atomic_inc_unchecked(&vcc->stats->rx);
37049 __net_timestamp(skb);
37050 // end of our responsibility
37051 vcc->push (vcc, skb);
37052@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
37053 dev->tx_iovec = NULL;
37054
37055 // VC layer stats
37056- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
37057+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
37058
37059 // free the skb
37060 hrz_kfree_skb (skb);
37061diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
37062index 2b24ed0..b3d6acc 100644
37063--- a/drivers/atm/idt77252.c
37064+++ b/drivers/atm/idt77252.c
37065@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
37066 else
37067 dev_kfree_skb(skb);
37068
37069- atomic_inc(&vcc->stats->tx);
37070+ atomic_inc_unchecked(&vcc->stats->tx);
37071 }
37072
37073 atomic_dec(&scq->used);
37074@@ -1072,13 +1072,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37075 if ((sb = dev_alloc_skb(64)) == NULL) {
37076 printk("%s: Can't allocate buffers for aal0.\n",
37077 card->name);
37078- atomic_add(i, &vcc->stats->rx_drop);
37079+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37080 break;
37081 }
37082 if (!atm_charge(vcc, sb->truesize)) {
37083 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
37084 card->name);
37085- atomic_add(i - 1, &vcc->stats->rx_drop);
37086+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
37087 dev_kfree_skb(sb);
37088 break;
37089 }
37090@@ -1095,7 +1095,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37091 ATM_SKB(sb)->vcc = vcc;
37092 __net_timestamp(sb);
37093 vcc->push(vcc, sb);
37094- atomic_inc(&vcc->stats->rx);
37095+ atomic_inc_unchecked(&vcc->stats->rx);
37096
37097 cell += ATM_CELL_PAYLOAD;
37098 }
37099@@ -1132,13 +1132,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37100 "(CDC: %08x)\n",
37101 card->name, len, rpp->len, readl(SAR_REG_CDC));
37102 recycle_rx_pool_skb(card, rpp);
37103- atomic_inc(&vcc->stats->rx_err);
37104+ atomic_inc_unchecked(&vcc->stats->rx_err);
37105 return;
37106 }
37107 if (stat & SAR_RSQE_CRC) {
37108 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
37109 recycle_rx_pool_skb(card, rpp);
37110- atomic_inc(&vcc->stats->rx_err);
37111+ atomic_inc_unchecked(&vcc->stats->rx_err);
37112 return;
37113 }
37114 if (skb_queue_len(&rpp->queue) > 1) {
37115@@ -1149,7 +1149,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37116 RXPRINTK("%s: Can't alloc RX skb.\n",
37117 card->name);
37118 recycle_rx_pool_skb(card, rpp);
37119- atomic_inc(&vcc->stats->rx_err);
37120+ atomic_inc_unchecked(&vcc->stats->rx_err);
37121 return;
37122 }
37123 if (!atm_charge(vcc, skb->truesize)) {
37124@@ -1168,7 +1168,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37125 __net_timestamp(skb);
37126
37127 vcc->push(vcc, skb);
37128- atomic_inc(&vcc->stats->rx);
37129+ atomic_inc_unchecked(&vcc->stats->rx);
37130
37131 return;
37132 }
37133@@ -1190,7 +1190,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37134 __net_timestamp(skb);
37135
37136 vcc->push(vcc, skb);
37137- atomic_inc(&vcc->stats->rx);
37138+ atomic_inc_unchecked(&vcc->stats->rx);
37139
37140 if (skb->truesize > SAR_FB_SIZE_3)
37141 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
37142@@ -1301,14 +1301,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
37143 if (vcc->qos.aal != ATM_AAL0) {
37144 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
37145 card->name, vpi, vci);
37146- atomic_inc(&vcc->stats->rx_drop);
37147+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37148 goto drop;
37149 }
37150
37151 if ((sb = dev_alloc_skb(64)) == NULL) {
37152 printk("%s: Can't allocate buffers for AAL0.\n",
37153 card->name);
37154- atomic_inc(&vcc->stats->rx_err);
37155+ atomic_inc_unchecked(&vcc->stats->rx_err);
37156 goto drop;
37157 }
37158
37159@@ -1327,7 +1327,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
37160 ATM_SKB(sb)->vcc = vcc;
37161 __net_timestamp(sb);
37162 vcc->push(vcc, sb);
37163- atomic_inc(&vcc->stats->rx);
37164+ atomic_inc_unchecked(&vcc->stats->rx);
37165
37166 drop:
37167 skb_pull(queue, 64);
37168@@ -1952,13 +1952,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37169
37170 if (vc == NULL) {
37171 printk("%s: NULL connection in send().\n", card->name);
37172- atomic_inc(&vcc->stats->tx_err);
37173+ atomic_inc_unchecked(&vcc->stats->tx_err);
37174 dev_kfree_skb(skb);
37175 return -EINVAL;
37176 }
37177 if (!test_bit(VCF_TX, &vc->flags)) {
37178 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
37179- atomic_inc(&vcc->stats->tx_err);
37180+ atomic_inc_unchecked(&vcc->stats->tx_err);
37181 dev_kfree_skb(skb);
37182 return -EINVAL;
37183 }
37184@@ -1970,14 +1970,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37185 break;
37186 default:
37187 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
37188- atomic_inc(&vcc->stats->tx_err);
37189+ atomic_inc_unchecked(&vcc->stats->tx_err);
37190 dev_kfree_skb(skb);
37191 return -EINVAL;
37192 }
37193
37194 if (skb_shinfo(skb)->nr_frags != 0) {
37195 printk("%s: No scatter-gather yet.\n", card->name);
37196- atomic_inc(&vcc->stats->tx_err);
37197+ atomic_inc_unchecked(&vcc->stats->tx_err);
37198 dev_kfree_skb(skb);
37199 return -EINVAL;
37200 }
37201@@ -1985,7 +1985,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37202
37203 err = queue_skb(card, vc, skb, oam);
37204 if (err) {
37205- atomic_inc(&vcc->stats->tx_err);
37206+ atomic_inc_unchecked(&vcc->stats->tx_err);
37207 dev_kfree_skb(skb);
37208 return err;
37209 }
37210@@ -2008,7 +2008,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
37211 skb = dev_alloc_skb(64);
37212 if (!skb) {
37213 printk("%s: Out of memory in send_oam().\n", card->name);
37214- atomic_inc(&vcc->stats->tx_err);
37215+ atomic_inc_unchecked(&vcc->stats->tx_err);
37216 return -ENOMEM;
37217 }
37218 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
37219diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
37220index 4217f29..88f547a 100644
37221--- a/drivers/atm/iphase.c
37222+++ b/drivers/atm/iphase.c
37223@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
37224 status = (u_short) (buf_desc_ptr->desc_mode);
37225 if (status & (RX_CER | RX_PTE | RX_OFL))
37226 {
37227- atomic_inc(&vcc->stats->rx_err);
37228+ atomic_inc_unchecked(&vcc->stats->rx_err);
37229 IF_ERR(printk("IA: bad packet, dropping it");)
37230 if (status & RX_CER) {
37231 IF_ERR(printk(" cause: packet CRC error\n");)
37232@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
37233 len = dma_addr - buf_addr;
37234 if (len > iadev->rx_buf_sz) {
37235 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
37236- atomic_inc(&vcc->stats->rx_err);
37237+ atomic_inc_unchecked(&vcc->stats->rx_err);
37238 goto out_free_desc;
37239 }
37240
37241@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37242 ia_vcc = INPH_IA_VCC(vcc);
37243 if (ia_vcc == NULL)
37244 {
37245- atomic_inc(&vcc->stats->rx_err);
37246+ atomic_inc_unchecked(&vcc->stats->rx_err);
37247 atm_return(vcc, skb->truesize);
37248 dev_kfree_skb_any(skb);
37249 goto INCR_DLE;
37250@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37251 if ((length > iadev->rx_buf_sz) || (length >
37252 (skb->len - sizeof(struct cpcs_trailer))))
37253 {
37254- atomic_inc(&vcc->stats->rx_err);
37255+ atomic_inc_unchecked(&vcc->stats->rx_err);
37256 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
37257 length, skb->len);)
37258 atm_return(vcc, skb->truesize);
37259@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37260
37261 IF_RX(printk("rx_dle_intr: skb push");)
37262 vcc->push(vcc,skb);
37263- atomic_inc(&vcc->stats->rx);
37264+ atomic_inc_unchecked(&vcc->stats->rx);
37265 iadev->rx_pkt_cnt++;
37266 }
37267 INCR_DLE:
37268@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
37269 {
37270 struct k_sonet_stats *stats;
37271 stats = &PRIV(_ia_dev[board])->sonet_stats;
37272- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
37273- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
37274- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
37275- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
37276- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
37277- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
37278- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
37279- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
37280- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
37281+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
37282+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
37283+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
37284+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
37285+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
37286+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
37287+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
37288+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
37289+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
37290 }
37291 ia_cmds.status = 0;
37292 break;
37293@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37294 if ((desc == 0) || (desc > iadev->num_tx_desc))
37295 {
37296 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
37297- atomic_inc(&vcc->stats->tx);
37298+ atomic_inc_unchecked(&vcc->stats->tx);
37299 if (vcc->pop)
37300 vcc->pop(vcc, skb);
37301 else
37302@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37303 ATM_DESC(skb) = vcc->vci;
37304 skb_queue_tail(&iadev->tx_dma_q, skb);
37305
37306- atomic_inc(&vcc->stats->tx);
37307+ atomic_inc_unchecked(&vcc->stats->tx);
37308 iadev->tx_pkt_cnt++;
37309 /* Increment transaction counter */
37310 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
37311
37312 #if 0
37313 /* add flow control logic */
37314- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
37315+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
37316 if (iavcc->vc_desc_cnt > 10) {
37317 vcc->tx_quota = vcc->tx_quota * 3 / 4;
37318 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
37319diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
37320index 93eaf8d..b4ca7da 100644
37321--- a/drivers/atm/lanai.c
37322+++ b/drivers/atm/lanai.c
37323@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
37324 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
37325 lanai_endtx(lanai, lvcc);
37326 lanai_free_skb(lvcc->tx.atmvcc, skb);
37327- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
37328+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
37329 }
37330
37331 /* Try to fill the buffer - don't call unless there is backlog */
37332@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
37333 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
37334 __net_timestamp(skb);
37335 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
37336- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
37337+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
37338 out:
37339 lvcc->rx.buf.ptr = end;
37340 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
37341@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37342 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
37343 "vcc %d\n", lanai->number, (unsigned int) s, vci);
37344 lanai->stats.service_rxnotaal5++;
37345- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37346+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37347 return 0;
37348 }
37349 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
37350@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37351 int bytes;
37352 read_unlock(&vcc_sklist_lock);
37353 DPRINTK("got trashed rx pdu on vci %d\n", vci);
37354- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37355+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37356 lvcc->stats.x.aal5.service_trash++;
37357 bytes = (SERVICE_GET_END(s) * 16) -
37358 (((unsigned long) lvcc->rx.buf.ptr) -
37359@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37360 }
37361 if (s & SERVICE_STREAM) {
37362 read_unlock(&vcc_sklist_lock);
37363- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37364+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37365 lvcc->stats.x.aal5.service_stream++;
37366 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
37367 "PDU on VCI %d!\n", lanai->number, vci);
37368@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
37369 return 0;
37370 }
37371 DPRINTK("got rx crc error on vci %d\n", vci);
37372- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
37373+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
37374 lvcc->stats.x.aal5.service_rxcrc++;
37375 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
37376 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
37377diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
37378index 9988ac9..7c52585 100644
37379--- a/drivers/atm/nicstar.c
37380+++ b/drivers/atm/nicstar.c
37381@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37382 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
37383 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
37384 card->index);
37385- atomic_inc(&vcc->stats->tx_err);
37386+ atomic_inc_unchecked(&vcc->stats->tx_err);
37387 dev_kfree_skb_any(skb);
37388 return -EINVAL;
37389 }
37390@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37391 if (!vc->tx) {
37392 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
37393 card->index);
37394- atomic_inc(&vcc->stats->tx_err);
37395+ atomic_inc_unchecked(&vcc->stats->tx_err);
37396 dev_kfree_skb_any(skb);
37397 return -EINVAL;
37398 }
37399@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37400 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
37401 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
37402 card->index);
37403- atomic_inc(&vcc->stats->tx_err);
37404+ atomic_inc_unchecked(&vcc->stats->tx_err);
37405 dev_kfree_skb_any(skb);
37406 return -EINVAL;
37407 }
37408
37409 if (skb_shinfo(skb)->nr_frags != 0) {
37410 printk("nicstar%d: No scatter-gather yet.\n", card->index);
37411- atomic_inc(&vcc->stats->tx_err);
37412+ atomic_inc_unchecked(&vcc->stats->tx_err);
37413 dev_kfree_skb_any(skb);
37414 return -EINVAL;
37415 }
37416@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
37417 }
37418
37419 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
37420- atomic_inc(&vcc->stats->tx_err);
37421+ atomic_inc_unchecked(&vcc->stats->tx_err);
37422 dev_kfree_skb_any(skb);
37423 return -EIO;
37424 }
37425- atomic_inc(&vcc->stats->tx);
37426+ atomic_inc_unchecked(&vcc->stats->tx);
37427
37428 return 0;
37429 }
37430@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37431 printk
37432 ("nicstar%d: Can't allocate buffers for aal0.\n",
37433 card->index);
37434- atomic_add(i, &vcc->stats->rx_drop);
37435+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37436 break;
37437 }
37438 if (!atm_charge(vcc, sb->truesize)) {
37439 RXPRINTK
37440 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
37441 card->index);
37442- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37443+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
37444 dev_kfree_skb_any(sb);
37445 break;
37446 }
37447@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37448 ATM_SKB(sb)->vcc = vcc;
37449 __net_timestamp(sb);
37450 vcc->push(vcc, sb);
37451- atomic_inc(&vcc->stats->rx);
37452+ atomic_inc_unchecked(&vcc->stats->rx);
37453 cell += ATM_CELL_PAYLOAD;
37454 }
37455
37456@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37457 if (iovb == NULL) {
37458 printk("nicstar%d: Out of iovec buffers.\n",
37459 card->index);
37460- atomic_inc(&vcc->stats->rx_drop);
37461+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37462 recycle_rx_buf(card, skb);
37463 return;
37464 }
37465@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37466 small or large buffer itself. */
37467 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
37468 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
37469- atomic_inc(&vcc->stats->rx_err);
37470+ atomic_inc_unchecked(&vcc->stats->rx_err);
37471 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37472 NS_MAX_IOVECS);
37473 NS_PRV_IOVCNT(iovb) = 0;
37474@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37475 ("nicstar%d: Expected a small buffer, and this is not one.\n",
37476 card->index);
37477 which_list(card, skb);
37478- atomic_inc(&vcc->stats->rx_err);
37479+ atomic_inc_unchecked(&vcc->stats->rx_err);
37480 recycle_rx_buf(card, skb);
37481 vc->rx_iov = NULL;
37482 recycle_iov_buf(card, iovb);
37483@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37484 ("nicstar%d: Expected a large buffer, and this is not one.\n",
37485 card->index);
37486 which_list(card, skb);
37487- atomic_inc(&vcc->stats->rx_err);
37488+ atomic_inc_unchecked(&vcc->stats->rx_err);
37489 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37490 NS_PRV_IOVCNT(iovb));
37491 vc->rx_iov = NULL;
37492@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37493 printk(" - PDU size mismatch.\n");
37494 else
37495 printk(".\n");
37496- atomic_inc(&vcc->stats->rx_err);
37497+ atomic_inc_unchecked(&vcc->stats->rx_err);
37498 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
37499 NS_PRV_IOVCNT(iovb));
37500 vc->rx_iov = NULL;
37501@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37502 /* skb points to a small buffer */
37503 if (!atm_charge(vcc, skb->truesize)) {
37504 push_rxbufs(card, skb);
37505- atomic_inc(&vcc->stats->rx_drop);
37506+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37507 } else {
37508 skb_put(skb, len);
37509 dequeue_sm_buf(card, skb);
37510@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37511 ATM_SKB(skb)->vcc = vcc;
37512 __net_timestamp(skb);
37513 vcc->push(vcc, skb);
37514- atomic_inc(&vcc->stats->rx);
37515+ atomic_inc_unchecked(&vcc->stats->rx);
37516 }
37517 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
37518 struct sk_buff *sb;
37519@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37520 if (len <= NS_SMBUFSIZE) {
37521 if (!atm_charge(vcc, sb->truesize)) {
37522 push_rxbufs(card, sb);
37523- atomic_inc(&vcc->stats->rx_drop);
37524+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37525 } else {
37526 skb_put(sb, len);
37527 dequeue_sm_buf(card, sb);
37528@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37529 ATM_SKB(sb)->vcc = vcc;
37530 __net_timestamp(sb);
37531 vcc->push(vcc, sb);
37532- atomic_inc(&vcc->stats->rx);
37533+ atomic_inc_unchecked(&vcc->stats->rx);
37534 }
37535
37536 push_rxbufs(card, skb);
37537@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37538
37539 if (!atm_charge(vcc, skb->truesize)) {
37540 push_rxbufs(card, skb);
37541- atomic_inc(&vcc->stats->rx_drop);
37542+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37543 } else {
37544 dequeue_lg_buf(card, skb);
37545 #ifdef NS_USE_DESTRUCTORS
37546@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37547 ATM_SKB(skb)->vcc = vcc;
37548 __net_timestamp(skb);
37549 vcc->push(vcc, skb);
37550- atomic_inc(&vcc->stats->rx);
37551+ atomic_inc_unchecked(&vcc->stats->rx);
37552 }
37553
37554 push_rxbufs(card, sb);
37555@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37556 printk
37557 ("nicstar%d: Out of huge buffers.\n",
37558 card->index);
37559- atomic_inc(&vcc->stats->rx_drop);
37560+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37561 recycle_iovec_rx_bufs(card,
37562 (struct iovec *)
37563 iovb->data,
37564@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37565 card->hbpool.count++;
37566 } else
37567 dev_kfree_skb_any(hb);
37568- atomic_inc(&vcc->stats->rx_drop);
37569+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37570 } else {
37571 /* Copy the small buffer to the huge buffer */
37572 sb = (struct sk_buff *)iov->iov_base;
37573@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
37574 #endif /* NS_USE_DESTRUCTORS */
37575 __net_timestamp(hb);
37576 vcc->push(vcc, hb);
37577- atomic_inc(&vcc->stats->rx);
37578+ atomic_inc_unchecked(&vcc->stats->rx);
37579 }
37580 }
37581
37582diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
37583index 21b0bc6..b5f40ba 100644
37584--- a/drivers/atm/solos-pci.c
37585+++ b/drivers/atm/solos-pci.c
37586@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
37587 }
37588 atm_charge(vcc, skb->truesize);
37589 vcc->push(vcc, skb);
37590- atomic_inc(&vcc->stats->rx);
37591+ atomic_inc_unchecked(&vcc->stats->rx);
37592 break;
37593
37594 case PKT_STATUS:
37595@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
37596 vcc = SKB_CB(oldskb)->vcc;
37597
37598 if (vcc) {
37599- atomic_inc(&vcc->stats->tx);
37600+ atomic_inc_unchecked(&vcc->stats->tx);
37601 solos_pop(vcc, oldskb);
37602 } else {
37603 dev_kfree_skb_irq(oldskb);
37604diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
37605index 0215934..ce9f5b1 100644
37606--- a/drivers/atm/suni.c
37607+++ b/drivers/atm/suni.c
37608@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
37609
37610
37611 #define ADD_LIMITED(s,v) \
37612- atomic_add((v),&stats->s); \
37613- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
37614+ atomic_add_unchecked((v),&stats->s); \
37615+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
37616
37617
37618 static void suni_hz(unsigned long from_timer)
37619diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
37620index 5120a96..e2572bd 100644
37621--- a/drivers/atm/uPD98402.c
37622+++ b/drivers/atm/uPD98402.c
37623@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
37624 struct sonet_stats tmp;
37625 int error = 0;
37626
37627- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37628+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
37629 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
37630 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
37631 if (zero && !error) {
37632@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
37633
37634
37635 #define ADD_LIMITED(s,v) \
37636- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
37637- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
37638- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37639+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
37640+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
37641+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
37642
37643
37644 static void stat_event(struct atm_dev *dev)
37645@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
37646 if (reason & uPD98402_INT_PFM) stat_event(dev);
37647 if (reason & uPD98402_INT_PCO) {
37648 (void) GET(PCOCR); /* clear interrupt cause */
37649- atomic_add(GET(HECCT),
37650+ atomic_add_unchecked(GET(HECCT),
37651 &PRIV(dev)->sonet_stats.uncorr_hcs);
37652 }
37653 if ((reason & uPD98402_INT_RFO) &&
37654@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
37655 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
37656 uPD98402_INT_LOS),PIMR); /* enable them */
37657 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
37658- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37659- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
37660- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
37661+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
37662+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
37663+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
37664 return 0;
37665 }
37666
37667diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
37668index 969c3c2..9b72956 100644
37669--- a/drivers/atm/zatm.c
37670+++ b/drivers/atm/zatm.c
37671@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37672 }
37673 if (!size) {
37674 dev_kfree_skb_irq(skb);
37675- if (vcc) atomic_inc(&vcc->stats->rx_err);
37676+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
37677 continue;
37678 }
37679 if (!atm_charge(vcc,skb->truesize)) {
37680@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
37681 skb->len = size;
37682 ATM_SKB(skb)->vcc = vcc;
37683 vcc->push(vcc,skb);
37684- atomic_inc(&vcc->stats->rx);
37685+ atomic_inc_unchecked(&vcc->stats->rx);
37686 }
37687 zout(pos & 0xffff,MTA(mbx));
37688 #if 0 /* probably a stupid idea */
37689@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
37690 skb_queue_head(&zatm_vcc->backlog,skb);
37691 break;
37692 }
37693- atomic_inc(&vcc->stats->tx);
37694+ atomic_inc_unchecked(&vcc->stats->tx);
37695 wake_up(&zatm_vcc->tx_wait);
37696 }
37697
37698diff --git a/drivers/base/bus.c b/drivers/base/bus.c
37699index 876bae5..8978785 100644
37700--- a/drivers/base/bus.c
37701+++ b/drivers/base/bus.c
37702@@ -1126,7 +1126,7 @@ int subsys_interface_register(struct subsys_interface *sif)
37703 return -EINVAL;
37704
37705 mutex_lock(&subsys->p->mutex);
37706- list_add_tail(&sif->node, &subsys->p->interfaces);
37707+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
37708 if (sif->add_dev) {
37709 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37710 while ((dev = subsys_dev_iter_next(&iter)))
37711@@ -1151,7 +1151,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
37712 subsys = sif->subsys;
37713
37714 mutex_lock(&subsys->p->mutex);
37715- list_del_init(&sif->node);
37716+ pax_list_del_init((struct list_head *)&sif->node);
37717 if (sif->remove_dev) {
37718 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
37719 while ((dev = subsys_dev_iter_next(&iter)))
37720diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
37721index 25798db..15f130e 100644
37722--- a/drivers/base/devtmpfs.c
37723+++ b/drivers/base/devtmpfs.c
37724@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
37725 if (!thread)
37726 return 0;
37727
37728- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
37729+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
37730 if (err)
37731 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
37732 else
37733@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
37734 *err = sys_unshare(CLONE_NEWNS);
37735 if (*err)
37736 goto out;
37737- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
37738+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
37739 if (*err)
37740 goto out;
37741- sys_chdir("/.."); /* will traverse into overmounted root */
37742- sys_chroot(".");
37743+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
37744+ sys_chroot((char __force_user *)".");
37745 complete(&setup_done);
37746 while (1) {
37747 spin_lock(&req_lock);
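
devtmpfs issues the mount/chdir/chroot syscalls from kernel context with kernel pointers, while the prototypes take __user pointers. Elsewhere this patch makes __user a real sparse address space (the checking side of PAX_MEMORY_UDEREF), so each such crossing needs an explicit __force_user cast to record that it is intentional. A sketch of the annotation machinery in conventional sparse style -- the macro bodies are assumptions, and sys_mount() is only declared, so this is for compile/sparse checking rather than running:

	#ifdef __CHECKER__
	# define __user		__attribute__((noderef, address_space(1)))
	# define __force_user	__attribute__((force)) __user
	#else
	# define __user
	# define __force_user
	#endif

	long sys_mount(char __user *dev, char __user *dir, char __user *type,
		       unsigned long flags, void __user *data);

	static long mount_devtmpfs(char *mntdir)
	{
		/* kernel constants handed to a __user-typed syscall: cast once,
		 * visibly, instead of silencing sparse for the whole file */
		return sys_mount((char __force_user *)"devtmpfs",
				 (char __force_user *)mntdir,
				 (char __force_user *)"devtmpfs", 0, NULL);
	}
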
37748diff --git a/drivers/base/node.c b/drivers/base/node.c
37749index a3b82e9..f90a8ce 100644
37750--- a/drivers/base/node.c
37751+++ b/drivers/base/node.c
37752@@ -614,7 +614,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
37753 struct node_attr {
37754 struct device_attribute attr;
37755 enum node_states state;
37756-};
37757+} __do_const;
37758
37759 static ssize_t show_node_state(struct device *dev,
37760 struct device_attribute *attr, char *buf)
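
__do_const is consumed by PaX's constify gcc plugin: a structure so tagged is treated as const in every instance, which pushes handler tables like node_attr into read-only memory without touching each definition. Without the plugin the effect can only be approximated by marking instances const by hand, which is all this fragment does (the layout is abbreviated and the values illustrative):

	struct node_attr {
		long (*show)(char *buf);	/* stand-in for the sysfs show hook */
		int  state;
	};

	/* roughly what the plugin arranges implicitly for a __do_const type:
	 * every instance lands in .rodata */
	static const struct node_attr node_state_attrs[] = {
		{ .show = 0, .state = 0 },
		{ .show = 0, .state = 1 },
	};
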
37761diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
37762index 0d8780c..0b5df3f 100644
37763--- a/drivers/base/power/domain.c
37764+++ b/drivers/base/power/domain.c
37765@@ -1725,7 +1725,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
37766 {
37767 struct cpuidle_driver *cpuidle_drv;
37768 struct gpd_cpuidle_data *cpuidle_data;
37769- struct cpuidle_state *idle_state;
37770+ cpuidle_state_no_const *idle_state;
37771 int ret = 0;
37772
37773 if (IS_ERR_OR_NULL(genpd) || state < 0)
37774@@ -1793,7 +1793,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
37775 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
37776 {
37777 struct gpd_cpuidle_data *cpuidle_data;
37778- struct cpuidle_state *idle_state;
37779+ cpuidle_state_no_const *idle_state;
37780 int ret = 0;
37781
37782 if (IS_ERR_OR_NULL(genpd))
37783@@ -2222,7 +2222,10 @@ int genpd_dev_pm_attach(struct device *dev)
37784 return ret;
37785 }
37786
37787- dev->pm_domain->detach = genpd_dev_pm_detach;
37788+ pax_open_kernel();
37789+ *(void **)&dev->pm_domain->detach = genpd_dev_pm_detach;
37790+ pax_close_kernel();
37791+
37792 pm_genpd_poweron(pd);
37793
37794 return 0;
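
Two related PaX idioms meet in the domain.c hunks. cpuidle_state_no_const is the constify plugin's escape hatch: an alias for code that genuinely must write to a structure the plugin would otherwise force read-only. And the pax_open_kernel()/pax_close_kernel() pair brackets the one sanctioned store to a constified function pointer, with the *(void **)& cast defeating the compiler's const check for that single assignment. A userspace model of the open/close window, substituting mprotect() for what is, by the usual PaX description, a CR0.WP toggle on x86 -- an assumption, and deliberately crude:

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	struct pm_domain { void (*detach)(void); };

	static struct pm_domain *dom;

	static void pax_open_kernel(void)  { mprotect(dom, getpagesize(), PROT_READ | PROT_WRITE); }
	static void pax_close_kernel(void) { mprotect(dom, getpagesize(), PROT_READ); }

	static void genpd_detach(void) { puts("detached"); }

	int main(void)
	{
		dom = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (dom == MAP_FAILED)
			return 1;
		memset(dom, 0, sizeof(*dom));
		mprotect(dom, getpagesize(), PROT_READ);	/* "constified" */

		pax_open_kernel();
		dom->detach = genpd_detach;	/* the one sanctioned write */
		pax_close_kernel();

		dom->detach();
		return 0;
	}
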
37795diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
37796index d2be3f9..0a3167a 100644
37797--- a/drivers/base/power/sysfs.c
37798+++ b/drivers/base/power/sysfs.c
37799@@ -181,7 +181,7 @@ static ssize_t rtpm_status_show(struct device *dev,
37800 return -EIO;
37801 }
37802 }
37803- return sprintf(buf, p);
37804+ return sprintf(buf, "%s", p);
37805 }
37806
37807 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
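
The sysfs.c hunk is a plain format-string hardening fix rather than a PaX annotation: sprintf(buf, p) parses any '%' in p as a conversion directive, so the patch pins the format to "%s" and demotes p to data. The failure mode, reduced to a couple of lines:

	#include <stdio.h>

	int main(void)
	{
		const char *p = "suspended 100%s";	/* a '%' that is just text */

		/* printf(p);       -- would fetch a bogus char * vararg here */
		printf("%s\n", p);	/* the patched form: p is data, not format */
		return 0;
	}
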
37808diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
37809index c2744b3..08fac19 100644
37810--- a/drivers/base/power/wakeup.c
37811+++ b/drivers/base/power/wakeup.c
37812@@ -32,14 +32,14 @@ static bool pm_abort_suspend __read_mostly;
37813 * They need to be modified together atomically, so it's better to use one
37814 * atomic variable to hold them both.
37815 */
37816-static atomic_t combined_event_count = ATOMIC_INIT(0);
37817+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
37818
37819 #define IN_PROGRESS_BITS (sizeof(int) * 4)
37820 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
37821
37822 static void split_counters(unsigned int *cnt, unsigned int *inpr)
37823 {
37824- unsigned int comb = atomic_read(&combined_event_count);
37825+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
37826
37827 *cnt = (comb >> IN_PROGRESS_BITS);
37828 *inpr = comb & MAX_IN_PROGRESS;
37829@@ -404,7 +404,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
37830 ws->start_prevent_time = ws->last_time;
37831
37832 /* Increment the counter of events in progress. */
37833- cec = atomic_inc_return(&combined_event_count);
37834+ cec = atomic_inc_return_unchecked(&combined_event_count);
37835
37836 trace_wakeup_source_activate(ws->name, cec);
37837 }
37838@@ -530,7 +530,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
37839 * Increment the counter of registered wakeup events and decrement the
37840 * couter of wakeup events in progress simultaneously.
37841 */
37842- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
37843+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
37844 trace_wakeup_source_deactivate(ws->name, cec);
37845
37846 split_counters(&cnt, &inpr);
37847diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
37848index 8d98a32..61d3165 100644
37849--- a/drivers/base/syscore.c
37850+++ b/drivers/base/syscore.c
37851@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
37852 void register_syscore_ops(struct syscore_ops *ops)
37853 {
37854 mutex_lock(&syscore_ops_lock);
37855- list_add_tail(&ops->node, &syscore_ops_list);
37856+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
37857 mutex_unlock(&syscore_ops_lock);
37858 }
37859 EXPORT_SYMBOL_GPL(register_syscore_ops);
37860@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
37861 void unregister_syscore_ops(struct syscore_ops *ops)
37862 {
37863 mutex_lock(&syscore_ops_lock);
37864- list_del(&ops->node);
37865+ pax_list_del((struct list_head *)&ops->node);
37866 mutex_unlock(&syscore_ops_lock);
37867 }
37868 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
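
register_syscore_ops()/unregister_syscore_ops() -- like subsys_interface_register() above -- link a list_head that is embedded in a structure the constify plugin has made read-only, so the stock list primitives are swapped for pax_list_* variants, and the (struct list_head *) casts shed the const the hardened type acquired. The variants are presumably the ordinary operations inside the write-protection window sketched earlier; a paraphrase under that assumption, with stub toggles and a minimal list so it stands alone:

	#include <stddef.h>

	struct list_head { struct list_head *next, *prev; };

	static void list_add_tail(struct list_head *new, struct list_head *head)
	{
		new->prev = head->prev;
		new->next = head;
		head->prev->next = new;
		head->prev = new;
	}

	static void list_del(struct list_head *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
		entry->next = entry->prev = NULL;	/* the kernel poisons instead */
	}

	/* stubs standing in for the real write-protection toggles */
	static void pax_open_kernel(void)  { }
	static void pax_close_kernel(void) { }

	static void pax_list_add_tail(struct list_head *new, struct list_head *head)
	{
		pax_open_kernel();
		list_add_tail(new, head);
		pax_close_kernel();
	}

	static void pax_list_del(struct list_head *entry)
	{
		pax_open_kernel();
		list_del(entry);
		pax_close_kernel();
	}

	int main(void)
	{
		struct list_head ops_list = { &ops_list, &ops_list }, node;

		pax_list_add_tail(&node, &ops_list);
		pax_list_del(&node);
		return 0;
	}
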
37869diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
37870index ff20f19..018f1da 100644
37871--- a/drivers/block/cciss.c
37872+++ b/drivers/block/cciss.c
37873@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
37874 while (!list_empty(&h->reqQ)) {
37875 c = list_entry(h->reqQ.next, CommandList_struct, list);
37876 /* can't do anything if fifo is full */
37877- if ((h->access.fifo_full(h))) {
37878+ if ((h->access->fifo_full(h))) {
37879 dev_warn(&h->pdev->dev, "fifo full\n");
37880 break;
37881 }
37882@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
37883 h->Qdepth--;
37884
37885 /* Tell the controller execute command */
37886- h->access.submit_command(h, c);
37887+ h->access->submit_command(h, c);
37888
37889 /* Put job onto the completed Q */
37890 addQ(&h->cmpQ, c);
37891@@ -3444,17 +3444,17 @@ startio:
37892
37893 static inline unsigned long get_next_completion(ctlr_info_t *h)
37894 {
37895- return h->access.command_completed(h);
37896+ return h->access->command_completed(h);
37897 }
37898
37899 static inline int interrupt_pending(ctlr_info_t *h)
37900 {
37901- return h->access.intr_pending(h);
37902+ return h->access->intr_pending(h);
37903 }
37904
37905 static inline long interrupt_not_for_us(ctlr_info_t *h)
37906 {
37907- return ((h->access.intr_pending(h) == 0) ||
37908+ return ((h->access->intr_pending(h) == 0) ||
37909 (h->interrupts_enabled == 0));
37910 }
37911
37912@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
37913 u32 a;
37914
37915 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
37916- return h->access.command_completed(h);
37917+ return h->access->command_completed(h);
37918
37919 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
37920 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
37921@@ -4044,7 +4044,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
37922 trans_support & CFGTBL_Trans_use_short_tags);
37923
37924 /* Change the access methods to the performant access methods */
37925- h->access = SA5_performant_access;
37926+ h->access = &SA5_performant_access;
37927 h->transMethod = CFGTBL_Trans_Performant;
37928
37929 return;
37930@@ -4318,7 +4318,7 @@ static int cciss_pci_init(ctlr_info_t *h)
37931 if (prod_index < 0)
37932 return -ENODEV;
37933 h->product_name = products[prod_index].product_name;
37934- h->access = *(products[prod_index].access);
37935+ h->access = products[prod_index].access;
37936
37937 if (cciss_board_disabled(h)) {
37938 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
37939@@ -5050,7 +5050,7 @@ reinit_after_soft_reset:
37940 }
37941
37942 /* make sure the board interrupts are off */
37943- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37944+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37945 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
37946 if (rc)
37947 goto clean2;
37948@@ -5100,7 +5100,7 @@ reinit_after_soft_reset:
37949 * fake ones to scoop up any residual completions.
37950 */
37951 spin_lock_irqsave(&h->lock, flags);
37952- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37953+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37954 spin_unlock_irqrestore(&h->lock, flags);
37955 free_irq(h->intr[h->intr_mode], h);
37956 rc = cciss_request_irq(h, cciss_msix_discard_completions,
37957@@ -5120,9 +5120,9 @@ reinit_after_soft_reset:
37958 dev_info(&h->pdev->dev, "Board READY.\n");
37959 dev_info(&h->pdev->dev,
37960 "Waiting for stale completions to drain.\n");
37961- h->access.set_intr_mask(h, CCISS_INTR_ON);
37962+ h->access->set_intr_mask(h, CCISS_INTR_ON);
37963 msleep(10000);
37964- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37965+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37966
37967 rc = controller_reset_failed(h->cfgtable);
37968 if (rc)
37969@@ -5145,7 +5145,7 @@ reinit_after_soft_reset:
37970 cciss_scsi_setup(h);
37971
37972 /* Turn the interrupts on so we can service requests */
37973- h->access.set_intr_mask(h, CCISS_INTR_ON);
37974+ h->access->set_intr_mask(h, CCISS_INTR_ON);
37975
37976 /* Get the firmware version */
37977 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
37978@@ -5217,7 +5217,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
37979 kfree(flush_buf);
37980 if (return_code != IO_OK)
37981 dev_warn(&h->pdev->dev, "Error flushing cache\n");
37982- h->access.set_intr_mask(h, CCISS_INTR_OFF);
37983+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
37984 free_irq(h->intr[h->intr_mode], h);
37985 }
37986
37987diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
37988index 7fda30e..2f27946 100644
37989--- a/drivers/block/cciss.h
37990+++ b/drivers/block/cciss.h
37991@@ -101,7 +101,7 @@ struct ctlr_info
37992 /* information about each logical volume */
37993 drive_info_struct *drv[CISS_MAX_LUN];
37994
37995- struct access_method access;
37996+ struct access_method *access;
37997
37998 /* queue and queue Info */
37999 struct list_head reqQ;
38000@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
38001 }
38002
38003 static struct access_method SA5_access = {
38004- SA5_submit_command,
38005- SA5_intr_mask,
38006- SA5_fifo_full,
38007- SA5_intr_pending,
38008- SA5_completed,
38009+ .submit_command = SA5_submit_command,
38010+ .set_intr_mask = SA5_intr_mask,
38011+ .fifo_full = SA5_fifo_full,
38012+ .intr_pending = SA5_intr_pending,
38013+ .command_completed = SA5_completed,
38014 };
38015
38016 static struct access_method SA5B_access = {
38017- SA5_submit_command,
38018- SA5B_intr_mask,
38019- SA5_fifo_full,
38020- SA5B_intr_pending,
38021- SA5_completed,
38022+ .submit_command = SA5_submit_command,
38023+ .set_intr_mask = SA5B_intr_mask,
38024+ .fifo_full = SA5_fifo_full,
38025+ .intr_pending = SA5B_intr_pending,
38026+ .command_completed = SA5_completed,
38027 };
38028
38029 static struct access_method SA5_performant_access = {
38030- SA5_submit_command,
38031- SA5_performant_intr_mask,
38032- SA5_fifo_full,
38033- SA5_performant_intr_pending,
38034- SA5_performant_completed,
38035+ .submit_command = SA5_submit_command,
38036+ .set_intr_mask = SA5_performant_intr_mask,
38037+ .fifo_full = SA5_fifo_full,
38038+ .intr_pending = SA5_performant_intr_pending,
38039+ .command_completed = SA5_performant_completed,
38040 };
38041
38042 struct board_type {
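
The cciss change (and, next, the identical cpqarray one) is a constification enabler: the controller used to hold a by-value copy of its access_method, which made every instance writable; storing a pointer lets the three SA5_* tables be shared and, together with the designated initializers, live in read-only memory. Every h->access.fn(...) call site then becomes h->access->fn(...). A compact model of the before/after, with an assumed, abbreviated struct layout:

	#include <stdio.h>

	struct ctlr_info;

	struct access_method {
		void (*submit_command)(struct ctlr_info *h);
		unsigned long (*command_completed)(struct ctlr_info *h);
	};

	struct ctlr_info {
		const struct access_method *access;	/* was: struct access_method access */
	};

	static void sa5_submit(struct ctlr_info *h) { (void)h; puts("submit"); }
	static unsigned long sa5_completed(struct ctlr_info *h) { (void)h; return 0; }

	/* designated initializers as in the patched tables; const lets the
	 * compiler place the shared table in .rodata */
	static const struct access_method SA5_access = {
		.submit_command	   = sa5_submit,
		.command_completed = sa5_completed,
	};

	int main(void)
	{
		struct ctlr_info h = { .access = &SA5_access };

		h.access->submit_command(&h);	/* was: h.access.submit_command(&h) */
		return 0;
	}
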
38043diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
38044index 2b94403..fd6ad1f 100644
38045--- a/drivers/block/cpqarray.c
38046+++ b/drivers/block/cpqarray.c
38047@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38048 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
38049 goto Enomem4;
38050 }
38051- hba[i]->access.set_intr_mask(hba[i], 0);
38052+ hba[i]->access->set_intr_mask(hba[i], 0);
38053 if (request_irq(hba[i]->intr, do_ida_intr,
38054 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
38055 {
38056@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38057 add_timer(&hba[i]->timer);
38058
38059 /* Enable IRQ now that spinlock and rate limit timer are set up */
38060- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38061+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38062
38063 for(j=0; j<NWD; j++) {
38064 struct gendisk *disk = ida_gendisk[i][j];
38065@@ -694,7 +694,7 @@ DBGINFO(
38066 for(i=0; i<NR_PRODUCTS; i++) {
38067 if (board_id == products[i].board_id) {
38068 c->product_name = products[i].product_name;
38069- c->access = *(products[i].access);
38070+ c->access = products[i].access;
38071 break;
38072 }
38073 }
38074@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
38075 hba[ctlr]->intr = intr;
38076 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
38077 hba[ctlr]->product_name = products[j].product_name;
38078- hba[ctlr]->access = *(products[j].access);
38079+ hba[ctlr]->access = products[j].access;
38080 hba[ctlr]->ctlr = ctlr;
38081 hba[ctlr]->board_id = board_id;
38082 hba[ctlr]->pci_dev = NULL; /* not PCI */
38083@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
38084
38085 while((c = h->reqQ) != NULL) {
38086 /* Can't do anything if we're busy */
38087- if (h->access.fifo_full(h) == 0)
38088+ if (h->access->fifo_full(h) == 0)
38089 return;
38090
38091 /* Get the first entry from the request Q */
38092@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
38093 h->Qdepth--;
38094
38095 /* Tell the controller to do our bidding */
38096- h->access.submit_command(h, c);
38097+ h->access->submit_command(h, c);
38098
38099 /* Get onto the completion Q */
38100 addQ(&h->cmpQ, c);
38101@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38102 unsigned long flags;
38103 __u32 a,a1;
38104
38105- istat = h->access.intr_pending(h);
38106+ istat = h->access->intr_pending(h);
38107 /* Is this interrupt for us? */
38108 if (istat == 0)
38109 return IRQ_NONE;
38110@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38111 */
38112 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
38113 if (istat & FIFO_NOT_EMPTY) {
38114- while((a = h->access.command_completed(h))) {
38115+ while((a = h->access->command_completed(h))) {
38116 a1 = a; a &= ~3;
38117 if ((c = h->cmpQ) == NULL)
38118 {
38119@@ -1448,11 +1448,11 @@ static int sendcmd(
38120 /*
38121 * Disable interrupt
38122 */
38123- info_p->access.set_intr_mask(info_p, 0);
38124+ info_p->access->set_intr_mask(info_p, 0);
38125 /* Make sure there is room in the command FIFO */
38126 /* Actually it should be completely empty at this time. */
38127 for (i = 200000; i > 0; i--) {
38128- temp = info_p->access.fifo_full(info_p);
38129+ temp = info_p->access->fifo_full(info_p);
38130 if (temp != 0) {
38131 break;
38132 }
38133@@ -1465,7 +1465,7 @@ DBG(
38134 /*
38135 * Send the cmd
38136 */
38137- info_p->access.submit_command(info_p, c);
38138+ info_p->access->submit_command(info_p, c);
38139 complete = pollcomplete(ctlr);
38140
38141 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
38142@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
38143 * we check the new geometry. Then turn interrupts back on when
38144 * we're done.
38145 */
38146- host->access.set_intr_mask(host, 0);
38147+ host->access->set_intr_mask(host, 0);
38148 getgeometry(ctlr);
38149- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
38150+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
38151
38152 for(i=0; i<NWD; i++) {
38153 struct gendisk *disk = ida_gendisk[ctlr][i];
38154@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
38155 /* Wait (up to 2 seconds) for a command to complete */
38156
38157 for (i = 200000; i > 0; i--) {
38158- done = hba[ctlr]->access.command_completed(hba[ctlr]);
38159+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
38160 if (done == 0) {
38161 udelay(10); /* a short fixed delay */
38162 } else
38163diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
38164index be73e9d..7fbf140 100644
38165--- a/drivers/block/cpqarray.h
38166+++ b/drivers/block/cpqarray.h
38167@@ -99,7 +99,7 @@ struct ctlr_info {
38168 drv_info_t drv[NWD];
38169 struct proc_dir_entry *proc;
38170
38171- struct access_method access;
38172+ struct access_method *access;
38173
38174 cmdlist_t *reqQ;
38175 cmdlist_t *cmpQ;
38176diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
38177index 434c77d..6d3219a 100644
38178--- a/drivers/block/drbd/drbd_bitmap.c
38179+++ b/drivers/block/drbd/drbd_bitmap.c
38180@@ -1036,7 +1036,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
38181 submit_bio(rw, bio);
38182 /* this should not count as user activity and cause the
38183 * resync to throttle -- see drbd_rs_should_slow_down(). */
38184- atomic_add(len >> 9, &device->rs_sect_ev);
38185+ atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
38186 }
38187 }
38188
38189diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
38190index b905e98..0812ed8 100644
38191--- a/drivers/block/drbd/drbd_int.h
38192+++ b/drivers/block/drbd/drbd_int.h
38193@@ -385,7 +385,7 @@ struct drbd_epoch {
38194 struct drbd_connection *connection;
38195 struct list_head list;
38196 unsigned int barrier_nr;
38197- atomic_t epoch_size; /* increased on every request added. */
38198+ atomic_unchecked_t epoch_size; /* increased on every request added. */
38199 atomic_t active; /* increased on every req. added, and dec on every finished. */
38200 unsigned long flags;
38201 };
38202@@ -946,7 +946,7 @@ struct drbd_device {
38203 unsigned int al_tr_number;
38204 int al_tr_cycle;
38205 wait_queue_head_t seq_wait;
38206- atomic_t packet_seq;
38207+ atomic_unchecked_t packet_seq;
38208 unsigned int peer_seq;
38209 spinlock_t peer_seq_lock;
38210 unsigned long comm_bm_set; /* communicated number of set bits. */
38211@@ -955,8 +955,8 @@ struct drbd_device {
38212 struct mutex own_state_mutex;
38213 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
38214 char congestion_reason; /* Why we where congested... */
38215- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38216- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
38217+ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38218+ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
38219 int rs_last_sect_ev; /* counter to compare with */
38220 int rs_last_events; /* counter of read or write "events" (unit sectors)
38221 * on the lower level device when we last looked. */
38222diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
38223index 1fc8342..7e7742b 100644
38224--- a/drivers/block/drbd/drbd_main.c
38225+++ b/drivers/block/drbd/drbd_main.c
38226@@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
38227 p->sector = sector;
38228 p->block_id = block_id;
38229 p->blksize = blksize;
38230- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
38231+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
38232 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
38233 }
38234
38235@@ -1634,7 +1634,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
38236 return -EIO;
38237 p->sector = cpu_to_be64(req->i.sector);
38238 p->block_id = (unsigned long)req;
38239- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
38240+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
38241 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
38242 if (device->state.conn >= C_SYNC_SOURCE &&
38243 device->state.conn <= C_PAUSED_SYNC_T)
38244@@ -1915,8 +1915,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
38245 atomic_set(&device->unacked_cnt, 0);
38246 atomic_set(&device->local_cnt, 0);
38247 atomic_set(&device->pp_in_use_by_net, 0);
38248- atomic_set(&device->rs_sect_in, 0);
38249- atomic_set(&device->rs_sect_ev, 0);
38250+ atomic_set_unchecked(&device->rs_sect_in, 0);
38251+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38252 atomic_set(&device->ap_in_flight, 0);
38253 atomic_set(&device->md_io.in_use, 0);
38254
38255@@ -2684,8 +2684,8 @@ void drbd_destroy_connection(struct kref *kref)
38256 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
38257 struct drbd_resource *resource = connection->resource;
38258
38259- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
38260- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
38261+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
38262+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
38263 kfree(connection->current_epoch);
38264
38265 idr_destroy(&connection->peer_devices);
38266diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
38267index 74df8cf..e41fc24 100644
38268--- a/drivers/block/drbd/drbd_nl.c
38269+++ b/drivers/block/drbd/drbd_nl.c
38270@@ -3637,13 +3637,13 @@ finish:
38271
38272 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
38273 {
38274- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38275+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38276 struct sk_buff *msg;
38277 struct drbd_genlmsghdr *d_out;
38278 unsigned seq;
38279 int err = -ENOMEM;
38280
38281- seq = atomic_inc_return(&drbd_genl_seq);
38282+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
38283 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
38284 if (!msg)
38285 goto failed;
38286diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
38287index d169b4a..481463f 100644
38288--- a/drivers/block/drbd/drbd_receiver.c
38289+++ b/drivers/block/drbd/drbd_receiver.c
38290@@ -870,7 +870,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
38291 struct drbd_device *device = peer_device->device;
38292 int err;
38293
38294- atomic_set(&device->packet_seq, 0);
38295+ atomic_set_unchecked(&device->packet_seq, 0);
38296 device->peer_seq = 0;
38297
38298 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
38299@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38300 do {
38301 next_epoch = NULL;
38302
38303- epoch_size = atomic_read(&epoch->epoch_size);
38304+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
38305
38306 switch (ev & ~EV_CLEANUP) {
38307 case EV_PUT:
38308@@ -1273,7 +1273,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
38309 rv = FE_DESTROYED;
38310 } else {
38311 epoch->flags = 0;
38312- atomic_set(&epoch->epoch_size, 0);
38313+ atomic_set_unchecked(&epoch->epoch_size, 0);
38314 /* atomic_set(&epoch->active, 0); is already zero */
38315 if (rv == FE_STILL_LIVE)
38316 rv = FE_RECYCLED;
38317@@ -1550,7 +1550,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38318 conn_wait_active_ee_empty(connection);
38319 drbd_flush(connection);
38320
38321- if (atomic_read(&connection->current_epoch->epoch_size)) {
38322+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38323 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
38324 if (epoch)
38325 break;
38326@@ -1564,11 +1564,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
38327 }
38328
38329 epoch->flags = 0;
38330- atomic_set(&epoch->epoch_size, 0);
38331+ atomic_set_unchecked(&epoch->epoch_size, 0);
38332 atomic_set(&epoch->active, 0);
38333
38334 spin_lock(&connection->epoch_lock);
38335- if (atomic_read(&connection->current_epoch->epoch_size)) {
38336+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
38337 list_add(&epoch->list, &connection->current_epoch->list);
38338 connection->current_epoch = epoch;
38339 connection->epochs++;
38340@@ -1802,7 +1802,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
38341 list_add_tail(&peer_req->w.list, &device->sync_ee);
38342 spin_unlock_irq(&device->resource->req_lock);
38343
38344- atomic_add(pi->size >> 9, &device->rs_sect_ev);
38345+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
38346 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
38347 return 0;
38348
38349@@ -1900,7 +1900,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
38350 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38351 }
38352
38353- atomic_add(pi->size >> 9, &device->rs_sect_in);
38354+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
38355
38356 return err;
38357 }
38358@@ -2290,7 +2290,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38359
38360 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
38361 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
38362- atomic_inc(&connection->current_epoch->epoch_size);
38363+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
38364 err2 = drbd_drain_block(peer_device, pi->size);
38365 if (!err)
38366 err = err2;
38367@@ -2334,7 +2334,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
38368
38369 spin_lock(&connection->epoch_lock);
38370 peer_req->epoch = connection->current_epoch;
38371- atomic_inc(&peer_req->epoch->epoch_size);
38372+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
38373 atomic_inc(&peer_req->epoch->active);
38374 spin_unlock(&connection->epoch_lock);
38375
38376@@ -2479,7 +2479,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
38377
38378 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38379 (int)part_stat_read(&disk->part0, sectors[1]) -
38380- atomic_read(&device->rs_sect_ev);
38381+ atomic_read_unchecked(&device->rs_sect_ev);
38382
38383 if (atomic_read(&device->ap_actlog_cnt)
38384 || curr_events - device->rs_last_events > 64) {
38385@@ -2618,7 +2618,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38386 device->use_csums = true;
38387 } else if (pi->cmd == P_OV_REPLY) {
38388 /* track progress, we may need to throttle */
38389- atomic_add(size >> 9, &device->rs_sect_in);
38390+ atomic_add_unchecked(size >> 9, &device->rs_sect_in);
38391 peer_req->w.cb = w_e_end_ov_reply;
38392 dec_rs_pending(device);
38393 /* drbd_rs_begin_io done when we sent this request,
38394@@ -2691,7 +2691,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
38395 goto out_free_e;
38396
38397 submit_for_resync:
38398- atomic_add(size >> 9, &device->rs_sect_ev);
38399+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38400
38401 submit:
38402 update_receiver_timing_details(connection, drbd_submit_peer_request);
38403@@ -4564,7 +4564,7 @@ struct data_cmd {
38404 int expect_payload;
38405 size_t pkt_size;
38406 int (*fn)(struct drbd_connection *, struct packet_info *);
38407-};
38408+} __do_const;
38409
38410 static struct data_cmd drbd_cmd_handler[] = {
38411 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
38412@@ -4678,7 +4678,7 @@ static void conn_disconnect(struct drbd_connection *connection)
38413 if (!list_empty(&connection->current_epoch->list))
38414 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
38415 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
38416- atomic_set(&connection->current_epoch->epoch_size, 0);
38417+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
38418 connection->send.seen_any_write_yet = false;
38419
38420 drbd_info(connection, "Connection closed\n");
38421@@ -5182,7 +5182,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
38422 put_ldev(device);
38423 }
38424 dec_rs_pending(device);
38425- atomic_add(blksize >> 9, &device->rs_sect_in);
38426+ atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
38427
38428 return 0;
38429 }
38430@@ -5470,7 +5470,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
38431 struct asender_cmd {
38432 size_t pkt_size;
38433 int (*fn)(struct drbd_connection *connection, struct packet_info *);
38434-};
38435+} __do_const;
38436
38437 static struct asender_cmd asender_tbl[] = {
38438 [P_PING] = { 0, got_Ping },
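
Note: the drbd hunks above show two transforms that recur throughout this patch. Statistics counters move from atomic_t to atomic_unchecked_t, opting them out of the PaX REFCOUNT overflow trap (they may legitimately wrap), and function-pointer dispatch tables such as struct data_cmd and struct asender_cmd gain __do_const so the constify plugin can place them in read-only memory. Below is a minimal userspace sketch of the checked/unchecked split; the helper bodies are hypothetical stand-ins (the real check is emitted by the compiler plugin, and kernel atomicity is elided here):

#include <limits.h>
#include <stdio.h>

/* reduced stand-ins for the kernel types */
typedef struct { int counter; } atomic_t;
typedef struct { unsigned int counter; } atomic_unchecked_t; /* unsigned so
                                               the wrap below is defined C */

static void atomic_inc(atomic_t *v)
{
    if (v->counter == INT_MAX) {          /* rough shape of the plugin trap */
        fprintf(stderr, "refcount overflow trapped\n");
        return;                           /* the kernel kills the offender */
    }
    v->counter++;
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
    v->counter++;                         /* free-running stat, may wrap */
}

int main(void)
{
    atomic_t ref = { INT_MAX };
    atomic_unchecked_t stats = { UINT_MAX };

    atomic_inc(&ref);                     /* refused */
    atomic_inc_unchecked(&stats);         /* wraps to 0, by design */
    printf("ref=%d stats=%u\n", ref.counter, stats.counter);
    return 0;
}
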
38439diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
38440index d0fae55..4469096 100644
38441--- a/drivers/block/drbd/drbd_worker.c
38442+++ b/drivers/block/drbd/drbd_worker.c
38443@@ -408,7 +408,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
38444 list_add_tail(&peer_req->w.list, &device->read_ee);
38445 spin_unlock_irq(&device->resource->req_lock);
38446
38447- atomic_add(size >> 9, &device->rs_sect_ev);
38448+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
38449 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
38450 return 0;
38451
38452@@ -553,7 +553,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
38453 unsigned int sect_in; /* Number of sectors that came in since the last turn */
38454 int number, mxb;
38455
38456- sect_in = atomic_xchg(&device->rs_sect_in, 0);
38457+ sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
38458 device->rs_in_flight -= sect_in;
38459
38460 rcu_read_lock();
38461@@ -1595,8 +1595,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
38462 struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
38463 struct fifo_buffer *plan;
38464
38465- atomic_set(&device->rs_sect_in, 0);
38466- atomic_set(&device->rs_sect_ev, 0);
38467+ atomic_set_unchecked(&device->rs_sect_in, 0);
38468+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38469 device->rs_in_flight = 0;
38470 device->rs_last_events =
38471 (int)part_stat_read(&disk->part0, sectors[0]) +
38472diff --git a/drivers/block/loop.c b/drivers/block/loop.c
38473index 6cb1beb..bf490f7 100644
38474--- a/drivers/block/loop.c
38475+++ b/drivers/block/loop.c
38476@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
38477
38478 file_start_write(file);
38479 set_fs(get_ds());
38480- bw = file->f_op->write(file, buf, len, &pos);
38481+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
38482 set_fs(old_fs);
38483 file_end_write(file);
38484 if (likely(bw == len))
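
Note: the loop.c hunk passes a kernel buffer to an f_op->write() that expects a __user pointer. That is only legal inside the set_fs(get_ds()) window opened just above it, and the __force_user cast records the deliberate address-space crossing at the type level for sparse and for PaX's UDEREF/USERCOPY checking. A stubbed sketch of the annotation idea follows; the macros collapse to nothing outside sparse, and the helper names are hypothetical:

#include <string.h>

#ifdef __CHECKER__                        /* sparse */
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* stand-in for the real helper, which validates the __user range */
static unsigned long copy_from_user_stub(void *to, const void __user *from,
                                         unsigned long n)
{
    memcpy(to, (const void __force *)from, n);
    return 0;
}

/* a kernel buffer handed to a __user-typed parameter: only legal after
 * widening the address limit, and the cast says so in the types */
static unsigned long read_kernel_buf(void *dst, const char *kbuf,
                                     unsigned long n)
{
    return copy_from_user_stub(dst, (const char __force __user *)kbuf, n);
}

int main(void)
{
    char k[4] = "abc", d[4];
    return (int)read_kernel_buf(d, k, sizeof(k));
}
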
38485diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
38486index d826bf3..8eb406c 100644
38487--- a/drivers/block/nvme-core.c
38488+++ b/drivers/block/nvme-core.c
38489@@ -76,7 +76,6 @@ static LIST_HEAD(dev_list);
38490 static struct task_struct *nvme_thread;
38491 static struct workqueue_struct *nvme_workq;
38492 static wait_queue_head_t nvme_kthread_wait;
38493-static struct notifier_block nvme_nb;
38494
38495 static void nvme_reset_failed_dev(struct work_struct *ws);
38496 static int nvme_process_cq(struct nvme_queue *nvmeq);
38497@@ -2955,7 +2954,6 @@ static int __init nvme_init(void)
38498 static void __exit nvme_exit(void)
38499 {
38500 pci_unregister_driver(&nvme_driver);
38501- unregister_hotcpu_notifier(&nvme_nb);
38502 unregister_blkdev(nvme_major, "nvme");
38503 destroy_workqueue(nvme_workq);
38504 BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
38505diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
38506index 09e628da..7607aaa 100644
38507--- a/drivers/block/pktcdvd.c
38508+++ b/drivers/block/pktcdvd.c
38509@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
38510
38511 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
38512 {
38513- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
38514+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
38515 }
38516
38517 /*
38518@@ -1890,7 +1890,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
38519 return -EROFS;
38520 }
38521 pd->settings.fp = ti.fp;
38522- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
38523+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
38524
38525 if (ti.nwa_v) {
38526 pd->nwa = be32_to_cpu(ti.next_writable);
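
Note: in pktcdvd.c the mask literal becomes 1UL so the subtraction happens in unsigned long rather than 32-bit arithmetic before the sector_t cast; this appears aimed at the size_overflow plugin, which would otherwise flag the possible 32-bit wrap. A runnable illustration of the difference for a degenerate operand, assuming an LP64 target:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t size = 0;                  /* degenerate packet-size value */
    uint64_t a = (uint64_t)(size - 1);  /* wraps in 32 bits, then widens */
    uint64_t b = size - 1UL;            /* widened first on LP64 */

    printf("%llx vs %llx\n", (unsigned long long)a,
           (unsigned long long)b);      /* ffffffff vs ffffffffffffffff */
    return 0;
}
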
38527diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
38528index 8a86b62..f54c87e 100644
38529--- a/drivers/block/rbd.c
38530+++ b/drivers/block/rbd.c
38531@@ -63,7 +63,7 @@
38532 * If the counter is already at its maximum value returns
38533 * -EINVAL without updating it.
38534 */
38535-static int atomic_inc_return_safe(atomic_t *v)
38536+static int __intentional_overflow(-1) atomic_inc_return_safe(atomic_t *v)
38537 {
38538 unsigned int counter;
38539
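
Note: the rbd.c change annotates atomic_inc_return_safe() with __intentional_overflow(-1), telling the size_overflow plugin that any wrap inside this function is deliberate and handled (the function detects the overshoot, undoes the increment, and returns an error for the caller to check). A self-contained sketch with the annotation stubbed to nothing and -EINVAL written numerically:

#define __intentional_overflow(...)   /* consumed by the GCC plugin */
#include <limits.h>

/* mirrors the shape of rbd's helper: the increment may transiently
 * pass INT_MAX; that wrap is intentional and corrected */
static int __intentional_overflow(-1) inc_return_safe(unsigned int *v)
{
    unsigned int counter = ++*v;      /* unsigned: wrap is defined C */

    if (counter <= (unsigned int)INT_MAX)
        return (int)counter;
    --*v;                             /* undo the overshoot */
    return -22;                       /* -EINVAL, checked by the caller */
}

int main(void)
{
    unsigned int v = (unsigned int)INT_MAX;
    return inc_return_safe(&v) == -22 ? 0 : 1;
}
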
38540diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
38541index e5565fb..71be10b4 100644
38542--- a/drivers/block/smart1,2.h
38543+++ b/drivers/block/smart1,2.h
38544@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
38545 }
38546
38547 static struct access_method smart4_access = {
38548- smart4_submit_command,
38549- smart4_intr_mask,
38550- smart4_fifo_full,
38551- smart4_intr_pending,
38552- smart4_completed,
38553+ .submit_command = smart4_submit_command,
38554+ .set_intr_mask = smart4_intr_mask,
38555+ .fifo_full = smart4_fifo_full,
38556+ .intr_pending = smart4_intr_pending,
38557+ .command_completed = smart4_completed,
38558 };
38559
38560 /*
38561@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
38562 }
38563
38564 static struct access_method smart2_access = {
38565- smart2_submit_command,
38566- smart2_intr_mask,
38567- smart2_fifo_full,
38568- smart2_intr_pending,
38569- smart2_completed,
38570+ .submit_command = smart2_submit_command,
38571+ .set_intr_mask = smart2_intr_mask,
38572+ .fifo_full = smart2_fifo_full,
38573+ .intr_pending = smart2_intr_pending,
38574+ .command_completed = smart2_completed,
38575 };
38576
38577 /*
38578@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
38579 }
38580
38581 static struct access_method smart2e_access = {
38582- smart2e_submit_command,
38583- smart2e_intr_mask,
38584- smart2e_fifo_full,
38585- smart2e_intr_pending,
38586- smart2e_completed,
38587+ .submit_command = smart2e_submit_command,
38588+ .set_intr_mask = smart2e_intr_mask,
38589+ .fifo_full = smart2e_fifo_full,
38590+ .intr_pending = smart2e_intr_pending,
38591+ .command_completed = smart2e_completed,
38592 };
38593
38594 /*
38595@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
38596 }
38597
38598 static struct access_method smart1_access = {
38599- smart1_submit_command,
38600- smart1_intr_mask,
38601- smart1_fifo_full,
38602- smart1_intr_pending,
38603- smart1_completed,
38604+ .submit_command = smart1_submit_command,
38605+ .set_intr_mask = smart1_intr_mask,
38606+ .fifo_full = smart1_fifo_full,
38607+ .intr_pending = smart1_intr_pending,
38608+ .command_completed = smart1_completed,
38609 };
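
Note: the smart1,2.h hunks convert positional struct initializers to designated ones. Behaviour is identical, but the handlers no longer depend on field declaration order, and the constify plugin can recognize the ops tables. A compile-and-run sketch with a reduced stand-in struct (all names here are illustrative):

#include <stddef.h>

struct access_method {                 /* reduced stand-in */
    void (*submit_command)(void *h);
    void (*set_intr_mask)(void *h, unsigned long mask);
    unsigned long (*fifo_full)(void *h);
};

static void submit(void *h) { (void)h; }
static void mask_intr(void *h, unsigned long m) { (void)h; (void)m; }
static unsigned long full(void *h) { (void)h; return 0; }

/* binds by field name, so reordering the struct cannot silently
 * mispair handlers the way positional initializers would */
static const struct access_method demo_access = {
    .submit_command = submit,
    .set_intr_mask  = mask_intr,
    .fifo_full      = full,
};

int main(void)
{
    demo_access.submit_command(NULL);
    return (int)demo_access.fifo_full(NULL);
}
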
38610diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
38611index 55c135b..9f8d60c 100644
38612--- a/drivers/bluetooth/btwilink.c
38613+++ b/drivers/bluetooth/btwilink.c
38614@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
38615
38616 static int bt_ti_probe(struct platform_device *pdev)
38617 {
38618- static struct ti_st *hst;
38619+ struct ti_st *hst;
38620 struct hci_dev *hdev;
38621 int err;
38622
38623diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
38624index 5d28a45..a538f90 100644
38625--- a/drivers/cdrom/cdrom.c
38626+++ b/drivers/cdrom/cdrom.c
38627@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
38628 ENSURE(reset, CDC_RESET);
38629 ENSURE(generic_packet, CDC_GENERIC_PACKET);
38630 cdi->mc_flags = 0;
38631- cdo->n_minors = 0;
38632 cdi->options = CDO_USE_FFLAGS;
38633
38634 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
38635@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
38636 else
38637 cdi->cdda_method = CDDA_OLD;
38638
38639- if (!cdo->generic_packet)
38640- cdo->generic_packet = cdrom_dummy_generic_packet;
38641+ if (!cdo->generic_packet) {
38642+ pax_open_kernel();
38643+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
38644+ pax_close_kernel();
38645+ }
38646
38647 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
38648 mutex_lock(&cdrom_mutex);
38649@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
38650 if (cdi->exit)
38651 cdi->exit(cdi);
38652
38653- cdi->ops->n_minors--;
38654 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
38655 }
38656
38657@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
38658 */
38659 nr = nframes;
38660 do {
38661- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38662+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
38663 if (cgc.buffer)
38664 break;
38665
38666@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
38667 struct cdrom_device_info *cdi;
38668 int ret;
38669
38670- ret = scnprintf(info + *pos, max_size - *pos, header);
38671+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
38672 if (!ret)
38673 return 1;
38674
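
Note: three independent fixes in cdrom.c. The dummy generic_packet handler is installed through pax_open_kernel() because cdrom_device_ops is constified; the CD-DA buffer switches to kzalloc() so a short read cannot leak stale heap bytes; and cdrom_print_info() stops passing a variable as the format string. The last is the classic format-string bug, shown runnably below (the header value is made up for the demonstration):

#include <stdio.h>

int main(void)
{
    const char *header = "temp %x:";   /* data that happens to hold '%' */
    char buf[64];

    /* bad: snprintf(buf, sizeof(buf), header) would parse the '%x'
     * directive and read a vararg that was never passed */
    snprintf(buf, sizeof(buf), "%s", header);  /* good: data stays data */
    puts(buf);
    return 0;
}
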
38675diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
38676index 584bc31..e64a12c 100644
38677--- a/drivers/cdrom/gdrom.c
38678+++ b/drivers/cdrom/gdrom.c
38679@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
38680 .audio_ioctl = gdrom_audio_ioctl,
38681 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
38682 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
38683- .n_minors = 1,
38684 };
38685
38686 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
38687diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
38688index efefd12..4f1d494 100644
38689--- a/drivers/char/Kconfig
38690+++ b/drivers/char/Kconfig
38691@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
38692
38693 config DEVKMEM
38694 bool "/dev/kmem virtual device support"
38695- default y
38696+ default n
38697+ depends on !GRKERNSEC_KMEM
38698 help
38699 Say Y here if you want to support the /dev/kmem device. The
38700 /dev/kmem device is rarely used, but can be used for certain
38701@@ -577,6 +578,7 @@ config DEVPORT
38702 bool
38703 depends on !M68K
38704 depends on ISA || PCI
38705+ depends on !GRKERNSEC_KMEM
38706 default y
38707
38708 source "drivers/s390/char/Kconfig"
38709diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
38710index a48e05b..6bac831 100644
38711--- a/drivers/char/agp/compat_ioctl.c
38712+++ b/drivers/char/agp/compat_ioctl.c
38713@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
38714 return -ENOMEM;
38715 }
38716
38717- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
38718+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
38719 sizeof(*usegment) * ureserve.seg_count)) {
38720 kfree(usegment);
38721 kfree(ksegment);
38722diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
38723index 09f17eb..8531d2f 100644
38724--- a/drivers/char/agp/frontend.c
38725+++ b/drivers/char/agp/frontend.c
38726@@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38727 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
38728 return -EFAULT;
38729
38730- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
38731+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
38732 return -EFAULT;
38733
38734 client = agp_find_client_by_pid(reserve.pid);
38735@@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
38736 if (segment == NULL)
38737 return -ENOMEM;
38738
38739- if (copy_from_user(segment, (void __user *) reserve.seg_list,
38740+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
38741 sizeof(struct agp_segment) * reserve.seg_count)) {
38742 kfree(segment);
38743 return -EFAULT;
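
Note: the agp frontend bound on seg_count is corrected to divide by sizeof(struct agp_segment_priv): further down the call chain the kernel allocates seg_count elements of the larger priv structure, so bounding by the smaller user-visible struct still let that multiplication overflow. Illustration with reduced stand-in structs (field layouts are invented for the demo):

#include <stdint.h>
#include <stdio.h>

struct seg_user { uint32_t pg_start, pg_count, prot; }; /* user-visible */
struct seg_priv { uint32_t pg_start, pg_count, prot;    /* what is really */
                  void *pages[4]; };                    /* allocated      */

int main(void)
{
    /* just under the old "~0U / sizeof(user struct)" bound... */
    uint32_t count = ~0U / sizeof(struct seg_user) - 1;

    /* ...but the allocator is handed the larger priv element, and on a
     * 32-bit size_t this product wraps to a small allocation */
    uint64_t bytes = (uint64_t)count * sizeof(struct seg_priv);

    printf("count=%u needs %llu bytes\n",
           count, (unsigned long long)bytes);
    return 0;
}
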
38744diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
38745index 4f94375..413694e 100644
38746--- a/drivers/char/genrtc.c
38747+++ b/drivers/char/genrtc.c
38748@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
38749 switch (cmd) {
38750
38751 case RTC_PLL_GET:
38752+ memset(&pll, 0, sizeof(pll));
38753 if (get_rtc_pll(&pll))
38754 return -EINVAL;
38755 else
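
Note: genrtc.c zeroes the rtc_pll_info before asking the driver to fill it; any field or padding byte the driver leaves untouched would otherwise reach userspace as stale kernel stack. Runnable shape of the fix, with a reduced stand-in struct and a stubbed getter:

#include <stdio.h>
#include <string.h>

struct rtc_pll_info_demo {            /* stand-in; the real one is larger */
    int pll_ctrl;
    int pll_value;
    long pll_clock;                   /* padding can hide between fields */
};

static int get_rtc_pll_stub(struct rtc_pll_info_demo *pll)
{
    pll->pll_ctrl = 1;                /* driver fills only some fields */
    return 0;
}

int main(void)
{
    struct rtc_pll_info_demo pll;

    memset(&pll, 0, sizeof(pll));     /* the added line: no stale stack
                                         bytes survive into the copyout */
    get_rtc_pll_stub(&pll);
    printf("%d %d %ld\n", pll.pll_ctrl, pll.pll_value, pll.pll_clock);
    return 0;
}
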
38756diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
38757index d5d4cd8..22d561d 100644
38758--- a/drivers/char/hpet.c
38759+++ b/drivers/char/hpet.c
38760@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
38761 }
38762
38763 static int
38764-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
38765+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
38766 struct hpet_info *info)
38767 {
38768 struct hpet_timer __iomem *timer;
38769diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
38770index 6b65fa4..8ebbc99 100644
38771--- a/drivers/char/ipmi/ipmi_msghandler.c
38772+++ b/drivers/char/ipmi/ipmi_msghandler.c
38773@@ -436,7 +436,7 @@ struct ipmi_smi {
38774 struct proc_dir_entry *proc_dir;
38775 char proc_dir_name[10];
38776
38777- atomic_t stats[IPMI_NUM_STATS];
38778+ atomic_unchecked_t stats[IPMI_NUM_STATS];
38779
38780 /*
38781 * run_to_completion duplicate of smb_info, smi_info
38782@@ -468,9 +468,9 @@ static LIST_HEAD(smi_watchers);
38783 static DEFINE_MUTEX(smi_watchers_mutex);
38784
38785 #define ipmi_inc_stat(intf, stat) \
38786- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
38787+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
38788 #define ipmi_get_stat(intf, stat) \
38789- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
38790+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
38791
38792 static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI",
38793 "ACPI", "SMBIOS", "PCI",
38794@@ -2837,7 +2837,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
38795 INIT_LIST_HEAD(&intf->cmd_rcvrs);
38796 init_waitqueue_head(&intf->waitq);
38797 for (i = 0; i < IPMI_NUM_STATS; i++)
38798- atomic_set(&intf->stats[i], 0);
38799+ atomic_set_unchecked(&intf->stats[i], 0);
38800
38801 intf->proc_dir = NULL;
38802
38803diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
38804index 967b73a..946e94c 100644
38805--- a/drivers/char/ipmi/ipmi_si_intf.c
38806+++ b/drivers/char/ipmi/ipmi_si_intf.c
38807@@ -284,7 +284,7 @@ struct smi_info {
38808 unsigned char slave_addr;
38809
38810 /* Counters and things for the proc filesystem. */
38811- atomic_t stats[SI_NUM_STATS];
38812+ atomic_unchecked_t stats[SI_NUM_STATS];
38813
38814 struct task_struct *thread;
38815
38816@@ -293,9 +293,9 @@ struct smi_info {
38817 };
38818
38819 #define smi_inc_stat(smi, stat) \
38820- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
38821+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
38822 #define smi_get_stat(smi, stat) \
38823- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
38824+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
38825
38826 #define SI_MAX_PARMS 4
38827
38828@@ -3412,7 +3412,7 @@ static int try_smi_init(struct smi_info *new_smi)
38829 atomic_set(&new_smi->req_events, 0);
38830 new_smi->run_to_completion = false;
38831 for (i = 0; i < SI_NUM_STATS; i++)
38832- atomic_set(&new_smi->stats[i], 0);
38833+ atomic_set_unchecked(&new_smi->stats[i], 0);
38834
38835 new_smi->interrupt_disabled = true;
38836 atomic_set(&new_smi->need_watch, 0);
38837diff --git a/drivers/char/mem.c b/drivers/char/mem.c
38838index 4c58333..d5cca27 100644
38839--- a/drivers/char/mem.c
38840+++ b/drivers/char/mem.c
38841@@ -18,6 +18,7 @@
38842 #include <linux/raw.h>
38843 #include <linux/tty.h>
38844 #include <linux/capability.h>
38845+#include <linux/security.h>
38846 #include <linux/ptrace.h>
38847 #include <linux/device.h>
38848 #include <linux/highmem.h>
38849@@ -36,6 +37,10 @@
38850
38851 #define DEVPORT_MINOR 4
38852
38853+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38854+extern const struct file_operations grsec_fops;
38855+#endif
38856+
38857 static inline unsigned long size_inside_page(unsigned long start,
38858 unsigned long size)
38859 {
38860@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38861
38862 while (cursor < to) {
38863 if (!devmem_is_allowed(pfn)) {
38864+#ifdef CONFIG_GRKERNSEC_KMEM
38865+ gr_handle_mem_readwrite(from, to);
38866+#else
38867 printk(KERN_INFO
38868 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
38869 current->comm, from, to);
38870+#endif
38871 return 0;
38872 }
38873 cursor += PAGE_SIZE;
38874@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38875 }
38876 return 1;
38877 }
38878+#elif defined(CONFIG_GRKERNSEC_KMEM)
38879+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38880+{
38881+ return 0;
38882+}
38883 #else
38884 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
38885 {
38886@@ -124,7 +138,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38887 #endif
38888
38889 while (count > 0) {
38890- unsigned long remaining;
38891+ unsigned long remaining = 0;
38892+ char *temp;
38893
38894 sz = size_inside_page(p, count);
38895
38896@@ -140,7 +155,24 @@ static ssize_t read_mem(struct file *file, char __user *buf,
38897 if (!ptr)
38898 return -EFAULT;
38899
38900- remaining = copy_to_user(buf, ptr, sz);
38901+#ifdef CONFIG_PAX_USERCOPY
38902+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38903+ if (!temp) {
38904+ unxlate_dev_mem_ptr(p, ptr);
38905+ return -ENOMEM;
38906+ }
38907+ remaining = probe_kernel_read(temp, ptr, sz);
38908+#else
38909+ temp = ptr;
38910+#endif
38911+
38912+ if (!remaining)
38913+ remaining = copy_to_user(buf, temp, sz);
38914+
38915+#ifdef CONFIG_PAX_USERCOPY
38916+ kfree(temp);
38917+#endif
38918+
38919 unxlate_dev_mem_ptr(p, ptr);
38920 if (remaining)
38921 return -EFAULT;
38922@@ -372,9 +404,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38923 size_t count, loff_t *ppos)
38924 {
38925 unsigned long p = *ppos;
38926- ssize_t low_count, read, sz;
38927+ ssize_t low_count, read, sz, err = 0;
38928 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
38929- int err = 0;
38930
38931 read = 0;
38932 if (p < (unsigned long) high_memory) {
38933@@ -396,6 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38934 }
38935 #endif
38936 while (low_count > 0) {
38937+ char *temp;
38938+
38939 sz = size_inside_page(p, low_count);
38940
38941 /*
38942@@ -405,7 +438,23 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
38943 */
38944 kbuf = xlate_dev_kmem_ptr((void *)p);
38945
38946- if (copy_to_user(buf, kbuf, sz))
38947+#ifdef CONFIG_PAX_USERCOPY
38948+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
38949+ if (!temp)
38950+ return -ENOMEM;
38951+ err = probe_kernel_read(temp, kbuf, sz);
38952+#else
38953+ temp = kbuf;
38954+#endif
38955+
38956+ if (!err)
38957+ err = copy_to_user(buf, temp, sz);
38958+
38959+#ifdef CONFIG_PAX_USERCOPY
38960+ kfree(temp);
38961+#endif
38962+
38963+ if (err)
38964 return -EFAULT;
38965 buf += sz;
38966 p += sz;
38967@@ -800,6 +849,9 @@ static const struct memdev {
38968 #ifdef CONFIG_PRINTK
38969 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
38970 #endif
38971+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
38972+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
38973+#endif
38974 };
38975
38976 static int memory_open(struct inode *inode, struct file *filp)
38977@@ -871,7 +923,7 @@ static int __init chr_dev_init(void)
38978 continue;
38979
38980 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
38981- NULL, devlist[minor].name);
38982+ NULL, "%s", devlist[minor].name);
38983 }
38984
38985 return tty_init();
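
Note: the read_mem()/read_kmem() changes bounce data through a freshly allocated buffer under CONFIG_PAX_USERCOPY: the usercopy checker can then validate the copy against a slab object of exactly the requested size, and probe_kernel_read() tolerates faults on the source. A userspace sketch of the control flow, with every kernel helper stubbed and error codes written numerically:

#include <stdlib.h>
#include <string.h>

static long probe_kernel_read_stub(void *dst, const void *src, size_t n)
{
    memcpy(dst, src, n);              /* the real helper survives faults */
    return 0;
}

static long copy_to_user_stub(void *dst, const void *src, size_t n)
{
    memcpy(dst, src, n);              /* the real helper checks the range */
    return 0;
}

static int read_through_bounce(void *ubuf, const void *kptr, size_t sz)
{
    void *temp = malloc(sz);          /* stands in for kmalloc(sz,
                                         GFP_KERNEL|GFP_USERCOPY) */
    long err;

    if (!temp)
        return -12;                   /* -ENOMEM */
    err = probe_kernel_read_stub(temp, kptr, sz);
    if (!err)
        err = copy_to_user_stub(ubuf, temp, sz);
    free(temp);
    return err ? -14 : 0;             /* -EFAULT */
}

int main(void)
{
    char src[8] = "kernel", dst[8];
    return read_through_bounce(dst, src, sizeof(src));
}
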
38986diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
38987index 9df78e2..01ba9ae 100644
38988--- a/drivers/char/nvram.c
38989+++ b/drivers/char/nvram.c
38990@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
38991
38992 spin_unlock_irq(&rtc_lock);
38993
38994- if (copy_to_user(buf, contents, tmp - contents))
38995+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
38996 return -EFAULT;
38997
38998 *ppos = i;
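
Note: nvram_read() gains a belt-and-braces clamp: if bookkeeping ever lets tmp run past the end of contents[], the copy is refused rather than overreading into the stack (the extract_entropy_user() hunk in random.c below adds the same i > sizeof(tmp) guard). Minimal runnable form, using an offset instead of pointer arithmetic to keep the sketch well-defined:

#include <stdio.h>

int main(void)
{
    char contents[16];
    size_t produced = 32;             /* simulate a bookkeeping bug */

    if (produced > sizeof(contents)) { /* the added guard */
        fprintf(stderr, "refusing %zu-byte overread\n", produced);
        return 1;
    }
    /* copy_to_user(buf, contents, produced) would be in bounds here */
    (void)contents;
    return 0;
}
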
38999diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
39000index 0ea9986..e7b07e4 100644
39001--- a/drivers/char/pcmcia/synclink_cs.c
39002+++ b/drivers/char/pcmcia/synclink_cs.c
39003@@ -2345,7 +2345,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39004
39005 if (debug_level >= DEBUG_LEVEL_INFO)
39006 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
39007- __FILE__, __LINE__, info->device_name, port->count);
39008+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
39009
39010 if (tty_port_close_start(port, tty, filp) == 0)
39011 goto cleanup;
39012@@ -2363,7 +2363,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39013 cleanup:
39014 if (debug_level >= DEBUG_LEVEL_INFO)
39015 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
39016- tty->driver->name, port->count);
39017+ tty->driver->name, atomic_read(&port->count));
39018 }
39019
39020 /* Wait until the transmitter is empty.
39021@@ -2505,7 +2505,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39022
39023 if (debug_level >= DEBUG_LEVEL_INFO)
39024 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
39025- __FILE__, __LINE__, tty->driver->name, port->count);
39026+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
39027
39028 /* If port is closing, signal caller to try again */
39029 if (port->flags & ASYNC_CLOSING){
39030@@ -2525,11 +2525,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39031 goto cleanup;
39032 }
39033 spin_lock(&port->lock);
39034- port->count++;
39035+ atomic_inc(&port->count);
39036 spin_unlock(&port->lock);
39037 spin_unlock_irqrestore(&info->netlock, flags);
39038
39039- if (port->count == 1) {
39040+ if (atomic_read(&port->count) == 1) {
39041 /* 1st open on this device, init hardware */
39042 retval = startup(info, tty);
39043 if (retval < 0)
39044@@ -3918,7 +3918,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
39045 unsigned short new_crctype;
39046
39047 /* return error if TTY interface open */
39048- if (info->port.count)
39049+ if (atomic_read(&info->port.count))
39050 return -EBUSY;
39051
39052 switch (encoding)
39053@@ -4022,7 +4022,7 @@ static int hdlcdev_open(struct net_device *dev)
39054
39055 /* arbitrate between network and tty opens */
39056 spin_lock_irqsave(&info->netlock, flags);
39057- if (info->port.count != 0 || info->netcount != 0) {
39058+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
39059 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
39060 spin_unlock_irqrestore(&info->netlock, flags);
39061 return -EBUSY;
39062@@ -4112,7 +4112,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39063 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
39064
39065 /* return error if TTY interface open */
39066- if (info->port.count)
39067+ if (atomic_read(&info->port.count))
39068 return -EBUSY;
39069
39070 if (cmd != SIOCWANDEV)
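
Note: the synclink_cs.c hunks convert the tty port open count to an atomic_t. Several paths visible above (hdlcdev_attach, hdlcdev_open, the open/close printks) read the count without holding port->lock, and atomics make those lockless reads well-defined instead of racy. A C11 stand-in for the pattern:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_count;          /* was: plain int under a lock */

static void port_open(void)
{
    atomic_fetch_add(&port_count, 1);  /* writers stay serialized */
}

static int port_is_open(void)
{
    return atomic_load(&port_count) != 0;  /* lockless reader, now
                                              free of data races */
}

int main(void)
{
    port_open();
    printf("open=%d\n", port_is_open());
    return 0;
}
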
39071diff --git a/drivers/char/random.c b/drivers/char/random.c
39072index 9cd6968..6416f00 100644
39073--- a/drivers/char/random.c
39074+++ b/drivers/char/random.c
39075@@ -289,9 +289,6 @@
39076 /*
39077 * To allow fractional bits to be tracked, the entropy_count field is
39078 * denominated in units of 1/8th bits.
39079- *
39080- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
39081- * credit_entropy_bits() needs to be 64 bits wide.
39082 */
39083 #define ENTROPY_SHIFT 3
39084 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
39085@@ -439,9 +436,9 @@ struct entropy_store {
39086 };
39087
39088 static void push_to_pool(struct work_struct *work);
39089-static __u32 input_pool_data[INPUT_POOL_WORDS];
39090-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
39091-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
39092+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
39093+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39094+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39095
39096 static struct entropy_store input_pool = {
39097 .poolinfo = &poolinfo_table[0],
39098@@ -635,7 +632,7 @@ retry:
39099 /* The +2 corresponds to the /4 in the denominator */
39100
39101 do {
39102- unsigned int anfrac = min(pnfrac, pool_size/2);
39103+ u64 anfrac = min(pnfrac, pool_size/2);
39104 unsigned int add =
39105 ((pool_size - entropy_count)*anfrac*3) >> s;
39106
39107@@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
39108
39109 extract_buf(r, tmp);
39110 i = min_t(int, nbytes, EXTRACT_SIZE);
39111- if (copy_to_user(buf, tmp, i)) {
39112+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
39113 ret = -EFAULT;
39114 break;
39115 }
39116@@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
39117 static int proc_do_uuid(struct ctl_table *table, int write,
39118 void __user *buffer, size_t *lenp, loff_t *ppos)
39119 {
39120- struct ctl_table fake_table;
39121+ ctl_table_no_const fake_table;
39122 unsigned char buf[64], tmp_uuid[16], *uuid;
39123
39124 uuid = table->data;
39125@@ -1620,7 +1617,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
39126 static int proc_do_entropy(struct ctl_table *table, int write,
39127 void __user *buffer, size_t *lenp, loff_t *ppos)
39128 {
39129- struct ctl_table fake_table;
39130+ ctl_table_no_const fake_table;
39131 int entropy_count;
39132
39133 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
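
Note: two distinct changes in random.c. The pools gain __latent_entropy so the plugin seeds them with compile-time randomness, and anfrac widens to u64, which retires the old "2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31" constraint whose comment the first hunk deletes. The multiply really can collapse in 32 bits once that constraint is violated; demonstration with a hypothetically enlarged pool (the real 4096-bit input pool still satisfied the old bound):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* eighth-bit units as in the driver; sizes chosen to break the
     * old 31-bit constraint */
    uint32_t pool_size = 262144;       /* 2^18 */
    uint32_t entropy_count = 0;
    uint32_t anfrac = pool_size / 2;   /* 2^17 */

    uint32_t narrow = (pool_size - entropy_count) * anfrac * 3;
    uint64_t wide = (pool_size - entropy_count) * (uint64_t)anfrac * 3;

    printf("32-bit: %u, 64-bit: %llu\n",
           narrow, (unsigned long long)wide);   /* 0 vs 103079215104 */
    return 0;
}
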
39134diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
39135index e496dae..b793e7d 100644
39136--- a/drivers/char/sonypi.c
39137+++ b/drivers/char/sonypi.c
39138@@ -54,6 +54,7 @@
39139
39140 #include <asm/uaccess.h>
39141 #include <asm/io.h>
39142+#include <asm/local.h>
39143
39144 #include <linux/sonypi.h>
39145
39146@@ -490,7 +491,7 @@ static struct sonypi_device {
39147 spinlock_t fifo_lock;
39148 wait_queue_head_t fifo_proc_list;
39149 struct fasync_struct *fifo_async;
39150- int open_count;
39151+ local_t open_count;
39152 int model;
39153 struct input_dev *input_jog_dev;
39154 struct input_dev *input_key_dev;
39155@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
39156 static int sonypi_misc_release(struct inode *inode, struct file *file)
39157 {
39158 mutex_lock(&sonypi_device.lock);
39159- sonypi_device.open_count--;
39160+ local_dec(&sonypi_device.open_count);
39161 mutex_unlock(&sonypi_device.lock);
39162 return 0;
39163 }
39164@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
39165 {
39166 mutex_lock(&sonypi_device.lock);
39167 /* Flush input queue on first open */
39168- if (!sonypi_device.open_count)
39169+ if (!local_read(&sonypi_device.open_count))
39170 kfifo_reset(&sonypi_device.fifo);
39171- sonypi_device.open_count++;
39172+ local_inc(&sonypi_device.open_count);
39173 mutex_unlock(&sonypi_device.lock);
39174
39175 return 0;
39176diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
39177index 565a947..dcdc06e 100644
39178--- a/drivers/char/tpm/tpm_acpi.c
39179+++ b/drivers/char/tpm/tpm_acpi.c
39180@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
39181 virt = acpi_os_map_iomem(start, len);
39182 if (!virt) {
39183 kfree(log->bios_event_log);
39184+ log->bios_event_log = NULL;
39185 printk("%s: ERROR - Unable to map memory\n", __func__);
39186 return -EIO;
39187 }
39188
39189- memcpy_fromio(log->bios_event_log, virt, len);
39190+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
39191
39192 acpi_os_unmap_iomem(virt, len);
39193 return 0;
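
Note: tpm_acpi.c nulls bios_event_log after freeing it on the mapping-failure path; callers test that field, and a stale pointer would invite a double free or use-after-free. (The memcpy_fromio cast is the same __force address-space annotation seen earlier, here crossing from iomem.) Runnable shape of the error path, with the failure injected by a parameter:

#include <stdlib.h>

struct tpm_bios_log { void *bios_event_log; };

static int read_log_sketch(struct tpm_bios_log *log, int map_fails)
{
    log->bios_event_log = malloc(64);
    if (!log->bios_event_log)
        return -12;                   /* -ENOMEM */
    if (map_fails) {
        free(log->bios_event_log);
        log->bios_event_log = NULL;   /* the added line: no dangler
                                         survives for callers to free */
        return -5;                    /* -EIO */
    }
    return 0;
}

int main(void)
{
    struct tpm_bios_log log;

    return (read_log_sketch(&log, 1) == -5 &&
            log.bios_event_log == NULL) ? 0 : 1;
}
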
39194diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
39195index 3a56a13..f8cbd25 100644
39196--- a/drivers/char/tpm/tpm_eventlog.c
39197+++ b/drivers/char/tpm/tpm_eventlog.c
39198@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
39199 event = addr;
39200
39201 if ((event->event_type == 0 && event->event_size == 0) ||
39202- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
39203+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
39204 return NULL;
39205
39206 return addr;
39207@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
39208 return NULL;
39209
39210 if ((event->event_type == 0 && event->event_size == 0) ||
39211- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
39212+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
39213 return NULL;
39214
39215 (*pos)++;
39216@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
39217 int i;
39218
39219 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
39220- seq_putc(m, data[i]);
39221+ if (!seq_putc(m, data[i]))
39222+ return -EFAULT;
39223
39224 return 0;
39225 }
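
Note: the tpm_eventlog.c bound checks are rewritten so the untrusted event_size stands alone on one side of the comparison: addr + sizeof(struct tcpa_event) + event_size can wrap for a huge event_size and compare as small, accepting an out-of-bounds event. Integer-only demonstration (pointers replaced by uintptr_t so the wrap stays well-defined C; the header size is a stand-in):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uintptr_t addr  = UINTPTR_MAX - 64;    /* event near the map's end */
    uintptr_t limit = UINTPTR_MAX - 16;
    uint32_t  event_size = UINT32_MAX;     /* attacker-controlled field */
    uintptr_t hdr = 32;                    /* sizeof(struct tcpa_event)
                                              stand-in */

    int bad  = (addr + hdr + event_size) >= limit; /* sum wraps: accepts */
    int good = event_size >= (limit - addr - hdr); /* rejects */

    printf("old check rejects: %d, new check rejects: %d\n", bad, good);
    return 0;
}
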
39226diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
39227index c3aac4c..88de09f9 100644
39228--- a/drivers/char/virtio_console.c
39229+++ b/drivers/char/virtio_console.c
39230@@ -685,7 +685,7 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
39231 if (to_user) {
39232 ssize_t ret;
39233
39234- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
39235+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
39236 if (ret)
39237 return -EFAULT;
39238 } else {
39239@@ -789,7 +789,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
39240 if (!port_has_data(port) && !port->host_connected)
39241 return 0;
39242
39243- return fill_readbuf(port, ubuf, count, true);
39244+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
39245 }
39246
39247 static int wait_port_writable(struct port *port, bool nonblock)
39248diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
39249index 4386697..754ceca 100644
39250--- a/drivers/clk/clk-composite.c
39251+++ b/drivers/clk/clk-composite.c
39252@@ -192,7 +192,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
39253 struct clk *clk;
39254 struct clk_init_data init;
39255 struct clk_composite *composite;
39256- struct clk_ops *clk_composite_ops;
39257+ clk_ops_no_const *clk_composite_ops;
39258
39259 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
39260 if (!composite) {
39261diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
39262index dd3a78c..386d49c 100644
39263--- a/drivers/clk/socfpga/clk-gate.c
39264+++ b/drivers/clk/socfpga/clk-gate.c
39265@@ -22,6 +22,7 @@
39266 #include <linux/mfd/syscon.h>
39267 #include <linux/of.h>
39268 #include <linux/regmap.h>
39269+#include <asm/pgtable.h>
39270
39271 #include "clk.h"
39272
39273@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
39274 return 0;
39275 }
39276
39277-static struct clk_ops gateclk_ops = {
39278+static clk_ops_no_const gateclk_ops __read_only = {
39279 .prepare = socfpga_clk_prepare,
39280 .recalc_rate = socfpga_clk_recalc_rate,
39281 .get_parent = socfpga_clk_get_parent,
39282@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
39283 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
39284 socfpga_clk->hw.bit_idx = clk_gate[1];
39285
39286- gateclk_ops.enable = clk_gate_ops.enable;
39287- gateclk_ops.disable = clk_gate_ops.disable;
39288+ pax_open_kernel();
39289+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
39290+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
39291+ pax_close_kernel();
39292 }
39293
39294 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
39295diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
39296index de6da95..c98278b 100644
39297--- a/drivers/clk/socfpga/clk-pll.c
39298+++ b/drivers/clk/socfpga/clk-pll.c
39299@@ -21,6 +21,7 @@
39300 #include <linux/io.h>
39301 #include <linux/of.h>
39302 #include <linux/of_address.h>
39303+#include <asm/pgtable.h>
39304
39305 #include "clk.h"
39306
39307@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
39308 CLK_MGR_PLL_CLK_SRC_MASK;
39309 }
39310
39311-static struct clk_ops clk_pll_ops = {
39312+static clk_ops_no_const clk_pll_ops __read_only = {
39313 .recalc_rate = clk_pll_recalc_rate,
39314 .get_parent = clk_pll_get_parent,
39315 };
39316@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
39317 pll_clk->hw.hw.init = &init;
39318
39319 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
39320- clk_pll_ops.enable = clk_gate_ops.enable;
39321- clk_pll_ops.disable = clk_gate_ops.disable;
39322+ pax_open_kernel();
39323+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
39324+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
39325+ pax_close_kernel();
39326
39327 clk = clk_register(NULL, &pll_clk->hw.hw);
39328 if (WARN_ON(IS_ERR(clk))) {
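
Note: clk-gate.c and clk-pll.c mark their clk_ops __read_only and patch the two gate callbacks in through pax_open_kernel(), the PaX primitive that briefly lifts kernel write protection for a deliberate one-shot write to otherwise read-only data. Sketch with the primitives stubbed out (on real hardware they toggle CR0.WP or the architecture's equivalent):

#include <stdio.h>

#define __read_only   /* real attribute: a write-protected data section */
static void pax_open_kernel(void)  { /* e.g. clear CR0.WP */ }
static void pax_close_kernel(void) { /* restore CR0.WP */ }

struct clk_ops_demo {
    int (*enable)(void);
    int (*disable)(void);
};

static int gate_enable(void)  { return 1; }
static int gate_disable(void) { return 0; }

static struct clk_ops_demo gateclk_ops __read_only;

int main(void)
{
    pax_open_kernel();
    /* mirrors the patch's *(void **)&ops.field idiom; fine on the
     * kernel's supported targets, though not strictly portable C */
    *(void **)&gateclk_ops.enable  = (void *)gate_enable;
    *(void **)&gateclk_ops.disable = (void *)gate_disable;
    pax_close_kernel();

    printf("enable -> %d\n", gateclk_ops.enable());
    return 0;
}
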
39329diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
39330index b0c18ed..1713a80 100644
39331--- a/drivers/cpufreq/acpi-cpufreq.c
39332+++ b/drivers/cpufreq/acpi-cpufreq.c
39333@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39334 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
39335 per_cpu(acfreq_data, cpu) = data;
39336
39337- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
39338- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39339+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
39340+ pax_open_kernel();
39341+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
39342+ pax_close_kernel();
39343+ }
39344
39345 result = acpi_processor_register_performance(data->acpi_data, cpu);
39346 if (result)
39347@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
39348 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
39349 break;
39350 case ACPI_ADR_SPACE_FIXED_HARDWARE:
39351- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39352+ pax_open_kernel();
39353+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
39354+ pax_close_kernel();
39355 break;
39356 default:
39357 break;
39358@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
39359 if (!msrs)
39360 return;
39361
39362- acpi_cpufreq_driver.boost_supported = true;
39363- acpi_cpufreq_driver.boost_enabled = boost_state(0);
39364+ pax_open_kernel();
39365+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
39366+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
39367+ pax_close_kernel();
39368
39369 cpu_notifier_register_begin();
39370
39371diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
39372index fde97d6..3631eca 100644
39373--- a/drivers/cpufreq/cpufreq-dt.c
39374+++ b/drivers/cpufreq/cpufreq-dt.c
39375@@ -393,7 +393,9 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
39376 if (!IS_ERR(cpu_reg))
39377 regulator_put(cpu_reg);
39378
39379- dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39380+ pax_open_kernel();
39381+ *(void **)&dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
39382+ pax_close_kernel();
39383
39384 ret = cpufreq_register_driver(&dt_cpufreq_driver);
39385 if (ret)
39386diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
39387index 24736bb..aae33f4 100644
39388--- a/drivers/cpufreq/cpufreq.c
39389+++ b/drivers/cpufreq/cpufreq.c
39390@@ -2138,7 +2138,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
39391 }
39392
39393 mutex_lock(&cpufreq_governor_mutex);
39394- list_del(&governor->governor_list);
39395+ pax_list_del(&governor->governor_list);
39396 mutex_unlock(&cpufreq_governor_mutex);
39397 return;
39398 }
39399@@ -2354,7 +2354,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
39400 return NOTIFY_OK;
39401 }
39402
39403-static struct notifier_block __refdata cpufreq_cpu_notifier = {
39404+static struct notifier_block cpufreq_cpu_notifier = {
39405 .notifier_call = cpufreq_cpu_callback,
39406 };
39407
39408@@ -2394,13 +2394,17 @@ int cpufreq_boost_trigger_state(int state)
39409 return 0;
39410
39411 write_lock_irqsave(&cpufreq_driver_lock, flags);
39412- cpufreq_driver->boost_enabled = state;
39413+ pax_open_kernel();
39414+ *(bool *)&cpufreq_driver->boost_enabled = state;
39415+ pax_close_kernel();
39416 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39417
39418 ret = cpufreq_driver->set_boost(state);
39419 if (ret) {
39420 write_lock_irqsave(&cpufreq_driver_lock, flags);
39421- cpufreq_driver->boost_enabled = !state;
39422+ pax_open_kernel();
39423+ *(bool *)&cpufreq_driver->boost_enabled = !state;
39424+ pax_close_kernel();
39425 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
39426
39427 pr_err("%s: Cannot %s BOOST\n",
39428@@ -2457,8 +2461,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39429
39430 pr_debug("trying to register driver %s\n", driver_data->name);
39431
39432- if (driver_data->setpolicy)
39433- driver_data->flags |= CPUFREQ_CONST_LOOPS;
39434+ if (driver_data->setpolicy) {
39435+ pax_open_kernel();
39436+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
39437+ pax_close_kernel();
39438+ }
39439
39440 write_lock_irqsave(&cpufreq_driver_lock, flags);
39441 if (cpufreq_driver) {
39442@@ -2473,8 +2480,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
39443 * Check if driver provides function to enable boost -
39444 * if not, use cpufreq_boost_set_sw as default
39445 */
39446- if (!cpufreq_driver->set_boost)
39447- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39448+ if (!cpufreq_driver->set_boost) {
39449+ pax_open_kernel();
39450+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
39451+ pax_close_kernel();
39452+ }
39453
39454 ret = cpufreq_sysfs_create_file(&boost.attr);
39455 if (ret) {
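
Note: cpufreq.c swaps list_del() for pax_list_del() when unlinking a governor: governor objects are constified, so the list-pointer surgery has to happen inside a pax_open_kernel() window. An approximate, stubbed shape of the helper (the real one also poisons the unlinked node):

struct list_head { struct list_head *next, *prev; };

static void pax_open_kernel(void)  { /* lift kernel write protection */ }
static void pax_close_kernel(void) { /* restore it */ }

/* same pointer surgery as list_del(), bracketed so writes touching
 * constified objects are permitted */
static void pax_list_del(struct list_head *entry)
{
    pax_open_kernel();
    entry->next->prev = entry->prev;
    entry->prev->next = entry->next;
    pax_close_kernel();
}

int main(void)
{
    struct list_head head, a;

    head.next = &a;    head.prev = &a;
    a.next    = &head; a.prev    = &head;
    pax_list_del(&a);
    return (head.next == &head && head.prev == &head) ? 0 : 1;
}
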
39456diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
39457index 1b44496..b80ff5e 100644
39458--- a/drivers/cpufreq/cpufreq_governor.c
39459+++ b/drivers/cpufreq/cpufreq_governor.c
39460@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39461 struct dbs_data *dbs_data;
39462 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
39463 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
39464- struct od_ops *od_ops = NULL;
39465+ const struct od_ops *od_ops = NULL;
39466 struct od_dbs_tuners *od_tuners = NULL;
39467 struct cs_dbs_tuners *cs_tuners = NULL;
39468 struct cpu_dbs_common_info *cpu_cdbs;
39469@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39470
39471 if ((cdata->governor == GOV_CONSERVATIVE) &&
39472 (!policy->governor->initialized)) {
39473- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39474+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39475
39476 cpufreq_register_notifier(cs_ops->notifier_block,
39477 CPUFREQ_TRANSITION_NOTIFIER);
39478@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
39479
39480 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
39481 (policy->governor->initialized == 1)) {
39482- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39483+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
39484
39485 cpufreq_unregister_notifier(cs_ops->notifier_block,
39486 CPUFREQ_TRANSITION_NOTIFIER);
39487diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
39488index cc401d1..8197340 100644
39489--- a/drivers/cpufreq/cpufreq_governor.h
39490+++ b/drivers/cpufreq/cpufreq_governor.h
39491@@ -212,7 +212,7 @@ struct common_dbs_data {
39492 void (*exit)(struct dbs_data *dbs_data);
39493
39494 /* Governor specific ops, see below */
39495- void *gov_ops;
39496+ const void *gov_ops;
39497 };
39498
39499 /* Governor Per policy data */
39500@@ -232,7 +232,7 @@ struct od_ops {
39501 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
39502 unsigned int freq_next, unsigned int relation);
39503 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
39504-};
39505+} __no_const;
39506
39507 struct cs_ops {
39508 struct notifier_block *notifier_block;
39509diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
39510index ad3f38f..8f086cd 100644
39511--- a/drivers/cpufreq/cpufreq_ondemand.c
39512+++ b/drivers/cpufreq/cpufreq_ondemand.c
39513@@ -524,7 +524,7 @@ static void od_exit(struct dbs_data *dbs_data)
39514
39515 define_get_cpu_dbs_routines(od_cpu_dbs_info);
39516
39517-static struct od_ops od_ops = {
39518+static struct od_ops od_ops __read_only = {
39519 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
39520 .powersave_bias_target = generic_powersave_bias_target,
39521 .freq_increase = dbs_freq_increase,
39522@@ -579,14 +579,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
39523 (struct cpufreq_policy *, unsigned int, unsigned int),
39524 unsigned int powersave_bias)
39525 {
39526- od_ops.powersave_bias_target = f;
39527+ pax_open_kernel();
39528+ *(void **)&od_ops.powersave_bias_target = f;
39529+ pax_close_kernel();
39530 od_set_powersave_bias(powersave_bias);
39531 }
39532 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
39533
39534 void od_unregister_powersave_bias_handler(void)
39535 {
39536- od_ops.powersave_bias_target = generic_powersave_bias_target;
39537+ pax_open_kernel();
39538+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
39539+ pax_close_kernel();
39540 od_set_powersave_bias(0);
39541 }
39542 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
39543diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
39544index 742eefb..e2fcfc8 100644
39545--- a/drivers/cpufreq/intel_pstate.c
39546+++ b/drivers/cpufreq/intel_pstate.c
39547@@ -133,10 +133,10 @@ struct pstate_funcs {
39548 struct cpu_defaults {
39549 struct pstate_adjust_policy pid_policy;
39550 struct pstate_funcs funcs;
39551-};
39552+} __do_const;
39553
39554 static struct pstate_adjust_policy pid_params;
39555-static struct pstate_funcs pstate_funcs;
39556+static struct pstate_funcs *pstate_funcs;
39557 static int hwp_active;
39558
39559 struct perf_limits {
39560@@ -653,18 +653,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
39561
39562 cpu->pstate.current_pstate = pstate;
39563
39564- pstate_funcs.set(cpu, pstate);
39565+ pstate_funcs->set(cpu, pstate);
39566 }
39567
39568 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
39569 {
39570- cpu->pstate.min_pstate = pstate_funcs.get_min();
39571- cpu->pstate.max_pstate = pstate_funcs.get_max();
39572- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
39573- cpu->pstate.scaling = pstate_funcs.get_scaling();
39574+ cpu->pstate.min_pstate = pstate_funcs->get_min();
39575+ cpu->pstate.max_pstate = pstate_funcs->get_max();
39576+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
39577+ cpu->pstate.scaling = pstate_funcs->get_scaling();
39578
39579- if (pstate_funcs.get_vid)
39580- pstate_funcs.get_vid(cpu);
39581+ if (pstate_funcs->get_vid)
39582+ pstate_funcs->get_vid(cpu);
39583 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
39584 }
39585
39586@@ -988,9 +988,9 @@ static int intel_pstate_msrs_not_valid(void)
39587 rdmsrl(MSR_IA32_APERF, aperf);
39588 rdmsrl(MSR_IA32_MPERF, mperf);
39589
39590- if (!pstate_funcs.get_max() ||
39591- !pstate_funcs.get_min() ||
39592- !pstate_funcs.get_turbo())
39593+ if (!pstate_funcs->get_max() ||
39594+ !pstate_funcs->get_min() ||
39595+ !pstate_funcs->get_turbo())
39596 return -ENODEV;
39597
39598 rdmsrl(MSR_IA32_APERF, tmp);
39599@@ -1004,7 +1004,7 @@ static int intel_pstate_msrs_not_valid(void)
39600 return 0;
39601 }
39602
39603-static void copy_pid_params(struct pstate_adjust_policy *policy)
39604+static void copy_pid_params(const struct pstate_adjust_policy *policy)
39605 {
39606 pid_params.sample_rate_ms = policy->sample_rate_ms;
39607 pid_params.p_gain_pct = policy->p_gain_pct;
39608@@ -1016,12 +1016,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
39609
39610 static void copy_cpu_funcs(struct pstate_funcs *funcs)
39611 {
39612- pstate_funcs.get_max = funcs->get_max;
39613- pstate_funcs.get_min = funcs->get_min;
39614- pstate_funcs.get_turbo = funcs->get_turbo;
39615- pstate_funcs.get_scaling = funcs->get_scaling;
39616- pstate_funcs.set = funcs->set;
39617- pstate_funcs.get_vid = funcs->get_vid;
39618+ pstate_funcs = funcs;
39619 }
39620
39621 #if IS_ENABLED(CONFIG_ACPI)
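
Note: intel_pstate stops copying six function pointers into a writable static struct and instead keeps a single pointer to the (now __do_const) per-CPU defaults; only one word remains writable rather than a whole table of code pointers, a much smaller target for kernel write primitives. Reduced runnable form (struct and values invented for the demo):

struct pstate_funcs_demo {
    int (*get_max)(void);
    int (*get_min)(void);
};

static int cpu_get_max(void) { return 30; }
static int cpu_get_min(void) { return 8; }

static const struct pstate_funcs_demo core_funcs = {
    .get_max = cpu_get_max,
    .get_min = cpu_get_min,
};

/* was: a writable struct receiving field-by-field copies;
 * now: a single writable word aimed at const data */
static const struct pstate_funcs_demo *pstate_funcs;

int main(void)
{
    pstate_funcs = &core_funcs;        /* replaces copy_cpu_funcs() body */
    return pstate_funcs->get_max() - 30;   /* 0 on success */
}
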
39622diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
39623index 529cfd9..0e28fff 100644
39624--- a/drivers/cpufreq/p4-clockmod.c
39625+++ b/drivers/cpufreq/p4-clockmod.c
39626@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39627 case 0x0F: /* Core Duo */
39628 case 0x16: /* Celeron Core */
39629 case 0x1C: /* Atom */
39630- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39631+ pax_open_kernel();
39632+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39633+ pax_close_kernel();
39634 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
39635 case 0x0D: /* Pentium M (Dothan) */
39636- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39637+ pax_open_kernel();
39638+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39639+ pax_close_kernel();
39640 /* fall through */
39641 case 0x09: /* Pentium M (Banias) */
39642 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
39643@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
39644
39645 /* on P-4s, the TSC runs with constant frequency independent whether
39646 * throttling is active or not. */
39647- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39648+ pax_open_kernel();
39649+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
39650+ pax_close_kernel();
39651
39652 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
39653 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
39654diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
39655index 9bb42ba..b01b4a2 100644
39656--- a/drivers/cpufreq/sparc-us3-cpufreq.c
39657+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
39658@@ -18,14 +18,12 @@
39659 #include <asm/head.h>
39660 #include <asm/timer.h>
39661
39662-static struct cpufreq_driver *cpufreq_us3_driver;
39663-
39664 struct us3_freq_percpu_info {
39665 struct cpufreq_frequency_table table[4];
39666 };
39667
39668 /* Indexed by cpu number. */
39669-static struct us3_freq_percpu_info *us3_freq_table;
39670+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
39671
39672 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
39673 * in the Safari config register.
39674@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
39675
39676 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
39677 {
39678- if (cpufreq_us3_driver)
39679- us3_freq_target(policy, 0);
39680+ us3_freq_target(policy, 0);
39681
39682 return 0;
39683 }
39684
39685+static int __init us3_freq_init(void);
39686+static void __exit us3_freq_exit(void);
39687+
39688+static struct cpufreq_driver cpufreq_us3_driver = {
39689+ .init = us3_freq_cpu_init,
39690+ .verify = cpufreq_generic_frequency_table_verify,
39691+ .target_index = us3_freq_target,
39692+ .get = us3_freq_get,
39693+ .exit = us3_freq_cpu_exit,
39694+ .name = "UltraSPARC-III",
39695+
39696+};
39697+
39698 static int __init us3_freq_init(void)
39699 {
39700 unsigned long manuf, impl, ver;
39701- int ret;
39702
39703 if (tlb_type != cheetah && tlb_type != cheetah_plus)
39704 return -ENODEV;
39705@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
39706 (impl == CHEETAH_IMPL ||
39707 impl == CHEETAH_PLUS_IMPL ||
39708 impl == JAGUAR_IMPL ||
39709- impl == PANTHER_IMPL)) {
39710- struct cpufreq_driver *driver;
39711-
39712- ret = -ENOMEM;
39713- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
39714- if (!driver)
39715- goto err_out;
39716-
39717- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
39718- GFP_KERNEL);
39719- if (!us3_freq_table)
39720- goto err_out;
39721-
39722- driver->init = us3_freq_cpu_init;
39723- driver->verify = cpufreq_generic_frequency_table_verify;
39724- driver->target_index = us3_freq_target;
39725- driver->get = us3_freq_get;
39726- driver->exit = us3_freq_cpu_exit;
39727- strcpy(driver->name, "UltraSPARC-III");
39728-
39729- cpufreq_us3_driver = driver;
39730- ret = cpufreq_register_driver(driver);
39731- if (ret)
39732- goto err_out;
39733-
39734- return 0;
39735-
39736-err_out:
39737- if (driver) {
39738- kfree(driver);
39739- cpufreq_us3_driver = NULL;
39740- }
39741- kfree(us3_freq_table);
39742- us3_freq_table = NULL;
39743- return ret;
39744- }
39745+ impl == PANTHER_IMPL))
39746+ return cpufreq_register_driver(&cpufreq_us3_driver);
39747
39748 return -ENODEV;
39749 }
39750
39751 static void __exit us3_freq_exit(void)
39752 {
39753- if (cpufreq_us3_driver) {
39754- cpufreq_unregister_driver(cpufreq_us3_driver);
39755- kfree(cpufreq_us3_driver);
39756- cpufreq_us3_driver = NULL;
39757- kfree(us3_freq_table);
39758- us3_freq_table = NULL;
39759- }
39760+ cpufreq_unregister_driver(&cpufreq_us3_driver);
39761 }
39762
39763 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
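
Note: the sparc-us3 driver is rebuilt as a statically initialized cpufreq_driver instead of a kzalloc'd one filled field by field: the allocation failure paths disappear, the per-CPU table becomes a fixed array, and the ops structure becomes eligible for constification. Compile-and-run shape of the "after" state, with a reduced stand-in struct:

struct cpufreq_driver_demo {
    int  (*init)(void);
    char name[16];
};

static int us3_init(void) { return 0; }

/* fully built at compile time: no kzalloc(), no strcpy(), no unwinding */
static const struct cpufreq_driver_demo cpufreq_us3_driver = {
    .init = us3_init,
    .name = "UltraSPARC-III",
};

int main(void)
{
    return cpufreq_us3_driver.init();
}
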
39764diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
39765index 7d4a315..21bb886 100644
39766--- a/drivers/cpufreq/speedstep-centrino.c
39767+++ b/drivers/cpufreq/speedstep-centrino.c
39768@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
39769 !cpu_has(cpu, X86_FEATURE_EST))
39770 return -ENODEV;
39771
39772- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
39773- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39774+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
39775+ pax_open_kernel();
39776+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
39777+ pax_close_kernel();
39778+ }
39779
39780 if (policy->cpu != 0)
39781 return -ENODEV;
39782diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
39783index 2697e87..c32476c 100644
39784--- a/drivers/cpuidle/driver.c
39785+++ b/drivers/cpuidle/driver.c
39786@@ -194,7 +194,7 @@ static int poll_idle(struct cpuidle_device *dev,
39787
39788 static void poll_idle_init(struct cpuidle_driver *drv)
39789 {
39790- struct cpuidle_state *state = &drv->states[0];
39791+ cpuidle_state_no_const *state = &drv->states[0];
39792
39793 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
39794 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
39795diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
39796index fb9f511..213e6cc 100644
39797--- a/drivers/cpuidle/governor.c
39798+++ b/drivers/cpuidle/governor.c
39799@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
39800 mutex_lock(&cpuidle_lock);
39801 if (__cpuidle_find_governor(gov->name) == NULL) {
39802 ret = 0;
39803- list_add_tail(&gov->governor_list, &cpuidle_governors);
39804+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
39805 if (!cpuidle_curr_governor ||
39806 cpuidle_curr_governor->rating < gov->rating)
39807 cpuidle_switch_governor(gov);
39808diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
39809index 832a2c3..1794080 100644
39810--- a/drivers/cpuidle/sysfs.c
39811+++ b/drivers/cpuidle/sysfs.c
39812@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
39813 NULL
39814 };
39815
39816-static struct attribute_group cpuidle_attr_group = {
39817+static attribute_group_no_const cpuidle_attr_group = {
39818 .attrs = cpuidle_default_attrs,
39819 .name = "cpuidle",
39820 };
39821diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
39822index 8d2a772..33826c9 100644
39823--- a/drivers/crypto/hifn_795x.c
39824+++ b/drivers/crypto/hifn_795x.c
39825@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
39826 MODULE_PARM_DESC(hifn_pll_ref,
39827 "PLL reference clock (pci[freq] or ext[freq], default ext)");
39828
39829-static atomic_t hifn_dev_number;
39830+static atomic_unchecked_t hifn_dev_number;
39831
39832 #define ACRYPTO_OP_DECRYPT 0
39833 #define ACRYPTO_OP_ENCRYPT 1
39834@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
39835 goto err_out_disable_pci_device;
39836
39837 snprintf(name, sizeof(name), "hifn%d",
39838- atomic_inc_return(&hifn_dev_number)-1);
39839+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
39840
39841 err = pci_request_regions(pdev, name);
39842 if (err)
39843diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
39844index 30b538d8..1610d75 100644
39845--- a/drivers/devfreq/devfreq.c
39846+++ b/drivers/devfreq/devfreq.c
39847@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
39848 goto err_out;
39849 }
39850
39851- list_add(&governor->node, &devfreq_governor_list);
39852+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
39853
39854 list_for_each_entry(devfreq, &devfreq_list, node) {
39855 int ret = 0;
39856@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
39857 }
39858 }
39859
39860- list_del(&governor->node);
39861+ pax_list_del((struct list_head *)&governor->node);
39862 err_out:
39863 mutex_unlock(&devfreq_list_lock);
39864
39865diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
39866index 3a2adb1..b3be9a3 100644
39867--- a/drivers/dma/sh/shdma-base.c
39868+++ b/drivers/dma/sh/shdma-base.c
39869@@ -228,8 +228,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
39870 schan->slave_id = -EINVAL;
39871 }
39872
39873- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
39874- sdev->desc_size, GFP_KERNEL);
39875+ schan->desc = kcalloc(sdev->desc_size,
39876+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
39877 if (!schan->desc) {
39878 ret = -ENOMEM;
39879 goto edescalloc;
39880diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
39881index aec8a84..7b45a1f 100644
39882--- a/drivers/dma/sh/shdmac.c
39883+++ b/drivers/dma/sh/shdmac.c
39884@@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
39885 return ret;
39886 }
39887
39888-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
39889+static struct notifier_block sh_dmae_nmi_notifier = {
39890 .notifier_call = sh_dmae_nmi_handler,
39891
39892 /* Run before NMI debug handler and KGDB */
39893diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
39894index 592af5f..bb1d583 100644
39895--- a/drivers/edac/edac_device.c
39896+++ b/drivers/edac/edac_device.c
39897@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
39898 */
39899 int edac_device_alloc_index(void)
39900 {
39901- static atomic_t device_indexes = ATOMIC_INIT(0);
39902+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
39903
39904- return atomic_inc_return(&device_indexes) - 1;
39905+ return atomic_inc_return_unchecked(&device_indexes) - 1;
39906 }
39907 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
39908
39909diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
39910index 670d282..6675f4d 100644
39911--- a/drivers/edac/edac_mc_sysfs.c
39912+++ b/drivers/edac/edac_mc_sysfs.c
39913@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
39914 struct dev_ch_attribute {
39915 struct device_attribute attr;
39916 int channel;
39917-};
39918+} __do_const;
39919
39920 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
39921 struct dev_ch_attribute dev_attr_legacy_##_name = \
39922@@ -1011,14 +1011,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
39923 }
39924
39925 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
39926+ pax_open_kernel();
39927 if (mci->get_sdram_scrub_rate) {
39928- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39929- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39930+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
39931+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
39932 }
39933 if (mci->set_sdram_scrub_rate) {
39934- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39935- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39936+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
39937+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
39938 }
39939+ pax_close_kernel();
39940 err = device_create_file(&mci->dev,
39941 &dev_attr_sdram_scrub_rate);
39942 if (err) {
39943diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
39944index 2cf44b4d..6dd2dc7 100644
39945--- a/drivers/edac/edac_pci.c
39946+++ b/drivers/edac/edac_pci.c
39947@@ -29,7 +29,7 @@
39948
39949 static DEFINE_MUTEX(edac_pci_ctls_mutex);
39950 static LIST_HEAD(edac_pci_list);
39951-static atomic_t pci_indexes = ATOMIC_INIT(0);
39952+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
39953
39954 /*
39955 * edac_pci_alloc_ctl_info
39956@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
39957 */
39958 int edac_pci_alloc_index(void)
39959 {
39960- return atomic_inc_return(&pci_indexes) - 1;
39961+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
39962 }
39963 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
39964
39965diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
39966index 24d877f..4e30133 100644
39967--- a/drivers/edac/edac_pci_sysfs.c
39968+++ b/drivers/edac/edac_pci_sysfs.c
39969@@ -23,8 +23,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
39970 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
39971 static int edac_pci_poll_msec = 1000; /* one second workq period */
39972
39973-static atomic_t pci_parity_count = ATOMIC_INIT(0);
39974-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
39975+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
39976+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
39977
39978 static struct kobject *edac_pci_top_main_kobj;
39979 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
39980@@ -232,7 +232,7 @@ struct edac_pci_dev_attribute {
39981 void *value;
39982 ssize_t(*show) (void *, char *);
39983 ssize_t(*store) (void *, const char *, size_t);
39984-};
39985+} __do_const;
39986
39987 /* Set of show/store abstract level functions for PCI Parity object */
39988 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
39989@@ -576,7 +576,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39990 edac_printk(KERN_CRIT, EDAC_PCI,
39991 "Signaled System Error on %s\n",
39992 pci_name(dev));
39993- atomic_inc(&pci_nonparity_count);
39994+ atomic_inc_unchecked(&pci_nonparity_count);
39995 }
39996
39997 if (status & (PCI_STATUS_PARITY)) {
39998@@ -584,7 +584,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
39999 "Master Data Parity Error on %s\n",
40000 pci_name(dev));
40001
40002- atomic_inc(&pci_parity_count);
40003+ atomic_inc_unchecked(&pci_parity_count);
40004 }
40005
40006 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40007@@ -592,7 +592,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40008 "Detected Parity Error on %s\n",
40009 pci_name(dev));
40010
40011- atomic_inc(&pci_parity_count);
40012+ atomic_inc_unchecked(&pci_parity_count);
40013 }
40014 }
40015
40016@@ -615,7 +615,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40017 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
40018 "Signaled System Error on %s\n",
40019 pci_name(dev));
40020- atomic_inc(&pci_nonparity_count);
40021+ atomic_inc_unchecked(&pci_nonparity_count);
40022 }
40023
40024 if (status & (PCI_STATUS_PARITY)) {
40025@@ -623,7 +623,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40026 "Master Data Parity Error on "
40027 "%s\n", pci_name(dev));
40028
40029- atomic_inc(&pci_parity_count);
40030+ atomic_inc_unchecked(&pci_parity_count);
40031 }
40032
40033 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40034@@ -631,7 +631,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40035 "Detected Parity Error on %s\n",
40036 pci_name(dev));
40037
40038- atomic_inc(&pci_parity_count);
40039+ atomic_inc_unchecked(&pci_parity_count);
40040 }
40041 }
40042 }
40043@@ -669,7 +669,7 @@ void edac_pci_do_parity_check(void)
40044 if (!check_pci_errors)
40045 return;
40046
40047- before_count = atomic_read(&pci_parity_count);
40048+ before_count = atomic_read_unchecked(&pci_parity_count);
40049
40050 /* scan all PCI devices looking for a Parity Error on devices and
40051 * bridges.
40052@@ -681,7 +681,7 @@ void edac_pci_do_parity_check(void)
40053 /* Only if operator has selected panic on PCI Error */
40054 if (edac_pci_get_panic_on_pe()) {
40055 /* If the count is different 'after' from 'before' */
40056- if (before_count != atomic_read(&pci_parity_count))
40057+ if (before_count != atomic_read_unchecked(&pci_parity_count))
40058 panic("EDAC: PCI Parity Error");
40059 }
40060 }
40061diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
40062index c2359a1..8bd119d 100644
40063--- a/drivers/edac/mce_amd.h
40064+++ b/drivers/edac/mce_amd.h
40065@@ -74,7 +74,7 @@ struct amd_decoder_ops {
40066 bool (*mc0_mce)(u16, u8);
40067 bool (*mc1_mce)(u16, u8);
40068 bool (*mc2_mce)(u16, u8);
40069-};
40070+} __no_const;
40071
40072 void amd_report_gart_errors(bool);
40073 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
40074diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
40075index 57ea7f4..af06b76 100644
40076--- a/drivers/firewire/core-card.c
40077+++ b/drivers/firewire/core-card.c
40078@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
40079 const struct fw_card_driver *driver,
40080 struct device *device)
40081 {
40082- static atomic_t index = ATOMIC_INIT(-1);
40083+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
40084
40085- card->index = atomic_inc_return(&index);
40086+ card->index = atomic_inc_return_unchecked(&index);
40087 card->driver = driver;
40088 card->device = device;
40089 card->current_tlabel = 0;
40090@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
40091
40092 void fw_core_remove_card(struct fw_card *card)
40093 {
40094- struct fw_card_driver dummy_driver = dummy_driver_template;
40095+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
40096
40097 card->driver->update_phy_reg(card, 4,
40098 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
40099diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
40100index f9e3aee..269dbdb 100644
40101--- a/drivers/firewire/core-device.c
40102+++ b/drivers/firewire/core-device.c
40103@@ -256,7 +256,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
40104 struct config_rom_attribute {
40105 struct device_attribute attr;
40106 u32 key;
40107-};
40108+} __do_const;
40109
40110 static ssize_t show_immediate(struct device *dev,
40111 struct device_attribute *dattr, char *buf)
40112diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
40113index eb6935c..3cc2bfa 100644
40114--- a/drivers/firewire/core-transaction.c
40115+++ b/drivers/firewire/core-transaction.c
40116@@ -38,6 +38,7 @@
40117 #include <linux/timer.h>
40118 #include <linux/types.h>
40119 #include <linux/workqueue.h>
40120+#include <linux/sched.h>
40121
40122 #include <asm/byteorder.h>
40123
40124diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
40125index e1480ff6..1a429bd 100644
40126--- a/drivers/firewire/core.h
40127+++ b/drivers/firewire/core.h
40128@@ -111,6 +111,7 @@ struct fw_card_driver {
40129
40130 int (*stop_iso)(struct fw_iso_context *ctx);
40131 };
40132+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
40133
40134 void fw_card_initialize(struct fw_card *card,
40135 const struct fw_card_driver *driver, struct device *device);
40136diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
40137index aff9018..fc87ded 100644
40138--- a/drivers/firewire/ohci.c
40139+++ b/drivers/firewire/ohci.c
40140@@ -2054,10 +2054,12 @@ static void bus_reset_work(struct work_struct *work)
40141 be32_to_cpu(ohci->next_header));
40142 }
40143
40144+#ifndef CONFIG_GRKERNSEC
40145 if (param_remote_dma) {
40146 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
40147 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
40148 }
40149+#endif
40150
40151 spin_unlock_irq(&ohci->lock);
40152
40153@@ -2589,8 +2591,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
40154 unsigned long flags;
40155 int n, ret = 0;
40156
40157+#ifndef CONFIG_GRKERNSEC
40158 if (param_remote_dma)
40159 return 0;
40160+#endif
40161
40162 /*
40163 * FIXME: Make sure this bitmask is cleared when we clear the busReset
40164diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
40165index 94a58a0..f5eba42 100644
40166--- a/drivers/firmware/dmi-id.c
40167+++ b/drivers/firmware/dmi-id.c
40168@@ -16,7 +16,7 @@
40169 struct dmi_device_attribute{
40170 struct device_attribute dev_attr;
40171 int field;
40172-};
40173+} __do_const;
40174 #define to_dmi_dev_attr(_dev_attr) \
40175 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
40176
40177diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
40178index 2eebd28b..4261350 100644
40179--- a/drivers/firmware/dmi_scan.c
40180+++ b/drivers/firmware/dmi_scan.c
40181@@ -893,7 +893,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
40182 if (buf == NULL)
40183 return -1;
40184
40185- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
40186+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
40187
40188 dmi_unmap(buf);
40189 return 0;
40190diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
40191index 4fd9961..52d60ce 100644
40192--- a/drivers/firmware/efi/cper.c
40193+++ b/drivers/firmware/efi/cper.c
40194@@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
40195 */
40196 u64 cper_next_record_id(void)
40197 {
40198- static atomic64_t seq;
40199+ static atomic64_unchecked_t seq;
40200
40201- if (!atomic64_read(&seq))
40202- atomic64_set(&seq, ((u64)get_seconds()) << 32);
40203+ if (!atomic64_read_unchecked(&seq))
40204+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
40205
40206- return atomic64_inc_return(&seq);
40207+ return atomic64_inc_return_unchecked(&seq);
40208 }
40209 EXPORT_SYMBOL_GPL(cper_next_record_id);
40210
40211diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
40212index 9035c1b..aff45f8 100644
40213--- a/drivers/firmware/efi/efi.c
40214+++ b/drivers/firmware/efi/efi.c
40215@@ -151,14 +151,16 @@ static struct attribute_group efi_subsys_attr_group = {
40216 };
40217
40218 static struct efivars generic_efivars;
40219-static struct efivar_operations generic_ops;
40220+static efivar_operations_no_const generic_ops __read_only;
40221
40222 static int generic_ops_register(void)
40223 {
40224- generic_ops.get_variable = efi.get_variable;
40225- generic_ops.set_variable = efi.set_variable;
40226- generic_ops.get_next_variable = efi.get_next_variable;
40227- generic_ops.query_variable_store = efi_query_variable_store;
40228+ pax_open_kernel();
40229+ *(void **)&generic_ops.get_variable = efi.get_variable;
40230+ *(void **)&generic_ops.set_variable = efi.set_variable;
40231+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
40232+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
40233+ pax_close_kernel();
40234
40235 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
40236 }
40237diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
40238index f256ecd..387dcb1 100644
40239--- a/drivers/firmware/efi/efivars.c
40240+++ b/drivers/firmware/efi/efivars.c
40241@@ -589,7 +589,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
40242 static int
40243 create_efivars_bin_attributes(void)
40244 {
40245- struct bin_attribute *attr;
40246+ bin_attribute_no_const *attr;
40247 int error;
40248
40249 /* new_var */
40250diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
40251index 2f569aa..c95f4fb 100644
40252--- a/drivers/firmware/google/memconsole.c
40253+++ b/drivers/firmware/google/memconsole.c
40254@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
40255 if (!found_memconsole())
40256 return -ENODEV;
40257
40258- memconsole_bin_attr.size = memconsole_length;
40259+ pax_open_kernel();
40260+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
40261+ pax_close_kernel();
40262+
40263 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
40264 }
40265
40266diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
40267index 3cfcfc6..09d6f117 100644
40268--- a/drivers/gpio/gpio-em.c
40269+++ b/drivers/gpio/gpio-em.c
40270@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
40271 struct em_gio_priv *p;
40272 struct resource *io[2], *irq[2];
40273 struct gpio_chip *gpio_chip;
40274- struct irq_chip *irq_chip;
40275+ irq_chip_no_const *irq_chip;
40276 const char *name = dev_name(&pdev->dev);
40277 int ret;
40278
40279diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
40280index 7818cd1..1be40e5 100644
40281--- a/drivers/gpio/gpio-ich.c
40282+++ b/drivers/gpio/gpio-ich.c
40283@@ -94,7 +94,7 @@ struct ichx_desc {
40284 * this option allows driver caching written output values
40285 */
40286 bool use_outlvl_cache;
40287-};
40288+} __do_const;
40289
40290 static struct {
40291 spinlock_t lock;
40292diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
40293index f476ae2..05e1bdd 100644
40294--- a/drivers/gpio/gpio-omap.c
40295+++ b/drivers/gpio/gpio-omap.c
40296@@ -1188,7 +1188,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
40297 const struct omap_gpio_platform_data *pdata;
40298 struct resource *res;
40299 struct gpio_bank *bank;
40300- struct irq_chip *irqc;
40301+ irq_chip_no_const *irqc;
40302 int ret;
40303
40304 match = of_match_device(of_match_ptr(omap_gpio_match), dev);
40305diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
40306index 584484e..e26ebd6 100644
40307--- a/drivers/gpio/gpio-rcar.c
40308+++ b/drivers/gpio/gpio-rcar.c
40309@@ -366,7 +366,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
40310 struct gpio_rcar_priv *p;
40311 struct resource *io, *irq;
40312 struct gpio_chip *gpio_chip;
40313- struct irq_chip *irq_chip;
40314+ irq_chip_no_const *irq_chip;
40315 struct device *dev = &pdev->dev;
40316 const char *name = dev_name(dev);
40317 int ret;
40318diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
40319index c1caa45..f0f97d2 100644
40320--- a/drivers/gpio/gpio-vr41xx.c
40321+++ b/drivers/gpio/gpio-vr41xx.c
40322@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
40323 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
40324 maskl, pendl, maskh, pendh);
40325
40326- atomic_inc(&irq_err_count);
40327+ atomic_inc_unchecked(&irq_err_count);
40328
40329 return -EINVAL;
40330 }
40331diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
40332index 568aa2b..d1204d8 100644
40333--- a/drivers/gpio/gpiolib.c
40334+++ b/drivers/gpio/gpiolib.c
40335@@ -554,8 +554,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
40336 }
40337
40338 if (gpiochip->irqchip) {
40339- gpiochip->irqchip->irq_request_resources = NULL;
40340- gpiochip->irqchip->irq_release_resources = NULL;
40341+ pax_open_kernel();
40342+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
40343+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
40344+ pax_close_kernel();
40345 gpiochip->irqchip = NULL;
40346 }
40347 }
40348@@ -621,8 +623,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
40349 gpiochip->irqchip = NULL;
40350 return -EINVAL;
40351 }
40352- irqchip->irq_request_resources = gpiochip_irq_reqres;
40353- irqchip->irq_release_resources = gpiochip_irq_relres;
40354+
40355+ pax_open_kernel();
40356+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
40357+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
40358+ pax_close_kernel();
40359
40360 /*
40361 * Prepare the mapping since the irqchip shall be orthogonal to
40362diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
40363index 27a37e5..b6c6c71 100644
40364--- a/drivers/gpu/drm/drm_crtc.c
40365+++ b/drivers/gpu/drm/drm_crtc.c
40366@@ -3953,7 +3953,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
40367 goto done;
40368 }
40369
40370- if (copy_to_user(&enum_ptr[copied].name,
40371+ if (copy_to_user(enum_ptr[copied].name,
40372 &prop_enum->name, DRM_PROP_NAME_LEN)) {
40373 ret = -EFAULT;
40374 goto done;
40375diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
40376index 4f41377..ee33f40 100644
40377--- a/drivers/gpu/drm/drm_drv.c
40378+++ b/drivers/gpu/drm/drm_drv.c
40379@@ -444,7 +444,7 @@ void drm_unplug_dev(struct drm_device *dev)
40380
40381 drm_device_set_unplugged(dev);
40382
40383- if (dev->open_count == 0) {
40384+ if (local_read(&dev->open_count) == 0) {
40385 drm_put_dev(dev);
40386 }
40387 mutex_unlock(&drm_global_mutex);
40388diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
40389index 0b9514b..6acd174 100644
40390--- a/drivers/gpu/drm/drm_fops.c
40391+++ b/drivers/gpu/drm/drm_fops.c
40392@@ -89,7 +89,7 @@ int drm_open(struct inode *inode, struct file *filp)
40393 return PTR_ERR(minor);
40394
40395 dev = minor->dev;
40396- if (!dev->open_count++)
40397+ if (local_inc_return(&dev->open_count) == 1)
40398 need_setup = 1;
40399
40400 /* share address_space across all char-devs of a single device */
40401@@ -106,7 +106,7 @@ int drm_open(struct inode *inode, struct file *filp)
40402 return 0;
40403
40404 err_undo:
40405- dev->open_count--;
40406+ local_dec(&dev->open_count);
40407 drm_minor_release(minor);
40408 return retcode;
40409 }
40410@@ -376,7 +376,7 @@ int drm_release(struct inode *inode, struct file *filp)
40411
40412 mutex_lock(&drm_global_mutex);
40413
40414- DRM_DEBUG("open_count = %d\n", dev->open_count);
40415+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
40416
40417 mutex_lock(&dev->struct_mutex);
40418 list_del(&file_priv->lhead);
40419@@ -389,10 +389,10 @@ int drm_release(struct inode *inode, struct file *filp)
40420 * Begin inline drm_release
40421 */
40422
40423- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
40424+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
40425 task_pid_nr(current),
40426 (long)old_encode_dev(file_priv->minor->kdev->devt),
40427- dev->open_count);
40428+ local_read(&dev->open_count));
40429
40430 /* Release any auth tokens that might point to this file_priv,
40431 (do that under the drm_global_mutex) */
40432@@ -465,7 +465,7 @@ int drm_release(struct inode *inode, struct file *filp)
40433 * End inline drm_release
40434 */
40435
40436- if (!--dev->open_count) {
40437+ if (local_dec_and_test(&dev->open_count)) {
40438 retcode = drm_lastclose(dev);
40439 if (drm_device_is_unplugged(dev))
40440 drm_put_dev(dev);
40441diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
40442index 3d2e91c..d31c4c9 100644
40443--- a/drivers/gpu/drm/drm_global.c
40444+++ b/drivers/gpu/drm/drm_global.c
40445@@ -36,7 +36,7 @@
40446 struct drm_global_item {
40447 struct mutex mutex;
40448 void *object;
40449- int refcount;
40450+ atomic_t refcount;
40451 };
40452
40453 static struct drm_global_item glob[DRM_GLOBAL_NUM];
40454@@ -49,7 +49,7 @@ void drm_global_init(void)
40455 struct drm_global_item *item = &glob[i];
40456 mutex_init(&item->mutex);
40457 item->object = NULL;
40458- item->refcount = 0;
40459+ atomic_set(&item->refcount, 0);
40460 }
40461 }
40462
40463@@ -59,7 +59,7 @@ void drm_global_release(void)
40464 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
40465 struct drm_global_item *item = &glob[i];
40466 BUG_ON(item->object != NULL);
40467- BUG_ON(item->refcount != 0);
40468+ BUG_ON(atomic_read(&item->refcount) != 0);
40469 }
40470 }
40471
40472@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40473 struct drm_global_item *item = &glob[ref->global_type];
40474
40475 mutex_lock(&item->mutex);
40476- if (item->refcount == 0) {
40477+ if (atomic_read(&item->refcount) == 0) {
40478 item->object = kzalloc(ref->size, GFP_KERNEL);
40479 if (unlikely(item->object == NULL)) {
40480 ret = -ENOMEM;
40481@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
40482 goto out_err;
40483
40484 }
40485- ++item->refcount;
40486+ atomic_inc(&item->refcount);
40487 ref->object = item->object;
40488 mutex_unlock(&item->mutex);
40489 return 0;
40490@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
40491 struct drm_global_item *item = &glob[ref->global_type];
40492
40493 mutex_lock(&item->mutex);
40494- BUG_ON(item->refcount == 0);
40495+ BUG_ON(atomic_read(&item->refcount) == 0);
40496 BUG_ON(ref->object != item->object);
40497- if (--item->refcount == 0) {
40498+ if (atomic_dec_and_test(&item->refcount)) {
40499 ref->release(ref);
40500 item->object = NULL;
40501 }
40502diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
40503index 51efebd..2b70935 100644
40504--- a/drivers/gpu/drm/drm_info.c
40505+++ b/drivers/gpu/drm/drm_info.c
40506@@ -76,10 +76,13 @@ int drm_vm_info(struct seq_file *m, void *data)
40507 struct drm_local_map *map;
40508 struct drm_map_list *r_list;
40509
40510- /* Hardcoded from _DRM_FRAME_BUFFER,
40511- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
40512- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
40513- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
40514+ static const char * const types[] = {
40515+ [_DRM_FRAME_BUFFER] = "FB",
40516+ [_DRM_REGISTERS] = "REG",
40517+ [_DRM_SHM] = "SHM",
40518+ [_DRM_AGP] = "AGP",
40519+ [_DRM_SCATTER_GATHER] = "SG",
40520+ [_DRM_CONSISTENT] = "PCI"};
40521 const char *type;
40522 int i;
40523
40524@@ -90,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
40525 map = r_list->map;
40526 if (!map)
40527 continue;
40528- if (map->type < 0 || map->type > 5)
40529+ if (map->type >= ARRAY_SIZE(types))
40530 type = "??";
40531 else
40532 type = types[map->type];
40533diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
40534index 2f4c4343..dd12cd2 100644
40535--- a/drivers/gpu/drm/drm_ioc32.c
40536+++ b/drivers/gpu/drm/drm_ioc32.c
40537@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
40538 request = compat_alloc_user_space(nbytes);
40539 if (!access_ok(VERIFY_WRITE, request, nbytes))
40540 return -EFAULT;
40541- list = (struct drm_buf_desc *) (request + 1);
40542+ list = (struct drm_buf_desc __user *) (request + 1);
40543
40544 if (__put_user(count, &request->count)
40545 || __put_user(list, &request->list))
40546@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
40547 request = compat_alloc_user_space(nbytes);
40548 if (!access_ok(VERIFY_WRITE, request, nbytes))
40549 return -EFAULT;
40550- list = (struct drm_buf_pub *) (request + 1);
40551+ list = (struct drm_buf_pub __user *) (request + 1);
40552
40553 if (__put_user(count, &request->count)
40554 || __put_user(list, &request->list))
40555@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
40556 return 0;
40557 }
40558
40559-drm_ioctl_compat_t *drm_compat_ioctls[] = {
40560+drm_ioctl_compat_t drm_compat_ioctls[] = {
40561 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
40562 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
40563 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
40564@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
40565 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40566 {
40567 unsigned int nr = DRM_IOCTL_NR(cmd);
40568- drm_ioctl_compat_t *fn;
40569 int ret;
40570
40571 /* Assume that ioctls without an explicit compat routine will just
40572@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40573 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
40574 return drm_ioctl(filp, cmd, arg);
40575
40576- fn = drm_compat_ioctls[nr];
40577-
40578- if (fn != NULL)
40579- ret = (*fn) (filp, cmd, arg);
40580+ if (drm_compat_ioctls[nr] != NULL)
40581+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
40582 else
40583 ret = drm_ioctl(filp, cmd, arg);
40584
40585diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
40586index 00587a1..57a65ca 100644
40587--- a/drivers/gpu/drm/drm_ioctl.c
40588+++ b/drivers/gpu/drm/drm_ioctl.c
40589@@ -642,7 +642,7 @@ long drm_ioctl(struct file *filp,
40590 struct drm_file *file_priv = filp->private_data;
40591 struct drm_device *dev;
40592 const struct drm_ioctl_desc *ioctl = NULL;
40593- drm_ioctl_t *func;
40594+ drm_ioctl_no_const_t func;
40595 unsigned int nr = DRM_IOCTL_NR(cmd);
40596 int retcode = -EINVAL;
40597 char stack_kdata[128];
40598diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
40599index 93ec5dc..82acbaf 100644
40600--- a/drivers/gpu/drm/i810/i810_drv.h
40601+++ b/drivers/gpu/drm/i810/i810_drv.h
40602@@ -110,8 +110,8 @@ typedef struct drm_i810_private {
40603 int page_flipping;
40604
40605 wait_queue_head_t irq_queue;
40606- atomic_t irq_received;
40607- atomic_t irq_emitted;
40608+ atomic_unchecked_t irq_received;
40609+ atomic_unchecked_t irq_emitted;
40610
40611 int front_offset;
40612 } drm_i810_private_t;
40613diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
40614index ecee3bc..ad5ae67 100644
40615--- a/drivers/gpu/drm/i915/i915_dma.c
40616+++ b/drivers/gpu/drm/i915/i915_dma.c
40617@@ -356,7 +356,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
40618 * locking inversion with the driver load path. And the access here is
40619 * completely racy anyway. So don't bother with locking for now.
40620 */
40621- return dev->open_count == 0;
40622+ return local_read(&dev->open_count) == 0;
40623 }
40624
40625 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
40626diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40627index 1173831..7dfb389 100644
40628--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40629+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
40630@@ -863,12 +863,12 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
40631 static int
40632 validate_exec_list(struct drm_device *dev,
40633 struct drm_i915_gem_exec_object2 *exec,
40634- int count)
40635+ unsigned int count)
40636 {
40637 unsigned relocs_total = 0;
40638 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
40639 unsigned invalid_flags;
40640- int i;
40641+ unsigned int i;
40642
40643 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
40644 if (USES_FULL_PPGTT(dev))
40645diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
40646index 176de63..1ef9ac7 100644
40647--- a/drivers/gpu/drm/i915/i915_ioc32.c
40648+++ b/drivers/gpu/drm/i915/i915_ioc32.c
40649@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
40650 (unsigned long)request);
40651 }
40652
40653-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40654+static drm_ioctl_compat_t i915_compat_ioctls[] = {
40655 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
40656 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
40657 [DRM_I915_GETPARAM] = compat_i915_getparam,
40658@@ -201,18 +201,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
40659 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40660 {
40661 unsigned int nr = DRM_IOCTL_NR(cmd);
40662- drm_ioctl_compat_t *fn = NULL;
40663 int ret;
40664
40665 if (nr < DRM_COMMAND_BASE)
40666 return drm_compat_ioctl(filp, cmd, arg);
40667
40668- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
40669- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40670-
40671- if (fn != NULL)
40672+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls)) {
40673+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
40674 ret = (*fn) (filp, cmd, arg);
40675- else
40676+ } else
40677 ret = drm_ioctl(filp, cmd, arg);
40678
40679 return ret;
40680diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
40681index 791b00e..4d10235 100644
40682--- a/drivers/gpu/drm/i915/intel_display.c
40683+++ b/drivers/gpu/drm/i915/intel_display.c
40684@@ -12939,13 +12939,13 @@ struct intel_quirk {
40685 int subsystem_vendor;
40686 int subsystem_device;
40687 void (*hook)(struct drm_device *dev);
40688-};
40689+} __do_const;
40690
40691 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
40692 struct intel_dmi_quirk {
40693 void (*hook)(struct drm_device *dev);
40694 const struct dmi_system_id (*dmi_id_list)[];
40695-};
40696+} __do_const;
40697
40698 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40699 {
40700@@ -12953,18 +12953,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
40701 return 1;
40702 }
40703
40704-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40705+static const struct dmi_system_id intel_dmi_quirks_table[] = {
40706 {
40707- .dmi_id_list = &(const struct dmi_system_id[]) {
40708- {
40709- .callback = intel_dmi_reverse_brightness,
40710- .ident = "NCR Corporation",
40711- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40712- DMI_MATCH(DMI_PRODUCT_NAME, ""),
40713- },
40714- },
40715- { } /* terminating entry */
40716+ .callback = intel_dmi_reverse_brightness,
40717+ .ident = "NCR Corporation",
40718+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
40719+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
40720 },
40721+ },
40722+ { } /* terminating entry */
40723+};
40724+
40725+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
40726+ {
40727+ .dmi_id_list = &intel_dmi_quirks_table,
40728 .hook = quirk_invert_brightness,
40729 },
40730 };
40731diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
40732index b250130..98df2a4 100644
40733--- a/drivers/gpu/drm/imx/imx-drm-core.c
40734+++ b/drivers/gpu/drm/imx/imx-drm-core.c
40735@@ -356,7 +356,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
40736 if (imxdrm->pipes >= MAX_CRTC)
40737 return -EINVAL;
40738
40739- if (imxdrm->drm->open_count)
40740+ if (local_read(&imxdrm->drm->open_count))
40741 return -EBUSY;
40742
40743 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
40744diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
40745index b4a20149..219ab78 100644
40746--- a/drivers/gpu/drm/mga/mga_drv.h
40747+++ b/drivers/gpu/drm/mga/mga_drv.h
40748@@ -122,9 +122,9 @@ typedef struct drm_mga_private {
40749 u32 clear_cmd;
40750 u32 maccess;
40751
40752- atomic_t vbl_received; /**< Number of vblanks received. */
40753+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
40754 wait_queue_head_t fence_queue;
40755- atomic_t last_fence_retired;
40756+ atomic_unchecked_t last_fence_retired;
40757 u32 next_fence_to_post;
40758
40759 unsigned int fb_cpp;
40760diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
40761index 729bfd5..ead8823 100644
40762--- a/drivers/gpu/drm/mga/mga_ioc32.c
40763+++ b/drivers/gpu/drm/mga/mga_ioc32.c
40764@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
40765 return 0;
40766 }
40767
40768-drm_ioctl_compat_t *mga_compat_ioctls[] = {
40769+drm_ioctl_compat_t mga_compat_ioctls[] = {
40770 [DRM_MGA_INIT] = compat_mga_init,
40771 [DRM_MGA_GETPARAM] = compat_mga_getparam,
40772 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
40773@@ -208,18 +208,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
40774 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
40775 {
40776 unsigned int nr = DRM_IOCTL_NR(cmd);
40777- drm_ioctl_compat_t *fn = NULL;
40778 int ret;
40779
40780 if (nr < DRM_COMMAND_BASE)
40781 return drm_compat_ioctl(filp, cmd, arg);
40782
40783- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
40784- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40785-
40786- if (fn != NULL)
40787+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls)) {
40788+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
40789 ret = (*fn) (filp, cmd, arg);
40790- else
40791+ } else
40792 ret = drm_ioctl(filp, cmd, arg);
40793
40794 return ret;
40795diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
40796index 1b071b8..de8601a 100644
40797--- a/drivers/gpu/drm/mga/mga_irq.c
40798+++ b/drivers/gpu/drm/mga/mga_irq.c
40799@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
40800 if (crtc != 0)
40801 return 0;
40802
40803- return atomic_read(&dev_priv->vbl_received);
40804+ return atomic_read_unchecked(&dev_priv->vbl_received);
40805 }
40806
40807
40808@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
40809 /* VBLANK interrupt */
40810 if (status & MGA_VLINEPEN) {
40811 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
40812- atomic_inc(&dev_priv->vbl_received);
40813+ atomic_inc_unchecked(&dev_priv->vbl_received);
40814 drm_handle_vblank(dev, 0);
40815 handled = 1;
40816 }
40817@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
40818 if ((prim_start & ~0x03) != (prim_end & ~0x03))
40819 MGA_WRITE(MGA_PRIMEND, prim_end);
40820
40821- atomic_inc(&dev_priv->last_fence_retired);
40822+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
40823 wake_up(&dev_priv->fence_queue);
40824 handled = 1;
40825 }
40826@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
40827 * using fences.
40828 */
40829 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
40830- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
40831+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
40832 - *sequence) <= (1 << 23)));
40833
40834 *sequence = cur_fence;
40835diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
40836index 7df6acc..84bbe52 100644
40837--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
40838+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
40839@@ -963,7 +963,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
40840 struct bit_table {
40841 const char id;
40842 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
40843-};
40844+} __no_const;
40845
40846 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
40847
40848diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
40849index 8ae36f2..1147a30 100644
40850--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
40851+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
40852@@ -121,7 +121,6 @@ struct nouveau_drm {
40853 struct drm_global_reference mem_global_ref;
40854 struct ttm_bo_global_ref bo_global_ref;
40855 struct ttm_bo_device bdev;
40856- atomic_t validate_sequence;
40857 int (*move)(struct nouveau_channel *,
40858 struct ttm_buffer_object *,
40859 struct ttm_mem_reg *, struct ttm_mem_reg *);
40860diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40861index 462679a..88e32a7 100644
40862--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40863+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
40864@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
40865 unsigned long arg)
40866 {
40867 unsigned int nr = DRM_IOCTL_NR(cmd);
40868- drm_ioctl_compat_t *fn = NULL;
40869+ drm_ioctl_compat_t fn = NULL;
40870 int ret;
40871
40872 if (nr < DRM_COMMAND_BASE)
40873diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
40874index 3d1cfcb..0542700 100644
40875--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
40876+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
40877@@ -127,11 +127,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40878 }
40879
40880 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
40881- nouveau_vram_manager_init,
40882- nouveau_vram_manager_fini,
40883- nouveau_vram_manager_new,
40884- nouveau_vram_manager_del,
40885- nouveau_vram_manager_debug
40886+ .init = nouveau_vram_manager_init,
40887+ .takedown = nouveau_vram_manager_fini,
40888+ .get_node = nouveau_vram_manager_new,
40889+ .put_node = nouveau_vram_manager_del,
40890+ .debug = nouveau_vram_manager_debug
40891 };
40892
40893 static int
40894@@ -195,11 +195,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40895 }
40896
40897 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
40898- nouveau_gart_manager_init,
40899- nouveau_gart_manager_fini,
40900- nouveau_gart_manager_new,
40901- nouveau_gart_manager_del,
40902- nouveau_gart_manager_debug
40903+ .init = nouveau_gart_manager_init,
40904+ .takedown = nouveau_gart_manager_fini,
40905+ .get_node = nouveau_gart_manager_new,
40906+ .put_node = nouveau_gart_manager_del,
40907+ .debug = nouveau_gart_manager_debug
40908 };
40909
40910 /*XXX*/
40911@@ -268,11 +268,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
40912 }
40913
40914 const struct ttm_mem_type_manager_func nv04_gart_manager = {
40915- nv04_gart_manager_init,
40916- nv04_gart_manager_fini,
40917- nv04_gart_manager_new,
40918- nv04_gart_manager_del,
40919- nv04_gart_manager_debug
40920+ .init = nv04_gart_manager_init,
40921+ .takedown = nv04_gart_manager_fini,
40922+ .get_node = nv04_gart_manager_new,
40923+ .put_node = nv04_gart_manager_del,
40924+ .debug = nv04_gart_manager_debug
40925 };
40926
40927 int
40928diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
40929index c7592ec..dd45ebc 100644
40930--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
40931+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
40932@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
40933 * locking inversion with the driver load path. And the access here is
40934 * completely racy anyway. So don't bother with locking for now.
40935 */
40936- return dev->open_count == 0;
40937+ return local_read(&dev->open_count) == 0;
40938 }
40939
40940 static const struct vga_switcheroo_client_ops
40941diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
40942index 9782364..89bd954 100644
40943--- a/drivers/gpu/drm/qxl/qxl_cmd.c
40944+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
40945@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
40946 int ret;
40947
40948 mutex_lock(&qdev->async_io_mutex);
40949- irq_num = atomic_read(&qdev->irq_received_io_cmd);
40950+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
40951 if (qdev->last_sent_io_cmd > irq_num) {
40952 if (intr)
40953 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
40954- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40955+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40956 else
40957 ret = wait_event_timeout(qdev->io_cmd_event,
40958- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40959+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40960 /* 0 is timeout, just bail the "hw" has gone away */
40961 if (ret <= 0)
40962 goto out;
40963- irq_num = atomic_read(&qdev->irq_received_io_cmd);
40964+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
40965 }
40966 outb(val, addr);
40967 qdev->last_sent_io_cmd = irq_num + 1;
40968 if (intr)
40969 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
40970- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40971+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40972 else
40973 ret = wait_event_timeout(qdev->io_cmd_event,
40974- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40975+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
40976 out:
40977 if (ret > 0)
40978 ret = 0;
40979diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
40980index 6911b8c..89d6867 100644
40981--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
40982+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
40983@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
40984 struct drm_info_node *node = (struct drm_info_node *) m->private;
40985 struct qxl_device *qdev = node->minor->dev->dev_private;
40986
40987- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
40988- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
40989- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
40990- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
40991+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
40992+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
40993+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
40994+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
40995 seq_printf(m, "%d\n", qdev->irq_received_error);
40996 return 0;
40997 }
40998diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
40999index 7c6cafe..460f542 100644
41000--- a/drivers/gpu/drm/qxl/qxl_drv.h
41001+++ b/drivers/gpu/drm/qxl/qxl_drv.h
41002@@ -290,10 +290,10 @@ struct qxl_device {
41003 unsigned int last_sent_io_cmd;
41004
41005 /* interrupt handling */
41006- atomic_t irq_received;
41007- atomic_t irq_received_display;
41008- atomic_t irq_received_cursor;
41009- atomic_t irq_received_io_cmd;
41010+ atomic_unchecked_t irq_received;
41011+ atomic_unchecked_t irq_received_display;
41012+ atomic_unchecked_t irq_received_cursor;
41013+ atomic_unchecked_t irq_received_io_cmd;
41014 unsigned irq_received_error;
41015 wait_queue_head_t display_event;
41016 wait_queue_head_t cursor_event;
41017diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
41018index b110883..dd06418 100644
41019--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
41020+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
41021@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41022
41023 /* TODO copy slow path code from i915 */
41024 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
41025- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
41026+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
41027
41028 {
41029 struct qxl_drawable *draw = fb_cmd;
41030@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41031 struct drm_qxl_reloc reloc;
41032
41033 if (copy_from_user(&reloc,
41034- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
41035+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
41036 sizeof(reloc))) {
41037 ret = -EFAULT;
41038 goto out_free_bos;
41039@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
41040
41041 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
41042
41043- struct drm_qxl_command *commands =
41044- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
41045+ struct drm_qxl_command __user *commands =
41046+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
41047
41048- if (copy_from_user(&user_cmd, &commands[cmd_num],
41049+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
41050 sizeof(user_cmd)))
41051 return -EFAULT;
41052
41053diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
41054index 0bf1e20..42a7310 100644
41055--- a/drivers/gpu/drm/qxl/qxl_irq.c
41056+++ b/drivers/gpu/drm/qxl/qxl_irq.c
41057@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
41058 if (!pending)
41059 return IRQ_NONE;
41060
41061- atomic_inc(&qdev->irq_received);
41062+ atomic_inc_unchecked(&qdev->irq_received);
41063
41064 if (pending & QXL_INTERRUPT_DISPLAY) {
41065- atomic_inc(&qdev->irq_received_display);
41066+ atomic_inc_unchecked(&qdev->irq_received_display);
41067 wake_up_all(&qdev->display_event);
41068 qxl_queue_garbage_collect(qdev, false);
41069 }
41070 if (pending & QXL_INTERRUPT_CURSOR) {
41071- atomic_inc(&qdev->irq_received_cursor);
41072+ atomic_inc_unchecked(&qdev->irq_received_cursor);
41073 wake_up_all(&qdev->cursor_event);
41074 }
41075 if (pending & QXL_INTERRUPT_IO_CMD) {
41076- atomic_inc(&qdev->irq_received_io_cmd);
41077+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
41078 wake_up_all(&qdev->io_cmd_event);
41079 }
41080 if (pending & QXL_INTERRUPT_ERROR) {
41081@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
41082 init_waitqueue_head(&qdev->io_cmd_event);
41083 INIT_WORK(&qdev->client_monitors_config_work,
41084 qxl_client_monitors_config_work_func);
41085- atomic_set(&qdev->irq_received, 0);
41086- atomic_set(&qdev->irq_received_display, 0);
41087- atomic_set(&qdev->irq_received_cursor, 0);
41088- atomic_set(&qdev->irq_received_io_cmd, 0);
41089+ atomic_set_unchecked(&qdev->irq_received, 0);
41090+ atomic_set_unchecked(&qdev->irq_received_display, 0);
41091+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
41092+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
41093 qdev->irq_received_error = 0;
41094 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
41095 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
41096diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
41097index 0cbc4c9..0e46686 100644
41098--- a/drivers/gpu/drm/qxl/qxl_ttm.c
41099+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
41100@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
41101 }
41102 }
41103
41104-static struct vm_operations_struct qxl_ttm_vm_ops;
41105+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
41106 static const struct vm_operations_struct *ttm_vm_ops;
41107
41108 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41109@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
41110 return r;
41111 if (unlikely(ttm_vm_ops == NULL)) {
41112 ttm_vm_ops = vma->vm_ops;
41113+ pax_open_kernel();
41114 qxl_ttm_vm_ops = *ttm_vm_ops;
41115 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
41116+ pax_close_kernel();
41117 }
41118 vma->vm_ops = &qxl_ttm_vm_ops;
41119 return 0;
41120@@ -464,25 +466,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
41121 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
41122 {
41123 #if defined(CONFIG_DEBUG_FS)
41124- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
41125- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
41126- unsigned i;
41127+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
41128+ {
41129+ .name = "qxl_mem_mm",
41130+ .show = &qxl_mm_dump_table,
41131+ },
41132+ {
41133+ .name = "qxl_surf_mm",
41134+ .show = &qxl_mm_dump_table,
41135+ }
41136+ };
41137
41138- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
41139- if (i == 0)
41140- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
41141- else
41142- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
41143- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
41144- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
41145- qxl_mem_types_list[i].driver_features = 0;
41146- if (i == 0)
41147- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41148- else
41149- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41150+ pax_open_kernel();
41151+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41152+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41153+ pax_close_kernel();
41154
41155- }
41156- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
41157+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
41158 #else
41159 return 0;
41160 #endif
41161diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
41162index 2c45ac9..5d740f8 100644
41163--- a/drivers/gpu/drm/r128/r128_cce.c
41164+++ b/drivers/gpu/drm/r128/r128_cce.c
41165@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
41166
41167 /* GH: Simple idle check.
41168 */
41169- atomic_set(&dev_priv->idle_count, 0);
41170+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41171
41172 /* We don't support anything other than bus-mastering ring mode,
41173 * but the ring can be in either AGP or PCI space for the ring
41174diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
41175index 723e5d6..102dbaf 100644
41176--- a/drivers/gpu/drm/r128/r128_drv.h
41177+++ b/drivers/gpu/drm/r128/r128_drv.h
41178@@ -93,14 +93,14 @@ typedef struct drm_r128_private {
41179 int is_pci;
41180 unsigned long cce_buffers_offset;
41181
41182- atomic_t idle_count;
41183+ atomic_unchecked_t idle_count;
41184
41185 int page_flipping;
41186 int current_page;
41187 u32 crtc_offset;
41188 u32 crtc_offset_cntl;
41189
41190- atomic_t vbl_received;
41191+ atomic_unchecked_t vbl_received;
41192
41193 u32 color_fmt;
41194 unsigned int front_offset;
41195diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
41196index 663f38c..c689495 100644
41197--- a/drivers/gpu/drm/r128/r128_ioc32.c
41198+++ b/drivers/gpu/drm/r128/r128_ioc32.c
41199@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
41200 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
41201 }
41202
41203-drm_ioctl_compat_t *r128_compat_ioctls[] = {
41204+drm_ioctl_compat_t r128_compat_ioctls[] = {
41205 [DRM_R128_INIT] = compat_r128_init,
41206 [DRM_R128_DEPTH] = compat_r128_depth,
41207 [DRM_R128_STIPPLE] = compat_r128_stipple,
41208@@ -197,18 +197,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
41209 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41210 {
41211 unsigned int nr = DRM_IOCTL_NR(cmd);
41212- drm_ioctl_compat_t *fn = NULL;
41213 int ret;
41214
41215 if (nr < DRM_COMMAND_BASE)
41216 return drm_compat_ioctl(filp, cmd, arg);
41217
41218- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
41219- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41220-
41221- if (fn != NULL)
41222+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls)) {
41223+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41224 ret = (*fn) (filp, cmd, arg);
41225- else
41226+ } else
41227 ret = drm_ioctl(filp, cmd, arg);
41228
41229 return ret;
41230diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
41231index c2ae496..30b5993 100644
41232--- a/drivers/gpu/drm/r128/r128_irq.c
41233+++ b/drivers/gpu/drm/r128/r128_irq.c
41234@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
41235 if (crtc != 0)
41236 return 0;
41237
41238- return atomic_read(&dev_priv->vbl_received);
41239+ return atomic_read_unchecked(&dev_priv->vbl_received);
41240 }
41241
41242 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41243@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41244 /* VBLANK interrupt */
41245 if (status & R128_CRTC_VBLANK_INT) {
41246 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
41247- atomic_inc(&dev_priv->vbl_received);
41248+ atomic_inc_unchecked(&dev_priv->vbl_received);
41249 drm_handle_vblank(dev, 0);
41250 return IRQ_HANDLED;
41251 }
41252diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
41253index 8fd2d9f..18c9660 100644
41254--- a/drivers/gpu/drm/r128/r128_state.c
41255+++ b/drivers/gpu/drm/r128/r128_state.c
41256@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
41257
41258 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
41259 {
41260- if (atomic_read(&dev_priv->idle_count) == 0)
41261+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
41262 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
41263 else
41264- atomic_set(&dev_priv->idle_count, 0);
41265+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41266 }
41267
41268 #endif
41269diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
41270index b928c17..e5d9400 100644
41271--- a/drivers/gpu/drm/radeon/mkregtable.c
41272+++ b/drivers/gpu/drm/radeon/mkregtable.c
41273@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
41274 regex_t mask_rex;
41275 regmatch_t match[4];
41276 char buf[1024];
41277- size_t end;
41278+ long end;
41279 int len;
41280 int done = 0;
41281 int r;
41282 unsigned o;
41283 struct offset *offset;
41284 char last_reg_s[10];
41285- int last_reg;
41286+ unsigned long last_reg;
41287
41288 if (regcomp
41289 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
41290diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
41291index bd7519f..e1c2cd95 100644
41292--- a/drivers/gpu/drm/radeon/radeon_device.c
41293+++ b/drivers/gpu/drm/radeon/radeon_device.c
41294@@ -1247,7 +1247,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
41295 * locking inversion with the driver load path. And the access here is
41296 * completely racy anyway. So don't bother with locking for now.
41297 */
41298- return dev->open_count == 0;
41299+ return local_read(&dev->open_count) == 0;
41300 }
41301
41302 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
41303diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
41304index 46bd393..6ae4719 100644
41305--- a/drivers/gpu/drm/radeon/radeon_drv.h
41306+++ b/drivers/gpu/drm/radeon/radeon_drv.h
41307@@ -264,7 +264,7 @@ typedef struct drm_radeon_private {
41308
41309 /* SW interrupt */
41310 wait_queue_head_t swi_queue;
41311- atomic_t swi_emitted;
41312+ atomic_unchecked_t swi_emitted;
41313 int vblank_crtc;
41314 uint32_t irq_enable_reg;
41315 uint32_t r500_disp_irq_reg;
41316diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
41317index 0b98ea1..0881827 100644
41318--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
41319+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
41320@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41321 request = compat_alloc_user_space(sizeof(*request));
41322 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
41323 || __put_user(req32.param, &request->param)
41324- || __put_user((void __user *)(unsigned long)req32.value,
41325+ || __put_user((unsigned long)req32.value,
41326 &request->value))
41327 return -EFAULT;
41328
41329@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
41330 #define compat_radeon_cp_setparam NULL
41331 #endif /* X86_64 || IA64 */
41332
41333-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41334+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
41335 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
41336 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
41337 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
41338@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
41339 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41340 {
41341 unsigned int nr = DRM_IOCTL_NR(cmd);
41342- drm_ioctl_compat_t *fn = NULL;
41343 int ret;
41344
41345 if (nr < DRM_COMMAND_BASE)
41346 return drm_compat_ioctl(filp, cmd, arg);
41347
41348- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
41349- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41350-
41351- if (fn != NULL)
41352+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls)) {
41353+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
41354 ret = (*fn) (filp, cmd, arg);
41355- else
41356+ } else
41357 ret = drm_ioctl(filp, cmd, arg);
41358
41359 return ret;
41360diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
41361index 244b19b..c19226d 100644
41362--- a/drivers/gpu/drm/radeon/radeon_irq.c
41363+++ b/drivers/gpu/drm/radeon/radeon_irq.c
41364@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
41365 unsigned int ret;
41366 RING_LOCALS;
41367
41368- atomic_inc(&dev_priv->swi_emitted);
41369- ret = atomic_read(&dev_priv->swi_emitted);
41370+ atomic_inc_unchecked(&dev_priv->swi_emitted);
41371+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
41372
41373 BEGIN_RING(4);
41374 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
41375@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
41376 drm_radeon_private_t *dev_priv =
41377 (drm_radeon_private_t *) dev->dev_private;
41378
41379- atomic_set(&dev_priv->swi_emitted, 0);
41380+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
41381 init_waitqueue_head(&dev_priv->swi_queue);
41382
41383 dev->max_vblank_count = 0x001fffff;
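swi_emitted is a free-running fence sequence number, not a reference count, so wrapping it is harmless. Under PaX's REFCOUNT hardening the ordinary atomic_t operations trap on overflow; counters like this one opt out via the *_unchecked variants. On a build without the feature the variants collapse to the plain operations, roughly (a sketch; the patch's real definitions live in its atomic header changes):

#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)          atomic_read(v)
#define atomic_set_unchecked(v, i)        atomic_set((v), (i))
#define atomic_inc_unchecked(v)           atomic_inc(v)
#define atomic_inc_return_unchecked(v)    atomic_inc_return(v)
#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
#endif

The same split explains the via_drv.h/via_irq.c, vmwgfx marker_seq, hv trans_id and infiniband counter hunks later in this section.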
41384diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
41385index 15aee72..cda326e 100644
41386--- a/drivers/gpu/drm/radeon/radeon_state.c
41387+++ b/drivers/gpu/drm/radeon/radeon_state.c
41388@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
41389 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
41390 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
41391
41392- if (copy_from_user(&depth_boxes, clear->depth_boxes,
41393+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
41394 sarea_priv->nbox * sizeof(depth_boxes[0])))
41395 return -EFAULT;
41396
41397@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
41398 {
41399 drm_radeon_private_t *dev_priv = dev->dev_private;
41400 drm_radeon_getparam_t *param = data;
41401- int value;
41402+ int value = 0;
41403
41404 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
41405
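The clear-ioctl change re-tests nbox in the same expression as the copy because sarea_priv lives in the SAREA, a mapping userspace shares: another thread can grow nbox between the clamp above and the size computation in copy_from_user(), a classic double-fetch. Reading the value once into a local is the tightest form of the fix; a sketch using the names from the hunk:

unsigned int nbox = READ_ONCE(sarea_priv->nbox);        /* single fetch */

if (nbox > RADEON_NR_SAREA_CLIPRECTS)
        nbox = RADEON_NR_SAREA_CLIPRECTS;

if (copy_from_user(&depth_boxes, clear->depth_boxes,
                   nbox * sizeof(depth_boxes[0])))
        return -EFAULT;

The int value = 0 initialization in the getparam hunk is related hygiene: if a switch arm ever fails to assign value, zero rather than uninitialized stack memory is what reaches userspace.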
41406diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
41407index b292aca..4e338b5 100644
41408--- a/drivers/gpu/drm/radeon/radeon_ttm.c
41409+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
41410@@ -963,7 +963,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
41411 man->size = size >> PAGE_SHIFT;
41412 }
41413
41414-static struct vm_operations_struct radeon_ttm_vm_ops;
41415+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
41416 static const struct vm_operations_struct *ttm_vm_ops = NULL;
41417
41418 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41419@@ -1004,8 +1004,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
41420 }
41421 if (unlikely(ttm_vm_ops == NULL)) {
41422 ttm_vm_ops = vma->vm_ops;
41423+ pax_open_kernel();
41424 radeon_ttm_vm_ops = *ttm_vm_ops;
41425 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
41426+ pax_close_kernel();
41427 }
41428 vma->vm_ops = &radeon_ttm_vm_ops;
41429 return 0;
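radeon_ttm_vm_ops is annotated __read_only, so after boot it sits on pages the kernel maps read-only; the one-time fixup that clones TTM's vm_ops and overrides .fault must lift write protection for the duration of the store. pax_open_kernel()/pax_close_kernel() do that (toggling CR0.WP on x86, compiling away when the hardening is off). The general shape, with hypothetical names:

static struct vm_operations_struct my_vm_ops __read_only;

static void patch_ops_once(const struct vm_operations_struct *tmpl)
{
        pax_open_kernel();              /* allow writes to rodata */
        my_vm_ops = *tmpl;              /* clone the template */
        my_vm_ops.fault = my_fault;     /* override one hook */
        pax_close_kernel();             /* re-arm write protection */
}

Keeping the open/close window this narrow is the point: the structure is writable for a handful of instructions on one CPU, not for the lifetime of the driver.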
41430diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
41431index 978993f..e36e50e 100644
41432--- a/drivers/gpu/drm/tegra/dc.c
41433+++ b/drivers/gpu/drm/tegra/dc.c
41434@@ -1416,7 +1416,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
41435 }
41436
41437 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
41438- dc->debugfs_files[i].data = dc;
41439+ *(void **)&dc->debugfs_files[i].data = dc;
41440
41441 err = drm_debugfs_create_files(dc->debugfs_files,
41442 ARRAY_SIZE(debugfs_files),
41443diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
41444index 33f67fd..55ee9761 100644
41445--- a/drivers/gpu/drm/tegra/dsi.c
41446+++ b/drivers/gpu/drm/tegra/dsi.c
41447@@ -39,7 +39,7 @@ struct tegra_dsi {
41448 struct clk *clk_lp;
41449 struct clk *clk;
41450
41451- struct drm_info_list *debugfs_files;
41452+ drm_info_list_no_const *debugfs_files;
41453 struct drm_minor *minor;
41454 struct dentry *debugfs;
41455
41456diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
41457index ffe2654..03c7b1c 100644
41458--- a/drivers/gpu/drm/tegra/hdmi.c
41459+++ b/drivers/gpu/drm/tegra/hdmi.c
41460@@ -60,7 +60,7 @@ struct tegra_hdmi {
41461 bool stereo;
41462 bool dvi;
41463
41464- struct drm_info_list *debugfs_files;
41465+ drm_info_list_no_const *debugfs_files;
41466 struct drm_minor *minor;
41467 struct dentry *debugfs;
41468 };
41469diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41470index aa0bd054..aea6a01 100644
41471--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
41472+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
41473@@ -148,10 +148,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
41474 }
41475
41476 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
41477- ttm_bo_man_init,
41478- ttm_bo_man_takedown,
41479- ttm_bo_man_get_node,
41480- ttm_bo_man_put_node,
41481- ttm_bo_man_debug
41482+ .init = ttm_bo_man_init,
41483+ .takedown = ttm_bo_man_takedown,
41484+ .get_node = ttm_bo_man_get_node,
41485+ .put_node = ttm_bo_man_put_node,
41486+ .debug = ttm_bo_man_debug
41487 };
41488 EXPORT_SYMBOL(ttm_bo_manager_func);
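The ttm_bo_manager_func change is mechanical but worth the churn: positional initializers bind by order, so they misbind silently if struct ttm_mem_type_manager_func ever gains or reorders a member, while designated initializers bind by name and leave unmentioned hooks NULL. A minimal contrast (hypothetical struct and functions):

struct ops {
        int  (*init)(void);
        void (*debug)(void);
};

static int  my_init(void)  { return 0; }
static void my_debug(void) { }

/* positional: breaks silently if a field is inserted between the two */
static const struct ops a = { my_init, my_debug };

/* designated: robust to reordering and self-documenting */
static const struct ops b = { .init = my_init, .debug = my_debug };

The designated form also plays better with grsecurity's constify plugin, which wants ops tables fully const. The vmw_gmrid_manager_func hunk further down makes the identical transformation.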
41489diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
41490index a1803fb..c53f6b0 100644
41491--- a/drivers/gpu/drm/ttm/ttm_memory.c
41492+++ b/drivers/gpu/drm/ttm/ttm_memory.c
41493@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
41494 zone->glob = glob;
41495 glob->zone_kernel = zone;
41496 ret = kobject_init_and_add(
41497- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41498+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41499 if (unlikely(ret != 0)) {
41500 kobject_put(&zone->kobj);
41501 return ret;
41502@@ -348,7 +348,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
41503 zone->glob = glob;
41504 glob->zone_dma32 = zone;
41505 ret = kobject_init_and_add(
41506- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
41507+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
41508 if (unlikely(ret != 0)) {
41509 kobject_put(&zone->kobj);
41510 return ret;
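Both ttm_memory.c hunks close the same hole: kobject_init_and_add() takes a printf-style format, and passing zone->name in the format position means any '%' in the name would be parsed as a conversion specifier. Routing the string through a literal "%s" is the standard fix for every printf-like sink:

/* bad: name is interpreted as a format string */
kobject_init_and_add(&zone->kobj, &ttm_mem_zone_kobj_type,
                     &glob->kobj, zone->name);

/* good: name is mere data */
kobject_init_and_add(&zone->kobj, &ttm_mem_zone_kobj_type,
                     &glob->kobj, "%s", zone->name);

Here the zone names are compile-time constants, so this is defense in depth rather than a live bug, but the pattern costs nothing.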
41511diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41512index 025c429..314062f 100644
41513--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
41514+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
41515@@ -54,7 +54,7 @@
41516
41517 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41518 #define SMALL_ALLOCATION 16
41519-#define FREE_ALL_PAGES (~0U)
41520+#define FREE_ALL_PAGES (~0UL)
41521 /* times are in msecs */
41522 #define PAGE_FREE_INTERVAL 1000
41523
41524@@ -299,15 +299,14 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
41525 * @free_all: If set to true will free all pages in pool
41526 * @use_static: Safe to use static buffer
41527 **/
41528-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
41529+static unsigned long ttm_page_pool_free(struct ttm_page_pool *pool, unsigned long nr_free,
41530 bool use_static)
41531 {
41532 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41533 unsigned long irq_flags;
41534 struct page *p;
41535 struct page **pages_to_free;
41536- unsigned freed_pages = 0,
41537- npages_to_free = nr_free;
41538+ unsigned long freed_pages = 0, npages_to_free = nr_free;
41539
41540 if (NUM_PAGES_TO_ALLOC < nr_free)
41541 npages_to_free = NUM_PAGES_TO_ALLOC;
41542@@ -371,7 +370,8 @@ restart:
41543 __list_del(&p->lru, &pool->list);
41544
41545 ttm_pool_update_free_locked(pool, freed_pages);
41546- nr_free -= freed_pages;
41547+ if (likely(nr_free != FREE_ALL_PAGES))
41548+ nr_free -= freed_pages;
41549 }
41550
41551 spin_unlock_irqrestore(&pool->lock, irq_flags);
41552@@ -399,7 +399,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41553 unsigned i;
41554 unsigned pool_offset;
41555 struct ttm_page_pool *pool;
41556- int shrink_pages = sc->nr_to_scan;
41557+ unsigned long shrink_pages = sc->nr_to_scan;
41558 unsigned long freed = 0;
41559
41560 if (!mutex_trylock(&lock))
41561@@ -407,7 +407,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41562 pool_offset = ++start_pool % NUM_POOLS;
41563 /* select start pool in round robin fashion */
41564 for (i = 0; i < NUM_POOLS; ++i) {
41565- unsigned nr_free = shrink_pages;
41566+ unsigned long nr_free = shrink_pages;
41567 if (shrink_pages == 0)
41568 break;
41569 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
41570@@ -673,7 +673,7 @@ out:
41571 }
41572
41573 /* Put all pages in pages list to correct pool to wait for reuse */
41574-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
41575+static void ttm_put_pages(struct page **pages, unsigned long npages, int flags,
41576 enum ttm_caching_state cstate)
41577 {
41578 unsigned long irq_flags;
41579@@ -728,7 +728,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
41580 struct list_head plist;
41581 struct page *p = NULL;
41582 gfp_t gfp_flags = GFP_USER;
41583- unsigned count;
41584+ unsigned long count;
41585 int r;
41586
41587 /* set zero flag for page allocation if required */
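The ttm_page_alloc.c hunks fix a pair of related integer problems. sc->nr_to_scan is unsigned long, so funnelling it through int/unsigned locals truncates on 64-bit; the counters are widened end to end. FREE_ALL_PAGES moves from ~0U to ~0UL in step, otherwise the sentinel would no longer compare equal after the widening. And once nr_free can hold the sentinel, nr_free -= freed_pages must be skipped for it, or the free-everything request silently decays into a finite count. A userspace demonstration of both hazards:

#include <stdio.h>

#define FREE_ALL_PAGES (~0UL)

int main(void)
{
        unsigned truncated = FREE_ALL_PAGES;    /* 0xffffffff on LP64 */
        unsigned long nr_free = FREE_ALL_PAGES;

        printf("sentinel survives int-width pass-through? %s\n",
               (unsigned long)truncated == FREE_ALL_PAGES ? "yes" : "no");

        nr_free -= 64;                          /* unguarded subtraction */
        printf("still means free-all? %s\n",
               nr_free == FREE_ALL_PAGES ? "yes" : "no");
        return 0;
}

The ttm_page_alloc_dma.c hunks that follow apply the same three changes to the DMA pool.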
41588diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41589index 01e1d27..aaa018a 100644
41590--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41591+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
41592@@ -56,7 +56,7 @@
41593
41594 #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
41595 #define SMALL_ALLOCATION 4
41596-#define FREE_ALL_PAGES (~0U)
41597+#define FREE_ALL_PAGES (~0UL)
41598 /* times are in msecs */
41599 #define IS_UNDEFINED (0)
41600 #define IS_WC (1<<1)
41601@@ -413,7 +413,7 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
41602 * @nr_free: If set to true will free all pages in pool
41603 * @use_static: Safe to use static buffer
41604 **/
41605-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
41606+static unsigned long ttm_dma_page_pool_free(struct dma_pool *pool, unsigned long nr_free,
41607 bool use_static)
41608 {
41609 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
41610@@ -421,8 +421,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
41611 struct dma_page *dma_p, *tmp;
41612 struct page **pages_to_free;
41613 struct list_head d_pages;
41614- unsigned freed_pages = 0,
41615- npages_to_free = nr_free;
41616+ unsigned long freed_pages = 0, npages_to_free = nr_free;
41617
41618 if (NUM_PAGES_TO_ALLOC < nr_free)
41619 npages_to_free = NUM_PAGES_TO_ALLOC;
41620@@ -499,7 +498,8 @@ restart:
41621 /* remove range of pages from the pool */
41622 if (freed_pages) {
41623 ttm_pool_update_free_locked(pool, freed_pages);
41624- nr_free -= freed_pages;
41625+ if (likely(nr_free != FREE_ALL_PAGES))
41626+ nr_free -= freed_pages;
41627 }
41628
41629 spin_unlock_irqrestore(&pool->lock, irq_flags);
41630@@ -936,7 +936,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
41631 struct dma_page *d_page, *next;
41632 enum pool_type type;
41633 bool is_cached = false;
41634- unsigned count = 0, i, npages = 0;
41635+ unsigned long count = 0, i, npages = 0;
41636 unsigned long irq_flags;
41637
41638 type = ttm_to_type(ttm->page_flags, ttm->caching_state);
41639@@ -1012,7 +1012,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41640 static unsigned start_pool;
41641 unsigned idx = 0;
41642 unsigned pool_offset;
41643- unsigned shrink_pages = sc->nr_to_scan;
41644+ unsigned long shrink_pages = sc->nr_to_scan;
41645 struct device_pools *p;
41646 unsigned long freed = 0;
41647
41648@@ -1025,7 +1025,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41649 goto out;
41650 pool_offset = ++start_pool % _manager->npools;
41651 list_for_each_entry(p, &_manager->pools, pools) {
41652- unsigned nr_free;
41653+ unsigned long nr_free;
41654
41655 if (!p->dev)
41656 continue;
41657@@ -1039,7 +1039,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
41658 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
41659 freed += nr_free - shrink_pages;
41660
41661- pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
41662+ pr_debug("%s: (%s:%d) Asked to shrink %lu, have %lu more to go\n",
41663 p->pool->dev_name, p->pool->name, current->pid,
41664 nr_free, shrink_pages);
41665 }
41666diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
41667index 8cbcb45..a4d9cf7 100644
41668--- a/drivers/gpu/drm/udl/udl_fb.c
41669+++ b/drivers/gpu/drm/udl/udl_fb.c
41670@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
41671 fb_deferred_io_cleanup(info);
41672 kfree(info->fbdefio);
41673 info->fbdefio = NULL;
41674- info->fbops->fb_mmap = udl_fb_mmap;
41675 }
41676
41677 pr_warn("released /dev/fb%d user=%d count=%d\n",
41678diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
41679index ef8c500..01030c8 100644
41680--- a/drivers/gpu/drm/via/via_drv.h
41681+++ b/drivers/gpu/drm/via/via_drv.h
41682@@ -53,7 +53,7 @@ typedef struct drm_via_ring_buffer {
41683 typedef uint32_t maskarray_t[5];
41684
41685 typedef struct drm_via_irq {
41686- atomic_t irq_received;
41687+ atomic_unchecked_t irq_received;
41688 uint32_t pending_mask;
41689 uint32_t enable_mask;
41690 wait_queue_head_t irq_queue;
41691@@ -77,7 +77,7 @@ typedef struct drm_via_private {
41692 struct timeval last_vblank;
41693 int last_vblank_valid;
41694 unsigned usec_per_vblank;
41695- atomic_t vbl_received;
41696+ atomic_unchecked_t vbl_received;
41697 drm_via_state_t hc_state;
41698 char pci_buf[VIA_PCI_BUF_SIZE];
41699 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
41700diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
41701index 1319433..a993b0c 100644
41702--- a/drivers/gpu/drm/via/via_irq.c
41703+++ b/drivers/gpu/drm/via/via_irq.c
41704@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
41705 if (crtc != 0)
41706 return 0;
41707
41708- return atomic_read(&dev_priv->vbl_received);
41709+ return atomic_read_unchecked(&dev_priv->vbl_received);
41710 }
41711
41712 irqreturn_t via_driver_irq_handler(int irq, void *arg)
41713@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41714
41715 status = VIA_READ(VIA_REG_INTERRUPT);
41716 if (status & VIA_IRQ_VBLANK_PENDING) {
41717- atomic_inc(&dev_priv->vbl_received);
41718- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
41719+ atomic_inc_unchecked(&dev_priv->vbl_received);
41720+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
41721 do_gettimeofday(&cur_vblank);
41722 if (dev_priv->last_vblank_valid) {
41723 dev_priv->usec_per_vblank =
41724@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41725 dev_priv->last_vblank = cur_vblank;
41726 dev_priv->last_vblank_valid = 1;
41727 }
41728- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
41729+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
41730 DRM_DEBUG("US per vblank is: %u\n",
41731 dev_priv->usec_per_vblank);
41732 }
41733@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
41734
41735 for (i = 0; i < dev_priv->num_irqs; ++i) {
41736 if (status & cur_irq->pending_mask) {
41737- atomic_inc(&cur_irq->irq_received);
41738+ atomic_inc_unchecked(&cur_irq->irq_received);
41739 wake_up(&cur_irq->irq_queue);
41740 handled = 1;
41741 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
41742@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
41743 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
41744 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
41745 masks[irq][4]));
41746- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
41747+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
41748 } else {
41749 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
41750 (((cur_irq_sequence =
41751- atomic_read(&cur_irq->irq_received)) -
41752+ atomic_read_unchecked(&cur_irq->irq_received)) -
41753 *sequence) <= (1 << 23)));
41754 }
41755 *sequence = cur_irq_sequence;
41756@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
41757 }
41758
41759 for (i = 0; i < dev_priv->num_irqs; ++i) {
41760- atomic_set(&cur_irq->irq_received, 0);
41761+ atomic_set_unchecked(&cur_irq->irq_received, 0);
41762 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
41763 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
41764 init_waitqueue_head(&cur_irq->irq_queue);
41765@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
41766 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
41767 case VIA_IRQ_RELATIVE:
41768 irqwait->request.sequence +=
41769- atomic_read(&cur_irq->irq_received);
41770+ atomic_read_unchecked(&cur_irq->irq_received);
41771 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
41772 case VIA_IRQ_ABSOLUTE:
41773 break;
41774diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41775index d26a6da..5fa41ed 100644
41776--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41777+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
41778@@ -447,7 +447,7 @@ struct vmw_private {
41779 * Fencing and IRQs.
41780 */
41781
41782- atomic_t marker_seq;
41783+ atomic_unchecked_t marker_seq;
41784 wait_queue_head_t fence_queue;
41785 wait_queue_head_t fifo_queue;
41786 spinlock_t waiter_lock;
41787diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41788index 39f2b03..d1b0a64 100644
41789--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41790+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
41791@@ -152,7 +152,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
41792 (unsigned int) min,
41793 (unsigned int) fifo->capabilities);
41794
41795- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41796+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
41797 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
41798 vmw_marker_queue_init(&fifo->marker_queue);
41799 return vmw_fifo_send_fence(dev_priv, &dummy);
41800@@ -372,7 +372,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
41801 if (reserveable)
41802 iowrite32(bytes, fifo_mem +
41803 SVGA_FIFO_RESERVED);
41804- return fifo_mem + (next_cmd >> 2);
41805+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
41806 } else {
41807 need_bounce = true;
41808 }
41809@@ -492,7 +492,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41810
41811 fm = vmw_fifo_reserve(dev_priv, bytes);
41812 if (unlikely(fm == NULL)) {
41813- *seqno = atomic_read(&dev_priv->marker_seq);
41814+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41815 ret = -ENOMEM;
41816 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
41817 false, 3*HZ);
41818@@ -500,7 +500,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
41819 }
41820
41821 do {
41822- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
41823+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
41824 } while (*seqno == 0);
41825
41826 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
41827diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41828index 170b61b..fec7348 100644
41829--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41830+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
41831@@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
41832 }
41833
41834 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
41835- vmw_gmrid_man_init,
41836- vmw_gmrid_man_takedown,
41837- vmw_gmrid_man_get_node,
41838- vmw_gmrid_man_put_node,
41839- vmw_gmrid_man_debug
41840+ .init = vmw_gmrid_man_init,
41841+ .takedown = vmw_gmrid_man_takedown,
41842+ .get_node = vmw_gmrid_man_get_node,
41843+ .put_node = vmw_gmrid_man_put_node,
41844+ .debug = vmw_gmrid_man_debug
41845 };
41846diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41847index 69c8ce2..cacb0ab 100644
41848--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41849+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
41850@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
41851 int ret;
41852
41853 num_clips = arg->num_clips;
41854- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41855+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41856
41857 if (unlikely(num_clips == 0))
41858 return 0;
41859@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
41860 int ret;
41861
41862 num_clips = arg->num_clips;
41863- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
41864+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
41865
41866 if (unlikely(num_clips == 0))
41867 return 0;
41868diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41869index 9fe9827..0aa2fc0 100644
41870--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41871+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
41872@@ -102,7 +102,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
41873 * emitted. Then the fence is stale and signaled.
41874 */
41875
41876- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
41877+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
41878 > VMW_FENCE_WRAP);
41879
41880 return ret;
41881@@ -133,7 +133,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
41882
41883 if (fifo_idle)
41884 down_read(&fifo_state->rwsem);
41885- signal_seq = atomic_read(&dev_priv->marker_seq);
41886+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
41887 ret = 0;
41888
41889 for (;;) {
41890diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41891index efd1ffd..0ae13ca 100644
41892--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41893+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
41894@@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
41895 while (!vmw_lag_lt(queue, us)) {
41896 spin_lock(&queue->lock);
41897 if (list_empty(&queue->head))
41898- seqno = atomic_read(&dev_priv->marker_seq);
41899+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
41900 else {
41901 marker = list_first_entry(&queue->head,
41902 struct vmw_marker, head);
41903diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
41904index 37ac7b5..d52a5c9 100644
41905--- a/drivers/gpu/vga/vga_switcheroo.c
41906+++ b/drivers/gpu/vga/vga_switcheroo.c
41907@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
41908
41909 /* this version is for the case where the power switch is separate
41910 to the device being powered down. */
41911-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
41912+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
41913 {
41914 /* copy over all the bus versions */
41915 if (dev->bus && dev->bus->pm) {
41916@@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
41917 return ret;
41918 }
41919
41920-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
41921+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
41922 {
41923 /* copy over all the bus versions */
41924 if (dev->bus && dev->bus->pm) {
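dev_pm_domain, like drm_info_list in the tegra hunks above, consists mostly of function pointers, so grsecurity's constify plugin makes every instance implicitly const. Call sites such as vga_switcheroo, which legitimately build one of these structures at runtime, take a *_no_const alias instead: same layout, with the implied const suppressed. A sketch of how such aliases are defined next to the original struct (treat the macro spelling as approximate; the mechanism, a named opt-out from plugin-enforced constification, is the part that matters for reading these hunks):

#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))
#else
#define __no_const
#endif

typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;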
41925diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
41926index 8b63879..a5a5e72 100644
41927--- a/drivers/hid/hid-core.c
41928+++ b/drivers/hid/hid-core.c
41929@@ -2508,7 +2508,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
41930
41931 int hid_add_device(struct hid_device *hdev)
41932 {
41933- static atomic_t id = ATOMIC_INIT(0);
41934+ static atomic_unchecked_t id = ATOMIC_INIT(0);
41935 int ret;
41936
41937 if (WARN_ON(hdev->status & HID_STAT_ADDED))
41938@@ -2551,7 +2551,7 @@ int hid_add_device(struct hid_device *hdev)
41939 /* XXX hack, any other cleaner solution after the driver core
41940 * is converted to allow more than 20 bytes as the device name? */
41941 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
41942- hdev->vendor, hdev->product, atomic_inc_return(&id));
41943+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
41944
41945 hid_debug_register(hdev, dev_name(&hdev->dev));
41946 ret = device_add(&hdev->dev);
41947diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
41948index 5bc6d80..e47b55a 100644
41949--- a/drivers/hid/hid-logitech-dj.c
41950+++ b/drivers/hid/hid-logitech-dj.c
41951@@ -853,6 +853,12 @@ static int logi_dj_dj_event(struct hid_device *hdev,
41952 * case we forward it to the correct hid device (via hid_input_report()
41953 * ) and return 1 so hid-core does not anything else with it.
41954 */
41955+ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
41956+ (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
41957+ dev_err(&hdev->dev, "%s: invalid device index:%d\n",
41958+ __func__, dj_report->device_index);
41959+ return false;
41960+ }
41961
41962 if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
41963 (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
41964diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
41965index c13fb5b..55a3802 100644
41966--- a/drivers/hid/hid-wiimote-debug.c
41967+++ b/drivers/hid/hid-wiimote-debug.c
41968@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
41969 else if (size == 0)
41970 return -EIO;
41971
41972- if (copy_to_user(u, buf, size))
41973+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
41974 return -EFAULT;
41975
41976 *off += size;
41977diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
41978index 433f72a..2926005 100644
41979--- a/drivers/hv/channel.c
41980+++ b/drivers/hv/channel.c
41981@@ -366,8 +366,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
41982 unsigned long flags;
41983 int ret = 0;
41984
41985- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
41986- atomic_inc(&vmbus_connection.next_gpadl_handle);
41987+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
41988+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
41989
41990 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
41991 if (ret)
41992diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
41993index 3e4235c..877d0e5 100644
41994--- a/drivers/hv/hv.c
41995+++ b/drivers/hv/hv.c
41996@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
41997 u64 output_address = (output) ? virt_to_phys(output) : 0;
41998 u32 output_address_hi = output_address >> 32;
41999 u32 output_address_lo = output_address & 0xFFFFFFFF;
42000- void *hypercall_page = hv_context.hypercall_page;
42001+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
42002
42003 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
42004 "=a"(hv_status_lo) : "d" (control_hi),
42005@@ -156,7 +156,7 @@ int hv_init(void)
42006 /* See if the hypercall page is already set */
42007 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
42008
42009- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
42010+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
42011
42012 if (!virtaddr)
42013 goto cleanup;
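Two independent hardenings in hv.c. The hypercall page is populated by the hypervisor through the HV_X64_MSR_HYPERCALL MSR, never by CPU stores from the guest, so mapping it PAGE_KERNEL_RX instead of PAGE_KERNEL_EXEC removes a writable-and-executable kernel mapping without breaking anything, keeping W^X intact:

/* 3.19-era prototype: void *__vmalloc(unsigned long size, gfp_t gfp, pgprot_t prot) */
void *virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);

The ktva_ktla() wrapper in do_hypercall() translates between the two aliases PaX KERNEXEC keeps for kernel text so the indirect call lands at the right address; without KERNEXEC it is an identity mapping.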
42014diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
42015index b958ded..b2452bb 100644
42016--- a/drivers/hv/hv_balloon.c
42017+++ b/drivers/hv/hv_balloon.c
42018@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
42019
42020 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
42021 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
42022-static atomic_t trans_id = ATOMIC_INIT(0);
42023+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
42024
42025 static int dm_ring_size = (5 * PAGE_SIZE);
42026
42027@@ -893,7 +893,7 @@ static void hot_add_req(struct work_struct *dummy)
42028 pr_info("Memory hot add failed\n");
42029
42030 dm->state = DM_INITIALIZED;
42031- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42032+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42033 vmbus_sendpacket(dm->dev->channel, &resp,
42034 sizeof(struct dm_hot_add_response),
42035 (unsigned long)NULL,
42036@@ -973,7 +973,7 @@ static void post_status(struct hv_dynmem_device *dm)
42037 memset(&status, 0, sizeof(struct dm_status));
42038 status.hdr.type = DM_STATUS_REPORT;
42039 status.hdr.size = sizeof(struct dm_status);
42040- status.hdr.trans_id = atomic_inc_return(&trans_id);
42041+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42042
42043 /*
42044 * The host expects the guest to report free memory.
42045@@ -993,7 +993,7 @@ static void post_status(struct hv_dynmem_device *dm)
42046 * send the status. This can happen if we were interrupted
42047 * after we picked our transaction ID.
42048 */
42049- if (status.hdr.trans_id != atomic_read(&trans_id))
42050+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
42051 return;
42052
42053 /*
42054@@ -1133,7 +1133,7 @@ static void balloon_up(struct work_struct *dummy)
42055 */
42056
42057 do {
42058- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
42059+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42060 ret = vmbus_sendpacket(dm_device.dev->channel,
42061 bl_resp,
42062 bl_resp->hdr.size,
42063@@ -1179,7 +1179,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
42064
42065 memset(&resp, 0, sizeof(struct dm_unballoon_response));
42066 resp.hdr.type = DM_UNBALLOON_RESPONSE;
42067- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42068+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42069 resp.hdr.size = sizeof(struct dm_unballoon_response);
42070
42071 vmbus_sendpacket(dm_device.dev->channel, &resp,
42072@@ -1243,7 +1243,7 @@ static void version_resp(struct hv_dynmem_device *dm,
42073 memset(&version_req, 0, sizeof(struct dm_version_request));
42074 version_req.hdr.type = DM_VERSION_REQUEST;
42075 version_req.hdr.size = sizeof(struct dm_version_request);
42076- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42077+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42078 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
42079 version_req.is_last_attempt = 1;
42080
42081@@ -1413,7 +1413,7 @@ static int balloon_probe(struct hv_device *dev,
42082 memset(&version_req, 0, sizeof(struct dm_version_request));
42083 version_req.hdr.type = DM_VERSION_REQUEST;
42084 version_req.hdr.size = sizeof(struct dm_version_request);
42085- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42086+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42087 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
42088 version_req.is_last_attempt = 0;
42089
42090@@ -1444,7 +1444,7 @@ static int balloon_probe(struct hv_device *dev,
42091 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
42092 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
42093 cap_msg.hdr.size = sizeof(struct dm_capabilities);
42094- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
42095+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42096
42097 cap_msg.caps.cap_bits.balloon = 1;
42098 cap_msg.caps.cap_bits.hot_add = 1;
42099diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
42100index c386d8d..d6004c4 100644
42101--- a/drivers/hv/hyperv_vmbus.h
42102+++ b/drivers/hv/hyperv_vmbus.h
42103@@ -611,7 +611,7 @@ enum vmbus_connect_state {
42104 struct vmbus_connection {
42105 enum vmbus_connect_state conn_state;
42106
42107- atomic_t next_gpadl_handle;
42108+ atomic_unchecked_t next_gpadl_handle;
42109
42110 /*
42111 * Represents channel interrupts. Each bit position represents a
42112diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
42113index 4d6b269..2e23b86 100644
42114--- a/drivers/hv/vmbus_drv.c
42115+++ b/drivers/hv/vmbus_drv.c
42116@@ -807,10 +807,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
42117 {
42118 int ret = 0;
42119
42120- static atomic_t device_num = ATOMIC_INIT(0);
42121+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
42122
42123 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
42124- atomic_inc_return(&device_num));
42125+ atomic_inc_return_unchecked(&device_num));
42126
42127 child_device_obj->device.bus = &hv_bus;
42128 child_device_obj->device.parent = &hv_acpi_dev->dev;
42129diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
42130index 579bdf9..75118b5 100644
42131--- a/drivers/hwmon/acpi_power_meter.c
42132+++ b/drivers/hwmon/acpi_power_meter.c
42133@@ -116,7 +116,7 @@ struct sensor_template {
42134 struct device_attribute *devattr,
42135 const char *buf, size_t count);
42136 int index;
42137-};
42138+} __do_const;
42139
42140 /* Averaging interval */
42141 static int update_avg_interval(struct acpi_power_meter_resource *resource)
42142@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
42143 struct sensor_template *attrs)
42144 {
42145 struct device *dev = &resource->acpi_dev->dev;
42146- struct sensor_device_attribute *sensors =
42147+ sensor_device_attribute_no_const *sensors =
42148 &resource->sensors[resource->num_sensors];
42149 int res = 0;
42150
42151diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
42152index 0af63da..05a183a 100644
42153--- a/drivers/hwmon/applesmc.c
42154+++ b/drivers/hwmon/applesmc.c
42155@@ -1105,7 +1105,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
42156 {
42157 struct applesmc_node_group *grp;
42158 struct applesmc_dev_attr *node;
42159- struct attribute *attr;
42160+ attribute_no_const *attr;
42161 int ret, i;
42162
42163 for (grp = groups; grp->format; grp++) {
42164diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
42165index cccef87..06ce8ec 100644
42166--- a/drivers/hwmon/asus_atk0110.c
42167+++ b/drivers/hwmon/asus_atk0110.c
42168@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
42169 struct atk_sensor_data {
42170 struct list_head list;
42171 struct atk_data *data;
42172- struct device_attribute label_attr;
42173- struct device_attribute input_attr;
42174- struct device_attribute limit1_attr;
42175- struct device_attribute limit2_attr;
42176+ device_attribute_no_const label_attr;
42177+ device_attribute_no_const input_attr;
42178+ device_attribute_no_const limit1_attr;
42179+ device_attribute_no_const limit2_attr;
42180 char label_attr_name[ATTR_NAME_SIZE];
42181 char input_attr_name[ATTR_NAME_SIZE];
42182 char limit1_attr_name[ATTR_NAME_SIZE];
42183@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
42184 static struct device_attribute atk_name_attr =
42185 __ATTR(name, 0444, atk_name_show, NULL);
42186
42187-static void atk_init_attribute(struct device_attribute *attr, char *name,
42188+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
42189 sysfs_show_func show)
42190 {
42191 sysfs_attr_init(&attr->attr);
42192diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
42193index 5b7fec8..05c957a 100644
42194--- a/drivers/hwmon/coretemp.c
42195+++ b/drivers/hwmon/coretemp.c
42196@@ -783,7 +783,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
42197 return NOTIFY_OK;
42198 }
42199
42200-static struct notifier_block coretemp_cpu_notifier __refdata = {
42201+static struct notifier_block coretemp_cpu_notifier = {
42202 .notifier_call = coretemp_cpu_callback,
42203 };
42204
42205diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
42206index 7a8a6fb..015c1fd 100644
42207--- a/drivers/hwmon/ibmaem.c
42208+++ b/drivers/hwmon/ibmaem.c
42209@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
42210 struct aem_rw_sensor_template *rw)
42211 {
42212 struct device *dev = &data->pdev->dev;
42213- struct sensor_device_attribute *sensors = data->sensors;
42214+ sensor_device_attribute_no_const *sensors = data->sensors;
42215 int err;
42216
42217 /* Set up read-only sensors */
42218diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
42219index 17ae2eb..21b71dd 100644
42220--- a/drivers/hwmon/iio_hwmon.c
42221+++ b/drivers/hwmon/iio_hwmon.c
42222@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
42223 {
42224 struct device *dev = &pdev->dev;
42225 struct iio_hwmon_state *st;
42226- struct sensor_device_attribute *a;
42227+ sensor_device_attribute_no_const *a;
42228 int ret, i;
42229 int in_i = 1, temp_i = 1, curr_i = 1, humidity_i = 1;
42230 enum iio_chan_type type;
42231diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
42232index f3830db..9f4d6d5 100644
42233--- a/drivers/hwmon/nct6683.c
42234+++ b/drivers/hwmon/nct6683.c
42235@@ -397,11 +397,11 @@ static struct attribute_group *
42236 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42237 int repeat)
42238 {
42239- struct sensor_device_attribute_2 *a2;
42240- struct sensor_device_attribute *a;
42241+ sensor_device_attribute_2_no_const *a2;
42242+ sensor_device_attribute_no_const *a;
42243 struct sensor_device_template **t;
42244 struct sensor_device_attr_u *su;
42245- struct attribute_group *group;
42246+ attribute_group_no_const *group;
42247 struct attribute **attrs;
42248 int i, j, count;
42249
42250diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
42251index 1be4117..88ae1e1 100644
42252--- a/drivers/hwmon/nct6775.c
42253+++ b/drivers/hwmon/nct6775.c
42254@@ -952,10 +952,10 @@ static struct attribute_group *
42255 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42256 int repeat)
42257 {
42258- struct attribute_group *group;
42259+ attribute_group_no_const *group;
42260 struct sensor_device_attr_u *su;
42261- struct sensor_device_attribute *a;
42262- struct sensor_device_attribute_2 *a2;
42263+ sensor_device_attribute_no_const *a;
42264+ sensor_device_attribute_2_no_const *a2;
42265 struct attribute **attrs;
42266 struct sensor_device_template **t;
42267 int i, count;
42268diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
42269index f2e47c7..45d7941 100644
42270--- a/drivers/hwmon/pmbus/pmbus_core.c
42271+++ b/drivers/hwmon/pmbus/pmbus_core.c
42272@@ -816,7 +816,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
42273 return 0;
42274 }
42275
42276-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42277+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
42278 const char *name,
42279 umode_t mode,
42280 ssize_t (*show)(struct device *dev,
42281@@ -833,7 +833,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42282 dev_attr->store = store;
42283 }
42284
42285-static void pmbus_attr_init(struct sensor_device_attribute *a,
42286+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
42287 const char *name,
42288 umode_t mode,
42289 ssize_t (*show)(struct device *dev,
42290@@ -855,7 +855,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
42291 u16 reg, u8 mask)
42292 {
42293 struct pmbus_boolean *boolean;
42294- struct sensor_device_attribute *a;
42295+ sensor_device_attribute_no_const *a;
42296
42297 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
42298 if (!boolean)
42299@@ -880,7 +880,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
42300 bool update, bool readonly)
42301 {
42302 struct pmbus_sensor *sensor;
42303- struct device_attribute *a;
42304+ device_attribute_no_const *a;
42305
42306 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
42307 if (!sensor)
42308@@ -911,7 +911,7 @@ static int pmbus_add_label(struct pmbus_data *data,
42309 const char *lstring, int index)
42310 {
42311 struct pmbus_label *label;
42312- struct device_attribute *a;
42313+ device_attribute_no_const *a;
42314
42315 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
42316 if (!label)
42317diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
42318index d4f0935..7420593 100644
42319--- a/drivers/hwmon/sht15.c
42320+++ b/drivers/hwmon/sht15.c
42321@@ -169,7 +169,7 @@ struct sht15_data {
42322 int supply_uv;
42323 bool supply_uv_valid;
42324 struct work_struct update_supply_work;
42325- atomic_t interrupt_handled;
42326+ atomic_unchecked_t interrupt_handled;
42327 };
42328
42329 /**
42330@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
42331 ret = gpio_direction_input(data->pdata->gpio_data);
42332 if (ret)
42333 return ret;
42334- atomic_set(&data->interrupt_handled, 0);
42335+ atomic_set_unchecked(&data->interrupt_handled, 0);
42336
42337 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42338 if (gpio_get_value(data->pdata->gpio_data) == 0) {
42339 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
42340 /* Only relevant if the interrupt hasn't occurred. */
42341- if (!atomic_read(&data->interrupt_handled))
42342+ if (!atomic_read_unchecked(&data->interrupt_handled))
42343 schedule_work(&data->read_work);
42344 }
42345 ret = wait_event_timeout(data->wait_queue,
42346@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
42347
42348 /* First disable the interrupt */
42349 disable_irq_nosync(irq);
42350- atomic_inc(&data->interrupt_handled);
42351+ atomic_inc_unchecked(&data->interrupt_handled);
42352 /* Then schedule a reading work struct */
42353 if (data->state != SHT15_READING_NOTHING)
42354 schedule_work(&data->read_work);
42355@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
42356 * If not, then start the interrupt again - care here as could
42357 * have gone low in meantime so verify it hasn't!
42358 */
42359- atomic_set(&data->interrupt_handled, 0);
42360+ atomic_set_unchecked(&data->interrupt_handled, 0);
42361 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42362 /* If still not occurred or another handler was scheduled */
42363 if (gpio_get_value(data->pdata->gpio_data)
42364- || atomic_read(&data->interrupt_handled))
42365+ || atomic_read_unchecked(&data->interrupt_handled))
42366 return;
42367 }
42368
42369diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
42370index ac91c07..8e69663 100644
42371--- a/drivers/hwmon/via-cputemp.c
42372+++ b/drivers/hwmon/via-cputemp.c
42373@@ -295,7 +295,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
42374 return NOTIFY_OK;
42375 }
42376
42377-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
42378+static struct notifier_block via_cputemp_cpu_notifier = {
42379 .notifier_call = via_cputemp_cpu_callback,
42380 };
42381
42382diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
42383index 65e3240..e6c511d 100644
42384--- a/drivers/i2c/busses/i2c-amd756-s4882.c
42385+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
42386@@ -39,7 +39,7 @@
42387 extern struct i2c_adapter amd756_smbus;
42388
42389 static struct i2c_adapter *s4882_adapter;
42390-static struct i2c_algorithm *s4882_algo;
42391+static i2c_algorithm_no_const *s4882_algo;
42392
42393 /* Wrapper access functions for multiplexed SMBus */
42394 static DEFINE_MUTEX(amd756_lock);
42395diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
42396index b19a310..d6eece0 100644
42397--- a/drivers/i2c/busses/i2c-diolan-u2c.c
42398+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
42399@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
42400 /* usb layer */
42401
42402 /* Send command to device, and get response. */
42403-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42404+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42405 {
42406 int ret = 0;
42407 int actual;
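__intentional_overflow(-1) is an annotation for grsecurity's size_overflow gcc plugin: it marks arithmetic the plugin would otherwise instrument as deliberately allowed to wrap, with -1 conventionally designating the function's return value (a convention read from this patch family rather than a documented ABI). On a toolchain without the plugin the attribute must expand to nothing, roughly:

#ifndef __intentional_overflow
#define __intentional_overflow(...)     /* plugin absent: no-op */
#endif

The annotation changes instrumentation only; the generated logic of diolan_usb_transfer() is unaffected.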
42408diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
42409index 88eda09..cf40434 100644
42410--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
42411+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
42412@@ -37,7 +37,7 @@
42413 extern struct i2c_adapter *nforce2_smbus;
42414
42415 static struct i2c_adapter *s4985_adapter;
42416-static struct i2c_algorithm *s4985_algo;
42417+static i2c_algorithm_no_const *s4985_algo;
42418
42419 /* Wrapper access functions for multiplexed SMBus */
42420 static DEFINE_MUTEX(nforce2_lock);
42421diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
42422index 71c7a39..71dd3e0 100644
42423--- a/drivers/i2c/i2c-dev.c
42424+++ b/drivers/i2c/i2c-dev.c
42425@@ -272,7 +272,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
42426 break;
42427 }
42428
42429- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
42430+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
42431 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
42432 if (IS_ERR(rdwr_pa[i].buf)) {
42433 res = PTR_ERR(rdwr_pa[i].buf);
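Only the cast changes here: rdwr_pa[i].buf is declared as a kernel pointer but at this point still holds a user address, and grsecurity's stricter sparse annotations want that laundering made explicit with __force_user. The surrounding memdup_user() idiom is worth knowing on its own, since it folds kmalloc + copy_from_user + error unwinding into one call (use_user_buffer is a hypothetical wrapper, not a helper from this file):

static long use_user_buffer(const void __user *uptr, size_t len)
{
        void *buf = memdup_user(uptr, len);

        if (IS_ERR(buf))
                return PTR_ERR(buf);    /* -EFAULT or -ENOMEM */
        /* ... operate on the kernel copy ... */
        kfree(buf);
        return 0;
}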
42434diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
42435index 0b510ba..4fbb5085 100644
42436--- a/drivers/ide/ide-cd.c
42437+++ b/drivers/ide/ide-cd.c
42438@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
42439 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
42440 if ((unsigned long)buf & alignment
42441 || blk_rq_bytes(rq) & q->dma_pad_mask
42442- || object_is_on_stack(buf))
42443+ || object_starts_on_stack(buf))
42444 drive->dma = 0;
42445 }
42446 }
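The rename from object_is_on_stack() to object_starts_on_stack() is honesty in naming: the helper only tests the object's first byte, and a buffer can start on the task stack yet extend past it. For the DMA-safety test here that is still the right question, since anything beginning on the stack must not be DMA-mapped. A sketch of what the generic check reduces to (assumes the usual single THREAD_SIZE stack region):

static inline int object_starts_on_stack(const void *obj)
{
        const void *stack = task_stack_page(current);

        return obj >= stack && obj < stack + THREAD_SIZE;
}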
42447diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
42448index f009d05..d95b613 100644
42449--- a/drivers/iio/industrialio-core.c
42450+++ b/drivers/iio/industrialio-core.c
42451@@ -555,7 +555,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
42452 }
42453
42454 static
42455-int __iio_device_attr_init(struct device_attribute *dev_attr,
42456+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
42457 const char *postfix,
42458 struct iio_chan_spec const *chan,
42459 ssize_t (*readfunc)(struct device *dev,
42460diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
42461index e28a494..f7c2671 100644
42462--- a/drivers/infiniband/core/cm.c
42463+++ b/drivers/infiniband/core/cm.c
42464@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
42465
42466 struct cm_counter_group {
42467 struct kobject obj;
42468- atomic_long_t counter[CM_ATTR_COUNT];
42469+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
42470 };
42471
42472 struct cm_counter_attribute {
42473@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
42474 struct ib_mad_send_buf *msg = NULL;
42475 int ret;
42476
42477- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42478+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42479 counter[CM_REQ_COUNTER]);
42480
42481 /* Quick state check to discard duplicate REQs. */
42482@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
42483 if (!cm_id_priv)
42484 return;
42485
42486- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42487+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42488 counter[CM_REP_COUNTER]);
42489 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
42490 if (ret)
42491@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
42492 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
42493 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
42494 spin_unlock_irq(&cm_id_priv->lock);
42495- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42496+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42497 counter[CM_RTU_COUNTER]);
42498 goto out;
42499 }
42500@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
42501 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
42502 dreq_msg->local_comm_id);
42503 if (!cm_id_priv) {
42504- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42505+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42506 counter[CM_DREQ_COUNTER]);
42507 cm_issue_drep(work->port, work->mad_recv_wc);
42508 return -EINVAL;
42509@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
42510 case IB_CM_MRA_REP_RCVD:
42511 break;
42512 case IB_CM_TIMEWAIT:
42513- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42514+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42515 counter[CM_DREQ_COUNTER]);
42516 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42517 goto unlock;
42518@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
42519 cm_free_msg(msg);
42520 goto deref;
42521 case IB_CM_DREQ_RCVD:
42522- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42523+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42524 counter[CM_DREQ_COUNTER]);
42525 goto unlock;
42526 default:
42527@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
42528 ib_modify_mad(cm_id_priv->av.port->mad_agent,
42529 cm_id_priv->msg, timeout)) {
42530 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
42531- atomic_long_inc(&work->port->
42532+ atomic_long_inc_unchecked(&work->port->
42533 counter_group[CM_RECV_DUPLICATES].
42534 counter[CM_MRA_COUNTER]);
42535 goto out;
42536@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
42537 break;
42538 case IB_CM_MRA_REQ_RCVD:
42539 case IB_CM_MRA_REP_RCVD:
42540- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42541+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42542 counter[CM_MRA_COUNTER]);
42543 /* fall through */
42544 default:
42545@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
42546 case IB_CM_LAP_IDLE:
42547 break;
42548 case IB_CM_MRA_LAP_SENT:
42549- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42550+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42551 counter[CM_LAP_COUNTER]);
42552 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
42553 goto unlock;
42554@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
42555 cm_free_msg(msg);
42556 goto deref;
42557 case IB_CM_LAP_RCVD:
42558- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42559+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42560 counter[CM_LAP_COUNTER]);
42561 goto unlock;
42562 default:
42563@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
42564 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
42565 if (cur_cm_id_priv) {
42566 spin_unlock_irq(&cm.lock);
42567- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
42568+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
42569 counter[CM_SIDR_REQ_COUNTER]);
42570 goto out; /* Duplicate message. */
42571 }
42572@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
42573 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
42574 msg->retries = 1;
42575
42576- atomic_long_add(1 + msg->retries,
42577+ atomic_long_add_unchecked(1 + msg->retries,
42578 &port->counter_group[CM_XMIT].counter[attr_index]);
42579 if (msg->retries)
42580- atomic_long_add(msg->retries,
42581+ atomic_long_add_unchecked(msg->retries,
42582 &port->counter_group[CM_XMIT_RETRIES].
42583 counter[attr_index]);
42584
42585@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
42586 }
42587
42588 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
42589- atomic_long_inc(&port->counter_group[CM_RECV].
42590+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
42591 counter[attr_id - CM_ATTR_ID_OFFSET]);
42592
42593 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
42594@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
42595 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
42596
42597 return sprintf(buf, "%ld\n",
42598- atomic_long_read(&group->counter[cm_attr->index]));
42599+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
42600 }
42601
42602 static const struct sysfs_ops cm_counter_ops = {
42603diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
42604index 9f5ad7c..588cd84 100644
42605--- a/drivers/infiniband/core/fmr_pool.c
42606+++ b/drivers/infiniband/core/fmr_pool.c
42607@@ -98,8 +98,8 @@ struct ib_fmr_pool {
42608
42609 struct task_struct *thread;
42610
42611- atomic_t req_ser;
42612- atomic_t flush_ser;
42613+ atomic_unchecked_t req_ser;
42614+ atomic_unchecked_t flush_ser;
42615
42616 wait_queue_head_t force_wait;
42617 };
42618@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42619 struct ib_fmr_pool *pool = pool_ptr;
42620
42621 do {
42622- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
42623+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
42624 ib_fmr_batch_release(pool);
42625
42626- atomic_inc(&pool->flush_ser);
42627+ atomic_inc_unchecked(&pool->flush_ser);
42628 wake_up_interruptible(&pool->force_wait);
42629
42630 if (pool->flush_function)
42631@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
42632 }
42633
42634 set_current_state(TASK_INTERRUPTIBLE);
42635- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
42636+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
42637 !kthread_should_stop())
42638 schedule();
42639 __set_current_state(TASK_RUNNING);
42640@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
42641 pool->dirty_watermark = params->dirty_watermark;
42642 pool->dirty_len = 0;
42643 spin_lock_init(&pool->pool_lock);
42644- atomic_set(&pool->req_ser, 0);
42645- atomic_set(&pool->flush_ser, 0);
42646+ atomic_set_unchecked(&pool->req_ser, 0);
42647+ atomic_set_unchecked(&pool->flush_ser, 0);
42648 init_waitqueue_head(&pool->force_wait);
42649
42650 pool->thread = kthread_run(ib_fmr_cleanup_thread,
42651@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
42652 }
42653 spin_unlock_irq(&pool->pool_lock);
42654
42655- serial = atomic_inc_return(&pool->req_ser);
42656+ serial = atomic_inc_return_unchecked(&pool->req_ser);
42657 wake_up_process(pool->thread);
42658
42659 if (wait_event_interruptible(pool->force_wait,
42660- atomic_read(&pool->flush_ser) - serial >= 0))
42661+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
42662 return -EINTR;
42663
42664 return 0;
42665@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
42666 } else {
42667 list_add_tail(&fmr->list, &pool->dirty_list);
42668 if (++pool->dirty_len >= pool->dirty_watermark) {
42669- atomic_inc(&pool->req_ser);
42670+ atomic_inc_unchecked(&pool->req_ser);
42671 wake_up_process(pool->thread);
42672 }
42673 }
42674diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
42675index 6c52e72..6303e3f 100644
42676--- a/drivers/infiniband/core/uverbs_cmd.c
42677+++ b/drivers/infiniband/core/uverbs_cmd.c
42678@@ -945,6 +945,9 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
42679 if (copy_from_user(&cmd, buf, sizeof cmd))
42680 return -EFAULT;
42681
42682+ if (!access_ok_noprefault(VERIFY_READ, cmd.start, cmd.length))
42683+ return -EFAULT;
42684+
42685 INIT_UDATA(&udata, buf + sizeof cmd,
42686 (unsigned long) cmd.response + sizeof resp,
42687 in_len - sizeof cmd, out_len - sizeof resp);
42688diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
42689index cb43c22..2e12dd7 100644
42690--- a/drivers/infiniband/hw/cxgb4/mem.c
42691+++ b/drivers/infiniband/hw/cxgb4/mem.c
42692@@ -256,7 +256,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42693 int err;
42694 struct fw_ri_tpte tpt;
42695 u32 stag_idx;
42696- static atomic_t key;
42697+ static atomic_unchecked_t key;
42698
42699 if (c4iw_fatal_error(rdev))
42700 return -EIO;
42701@@ -277,7 +277,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
42702 if (rdev->stats.stag.cur > rdev->stats.stag.max)
42703 rdev->stats.stag.max = rdev->stats.stag.cur;
42704 mutex_unlock(&rdev->stats.lock);
42705- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
42706+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
42707 }
42708 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
42709 __func__, stag_state, type, pdid, stag_idx);
42710diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
42711index 79b3dbc..96e5fcc 100644
42712--- a/drivers/infiniband/hw/ipath/ipath_rc.c
42713+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
42714@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42715 struct ib_atomic_eth *ateth;
42716 struct ipath_ack_entry *e;
42717 u64 vaddr;
42718- atomic64_t *maddr;
42719+ atomic64_unchecked_t *maddr;
42720 u64 sdata;
42721 u32 rkey;
42722 u8 next;
42723@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
42724 IB_ACCESS_REMOTE_ATOMIC)))
42725 goto nack_acc_unlck;
42726 /* Perform atomic OP and save result. */
42727- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42728+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42729 sdata = be64_to_cpu(ateth->swap_data);
42730 e = &qp->s_ack_queue[qp->r_head_ack_queue];
42731 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
42732- (u64) atomic64_add_return(sdata, maddr) - sdata :
42733+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42734 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42735 be64_to_cpu(ateth->compare_data),
42736 sdata);
42737diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
42738index 1f95bba..9530f87 100644
42739--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
42740+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
42741@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
42742 unsigned long flags;
42743 struct ib_wc wc;
42744 u64 sdata;
42745- atomic64_t *maddr;
42746+ atomic64_unchecked_t *maddr;
42747 enum ib_wc_status send_status;
42748
42749 /*
42750@@ -382,11 +382,11 @@ again:
42751 IB_ACCESS_REMOTE_ATOMIC)))
42752 goto acc_err;
42753 /* Perform atomic OP and save result. */
42754- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
42755+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
42756 sdata = wqe->wr.wr.atomic.compare_add;
42757 *(u64 *) sqp->s_sge.sge.vaddr =
42758 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
42759- (u64) atomic64_add_return(sdata, maddr) - sdata :
42760+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
42761 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
42762 sdata, wqe->wr.wr.atomic.swap);
42763 goto send_comp;
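
[Annotation] In both ipath hunks the atomic64 operation targets registered application memory that a remote peer hit with a FETCH_ADD; the added value comes off the wire, so overflow is a perfectly valid outcome. The conversions follow the REFCOUNT rule of thumb:

/*   - real kernel reference counts keep atomic_t and gain overflow
 *     trapping;
 *   - data that merely happens to be updated atomically (here, the
 *     remote FETCH_ADD target) moves to atomic*_unchecked_t so a
 *     legitimate wrap cannot kill the operation. */
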
42764diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
42765index 729382c..2f82b8d 100644
42766--- a/drivers/infiniband/hw/mlx4/mad.c
42767+++ b/drivers/infiniband/hw/mlx4/mad.c
42768@@ -106,7 +106,7 @@ __be64 mlx4_ib_gen_node_guid(void)
42769
42770 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
42771 {
42772- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
42773+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
42774 cpu_to_be64(0xff00000000000000LL);
42775 }
42776
42777diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
42778index ed327e6..ca1739e0 100644
42779--- a/drivers/infiniband/hw/mlx4/mcg.c
42780+++ b/drivers/infiniband/hw/mlx4/mcg.c
42781@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
42782 {
42783 char name[20];
42784
42785- atomic_set(&ctx->tid, 0);
42786+ atomic_set_unchecked(&ctx->tid, 0);
42787 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
42788 ctx->mcg_wq = create_singlethread_workqueue(name);
42789 if (!ctx->mcg_wq)
42790diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42791index 6eb743f..a7b0f6d 100644
42792--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
42793+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
42794@@ -426,7 +426,7 @@ struct mlx4_ib_demux_ctx {
42795 struct list_head mcg_mgid0_list;
42796 struct workqueue_struct *mcg_wq;
42797 struct mlx4_ib_demux_pv_ctx **tun;
42798- atomic_t tid;
42799+ atomic_unchecked_t tid;
42800 int flushing; /* flushing the work queue */
42801 };
42802
42803diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
42804index 9d3e5c1..6f166df 100644
42805--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
42806+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
42807@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
42808 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
42809 }
42810
42811-int mthca_QUERY_FW(struct mthca_dev *dev)
42812+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
42813 {
42814 struct mthca_mailbox *mailbox;
42815 u32 *outbox;
42816@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42817 CMD_TIME_CLASS_B);
42818 }
42819
42820-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42821+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42822 int num_mtt)
42823 {
42824 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
42825@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
42826 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
42827 }
42828
42829-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42830+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
42831 int eq_num)
42832 {
42833 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
42834@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
42835 CMD_TIME_CLASS_B);
42836 }
42837
42838-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42839+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
42840 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
42841 void *in_mad, void *response_mad)
42842 {
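
[Annotation] __intentional_overflow(-1) is an annotation consumed by PaX's size_overflow gcc plugin; functions marked this way are exempted from the plugin's integer-overflow instrumentation (the mthca mailbox helpers do deliberate wrapping arithmetic on hardware-format fields). A sketch of how the marker is typically wired up when the plugin is enabled -- the exact guard and header location may differ between grsecurity versions:

/* When the plugin is off, the marker must compile away to nothing. */
#ifdef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...) \
	__attribute__((intentional_overflow(__VA_ARGS__)))
#else
#define __intentional_overflow(...)
#endif
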
42843diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
42844index ded76c1..0cf0a08 100644
42845--- a/drivers/infiniband/hw/mthca/mthca_main.c
42846+++ b/drivers/infiniband/hw/mthca/mthca_main.c
42847@@ -692,7 +692,7 @@ err_close:
42848 return err;
42849 }
42850
42851-static int mthca_setup_hca(struct mthca_dev *dev)
42852+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
42853 {
42854 int err;
42855
42856diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
42857index ed9a989..6aa5dc2 100644
42858--- a/drivers/infiniband/hw/mthca/mthca_mr.c
42859+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
42860@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
42861 * through the bitmaps)
42862 */
42863
42864-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42865+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
42866 {
42867 int o;
42868 int m;
42869@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
42870 return key;
42871 }
42872
42873-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42874+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
42875 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
42876 {
42877 struct mthca_mailbox *mailbox;
42878@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
42879 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
42880 }
42881
42882-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42883+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
42884 u64 *buffer_list, int buffer_size_shift,
42885 int list_len, u64 iova, u64 total_size,
42886 u32 access, struct mthca_mr *mr)
42887diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
42888index 415f8e1..e34214e 100644
42889--- a/drivers/infiniband/hw/mthca/mthca_provider.c
42890+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
42891@@ -764,7 +764,7 @@ unlock:
42892 return 0;
42893 }
42894
42895-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
42896+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
42897 {
42898 struct mthca_dev *dev = to_mdev(ibcq->device);
42899 struct mthca_cq *cq = to_mcq(ibcq);
42900diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
42901index 3b2a6dc..bce26ff 100644
42902--- a/drivers/infiniband/hw/nes/nes.c
42903+++ b/drivers/infiniband/hw/nes/nes.c
42904@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
42905 LIST_HEAD(nes_adapter_list);
42906 static LIST_HEAD(nes_dev_list);
42907
42908-atomic_t qps_destroyed;
42909+atomic_unchecked_t qps_destroyed;
42910
42911 static unsigned int ee_flsh_adapter;
42912 static unsigned int sysfs_nonidx_addr;
42913@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
42914 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
42915 struct nes_adapter *nesadapter = nesdev->nesadapter;
42916
42917- atomic_inc(&qps_destroyed);
42918+ atomic_inc_unchecked(&qps_destroyed);
42919
42920 /* Free the control structures */
42921
42922diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
42923index bd9d132..70d84f4 100644
42924--- a/drivers/infiniband/hw/nes/nes.h
42925+++ b/drivers/infiniband/hw/nes/nes.h
42926@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
42927 extern unsigned int wqm_quanta;
42928 extern struct list_head nes_adapter_list;
42929
42930-extern atomic_t cm_connects;
42931-extern atomic_t cm_accepts;
42932-extern atomic_t cm_disconnects;
42933-extern atomic_t cm_closes;
42934-extern atomic_t cm_connecteds;
42935-extern atomic_t cm_connect_reqs;
42936-extern atomic_t cm_rejects;
42937-extern atomic_t mod_qp_timouts;
42938-extern atomic_t qps_created;
42939-extern atomic_t qps_destroyed;
42940-extern atomic_t sw_qps_destroyed;
42941+extern atomic_unchecked_t cm_connects;
42942+extern atomic_unchecked_t cm_accepts;
42943+extern atomic_unchecked_t cm_disconnects;
42944+extern atomic_unchecked_t cm_closes;
42945+extern atomic_unchecked_t cm_connecteds;
42946+extern atomic_unchecked_t cm_connect_reqs;
42947+extern atomic_unchecked_t cm_rejects;
42948+extern atomic_unchecked_t mod_qp_timouts;
42949+extern atomic_unchecked_t qps_created;
42950+extern atomic_unchecked_t qps_destroyed;
42951+extern atomic_unchecked_t sw_qps_destroyed;
42952 extern u32 mh_detected;
42953 extern u32 mh_pauses_sent;
42954 extern u32 cm_packets_sent;
42955@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
42956 extern u32 cm_packets_received;
42957 extern u32 cm_packets_dropped;
42958 extern u32 cm_packets_retrans;
42959-extern atomic_t cm_listens_created;
42960-extern atomic_t cm_listens_destroyed;
42961+extern atomic_unchecked_t cm_listens_created;
42962+extern atomic_unchecked_t cm_listens_destroyed;
42963 extern u32 cm_backlog_drops;
42964-extern atomic_t cm_loopbacks;
42965-extern atomic_t cm_nodes_created;
42966-extern atomic_t cm_nodes_destroyed;
42967-extern atomic_t cm_accel_dropped_pkts;
42968-extern atomic_t cm_resets_recvd;
42969-extern atomic_t pau_qps_created;
42970-extern atomic_t pau_qps_destroyed;
42971+extern atomic_unchecked_t cm_loopbacks;
42972+extern atomic_unchecked_t cm_nodes_created;
42973+extern atomic_unchecked_t cm_nodes_destroyed;
42974+extern atomic_unchecked_t cm_accel_dropped_pkts;
42975+extern atomic_unchecked_t cm_resets_recvd;
42976+extern atomic_unchecked_t pau_qps_created;
42977+extern atomic_unchecked_t pau_qps_destroyed;
42978
42979 extern u32 int_mod_timer_init;
42980 extern u32 int_mod_cq_depth_256;
42981diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
42982index 6f09a72..cf4399d 100644
42983--- a/drivers/infiniband/hw/nes/nes_cm.c
42984+++ b/drivers/infiniband/hw/nes/nes_cm.c
42985@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
42986 u32 cm_packets_retrans;
42987 u32 cm_packets_created;
42988 u32 cm_packets_received;
42989-atomic_t cm_listens_created;
42990-atomic_t cm_listens_destroyed;
42991+atomic_unchecked_t cm_listens_created;
42992+atomic_unchecked_t cm_listens_destroyed;
42993 u32 cm_backlog_drops;
42994-atomic_t cm_loopbacks;
42995-atomic_t cm_nodes_created;
42996-atomic_t cm_nodes_destroyed;
42997-atomic_t cm_accel_dropped_pkts;
42998-atomic_t cm_resets_recvd;
42999+atomic_unchecked_t cm_loopbacks;
43000+atomic_unchecked_t cm_nodes_created;
43001+atomic_unchecked_t cm_nodes_destroyed;
43002+atomic_unchecked_t cm_accel_dropped_pkts;
43003+atomic_unchecked_t cm_resets_recvd;
43004
43005 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
43006 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
43007@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
43008 /* instance of function pointers for client API */
43009 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
43010 static struct nes_cm_ops nes_cm_api = {
43011- mini_cm_accelerated,
43012- mini_cm_listen,
43013- mini_cm_del_listen,
43014- mini_cm_connect,
43015- mini_cm_close,
43016- mini_cm_accept,
43017- mini_cm_reject,
43018- mini_cm_recv_pkt,
43019- mini_cm_dealloc_core,
43020- mini_cm_get,
43021- mini_cm_set
43022+ .accelerated = mini_cm_accelerated,
43023+ .listen = mini_cm_listen,
43024+ .stop_listener = mini_cm_del_listen,
43025+ .connect = mini_cm_connect,
43026+ .close = mini_cm_close,
43027+ .accept = mini_cm_accept,
43028+ .reject = mini_cm_reject,
43029+ .recv_pkt = mini_cm_recv_pkt,
43030+ .destroy_cm_core = mini_cm_dealloc_core,
43031+ .get = mini_cm_get,
43032+ .set = mini_cm_set
43033 };
43034
43035 static struct nes_cm_core *g_cm_core;
43036
43037-atomic_t cm_connects;
43038-atomic_t cm_accepts;
43039-atomic_t cm_disconnects;
43040-atomic_t cm_closes;
43041-atomic_t cm_connecteds;
43042-atomic_t cm_connect_reqs;
43043-atomic_t cm_rejects;
43044+atomic_unchecked_t cm_connects;
43045+atomic_unchecked_t cm_accepts;
43046+atomic_unchecked_t cm_disconnects;
43047+atomic_unchecked_t cm_closes;
43048+atomic_unchecked_t cm_connecteds;
43049+atomic_unchecked_t cm_connect_reqs;
43050+atomic_unchecked_t cm_rejects;
43051
43052 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
43053 {
43054@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
43055 kfree(listener);
43056 listener = NULL;
43057 ret = 0;
43058- atomic_inc(&cm_listens_destroyed);
43059+ atomic_inc_unchecked(&cm_listens_destroyed);
43060 } else {
43061 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
43062 }
43063@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
43064 cm_node->rem_mac);
43065
43066 add_hte_node(cm_core, cm_node);
43067- atomic_inc(&cm_nodes_created);
43068+ atomic_inc_unchecked(&cm_nodes_created);
43069
43070 return cm_node;
43071 }
43072@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
43073 }
43074
43075 atomic_dec(&cm_core->node_cnt);
43076- atomic_inc(&cm_nodes_destroyed);
43077+ atomic_inc_unchecked(&cm_nodes_destroyed);
43078 nesqp = cm_node->nesqp;
43079 if (nesqp) {
43080 nesqp->cm_node = NULL;
43081@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
43082
43083 static void drop_packet(struct sk_buff *skb)
43084 {
43085- atomic_inc(&cm_accel_dropped_pkts);
43086+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43087 dev_kfree_skb_any(skb);
43088 }
43089
43090@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
43091 {
43092
43093 int reset = 0; /* whether to send reset in case of err.. */
43094- atomic_inc(&cm_resets_recvd);
43095+ atomic_inc_unchecked(&cm_resets_recvd);
43096 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
43097 " refcnt=%d\n", cm_node, cm_node->state,
43098 atomic_read(&cm_node->ref_count));
43099@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
43100 rem_ref_cm_node(cm_node->cm_core, cm_node);
43101 return NULL;
43102 }
43103- atomic_inc(&cm_loopbacks);
43104+ atomic_inc_unchecked(&cm_loopbacks);
43105 loopbackremotenode->loopbackpartner = cm_node;
43106 loopbackremotenode->tcp_cntxt.rcv_wscale =
43107 NES_CM_DEFAULT_RCV_WND_SCALE;
43108@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
43109 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
43110 else {
43111 rem_ref_cm_node(cm_core, cm_node);
43112- atomic_inc(&cm_accel_dropped_pkts);
43113+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43114 dev_kfree_skb_any(skb);
43115 }
43116 break;
43117@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43118
43119 if ((cm_id) && (cm_id->event_handler)) {
43120 if (issue_disconn) {
43121- atomic_inc(&cm_disconnects);
43122+ atomic_inc_unchecked(&cm_disconnects);
43123 cm_event.event = IW_CM_EVENT_DISCONNECT;
43124 cm_event.status = disconn_status;
43125 cm_event.local_addr = cm_id->local_addr;
43126@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43127 }
43128
43129 if (issue_close) {
43130- atomic_inc(&cm_closes);
43131+ atomic_inc_unchecked(&cm_closes);
43132 nes_disconnect(nesqp, 1);
43133
43134 cm_id->provider_data = nesqp;
43135@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43136
43137 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
43138 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
43139- atomic_inc(&cm_accepts);
43140+ atomic_inc_unchecked(&cm_accepts);
43141
43142 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
43143 netdev_refcnt_read(nesvnic->netdev));
43144@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
43145 struct nes_cm_core *cm_core;
43146 u8 *start_buff;
43147
43148- atomic_inc(&cm_rejects);
43149+ atomic_inc_unchecked(&cm_rejects);
43150 cm_node = (struct nes_cm_node *)cm_id->provider_data;
43151 loopback = cm_node->loopbackpartner;
43152 cm_core = cm_node->cm_core;
43153@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43154 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
43155 ntohs(laddr->sin_port));
43156
43157- atomic_inc(&cm_connects);
43158+ atomic_inc_unchecked(&cm_connects);
43159 nesqp->active_conn = 1;
43160
43161 /* cache the cm_id in the qp */
43162@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
43163 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
43164 return err;
43165 }
43166- atomic_inc(&cm_listens_created);
43167+ atomic_inc_unchecked(&cm_listens_created);
43168 }
43169
43170 cm_id->add_ref(cm_id);
43171@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
43172
43173 if (nesqp->destroyed)
43174 return;
43175- atomic_inc(&cm_connecteds);
43176+ atomic_inc_unchecked(&cm_connecteds);
43177 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
43178 " local port 0x%04X. jiffies = %lu.\n",
43179 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
43180@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
43181
43182 cm_id->add_ref(cm_id);
43183 ret = cm_id->event_handler(cm_id, &cm_event);
43184- atomic_inc(&cm_closes);
43185+ atomic_inc_unchecked(&cm_closes);
43186 cm_event.event = IW_CM_EVENT_CLOSE;
43187 cm_event.status = 0;
43188 cm_event.provider_data = cm_id->provider_data;
43189@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
43190 return;
43191 cm_id = cm_node->cm_id;
43192
43193- atomic_inc(&cm_connect_reqs);
43194+ atomic_inc_unchecked(&cm_connect_reqs);
43195 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43196 cm_node, cm_id, jiffies);
43197
43198@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
43199 return;
43200 cm_id = cm_node->cm_id;
43201
43202- atomic_inc(&cm_connect_reqs);
43203+ atomic_inc_unchecked(&cm_connect_reqs);
43204 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43205 cm_node, cm_id, jiffies);
43206
43207diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
43208index 4166452..fc952c3 100644
43209--- a/drivers/infiniband/hw/nes/nes_mgt.c
43210+++ b/drivers/infiniband/hw/nes/nes_mgt.c
43211@@ -40,8 +40,8 @@
43212 #include "nes.h"
43213 #include "nes_mgt.h"
43214
43215-atomic_t pau_qps_created;
43216-atomic_t pau_qps_destroyed;
43217+atomic_unchecked_t pau_qps_created;
43218+atomic_unchecked_t pau_qps_destroyed;
43219
43220 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
43221 {
43222@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
43223 {
43224 struct sk_buff *skb;
43225 unsigned long flags;
43226- atomic_inc(&pau_qps_destroyed);
43227+ atomic_inc_unchecked(&pau_qps_destroyed);
43228
43229 /* Free packets that have not yet been forwarded */
43230 /* Lock is acquired by skb_dequeue when removing the skb */
43231@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
43232 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
43233 skb_queue_head_init(&nesqp->pau_list);
43234 spin_lock_init(&nesqp->pau_lock);
43235- atomic_inc(&pau_qps_created);
43236+ atomic_inc_unchecked(&pau_qps_created);
43237 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
43238 }
43239
43240diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
43241index 49eb511..a774366 100644
43242--- a/drivers/infiniband/hw/nes/nes_nic.c
43243+++ b/drivers/infiniband/hw/nes/nes_nic.c
43244@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
43245 target_stat_values[++index] = mh_detected;
43246 target_stat_values[++index] = mh_pauses_sent;
43247 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
43248- target_stat_values[++index] = atomic_read(&cm_connects);
43249- target_stat_values[++index] = atomic_read(&cm_accepts);
43250- target_stat_values[++index] = atomic_read(&cm_disconnects);
43251- target_stat_values[++index] = atomic_read(&cm_connecteds);
43252- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
43253- target_stat_values[++index] = atomic_read(&cm_rejects);
43254- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
43255- target_stat_values[++index] = atomic_read(&qps_created);
43256- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
43257- target_stat_values[++index] = atomic_read(&qps_destroyed);
43258- target_stat_values[++index] = atomic_read(&cm_closes);
43259+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
43260+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
43261+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
43262+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
43263+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
43264+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
43265+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
43266+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
43267+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
43268+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
43269+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
43270 target_stat_values[++index] = cm_packets_sent;
43271 target_stat_values[++index] = cm_packets_bounced;
43272 target_stat_values[++index] = cm_packets_created;
43273 target_stat_values[++index] = cm_packets_received;
43274 target_stat_values[++index] = cm_packets_dropped;
43275 target_stat_values[++index] = cm_packets_retrans;
43276- target_stat_values[++index] = atomic_read(&cm_listens_created);
43277- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
43278+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
43279+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
43280 target_stat_values[++index] = cm_backlog_drops;
43281- target_stat_values[++index] = atomic_read(&cm_loopbacks);
43282- target_stat_values[++index] = atomic_read(&cm_nodes_created);
43283- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
43284- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
43285- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
43286+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
43287+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
43288+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
43289+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
43290+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
43291 target_stat_values[++index] = nesadapter->free_4kpbl;
43292 target_stat_values[++index] = nesadapter->free_256pbl;
43293 target_stat_values[++index] = int_mod_timer_init;
43294 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
43295 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
43296 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
43297- target_stat_values[++index] = atomic_read(&pau_qps_created);
43298- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
43299+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
43300+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
43301 }
43302
43303 /**
43304diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
43305index c0d0296..3185f57 100644
43306--- a/drivers/infiniband/hw/nes/nes_verbs.c
43307+++ b/drivers/infiniband/hw/nes/nes_verbs.c
43308@@ -46,9 +46,9 @@
43309
43310 #include <rdma/ib_umem.h>
43311
43312-atomic_t mod_qp_timouts;
43313-atomic_t qps_created;
43314-atomic_t sw_qps_destroyed;
43315+atomic_unchecked_t mod_qp_timouts;
43316+atomic_unchecked_t qps_created;
43317+atomic_unchecked_t sw_qps_destroyed;
43318
43319 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
43320
43321@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
43322 if (init_attr->create_flags)
43323 return ERR_PTR(-EINVAL);
43324
43325- atomic_inc(&qps_created);
43326+ atomic_inc_unchecked(&qps_created);
43327 switch (init_attr->qp_type) {
43328 case IB_QPT_RC:
43329 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
43330@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
43331 struct iw_cm_event cm_event;
43332 int ret = 0;
43333
43334- atomic_inc(&sw_qps_destroyed);
43335+ atomic_inc_unchecked(&sw_qps_destroyed);
43336 nesqp->destroyed = 1;
43337
43338 /* Blow away the connection if it exists. */
43339diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
43340index b218254..1d1aa3c 100644
43341--- a/drivers/infiniband/hw/qib/qib.h
43342+++ b/drivers/infiniband/hw/qib/qib.h
43343@@ -52,6 +52,7 @@
43344 #include <linux/kref.h>
43345 #include <linux/sched.h>
43346 #include <linux/kthread.h>
43347+#include <linux/slab.h>
43348
43349 #include "qib_common.h"
43350 #include "qib_verbs.h"
43351diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43352index cdc7df4..a2fdfdb 100644
43353--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43354+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
43355@@ -156,7 +156,7 @@ static size_t ipoib_get_size(const struct net_device *dev)
43356 nla_total_size(2); /* IFLA_IPOIB_UMCAST */
43357 }
43358
43359-static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
43360+static struct rtnl_link_ops ipoib_link_ops = {
43361 .kind = "ipoib",
43362 .maxtype = IFLA_IPOIB_MAX,
43363 .policy = ipoib_policy,
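
[Annotation] Dropping __read_mostly here is a constify-plugin interaction: the plugin turns struct rtnl_link_ops instances const and places them in read-only memory, which conflicts with __read_mostly's explicitly writable section. A sketch of the collision (section name per mainline conventions):

/* __read_mostly pins the object into a writable data section:        */
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
/* ...while a constified ops object belongs in .rodata; keeping both  */
/* would either break the build or leave the function pointers        */
/* writable, so the weaker annotation is removed.                     */
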
43364diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
43365index e853a21..56fc5a8 100644
43366--- a/drivers/input/gameport/gameport.c
43367+++ b/drivers/input/gameport/gameport.c
43368@@ -527,14 +527,14 @@ EXPORT_SYMBOL(gameport_set_phys);
43369 */
43370 static void gameport_init_port(struct gameport *gameport)
43371 {
43372- static atomic_t gameport_no = ATOMIC_INIT(-1);
43373+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(-1);
43374
43375 __module_get(THIS_MODULE);
43376
43377 mutex_init(&gameport->drv_mutex);
43378 device_initialize(&gameport->dev);
43379 dev_set_name(&gameport->dev, "gameport%lu",
43380- (unsigned long)atomic_inc_return(&gameport_no));
43381+ (unsigned long)atomic_inc_return_unchecked(&gameport_no));
43382 gameport->dev.bus = &gameport_bus;
43383 gameport->dev.release = gameport_release_port;
43384 if (gameport->parent)
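
[Annotation] The same conversion recurs across the input drivers below (input core, xpad, ims-pcu, serio, serio_raw): a static counter whose only job is to mint unique instance names. Magnitude never matters, so overflow trapping would only add a false-positive path, and the counters become atomic_unchecked_t. A kernel-context sketch with hypothetical names:

/* Instance-name allocator: uniqueness, not magnitude, is the contract. */
static atomic_unchecked_t dev_no = ATOMIC_INIT(-1);

static void name_one(struct device *dev)
{
	dev_set_name(dev, "mydev%lu",
		     (unsigned long)atomic_inc_return_unchecked(&dev_no));
}
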
43385diff --git a/drivers/input/input.c b/drivers/input/input.c
43386index 213e3a1..4fea837 100644
43387--- a/drivers/input/input.c
43388+++ b/drivers/input/input.c
43389@@ -1775,7 +1775,7 @@ EXPORT_SYMBOL_GPL(input_class);
43390 */
43391 struct input_dev *input_allocate_device(void)
43392 {
43393- static atomic_t input_no = ATOMIC_INIT(-1);
43394+ static atomic_unchecked_t input_no = ATOMIC_INIT(-1);
43395 struct input_dev *dev;
43396
43397 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
43398@@ -1790,7 +1790,7 @@ struct input_dev *input_allocate_device(void)
43399 INIT_LIST_HEAD(&dev->node);
43400
43401 dev_set_name(&dev->dev, "input%lu",
43402- (unsigned long)atomic_inc_return(&input_no));
43403+ (unsigned long)atomic_inc_return_unchecked(&input_no));
43404
43405 __module_get(THIS_MODULE);
43406 }
43407diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
43408index 4a95b22..874c182 100644
43409--- a/drivers/input/joystick/sidewinder.c
43410+++ b/drivers/input/joystick/sidewinder.c
43411@@ -30,6 +30,7 @@
43412 #include <linux/kernel.h>
43413 #include <linux/module.h>
43414 #include <linux/slab.h>
43415+#include <linux/sched.h>
43416 #include <linux/input.h>
43417 #include <linux/gameport.h>
43418 #include <linux/jiffies.h>
43419diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
43420index 3aa2f3f..53c00ea 100644
43421--- a/drivers/input/joystick/xpad.c
43422+++ b/drivers/input/joystick/xpad.c
43423@@ -886,7 +886,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
43424
43425 static int xpad_led_probe(struct usb_xpad *xpad)
43426 {
43427- static atomic_t led_seq = ATOMIC_INIT(-1);
43428+ static atomic_unchecked_t led_seq = ATOMIC_INIT(-1);
43429 unsigned long led_no;
43430 struct xpad_led *led;
43431 struct led_classdev *led_cdev;
43432@@ -899,7 +899,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
43433 if (!led)
43434 return -ENOMEM;
43435
43436- led_no = atomic_inc_return(&led_seq);
43437+ led_no = atomic_inc_return_unchecked(&led_seq);
43438
43439 snprintf(led->name, sizeof(led->name), "xpad%lu", led_no);
43440 led->xpad = xpad;
43441diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
43442index ac1fa5f..5f7502c 100644
43443--- a/drivers/input/misc/ims-pcu.c
43444+++ b/drivers/input/misc/ims-pcu.c
43445@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
43446
43447 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43448 {
43449- static atomic_t device_no = ATOMIC_INIT(-1);
43450+ static atomic_unchecked_t device_no = ATOMIC_INIT(-1);
43451
43452 const struct ims_pcu_device_info *info;
43453 int error;
43454@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
43455 }
43456
43457 /* Device appears to be operable, complete initialization */
43458- pcu->device_no = atomic_inc_return(&device_no);
43459+ pcu->device_no = atomic_inc_return_unchecked(&device_no);
43460
43461 /*
43462 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
43463diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
43464index f4cf664..3204fda 100644
43465--- a/drivers/input/mouse/psmouse.h
43466+++ b/drivers/input/mouse/psmouse.h
43467@@ -117,7 +117,7 @@ struct psmouse_attribute {
43468 ssize_t (*set)(struct psmouse *psmouse, void *data,
43469 const char *buf, size_t count);
43470 bool protect;
43471-};
43472+} __do_const;
43473 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
43474
43475 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
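
[Annotation] __do_const is a marker for PaX's constify gcc plugin: every object of the annotated type is forced const and placed in read-only memory, so the embedded function pointers cannot be retargeted at runtime. Its dual appears a few hunks below in iommu.c, where notifier_block_no_const is the opt-out typedef for the rare instance that legitimately must be written after initialization. Conceptually (a sketch; the real definitions live in grsecurity's compiler headers and may be spelled differently):

#ifdef CONSTIFY_PLUGIN
#define __do_const __attribute__((do_const))
#define __no_const __attribute__((no_const))
#else
#define __do_const
#define __no_const
#endif
typedef struct notifier_block __no_const notifier_block_no_const;
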
43476diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
43477index b604564..3f14ae4 100644
43478--- a/drivers/input/mousedev.c
43479+++ b/drivers/input/mousedev.c
43480@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
43481
43482 spin_unlock_irq(&client->packet_lock);
43483
43484- if (copy_to_user(buffer, data, count))
43485+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
43486 return -EFAULT;
43487
43488 return count;
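
[Annotation] count is caller-controlled while data is a fixed on-stack packet buffer; without the added bound, a larger read request would copy adjacent kernel stack out to userspace. The same defensive shape -- bound first, then copy -- recurs in the ISDN hunks further down (b1_load_t4file, b1_load_config, icn_writecmd). A sketch of the pattern as a standalone kernel helper (name and signature are illustrative):

#include <linux/uaccess.h>

/* Reject user-supplied lengths larger than the kernel-side buffer
 * before copying out; returns bytes copied or -EFAULT. */
static ssize_t copy_out_bounded(void __user *ubuf, const void *kbuf,
				size_t kbuf_len, size_t count)
{
	if (count > kbuf_len)
		return -EFAULT;		/* would leak adjacent memory */
	if (copy_to_user(ubuf, kbuf, count))
		return -EFAULT;
	return count;
}
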
43489diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
43490index a05a517..323a2fd 100644
43491--- a/drivers/input/serio/serio.c
43492+++ b/drivers/input/serio/serio.c
43493@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
43494 */
43495 static void serio_init_port(struct serio *serio)
43496 {
43497- static atomic_t serio_no = ATOMIC_INIT(-1);
43498+ static atomic_unchecked_t serio_no = ATOMIC_INIT(-1);
43499
43500 __module_get(THIS_MODULE);
43501
43502@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
43503 mutex_init(&serio->drv_mutex);
43504 device_initialize(&serio->dev);
43505 dev_set_name(&serio->dev, "serio%lu",
43506- (unsigned long)atomic_inc_return(&serio_no));
43507+ (unsigned long)atomic_inc_return_unchecked(&serio_no));
43508 serio->dev.bus = &serio_bus;
43509 serio->dev.release = serio_release_port;
43510 serio->dev.groups = serio_device_attr_groups;
43511diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
43512index 71ef5d6..93380a9 100644
43513--- a/drivers/input/serio/serio_raw.c
43514+++ b/drivers/input/serio/serio_raw.c
43515@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
43516
43517 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43518 {
43519- static atomic_t serio_raw_no = ATOMIC_INIT(-1);
43520+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(-1);
43521 struct serio_raw *serio_raw;
43522 int err;
43523
43524@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
43525 }
43526
43527 snprintf(serio_raw->name, sizeof(serio_raw->name),
43528- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no));
43529+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no));
43530 kref_init(&serio_raw->kref);
43531 INIT_LIST_HEAD(&serio_raw->client_list);
43532 init_waitqueue_head(&serio_raw->wait);
43533diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
43534index 9802485..2e9941d 100644
43535--- a/drivers/iommu/amd_iommu.c
43536+++ b/drivers/iommu/amd_iommu.c
43537@@ -823,11 +823,21 @@ static void copy_cmd_to_buffer(struct amd_iommu *iommu,
43538
43539 static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
43540 {
43541+ phys_addr_t physaddr;
43542 WARN_ON(address & 0x7ULL);
43543
43544 memset(cmd, 0, sizeof(*cmd));
43545- cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
43546- cmd->data[1] = upper_32_bits(__pa(address));
43547+
43548+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
43549+ if (object_starts_on_stack((void *)address)) {
43550+ void *adjbuf = (void *)address - current->stack + current->lowmem_stack;
43551+ physaddr = __pa((u64)adjbuf);
43552+ } else
43553+#endif
43554+ physaddr = __pa(address);
43555+
43556+ cmd->data[0] = lower_32_bits(physaddr) | CMD_COMPL_WAIT_STORE_MASK;
43557+ cmd->data[1] = upper_32_bits(physaddr);
43558 cmd->data[2] = 1;
43559 CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
43560 }
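
[Annotation] Under GRKERNSEC_KSTACKOVERFLOW, kernel stacks are allocated from vmalloc space and only aliased into lowmem, so __pa() on a stack address no longer yields a valid physical address. The hunk detects a stack-resident completion-wait semaphore and rebases it onto the lowmem alias before translation. The translation itself is plain pointer arithmetic (a sketch, assuming grsecurity's paired current->stack / current->lowmem_stack fields):

/* Same offset within the stack, different base mapping: */
void *adj = (void *)address - current->stack + current->lowmem_stack;
physaddr  = __pa(adj);	/* now valid: lowmem is directly mapped */
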
43561diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
43562index 6cd47b7..264d14a 100644
43563--- a/drivers/iommu/arm-smmu.c
43564+++ b/drivers/iommu/arm-smmu.c
43565@@ -968,7 +968,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
43566 cfg->irptndx = cfg->cbndx;
43567 }
43568
43569- ACCESS_ONCE(smmu_domain->smmu) = smmu;
43570+ ACCESS_ONCE_RW(smmu_domain->smmu) = smmu;
43571 arm_smmu_init_context_bank(smmu_domain);
43572 spin_unlock_irqrestore(&smmu_domain->lock, flags);
43573
43574diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
43575index f7718d7..3ef740b 100644
43576--- a/drivers/iommu/iommu.c
43577+++ b/drivers/iommu/iommu.c
43578@@ -802,7 +802,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
43579 static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
43580 {
43581 int err;
43582- struct notifier_block *nb;
43583+ notifier_block_no_const *nb;
43584 struct iommu_callback_data cb = {
43585 .ops = ops,
43586 };
43587diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
43588index 89c4846..1de796f 100644
43589--- a/drivers/iommu/irq_remapping.c
43590+++ b/drivers/iommu/irq_remapping.c
43591@@ -353,7 +353,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
43592 void panic_if_irq_remap(const char *msg)
43593 {
43594 if (irq_remapping_enabled)
43595- panic(msg);
43596+ panic("%s", msg);
43597 }
43598
43599 static void ir_ack_apic_edge(struct irq_data *data)
43600@@ -374,10 +374,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
43601
43602 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
43603 {
43604- chip->irq_print_chip = ir_print_prefix;
43605- chip->irq_ack = ir_ack_apic_edge;
43606- chip->irq_eoi = ir_ack_apic_level;
43607- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43608+ pax_open_kernel();
43609+ *(void **)&chip->irq_print_chip = ir_print_prefix;
43610+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
43611+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
43612+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
43613+ pax_close_kernel();
43614 }
43615
43616 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
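
[Annotation] Two distinct fixes in this file. panic(msg) with a non-literal argument is a classic format-string bug (a '%' in msg would be interpreted), hence panic("%s", msg). The chip-defaults hunk is the standard grsecurity pattern for mutating structures that KERNEXEC/constify place in read-only memory: pax_open_kernel()/pax_close_kernel() temporarily lift kernel write protection (on x86, by toggling CR0.WP), and the *(void **)& casts strip the implied const from the members. For one pointer member the shape is:

pax_open_kernel();                            /* e.g. clear CR0.WP        */
*(void **)&chip->irq_ack = ir_ack_apic_edge;  /* write through the const  */
pax_close_kernel();                           /* restore write protection */
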
43617diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
43618index d617ee5..df8be8b 100644
43619--- a/drivers/irqchip/irq-gic.c
43620+++ b/drivers/irqchip/irq-gic.c
43621@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
43622 * Supported arch specific GIC irq extension.
43623 * Default make them NULL.
43624 */
43625-struct irq_chip gic_arch_extn = {
43626+irq_chip_no_const gic_arch_extn = {
43627 .irq_eoi = NULL,
43628 .irq_mask = NULL,
43629 .irq_unmask = NULL,
43630@@ -311,7 +311,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
43631 chained_irq_exit(chip, desc);
43632 }
43633
43634-static struct irq_chip gic_chip = {
43635+static irq_chip_no_const gic_chip __read_only = {
43636 .name = "GIC",
43637 .irq_mask = gic_mask_irq,
43638 .irq_unmask = gic_unmask_irq,
43639diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
43640index 078cac5..fb0f846 100644
43641--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
43642+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
43643@@ -353,7 +353,7 @@ static int intc_irqpin_probe(struct platform_device *pdev)
43644 struct intc_irqpin_iomem *i;
43645 struct resource *io[INTC_IRQPIN_REG_NR];
43646 struct resource *irq;
43647- struct irq_chip *irq_chip;
43648+ irq_chip_no_const *irq_chip;
43649 void (*enable_fn)(struct irq_data *d);
43650 void (*disable_fn)(struct irq_data *d);
43651 const char *name = dev_name(dev);
43652diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
43653index 384e6ed..7a771b2 100644
43654--- a/drivers/irqchip/irq-renesas-irqc.c
43655+++ b/drivers/irqchip/irq-renesas-irqc.c
43656@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
43657 struct irqc_priv *p;
43658 struct resource *io;
43659 struct resource *irq;
43660- struct irq_chip *irq_chip;
43661+ irq_chip_no_const *irq_chip;
43662 const char *name = dev_name(&pdev->dev);
43663 int ret;
43664 int k;
43665diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
43666index 6a2df32..dc962f1 100644
43667--- a/drivers/isdn/capi/capi.c
43668+++ b/drivers/isdn/capi/capi.c
43669@@ -81,8 +81,8 @@ struct capiminor {
43670
43671 struct capi20_appl *ap;
43672 u32 ncci;
43673- atomic_t datahandle;
43674- atomic_t msgid;
43675+ atomic_unchecked_t datahandle;
43676+ atomic_unchecked_t msgid;
43677
43678 struct tty_port port;
43679 int ttyinstop;
43680@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
43681 capimsg_setu16(s, 2, mp->ap->applid);
43682 capimsg_setu8 (s, 4, CAPI_DATA_B3);
43683 capimsg_setu8 (s, 5, CAPI_RESP);
43684- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
43685+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
43686 capimsg_setu32(s, 8, mp->ncci);
43687 capimsg_setu16(s, 12, datahandle);
43688 }
43689@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
43690 mp->outbytes -= len;
43691 spin_unlock_bh(&mp->outlock);
43692
43693- datahandle = atomic_inc_return(&mp->datahandle);
43694+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
43695 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
43696 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43697 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
43698 capimsg_setu16(skb->data, 2, mp->ap->applid);
43699 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
43700 capimsg_setu8 (skb->data, 5, CAPI_REQ);
43701- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
43702+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
43703 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
43704 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
43705 capimsg_setu16(skb->data, 16, len); /* Data length */
43706diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
43707index aecec6d..11e13c5 100644
43708--- a/drivers/isdn/gigaset/bas-gigaset.c
43709+++ b/drivers/isdn/gigaset/bas-gigaset.c
43710@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
43711
43712
43713 static const struct gigaset_ops gigops = {
43714- gigaset_write_cmd,
43715- gigaset_write_room,
43716- gigaset_chars_in_buffer,
43717- gigaset_brkchars,
43718- gigaset_init_bchannel,
43719- gigaset_close_bchannel,
43720- gigaset_initbcshw,
43721- gigaset_freebcshw,
43722- gigaset_reinitbcshw,
43723- gigaset_initcshw,
43724- gigaset_freecshw,
43725- gigaset_set_modem_ctrl,
43726- gigaset_baud_rate,
43727- gigaset_set_line_ctrl,
43728- gigaset_isoc_send_skb,
43729- gigaset_isoc_input,
43730+ .write_cmd = gigaset_write_cmd,
43731+ .write_room = gigaset_write_room,
43732+ .chars_in_buffer = gigaset_chars_in_buffer,
43733+ .brkchars = gigaset_brkchars,
43734+ .init_bchannel = gigaset_init_bchannel,
43735+ .close_bchannel = gigaset_close_bchannel,
43736+ .initbcshw = gigaset_initbcshw,
43737+ .freebcshw = gigaset_freebcshw,
43738+ .reinitbcshw = gigaset_reinitbcshw,
43739+ .initcshw = gigaset_initcshw,
43740+ .freecshw = gigaset_freecshw,
43741+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43742+ .baud_rate = gigaset_baud_rate,
43743+ .set_line_ctrl = gigaset_set_line_ctrl,
43744+ .send_skb = gigaset_isoc_send_skb,
43745+ .handle_input = gigaset_isoc_input,
43746 };
43747
43748 /* bas_gigaset_init
43749diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
43750index 600c79b..3752bab 100644
43751--- a/drivers/isdn/gigaset/interface.c
43752+++ b/drivers/isdn/gigaset/interface.c
43753@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
43754 }
43755 tty->driver_data = cs;
43756
43757- ++cs->port.count;
43758+ atomic_inc(&cs->port.count);
43759
43760- if (cs->port.count == 1) {
43761+ if (atomic_read(&cs->port.count) == 1) {
43762 tty_port_tty_set(&cs->port, tty);
43763 cs->port.low_latency = 1;
43764 }
43765@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
43766
43767 if (!cs->connected)
43768 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
43769- else if (!cs->port.count)
43770+ else if (!atomic_read(&cs->port.count))
43771 dev_warn(cs->dev, "%s: device not opened\n", __func__);
43772- else if (!--cs->port.count)
43773+ else if (!atomic_dec_return(&cs->port.count))
43774 tty_port_tty_set(&cs->port, NULL);
43775
43776 mutex_unlock(&cs->mutex);
43777diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
43778index 8c91fd5..14f13ce 100644
43779--- a/drivers/isdn/gigaset/ser-gigaset.c
43780+++ b/drivers/isdn/gigaset/ser-gigaset.c
43781@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
43782 }
43783
43784 static const struct gigaset_ops ops = {
43785- gigaset_write_cmd,
43786- gigaset_write_room,
43787- gigaset_chars_in_buffer,
43788- gigaset_brkchars,
43789- gigaset_init_bchannel,
43790- gigaset_close_bchannel,
43791- gigaset_initbcshw,
43792- gigaset_freebcshw,
43793- gigaset_reinitbcshw,
43794- gigaset_initcshw,
43795- gigaset_freecshw,
43796- gigaset_set_modem_ctrl,
43797- gigaset_baud_rate,
43798- gigaset_set_line_ctrl,
43799- gigaset_m10x_send_skb, /* asyncdata.c */
43800- gigaset_m10x_input, /* asyncdata.c */
43801+ .write_cmd = gigaset_write_cmd,
43802+ .write_room = gigaset_write_room,
43803+ .chars_in_buffer = gigaset_chars_in_buffer,
43804+ .brkchars = gigaset_brkchars,
43805+ .init_bchannel = gigaset_init_bchannel,
43806+ .close_bchannel = gigaset_close_bchannel,
43807+ .initbcshw = gigaset_initbcshw,
43808+ .freebcshw = gigaset_freebcshw,
43809+ .reinitbcshw = gigaset_reinitbcshw,
43810+ .initcshw = gigaset_initcshw,
43811+ .freecshw = gigaset_freecshw,
43812+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43813+ .baud_rate = gigaset_baud_rate,
43814+ .set_line_ctrl = gigaset_set_line_ctrl,
43815+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
43816+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
43817 };
43818
43819
43820diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
43821index 5f306e2..5342f88 100644
43822--- a/drivers/isdn/gigaset/usb-gigaset.c
43823+++ b/drivers/isdn/gigaset/usb-gigaset.c
43824@@ -543,7 +543,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
43825 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
43826 memcpy(cs->hw.usb->bchars, buf, 6);
43827 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
43828- 0, 0, &buf, 6, 2000);
43829+ 0, 0, buf, 6, 2000);
43830 }
43831
43832 static void gigaset_freebcshw(struct bc_state *bcs)
43833@@ -862,22 +862,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
43834 }
43835
43836 static const struct gigaset_ops ops = {
43837- gigaset_write_cmd,
43838- gigaset_write_room,
43839- gigaset_chars_in_buffer,
43840- gigaset_brkchars,
43841- gigaset_init_bchannel,
43842- gigaset_close_bchannel,
43843- gigaset_initbcshw,
43844- gigaset_freebcshw,
43845- gigaset_reinitbcshw,
43846- gigaset_initcshw,
43847- gigaset_freecshw,
43848- gigaset_set_modem_ctrl,
43849- gigaset_baud_rate,
43850- gigaset_set_line_ctrl,
43851- gigaset_m10x_send_skb,
43852- gigaset_m10x_input,
43853+ .write_cmd = gigaset_write_cmd,
43854+ .write_room = gigaset_write_room,
43855+ .chars_in_buffer = gigaset_chars_in_buffer,
43856+ .brkchars = gigaset_brkchars,
43857+ .init_bchannel = gigaset_init_bchannel,
43858+ .close_bchannel = gigaset_close_bchannel,
43859+ .initbcshw = gigaset_initbcshw,
43860+ .freebcshw = gigaset_freebcshw,
43861+ .reinitbcshw = gigaset_reinitbcshw,
43862+ .initcshw = gigaset_initcshw,
43863+ .freecshw = gigaset_freecshw,
43864+ .set_modem_ctrl = gigaset_set_modem_ctrl,
43865+ .baud_rate = gigaset_baud_rate,
43866+ .set_line_ctrl = gigaset_set_line_ctrl,
43867+ .send_skb = gigaset_m10x_send_skb,
43868+ .handle_input = gigaset_m10x_input,
43869 };
43870
43871 /*
43872diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
43873index 4d9b195..455075c 100644
43874--- a/drivers/isdn/hardware/avm/b1.c
43875+++ b/drivers/isdn/hardware/avm/b1.c
43876@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
43877 }
43878 if (left) {
43879 if (t4file->user) {
43880- if (copy_from_user(buf, dp, left))
43881+ if (left > sizeof buf || copy_from_user(buf, dp, left))
43882 return -EFAULT;
43883 } else {
43884 memcpy(buf, dp, left);
43885@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
43886 }
43887 if (left) {
43888 if (config->user) {
43889- if (copy_from_user(buf, dp, left))
43890+ if (left > sizeof buf || copy_from_user(buf, dp, left))
43891 return -EFAULT;
43892 } else {
43893 memcpy(buf, dp, left);
43894diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
43895index 9b856e1..fa03c92 100644
43896--- a/drivers/isdn/i4l/isdn_common.c
43897+++ b/drivers/isdn/i4l/isdn_common.c
43898@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
43899 } else
43900 return -EINVAL;
43901 case IIOCDBGVAR:
43902+ if (!capable(CAP_SYS_RAWIO))
43903+ return -EPERM;
43904 if (arg) {
43905 if (copy_to_user(argp, &dev, sizeof(ulong)))
43906 return -EFAULT;
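
[Annotation] IIOCDBGVAR hands userspace the kernel address of the global dev structure; with kernel-pointer hiding (GRKERNSEC_HIDESYM) that address must not be available to unprivileged callers, so the ioctl is gated on CAP_SYS_RAWIO. A self-contained sketch of the shape (names are hypothetical):

#include <linux/capability.h>
#include <linux/uaccess.h>

static long dbg_addr_ioctl(unsigned long __user *argp, void *kobj)
{
	if (!capable(CAP_SYS_RAWIO))	/* kernel addresses are privileged */
		return -EPERM;
	return put_user((unsigned long)kobj, argp) ? -EFAULT : 0;
}
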
43907diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
43908index 91d5730..336523e 100644
43909--- a/drivers/isdn/i4l/isdn_concap.c
43910+++ b/drivers/isdn/i4l/isdn_concap.c
43911@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
43912 }
43913
43914 struct concap_device_ops isdn_concap_reliable_dl_dops = {
43915- &isdn_concap_dl_data_req,
43916- &isdn_concap_dl_connect_req,
43917- &isdn_concap_dl_disconn_req
43918+ .data_req = &isdn_concap_dl_data_req,
43919+ .connect_req = &isdn_concap_dl_connect_req,
43920+ .disconn_req = &isdn_concap_dl_disconn_req
43921 };
43922
43923 /* The following should better go into a dedicated source file such that
43924diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
43925index bc91261..2ef7e36 100644
43926--- a/drivers/isdn/i4l/isdn_tty.c
43927+++ b/drivers/isdn/i4l/isdn_tty.c
43928@@ -1503,9 +1503,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
43929
43930 #ifdef ISDN_DEBUG_MODEM_OPEN
43931 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
43932- port->count);
43933+ atomic_read(&port->count));
43934 #endif
43935- port->count++;
43936+ atomic_inc(&port->count);
43937 port->tty = tty;
43938 /*
43939 * Start up serial port
43940@@ -1549,7 +1549,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
43941 #endif
43942 return;
43943 }
43944- if ((tty->count == 1) && (port->count != 1)) {
43945+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
43946 /*
43947 * Uh, oh. tty->count is 1, which means that the tty
43948 * structure will be freed. Info->count should always
43949@@ -1558,15 +1558,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
43950 * serial port won't be shutdown.
43951 */
43952 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
43953- "info->count is %d\n", port->count);
43954- port->count = 1;
43955+ "info->count is %d\n", atomic_read(&port->count));
43956+ atomic_set(&port->count, 1);
43957 }
43958- if (--port->count < 0) {
43959+ if (atomic_dec_return(&port->count) < 0) {
43960 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
43961- info->line, port->count);
43962- port->count = 0;
43963+ info->line, atomic_read(&port->count));
43964+ atomic_set(&port->count, 0);
43965 }
43966- if (port->count) {
43967+ if (atomic_read(&port->count)) {
43968 #ifdef ISDN_DEBUG_MODEM_OPEN
43969 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
43970 #endif
43971@@ -1620,7 +1620,7 @@ isdn_tty_hangup(struct tty_struct *tty)
43972 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
43973 return;
43974 isdn_tty_shutdown(info);
43975- port->count = 0;
43976+ atomic_set(&port->count, 0);
43977 port->flags &= ~ASYNC_NORMAL_ACTIVE;
43978 port->tty = NULL;
43979 wake_up_interruptible(&port->open_wait);
43980@@ -1965,7 +1965,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
43981 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
43982 modem_info *info = &dev->mdm.info[i];
43983
43984- if (info->port.count == 0)
43985+ if (atomic_read(&info->port.count) == 0)
43986 continue;
43987 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
43988 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
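
[Annotation] port->count is a reference count touched from open, close, hangup and the incoming-call scanner without one consistent lock; converting it to an atomic (here and in gigaset's interface.c above -- the field's type change lives in the tty_port definition elsewhere in this patch) removes the read-modify-write races. The conversion is mechanical:

/* port->count++              ->  atomic_inc(&port->count);
 * if (--port->count < 0) ... ->  if (atomic_dec_return(&port->count) < 0) ...
 * if (port->count) ...       ->  if (atomic_read(&port->count)) ...
 * port->count = 0;           ->  atomic_set(&port->count, 0);           */
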
43989diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
43990index e2d4e58..40cd045 100644
43991--- a/drivers/isdn/i4l/isdn_x25iface.c
43992+++ b/drivers/isdn/i4l/isdn_x25iface.c
43993@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
43994
43995
43996 static struct concap_proto_ops ix25_pops = {
43997- &isdn_x25iface_proto_new,
43998- &isdn_x25iface_proto_del,
43999- &isdn_x25iface_proto_restart,
44000- &isdn_x25iface_proto_close,
44001- &isdn_x25iface_xmit,
44002- &isdn_x25iface_receive,
44003- &isdn_x25iface_connect_ind,
44004- &isdn_x25iface_disconn_ind
44005+ .proto_new = &isdn_x25iface_proto_new,
44006+ .proto_del = &isdn_x25iface_proto_del,
44007+ .restart = &isdn_x25iface_proto_restart,
44008+ .close = &isdn_x25iface_proto_close,
44009+ .encap_and_xmit = &isdn_x25iface_xmit,
44010+ .data_ind = &isdn_x25iface_receive,
44011+ .connect_ind = &isdn_x25iface_connect_ind,
44012+ .disconn_ind = &isdn_x25iface_disconn_ind
44013 };
44014
44015 /* error message helper function */
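
[Annotation] The ops-table rewrites above (nes_cm_api, the three gigaset_ops instances, isdn_concap_reliable_dl_dops, ix25_pops) all trade positional initializers for designated ones. Besides documenting which callback lands in which slot, this is what keeps the tables correct when grsecurity features reorder or constify the structures (e.g. structure-layout randomization): a positional list silently mis-binds if a field is ever added or moved. A runnable illustration with made-up types:

#include <stdio.h>

struct ops {
	int (*open)(void);
	int (*close)(void);
};

static int my_open(void)  { return 1; }
static int my_close(void) { return 2; }

/* Positional: breaks silently if 'open' and 'close' swap places.
 * Designated: order-independent and self-documenting. */
static const struct ops positional = { my_open, my_close };
static const struct ops designated = { .close = my_close, .open = my_open };

int main(void)
{
	printf("%d %d\n", positional.open(), designated.open());  /* 1 1 */
	return 0;
}
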
44016diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
44017index 6a7447c..b4987ea 100644
44018--- a/drivers/isdn/icn/icn.c
44019+++ b/drivers/isdn/icn/icn.c
44020@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
44021 if (count > len)
44022 count = len;
44023 if (user) {
44024- if (copy_from_user(msg, buf, count))
44025+ if (count > sizeof msg || copy_from_user(msg, buf, count))
44026 return -EFAULT;
44027 } else
44028 memcpy(msg, buf, count);
44029@@ -1609,7 +1609,7 @@ icn_setup(char *line)
44030 if (ints[0] > 1)
44031 membase = (unsigned long)ints[2];
44032 if (str && *str) {
44033- strcpy(sid, str);
44034+ strlcpy(sid, str, sizeof(sid));
44035 icn_id = sid;
44036 if ((p = strchr(sid, ','))) {
44037 *p++ = 0;
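
[Annotation] sid is a fixed-size buffer and str comes from the kernel command line, so the unbounded strcpy was a boot-time buffer overflow; strlcpy bounds the copy and guarantees NUL termination, at the cost of silent truncation. Sketch (buffer size illustrative):

char sid[20];

/* strcpy(sid, str);             overruns sid for a long icn= option */
strlcpy(sid, str, sizeof(sid));  /* copies at most 19 bytes + NUL    */
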
44038diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
44039index 87f7dff..7300125 100644
44040--- a/drivers/isdn/mISDN/dsp_cmx.c
44041+++ b/drivers/isdn/mISDN/dsp_cmx.c
44042@@ -1625,7 +1625,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
44043 static u16 dsp_count; /* last sample count */
44044 static int dsp_count_valid; /* if we have last sample count */
44045
44046-void
44047+void __intentional_overflow(-1)
44048 dsp_cmx_send(void *arg)
44049 {
44050 struct dsp_conf *conf;
44051diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
44052index 0f9ed1e..2715d6f 100644
44053--- a/drivers/leds/leds-clevo-mail.c
44054+++ b/drivers/leds/leds-clevo-mail.c
44055@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
44056 * detected as working, but in reality it is not) as low as
44057 * possible.
44058 */
44059-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
44060+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
44061 {
44062 .callback = clevo_mail_led_dmi_callback,
44063 .ident = "Clevo D410J",
44064diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
44065index 046cb70..6b20d39 100644
44066--- a/drivers/leds/leds-ss4200.c
44067+++ b/drivers/leds/leds-ss4200.c
44068@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
44069 * detected as working, but in reality it is not) as low as
44070 * possible.
44071 */
44072-static struct dmi_system_id nas_led_whitelist[] __initdata = {
44073+static struct dmi_system_id nas_led_whitelist[] __initconst = {
44074 {
44075 .callback = ss4200_led_dmi_callback,
44076 .ident = "Intel SS4200-E",
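
[Annotation] Both DMI tables are init-only and, under the constify plugin, struct dmi_system_id becomes implicitly const; const data belongs in .init.rodata via __initconst, since __initdata names a writable init section and placing const objects there can fail at build time. The resulting shape (a sketch):

static const struct dmi_system_id table[] __initconst = { /* ... */ { } };
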
44077diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
44078index 6590558..a74c5dd 100644
44079--- a/drivers/lguest/core.c
44080+++ b/drivers/lguest/core.c
44081@@ -96,9 +96,17 @@ static __init int map_switcher(void)
44082 * The end address needs +1 because __get_vm_area allocates an
44083 * extra guard page, so we need space for that.
44084 */
44085+
44086+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
44087+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
44088+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
44089+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
44090+#else
44091 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
44092 VM_ALLOC, switcher_addr, switcher_addr
44093 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
44094+#endif
44095+
44096 if (!switcher_vma) {
44097 err = -ENOMEM;
44098 printk("lguest: could not map switcher pages high\n");
44099@@ -121,7 +129,7 @@ static __init int map_switcher(void)
44100 * Now the Switcher is mapped at the right address, we can't fail!
44101 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
44102 */
44103- memcpy(switcher_vma->addr, start_switcher_text,
44104+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
44105 end_switcher_text - start_switcher_text);
44106
44107 printk(KERN_INFO "lguest: mapped switcher at %p\n",
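[annotation] Under CONFIG_PAX_KERNEXEC the kernel image is mapped read-only/executable and ordinary vmalloc memory is non-executable, so the switcher mapping is requested with VM_KERNEXEC and the compiled-in switcher text is read through ktla_ktva(). A rough sketch of what that helper means on i386 PaX (an assumption, simplified):

	/* translate a kernel text linear address into the virtual alias
	 * through which the (relocated) text is actually accessible: */
	#define ktla_ktva(addr)	((addr) + __KERNEL_TEXT_OFFSET)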
44108diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
44109index e8b55c3..3514c37 100644
44110--- a/drivers/lguest/page_tables.c
44111+++ b/drivers/lguest/page_tables.c
44112@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
44113 /*:*/
44114
44115 #ifdef CONFIG_X86_PAE
44116-static void release_pmd(pmd_t *spmd)
44117+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
44118 {
44119 /* If the entry's not present, there's nothing to release. */
44120 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
44121diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
44122index 922a1ac..9dd0c2a 100644
44123--- a/drivers/lguest/x86/core.c
44124+++ b/drivers/lguest/x86/core.c
44125@@ -59,7 +59,7 @@ static struct {
44126 /* Offset from where switcher.S was compiled to where we've copied it */
44127 static unsigned long switcher_offset(void)
44128 {
44129- return switcher_addr - (unsigned long)start_switcher_text;
44130+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
44131 }
44132
44133 /* This cpu's struct lguest_pages (after the Switcher text page) */
44134@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
44135 * These copies are pretty cheap, so we do them unconditionally: */
44136 /* Save the current Host top-level page directory.
44137 */
44138+
44139+#ifdef CONFIG_PAX_PER_CPU_PGD
44140+ pages->state.host_cr3 = read_cr3();
44141+#else
44142 pages->state.host_cr3 = __pa(current->mm->pgd);
44143+#endif
44144+
44145 /*
44146 * Set up the Guest's page tables to see this CPU's pages (and no
44147 * other CPU's pages).
44148@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
44149 * compiled-in switcher code and the high-mapped copy we just made.
44150 */
44151 for (i = 0; i < IDT_ENTRIES; i++)
44152- default_idt_entries[i] += switcher_offset();
44153+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
44154
44155 /*
44156 * Set up the Switcher's per-cpu areas.
44157@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
44158 * it will be undisturbed when we switch. To change %cs and jump we
44159 * need this structure to feed to Intel's "lcall" instruction.
44160 */
44161- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
44162+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
44163 lguest_entry.segment = LGUEST_CS;
44164
44165 /*
44166diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
44167index 40634b0..4f5855e 100644
44168--- a/drivers/lguest/x86/switcher_32.S
44169+++ b/drivers/lguest/x86/switcher_32.S
44170@@ -87,6 +87,7 @@
44171 #include <asm/page.h>
44172 #include <asm/segment.h>
44173 #include <asm/lguest.h>
44174+#include <asm/processor-flags.h>
44175
44176 // We mark the start of the code to copy
44177 // It's placed in .text tho it's never run here
44178@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
44179 // Changes type when we load it: damn Intel!
44180 // For after we switch over our page tables
44181 // That entry will be read-only: we'd crash.
44182+
44183+#ifdef CONFIG_PAX_KERNEXEC
44184+ mov %cr0, %edx
44185+ xor $X86_CR0_WP, %edx
44186+ mov %edx, %cr0
44187+#endif
44188+
44189 movl $(GDT_ENTRY_TSS*8), %edx
44190 ltr %dx
44191
44192@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
44193 // Let's clear it again for our return.
44194 // The GDT descriptor of the Host
44195 // Points to the table after two "size" bytes
44196- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
44197+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
44198 // Clear "used" from type field (byte 5, bit 2)
44199- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
44200+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
44201+
44202+#ifdef CONFIG_PAX_KERNEXEC
44203+ mov %cr0, %eax
44204+ xor $X86_CR0_WP, %eax
44205+ mov %eax, %cr0
44206+#endif
44207
44208 // Once our page table's switched, the Guest is live!
44209 // The Host fades as we run this final step.
44210@@ -295,13 +309,12 @@ deliver_to_host:
44211 // I consulted gcc, and it gave
44212 // These instructions, which I gladly credit:
44213 leal (%edx,%ebx,8), %eax
44214- movzwl (%eax),%edx
44215- movl 4(%eax), %eax
44216- xorw %ax, %ax
44217- orl %eax, %edx
44218+ movl 4(%eax), %edx
44219+ movw (%eax), %dx
44220 // Now the address of the handler's in %edx
44221 // We call it now: its "iret" drops us home.
44222- jmp *%edx
44223+ ljmp $__KERNEL_CS, $1f
44224+1: jmp *%edx
44225
44226 // Every interrupt can come to us here
44227 // But we must truly tell each apart.
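[annotation] With KERNEXEC the GDT sits in read-only memory, yet `ltr` needs the TSS descriptor's busy bit writable. The switcher therefore briefly disables CR0.WP around those writes and restores it with the same xor; the far jump through $__KERNEL_CS before the final indirect jump reloads %cs so the host handler runs in the kernel code segment, whose base can differ under KERNEXEC. The WP toggle in isolation:

	mov %cr0, %edx
	xor $X86_CR0_WP, %edx	# first use clears WP, the second restores it
	mov %edx, %cr0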
44228diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
44229index a08e3ee..df8ade2 100644
44230--- a/drivers/md/bcache/closure.h
44231+++ b/drivers/md/bcache/closure.h
44232@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
44233 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
44234 struct workqueue_struct *wq)
44235 {
44236- BUG_ON(object_is_on_stack(cl));
44237+ BUG_ON(object_starts_on_stack(cl));
44238 closure_set_ip(cl);
44239 cl->fn = fn;
44240 cl->wq = wq;
44241diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
44242index 1695ee5..89f18ab 100644
44243--- a/drivers/md/bitmap.c
44244+++ b/drivers/md/bitmap.c
44245@@ -1784,7 +1784,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
44246 chunk_kb ? "KB" : "B");
44247 if (bitmap->storage.file) {
44248 seq_printf(seq, ", file: ");
44249- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
44250+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
44251 }
44252
44253 seq_printf(seq, "\n");
44254diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
44255index 73f791b..8c5d3ac 100644
44256--- a/drivers/md/dm-ioctl.c
44257+++ b/drivers/md/dm-ioctl.c
44258@@ -1772,7 +1772,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
44259 cmd == DM_LIST_VERSIONS_CMD)
44260 return 0;
44261
44262- if ((cmd == DM_DEV_CREATE_CMD)) {
44263+ if (cmd == DM_DEV_CREATE_CMD) {
44264 if (!*param->name) {
44265 DMWARN("name not supplied when creating device");
44266 return -EINVAL;
44267diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
44268index 089d627..ef7352e 100644
44269--- a/drivers/md/dm-raid1.c
44270+++ b/drivers/md/dm-raid1.c
44271@@ -40,7 +40,7 @@ enum dm_raid1_error {
44272
44273 struct mirror {
44274 struct mirror_set *ms;
44275- atomic_t error_count;
44276+ atomic_unchecked_t error_count;
44277 unsigned long error_type;
44278 struct dm_dev *dev;
44279 sector_t offset;
44280@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
44281 struct mirror *m;
44282
44283 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
44284- if (!atomic_read(&m->error_count))
44285+ if (!atomic_read_unchecked(&m->error_count))
44286 return m;
44287
44288 return NULL;
44289@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
44290 * simple way to tell if a device has encountered
44291 * errors.
44292 */
44293- atomic_inc(&m->error_count);
44294+ atomic_inc_unchecked(&m->error_count);
44295
44296 if (test_and_set_bit(error_type, &m->error_type))
44297 return;
44298@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
44299 struct mirror *m = get_default_mirror(ms);
44300
44301 do {
44302- if (likely(!atomic_read(&m->error_count)))
44303+ if (likely(!atomic_read_unchecked(&m->error_count)))
44304 return m;
44305
44306 if (m-- == ms->mirror)
44307@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
44308 {
44309 struct mirror *default_mirror = get_default_mirror(m->ms);
44310
44311- return !atomic_read(&default_mirror->error_count);
44312+ return !atomic_read_unchecked(&default_mirror->error_count);
44313 }
44314
44315 static int mirror_available(struct mirror_set *ms, struct bio *bio)
44316@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
44317 */
44318 if (likely(region_in_sync(ms, region, 1)))
44319 m = choose_mirror(ms, bio->bi_iter.bi_sector);
44320- else if (m && atomic_read(&m->error_count))
44321+ else if (m && atomic_read_unchecked(&m->error_count))
44322 m = NULL;
44323
44324 if (likely(m))
44325@@ -936,7 +936,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
44326 }
44327
44328 ms->mirror[mirror].ms = ms;
44329- atomic_set(&(ms->mirror[mirror].error_count), 0);
44330+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
44331 ms->mirror[mirror].error_type = 0;
44332 ms->mirror[mirror].offset = offset;
44333
44334@@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
44335 */
44336 static char device_status_char(struct mirror *m)
44337 {
44338- if (!atomic_read(&(m->error_count)))
44339+ if (!atomic_read_unchecked(&(m->error_count)))
44340 return 'A';
44341
44342 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
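[annotation] grsecurity's PAX_REFCOUNT instruments atomic_t so that an overflow traps, which protects reference counts but would falsely fire on plain statistics counters. Counters such as error_count above are therefore switched to atomic_unchecked_t and the *_unchecked accessors, which keep the layout of atomic_t but skip the overflow check. A simplified sketch (an assumption, x86 flavour):

	typedef struct {
		int counter;
	} atomic_unchecked_t;

	static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
	{
		asm volatile("lock; incl %0" : "+m" (v->counter));
	}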
44343diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
44344index f478a4c..4b8e5ef 100644
44345--- a/drivers/md/dm-stats.c
44346+++ b/drivers/md/dm-stats.c
44347@@ -382,7 +382,7 @@ do_sync_free:
44348 synchronize_rcu_expedited();
44349 dm_stat_free(&s->rcu_head);
44350 } else {
44351- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
44352+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
44353 call_rcu(&s->rcu_head, dm_stat_free);
44354 }
44355 return 0;
44356@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
44357 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
44358 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
44359 ));
44360- ACCESS_ONCE(last->last_sector) = end_sector;
44361- ACCESS_ONCE(last->last_rw) = bi_rw;
44362+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
44363+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
44364 }
44365
44366 rcu_read_lock();
44367diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
44368index f8b37d4..5c5cafd 100644
44369--- a/drivers/md/dm-stripe.c
44370+++ b/drivers/md/dm-stripe.c
44371@@ -21,7 +21,7 @@ struct stripe {
44372 struct dm_dev *dev;
44373 sector_t physical_start;
44374
44375- atomic_t error_count;
44376+ atomic_unchecked_t error_count;
44377 };
44378
44379 struct stripe_c {
44380@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
44381 kfree(sc);
44382 return r;
44383 }
44384- atomic_set(&(sc->stripe[i].error_count), 0);
44385+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
44386 }
44387
44388 ti->private = sc;
44389@@ -332,7 +332,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
44390 DMEMIT("%d ", sc->stripes);
44391 for (i = 0; i < sc->stripes; i++) {
44392 DMEMIT("%s ", sc->stripe[i].dev->name);
44393- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
44394+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
44395 'D' : 'A';
44396 }
44397 buffer[i] = '\0';
44398@@ -377,8 +377,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
44399 */
44400 for (i = 0; i < sc->stripes; i++)
44401 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
44402- atomic_inc(&(sc->stripe[i].error_count));
44403- if (atomic_read(&(sc->stripe[i].error_count)) <
44404+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
44405+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
44406 DM_IO_ERROR_THRESHOLD)
44407 schedule_work(&sc->trigger_event);
44408 }
44409diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
44410index 3afae9e..4e1c954 100644
44411--- a/drivers/md/dm-table.c
44412+++ b/drivers/md/dm-table.c
44413@@ -303,7 +303,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
44414 if (!dev_size)
44415 return 0;
44416
44417- if ((start >= dev_size) || (start + len > dev_size)) {
44418+ if ((start >= dev_size) || (len > dev_size - start)) {
44419 DMWARN("%s: %s too small for target: "
44420 "start=%llu, len=%llu, dev_size=%llu",
44421 dm_device_name(ti->table->md), bdevname(bdev, b),
44422diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
44423index 43adbb8..7b34305 100644
44424--- a/drivers/md/dm-thin-metadata.c
44425+++ b/drivers/md/dm-thin-metadata.c
44426@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44427 {
44428 pmd->info.tm = pmd->tm;
44429 pmd->info.levels = 2;
44430- pmd->info.value_type.context = pmd->data_sm;
44431+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44432 pmd->info.value_type.size = sizeof(__le64);
44433 pmd->info.value_type.inc = data_block_inc;
44434 pmd->info.value_type.dec = data_block_dec;
44435@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44436
44437 pmd->bl_info.tm = pmd->tm;
44438 pmd->bl_info.levels = 1;
44439- pmd->bl_info.value_type.context = pmd->data_sm;
44440+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44441 pmd->bl_info.value_type.size = sizeof(__le64);
44442 pmd->bl_info.value_type.inc = data_block_inc;
44443 pmd->bl_info.value_type.dec = data_block_dec;
44444diff --git a/drivers/md/dm.c b/drivers/md/dm.c
44445index b71c600..d0b85b3 100644
44446--- a/drivers/md/dm.c
44447+++ b/drivers/md/dm.c
44448@@ -185,9 +185,9 @@ struct mapped_device {
44449 /*
44450 * Event handling.
44451 */
44452- atomic_t event_nr;
44453+ atomic_unchecked_t event_nr;
44454 wait_queue_head_t eventq;
44455- atomic_t uevent_seq;
44456+ atomic_unchecked_t uevent_seq;
44457 struct list_head uevent_list;
44458 spinlock_t uevent_lock; /* Protect access to uevent_list */
44459
44460@@ -2070,8 +2070,8 @@ static struct mapped_device *alloc_dev(int minor)
44461 spin_lock_init(&md->deferred_lock);
44462 atomic_set(&md->holders, 1);
44463 atomic_set(&md->open_count, 0);
44464- atomic_set(&md->event_nr, 0);
44465- atomic_set(&md->uevent_seq, 0);
44466+ atomic_set_unchecked(&md->event_nr, 0);
44467+ atomic_set_unchecked(&md->uevent_seq, 0);
44468 INIT_LIST_HEAD(&md->uevent_list);
44469 INIT_LIST_HEAD(&md->table_devices);
44470 spin_lock_init(&md->uevent_lock);
44471@@ -2227,7 +2227,7 @@ static void event_callback(void *context)
44472
44473 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
44474
44475- atomic_inc(&md->event_nr);
44476+ atomic_inc_unchecked(&md->event_nr);
44477 wake_up(&md->eventq);
44478 }
44479
44480@@ -3055,18 +3055,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
44481
44482 uint32_t dm_next_uevent_seq(struct mapped_device *md)
44483 {
44484- return atomic_add_return(1, &md->uevent_seq);
44485+ return atomic_add_return_unchecked(1, &md->uevent_seq);
44486 }
44487
44488 uint32_t dm_get_event_nr(struct mapped_device *md)
44489 {
44490- return atomic_read(&md->event_nr);
44491+ return atomic_read_unchecked(&md->event_nr);
44492 }
44493
44494 int dm_wait_event(struct mapped_device *md, int event_nr)
44495 {
44496 return wait_event_interruptible(md->eventq,
44497- (event_nr != atomic_read(&md->event_nr)));
44498+ (event_nr != atomic_read_unchecked(&md->event_nr)));
44499 }
44500
44501 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
44502diff --git a/drivers/md/md.c b/drivers/md/md.c
44503index 709755f..5bc3fa4 100644
44504--- a/drivers/md/md.c
44505+++ b/drivers/md/md.c
44506@@ -190,10 +190,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
44507 * start build, activate spare
44508 */
44509 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
44510-static atomic_t md_event_count;
44511+static atomic_unchecked_t md_event_count;
44512 void md_new_event(struct mddev *mddev)
44513 {
44514- atomic_inc(&md_event_count);
44515+ atomic_inc_unchecked(&md_event_count);
44516 wake_up(&md_event_waiters);
44517 }
44518 EXPORT_SYMBOL_GPL(md_new_event);
44519@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
44520 */
44521 static void md_new_event_inintr(struct mddev *mddev)
44522 {
44523- atomic_inc(&md_event_count);
44524+ atomic_inc_unchecked(&md_event_count);
44525 wake_up(&md_event_waiters);
44526 }
44527
44528@@ -1422,7 +1422,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
44529 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
44530 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
44531 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
44532- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
44533+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
44534
44535 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
44536 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
44537@@ -1673,7 +1673,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
44538 else
44539 sb->resync_offset = cpu_to_le64(0);
44540
44541- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
44542+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
44543
44544 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
44545 sb->size = cpu_to_le64(mddev->dev_sectors);
44546@@ -2543,7 +2543,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
44547 static ssize_t
44548 errors_show(struct md_rdev *rdev, char *page)
44549 {
44550- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
44551+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
44552 }
44553
44554 static ssize_t
44555@@ -2552,7 +2552,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
44556 char *e;
44557 unsigned long n = simple_strtoul(buf, &e, 10);
44558 if (*buf && (*e == 0 || *e == '\n')) {
44559- atomic_set(&rdev->corrected_errors, n);
44560+ atomic_set_unchecked(&rdev->corrected_errors, n);
44561 return len;
44562 }
44563 return -EINVAL;
44564@@ -2997,8 +2997,8 @@ int md_rdev_init(struct md_rdev *rdev)
44565 rdev->sb_loaded = 0;
44566 rdev->bb_page = NULL;
44567 atomic_set(&rdev->nr_pending, 0);
44568- atomic_set(&rdev->read_errors, 0);
44569- atomic_set(&rdev->corrected_errors, 0);
44570+ atomic_set_unchecked(&rdev->read_errors, 0);
44571+ atomic_set_unchecked(&rdev->corrected_errors, 0);
44572
44573 INIT_LIST_HEAD(&rdev->same_set);
44574 init_waitqueue_head(&rdev->blocked_wait);
44575@@ -6865,7 +6865,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
44576
44577 spin_unlock(&pers_lock);
44578 seq_printf(seq, "\n");
44579- seq->poll_event = atomic_read(&md_event_count);
44580+ seq->poll_event = atomic_read_unchecked(&md_event_count);
44581 return 0;
44582 }
44583 if (v == (void*)2) {
44584@@ -6968,7 +6968,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
44585 return error;
44586
44587 seq = file->private_data;
44588- seq->poll_event = atomic_read(&md_event_count);
44589+ seq->poll_event = atomic_read_unchecked(&md_event_count);
44590 return error;
44591 }
44592
44593@@ -6985,7 +6985,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
44594 /* always allow read */
44595 mask = POLLIN | POLLRDNORM;
44596
44597- if (seq->poll_event != atomic_read(&md_event_count))
44598+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
44599 mask |= POLLERR | POLLPRI;
44600 return mask;
44601 }
44602@@ -7032,7 +7032,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
44603 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
44604 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
44605 (int)part_stat_read(&disk->part0, sectors[1]) -
44606- atomic_read(&disk->sync_io);
44607+ atomic_read_unchecked(&disk->sync_io);
44608 /* sync IO will cause sync_io to increase before the disk_stats
44609 * as sync_io is counted when a request starts, and
44610 * disk_stats is counted when it completes.
44611diff --git a/drivers/md/md.h b/drivers/md/md.h
44612index 03cec5b..0a658c1 100644
44613--- a/drivers/md/md.h
44614+++ b/drivers/md/md.h
44615@@ -94,13 +94,13 @@ struct md_rdev {
44616 * only maintained for arrays that
44617 * support hot removal
44618 */
44619- atomic_t read_errors; /* number of consecutive read errors that
44620+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
44621 * we have tried to ignore.
44622 */
44623 struct timespec last_read_error; /* monotonic time since our
44624 * last read error
44625 */
44626- atomic_t corrected_errors; /* number of corrected read errors,
44627+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
44628 * for reporting to userspace and storing
44629 * in superblock.
44630 */
44631@@ -448,7 +448,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
44632
44633 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
44634 {
44635- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44636+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
44637 }
44638
44639 struct md_personality
44640diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
44641index e8a9042..35bd145 100644
44642--- a/drivers/md/persistent-data/dm-space-map-metadata.c
44643+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
44644@@ -683,7 +683,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
44645 * Flick into a mode where all blocks get allocated in the new area.
44646 */
44647 smm->begin = old_len;
44648- memcpy(sm, &bootstrap_ops, sizeof(*sm));
44649+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
44650
44651 /*
44652 * Extend.
44653@@ -714,7 +714,7 @@ out:
44654 /*
44655 * Switch back to normal behaviour.
44656 */
44657- memcpy(sm, &ops, sizeof(*sm));
44658+ memcpy((void *)sm, &ops, sizeof(*sm));
44659 return r;
44660 }
44661
44662diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
44663index 3e6d115..ffecdeb 100644
44664--- a/drivers/md/persistent-data/dm-space-map.h
44665+++ b/drivers/md/persistent-data/dm-space-map.h
44666@@ -71,6 +71,7 @@ struct dm_space_map {
44667 dm_sm_threshold_fn fn,
44668 void *context);
44669 };
44670+typedef struct dm_space_map __no_const dm_space_map_no_const;
44671
44672 /*----------------------------------------------------------------*/
44673
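[annotation] The constify GCC plugin shipped with this patch turns ops-like structures into const objects, but struct dm_space_map is legitimately rewritten at runtime (see the memcpy() calls in dm-space-map-metadata.c above), so the __no_const typedef opts it out and the context pointers are cast through it. Roughly, as an assumption about the plugin's marker:

	#ifdef CONSTIFY_PLUGIN
	#define __no_const __attribute__((no_const))
	#else
	#define __no_const
	#endif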
44674diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
44675index 2f2f38f..f6a8ebe 100644
44676--- a/drivers/md/raid1.c
44677+++ b/drivers/md/raid1.c
44678@@ -1932,7 +1932,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
44679 if (r1_sync_page_io(rdev, sect, s,
44680 bio->bi_io_vec[idx].bv_page,
44681 READ) != 0)
44682- atomic_add(s, &rdev->corrected_errors);
44683+ atomic_add_unchecked(s, &rdev->corrected_errors);
44684 }
44685 sectors -= s;
44686 sect += s;
44687@@ -2165,7 +2165,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
44688 !test_bit(Faulty, &rdev->flags)) {
44689 if (r1_sync_page_io(rdev, sect, s,
44690 conf->tmppage, READ)) {
44691- atomic_add(s, &rdev->corrected_errors);
44692+ atomic_add_unchecked(s, &rdev->corrected_errors);
44693 printk(KERN_INFO
44694 "md/raid1:%s: read error corrected "
44695 "(%d sectors at %llu on %s)\n",
44696diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
44697index 32e282f..5cec803 100644
44698--- a/drivers/md/raid10.c
44699+++ b/drivers/md/raid10.c
44700@@ -1944,7 +1944,7 @@ static void end_sync_read(struct bio *bio, int error)
44701 /* The write handler will notice the lack of
44702 * R10BIO_Uptodate and record any errors etc
44703 */
44704- atomic_add(r10_bio->sectors,
44705+ atomic_add_unchecked(r10_bio->sectors,
44706 &conf->mirrors[d].rdev->corrected_errors);
44707
44708 /* for reconstruct, we always reschedule after a read.
44709@@ -2301,7 +2301,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44710 {
44711 struct timespec cur_time_mon;
44712 unsigned long hours_since_last;
44713- unsigned int read_errors = atomic_read(&rdev->read_errors);
44714+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
44715
44716 ktime_get_ts(&cur_time_mon);
44717
44718@@ -2323,9 +2323,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
44719 * overflowing the shift of read_errors by hours_since_last.
44720 */
44721 if (hours_since_last >= 8 * sizeof(read_errors))
44722- atomic_set(&rdev->read_errors, 0);
44723+ atomic_set_unchecked(&rdev->read_errors, 0);
44724 else
44725- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
44726+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
44727 }
44728
44729 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
44730@@ -2379,8 +2379,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44731 return;
44732
44733 check_decay_read_errors(mddev, rdev);
44734- atomic_inc(&rdev->read_errors);
44735- if (atomic_read(&rdev->read_errors) > max_read_errors) {
44736+ atomic_inc_unchecked(&rdev->read_errors);
44737+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
44738 char b[BDEVNAME_SIZE];
44739 bdevname(rdev->bdev, b);
44740
44741@@ -2388,7 +2388,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44742 "md/raid10:%s: %s: Raid device exceeded "
44743 "read_error threshold [cur %d:max %d]\n",
44744 mdname(mddev), b,
44745- atomic_read(&rdev->read_errors), max_read_errors);
44746+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
44747 printk(KERN_NOTICE
44748 "md/raid10:%s: %s: Failing raid device\n",
44749 mdname(mddev), b);
44750@@ -2543,7 +2543,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
44751 sect +
44752 choose_data_offset(r10_bio, rdev)),
44753 bdevname(rdev->bdev, b));
44754- atomic_add(s, &rdev->corrected_errors);
44755+ atomic_add_unchecked(s, &rdev->corrected_errors);
44756 }
44757
44758 rdev_dec_pending(rdev, mddev);
44759diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
44760index 8577cc7..5779d5b 100644
44761--- a/drivers/md/raid5.c
44762+++ b/drivers/md/raid5.c
44763@@ -950,23 +950,23 @@ async_copy_data(int frombio, struct bio *bio, struct page **page,
44764 struct bio_vec bvl;
44765 struct bvec_iter iter;
44766 struct page *bio_page;
44767- int page_offset;
44768+ s64 page_offset;
44769 struct async_submit_ctl submit;
44770 enum async_tx_flags flags = 0;
44771
44772 if (bio->bi_iter.bi_sector >= sector)
44773- page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
44774+ page_offset = (s64)(bio->bi_iter.bi_sector - sector) * 512;
44775 else
44776- page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
44777+ page_offset = (s64)(sector - bio->bi_iter.bi_sector) * -512;
44778
44779 if (frombio)
44780 flags |= ASYNC_TX_FENCE;
44781 init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
44782
44783 bio_for_each_segment(bvl, bio, iter) {
44784- int len = bvl.bv_len;
44785- int clen;
44786- int b_offset = 0;
44787+ s64 len = bvl.bv_len;
44788+ s64 clen;
44789+ s64 b_offset = 0;
44790
44791 if (page_offset < 0) {
44792 b_offset = -page_offset;
44793@@ -1730,6 +1730,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
44794 return 1;
44795 }
44796
44797+#ifdef CONFIG_GRKERNSEC_HIDESYM
44798+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
44799+#endif
44800+
44801 static int grow_stripes(struct r5conf *conf, int num)
44802 {
44803 struct kmem_cache *sc;
44804@@ -1741,7 +1745,11 @@ static int grow_stripes(struct r5conf *conf, int num)
44805 "raid%d-%s", conf->level, mdname(conf->mddev));
44806 else
44807 sprintf(conf->cache_name[0],
44808+#ifdef CONFIG_GRKERNSEC_HIDESYM
44809+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
44810+#else
44811 "raid%d-%p", conf->level, conf->mddev);
44812+#endif
44813 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
44814
44815 conf->active_name = 0;
44816@@ -2017,21 +2025,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
44817 mdname(conf->mddev), STRIPE_SECTORS,
44818 (unsigned long long)s,
44819 bdevname(rdev->bdev, b));
44820- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
44821+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
44822 clear_bit(R5_ReadError, &sh->dev[i].flags);
44823 clear_bit(R5_ReWrite, &sh->dev[i].flags);
44824 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
44825 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
44826
44827- if (atomic_read(&rdev->read_errors))
44828- atomic_set(&rdev->read_errors, 0);
44829+ if (atomic_read_unchecked(&rdev->read_errors))
44830+ atomic_set_unchecked(&rdev->read_errors, 0);
44831 } else {
44832 const char *bdn = bdevname(rdev->bdev, b);
44833 int retry = 0;
44834 int set_bad = 0;
44835
44836 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
44837- atomic_inc(&rdev->read_errors);
44838+ atomic_inc_unchecked(&rdev->read_errors);
44839 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
44840 printk_ratelimited(
44841 KERN_WARNING
44842@@ -2059,7 +2067,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
44843 mdname(conf->mddev),
44844 (unsigned long long)s,
44845 bdn);
44846- } else if (atomic_read(&rdev->read_errors)
44847+ } else if (atomic_read_unchecked(&rdev->read_errors)
44848 > conf->max_nr_stripes)
44849 printk(KERN_WARNING
44850 "md/raid:%s: Too many read errors, failing device %s.\n",
44851diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
44852index 983db75..ef9248c 100644
44853--- a/drivers/media/dvb-core/dvbdev.c
44854+++ b/drivers/media/dvb-core/dvbdev.c
44855@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
44856 const struct dvb_device *template, void *priv, int type)
44857 {
44858 struct dvb_device *dvbdev;
44859- struct file_operations *dvbdevfops;
44860+ file_operations_no_const *dvbdevfops;
44861 struct device *clsdev;
44862 int minor;
44863 int id;
44864diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
44865index 6ad22b6..6e90e2a 100644
44866--- a/drivers/media/dvb-frontends/af9033.h
44867+++ b/drivers/media/dvb-frontends/af9033.h
44868@@ -96,6 +96,6 @@ struct af9033_ops {
44869 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
44870 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
44871 int onoff);
44872-};
44873+} __no_const;
44874
44875 #endif /* AF9033_H */
44876diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
44877index 9b6c3bb..baeb5c7 100644
44878--- a/drivers/media/dvb-frontends/dib3000.h
44879+++ b/drivers/media/dvb-frontends/dib3000.h
44880@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
44881 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
44882 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
44883 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
44884-};
44885+} __no_const;
44886
44887 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
44888 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
44889diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
44890index 1fea0e9..321ce8f 100644
44891--- a/drivers/media/dvb-frontends/dib7000p.h
44892+++ b/drivers/media/dvb-frontends/dib7000p.h
44893@@ -64,7 +64,7 @@ struct dib7000p_ops {
44894 int (*get_adc_power)(struct dvb_frontend *fe);
44895 int (*slave_reset)(struct dvb_frontend *fe);
44896 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
44897-};
44898+} __no_const;
44899
44900 #if IS_ENABLED(CONFIG_DVB_DIB7000P)
44901 void *dib7000p_attach(struct dib7000p_ops *ops);
44902diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
44903index 84cc103..5780c54 100644
44904--- a/drivers/media/dvb-frontends/dib8000.h
44905+++ b/drivers/media/dvb-frontends/dib8000.h
44906@@ -61,7 +61,7 @@ struct dib8000_ops {
44907 int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff);
44908 int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
44909 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
44910-};
44911+} __no_const;
44912
44913 #if IS_ENABLED(CONFIG_DVB_DIB8000)
44914 void *dib8000_attach(struct dib8000_ops *ops);
44915diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
44916index 860c98fc..497fa25 100644
44917--- a/drivers/media/pci/cx88/cx88-video.c
44918+++ b/drivers/media/pci/cx88/cx88-video.c
44919@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
44920
44921 /* ------------------------------------------------------------------ */
44922
44923-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44924-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44925-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44926+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44927+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44928+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
44929
44930 module_param_array(video_nr, int, NULL, 0444);
44931 module_param_array(vbi_nr, int, NULL, 0444);
44932diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
44933index 802642d..5534900 100644
44934--- a/drivers/media/pci/ivtv/ivtv-driver.c
44935+++ b/drivers/media/pci/ivtv/ivtv-driver.c
44936@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
44937 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
44938
44939 /* ivtv instance counter */
44940-static atomic_t ivtv_instance = ATOMIC_INIT(0);
44941+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
44942
44943 /* Parameter declarations */
44944 static int cardtype[IVTV_MAX_CARDS];
44945diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
44946index 8cbe6b4..ea3601c 100644
44947--- a/drivers/media/pci/solo6x10/solo6x10-core.c
44948+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
44949@@ -424,7 +424,7 @@ static void solo_device_release(struct device *dev)
44950
44951 static int solo_sysfs_init(struct solo_dev *solo_dev)
44952 {
44953- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
44954+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
44955 struct device *dev = &solo_dev->dev;
44956 const char *driver;
44957 int i;
44958diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
44959index c7141f2..5301fec 100644
44960--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
44961+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
44962@@ -351,7 +351,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
44963
44964 int solo_g723_init(struct solo_dev *solo_dev)
44965 {
44966- static struct snd_device_ops ops = { NULL };
44967+ static struct snd_device_ops ops = { };
44968 struct snd_card *card;
44969 struct snd_kcontrol_new kctl;
44970 char name[32];
44971diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
44972index 8c84846..27b4f83 100644
44973--- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
44974+++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
44975@@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
44976
44977 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
44978 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
44979- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
44980+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
44981 if (p2m_id < 0)
44982 p2m_id = -p2m_id;
44983 }
44984diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
44985index bd8edfa..e82ed85 100644
44986--- a/drivers/media/pci/solo6x10/solo6x10.h
44987+++ b/drivers/media/pci/solo6x10/solo6x10.h
44988@@ -220,7 +220,7 @@ struct solo_dev {
44989
44990 /* P2M DMA Engine */
44991 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
44992- atomic_t p2m_count;
44993+ atomic_unchecked_t p2m_count;
44994 int p2m_jiffies;
44995 unsigned int p2m_timeouts;
44996
44997diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
44998index c135165..dc69499 100644
44999--- a/drivers/media/pci/tw68/tw68-core.c
45000+++ b/drivers/media/pci/tw68/tw68-core.c
45001@@ -60,7 +60,7 @@ static unsigned int card[] = {[0 ... (TW68_MAXBOARDS - 1)] = UNSET };
45002 module_param_array(card, int, NULL, 0444);
45003 MODULE_PARM_DESC(card, "card type");
45004
45005-static atomic_t tw68_instance = ATOMIC_INIT(0);
45006+static atomic_unchecked_t tw68_instance = ATOMIC_INIT(0);
45007
45008 /* ------------------------------------------------------------------ */
45009
45010diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
45011index ba2d8f9..1566684 100644
45012--- a/drivers/media/platform/omap/omap_vout.c
45013+++ b/drivers/media/platform/omap/omap_vout.c
45014@@ -63,7 +63,6 @@ enum omap_vout_channels {
45015 OMAP_VIDEO2,
45016 };
45017
45018-static struct videobuf_queue_ops video_vbq_ops;
45019 /* Variables configurable through module params*/
45020 static u32 video1_numbuffers = 3;
45021 static u32 video2_numbuffers = 3;
45022@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
45023 {
45024 struct videobuf_queue *q;
45025 struct omap_vout_device *vout = NULL;
45026+ static struct videobuf_queue_ops video_vbq_ops = {
45027+ .buf_setup = omap_vout_buffer_setup,
45028+ .buf_prepare = omap_vout_buffer_prepare,
45029+ .buf_release = omap_vout_buffer_release,
45030+ .buf_queue = omap_vout_buffer_queue,
45031+ };
45032
45033 vout = video_drvdata(file);
45034 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
45035@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
45036 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
45037
45038 q = &vout->vbq;
45039- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
45040- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
45041- video_vbq_ops.buf_release = omap_vout_buffer_release;
45042- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
45043 spin_lock_init(&vout->vbq_lock);
45044
45045 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
45046diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
45047index fb2acc5..a2fcbdc4 100644
45048--- a/drivers/media/platform/s5p-tv/mixer.h
45049+++ b/drivers/media/platform/s5p-tv/mixer.h
45050@@ -156,7 +156,7 @@ struct mxr_layer {
45051 /** layer index (unique identifier) */
45052 int idx;
45053 /** callbacks for layer methods */
45054- struct mxr_layer_ops ops;
45055+ struct mxr_layer_ops *ops;
45056 /** format array */
45057 const struct mxr_format **fmt_array;
45058 /** size of format array */
45059diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
45060index 74344c7..a39e70e 100644
45061--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
45062+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
45063@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
45064 {
45065 struct mxr_layer *layer;
45066 int ret;
45067- struct mxr_layer_ops ops = {
45068+ static struct mxr_layer_ops ops = {
45069 .release = mxr_graph_layer_release,
45070 .buffer_set = mxr_graph_buffer_set,
45071 .stream_set = mxr_graph_stream_set,
45072diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
45073index b713403..53cb5ad 100644
45074--- a/drivers/media/platform/s5p-tv/mixer_reg.c
45075+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
45076@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
45077 layer->update_buf = next;
45078 }
45079
45080- layer->ops.buffer_set(layer, layer->update_buf);
45081+ layer->ops->buffer_set(layer, layer->update_buf);
45082
45083 if (done && done != layer->shadow_buf)
45084 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
45085diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
45086index b4d2696..91df48e 100644
45087--- a/drivers/media/platform/s5p-tv/mixer_video.c
45088+++ b/drivers/media/platform/s5p-tv/mixer_video.c
45089@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
45090 layer->geo.src.height = layer->geo.src.full_height;
45091
45092 mxr_geometry_dump(mdev, &layer->geo);
45093- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45094+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45095 mxr_geometry_dump(mdev, &layer->geo);
45096 }
45097
45098@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
45099 layer->geo.dst.full_width = mbus_fmt.width;
45100 layer->geo.dst.full_height = mbus_fmt.height;
45101 layer->geo.dst.field = mbus_fmt.field;
45102- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45103+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45104
45105 mxr_geometry_dump(mdev, &layer->geo);
45106 }
45107@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
45108 /* set source size to highest accepted value */
45109 geo->src.full_width = max(geo->dst.full_width, pix->width);
45110 geo->src.full_height = max(geo->dst.full_height, pix->height);
45111- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45112+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45113 mxr_geometry_dump(mdev, &layer->geo);
45114 /* set cropping to total visible screen */
45115 geo->src.width = pix->width;
45116@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
45117 geo->src.x_offset = 0;
45118 geo->src.y_offset = 0;
45119 /* assure consistency of geometry */
45120- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
45121+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
45122 mxr_geometry_dump(mdev, &layer->geo);
45123 /* set full size to lowest possible value */
45124 geo->src.full_width = 0;
45125 geo->src.full_height = 0;
45126- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45127+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45128 mxr_geometry_dump(mdev, &layer->geo);
45129
45130 /* returning results */
45131@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
45132 target->width = s->r.width;
45133 target->height = s->r.height;
45134
45135- layer->ops.fix_geometry(layer, stage, s->flags);
45136+ layer->ops->fix_geometry(layer, stage, s->flags);
45137
45138 /* retrieve update selection rectangle */
45139 res.left = target->x_offset;
45140@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
45141 mxr_output_get(mdev);
45142
45143 mxr_layer_update_output(layer);
45144- layer->ops.format_set(layer);
45145+ layer->ops->format_set(layer);
45146 /* enabling layer in hardware */
45147 spin_lock_irqsave(&layer->enq_slock, flags);
45148 layer->state = MXR_LAYER_STREAMING;
45149 spin_unlock_irqrestore(&layer->enq_slock, flags);
45150
45151- layer->ops.stream_set(layer, MXR_ENABLE);
45152+ layer->ops->stream_set(layer, MXR_ENABLE);
45153 mxr_streamer_get(mdev);
45154
45155 return 0;
45156@@ -1030,7 +1030,7 @@ static void stop_streaming(struct vb2_queue *vq)
45157 spin_unlock_irqrestore(&layer->enq_slock, flags);
45158
45159 /* disabling layer in hardware */
45160- layer->ops.stream_set(layer, MXR_DISABLE);
45161+ layer->ops->stream_set(layer, MXR_DISABLE);
45162 /* remove one streamer */
45163 mxr_streamer_put(mdev);
45164 /* allow changes in output configuration */
45165@@ -1068,8 +1068,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
45166
45167 void mxr_layer_release(struct mxr_layer *layer)
45168 {
45169- if (layer->ops.release)
45170- layer->ops.release(layer);
45171+ if (layer->ops->release)
45172+ layer->ops->release(layer);
45173 }
45174
45175 void mxr_base_layer_release(struct mxr_layer *layer)
45176@@ -1095,7 +1095,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
45177
45178 layer->mdev = mdev;
45179 layer->idx = idx;
45180- layer->ops = *ops;
45181+ layer->ops = ops;
45182
45183 spin_lock_init(&layer->enq_slock);
45184 INIT_LIST_HEAD(&layer->enq_list);
45185diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45186index c9388c4..ce71ece 100644
45187--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45188+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45189@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
45190 {
45191 struct mxr_layer *layer;
45192 int ret;
45193- struct mxr_layer_ops ops = {
45194+ static struct mxr_layer_ops ops = {
45195 .release = mxr_vp_layer_release,
45196 .buffer_set = mxr_vp_buffer_set,
45197 .stream_set = mxr_vp_stream_set,
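[annotation] The s5p-tv hunks above change struct mxr_layer to reference a shared, function-local static ops table instead of embedding a by-value copy, so every `layer->ops.fn(...)` call site becomes `layer->ops->fn(...)` and the table becomes a single object the constify plugin can treat as read-only:

	layer->ops = ops;	/* pointer assignment; was: layer->ops = *ops; */
	layer->ops->buffer_set(layer, layer->update_buf);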
45198diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
45199index 82affae..42833ec 100644
45200--- a/drivers/media/radio/radio-cadet.c
45201+++ b/drivers/media/radio/radio-cadet.c
45202@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45203 unsigned char readbuf[RDS_BUFFER];
45204 int i = 0;
45205
45206+ if (count > RDS_BUFFER)
45207+ return -EFAULT;
45208 mutex_lock(&dev->lock);
45209 if (dev->rdsstat == 0)
45210 cadet_start_rds(dev);
45211@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45212 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
45213 mutex_unlock(&dev->lock);
45214
45215- if (i && copy_to_user(data, readbuf, i))
45216- return -EFAULT;
45217+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
45218+ i = -EFAULT;
45219+
45220 return i;
45221 }
45222
45223diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
45224index 5236035..c622c74 100644
45225--- a/drivers/media/radio/radio-maxiradio.c
45226+++ b/drivers/media/radio/radio-maxiradio.c
45227@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
45228 /* TEA5757 pin mappings */
45229 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
45230
45231-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
45232+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
45233
45234 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
45235 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
45236diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
45237index 050b3bb..79f62b9 100644
45238--- a/drivers/media/radio/radio-shark.c
45239+++ b/drivers/media/radio/radio-shark.c
45240@@ -79,7 +79,7 @@ struct shark_device {
45241 u32 last_val;
45242 };
45243
45244-static atomic_t shark_instance = ATOMIC_INIT(0);
45245+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45246
45247 static void shark_write_val(struct snd_tea575x *tea, u32 val)
45248 {
45249diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
45250index 8654e0d..0608a64 100644
45251--- a/drivers/media/radio/radio-shark2.c
45252+++ b/drivers/media/radio/radio-shark2.c
45253@@ -74,7 +74,7 @@ struct shark_device {
45254 u8 *transfer_buffer;
45255 };
45256
45257-static atomic_t shark_instance = ATOMIC_INIT(0);
45258+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45259
45260 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
45261 {
45262diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
45263index dccf586..d5db411 100644
45264--- a/drivers/media/radio/radio-si476x.c
45265+++ b/drivers/media/radio/radio-si476x.c
45266@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
45267 struct si476x_radio *radio;
45268 struct v4l2_ctrl *ctrl;
45269
45270- static atomic_t instance = ATOMIC_INIT(0);
45271+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
45272
45273 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
45274 if (!radio)
45275diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
45276index 704397f..4d05977 100644
45277--- a/drivers/media/radio/wl128x/fmdrv_common.c
45278+++ b/drivers/media/radio/wl128x/fmdrv_common.c
45279@@ -71,7 +71,7 @@ module_param(default_rds_buf, uint, 0444);
45280 MODULE_PARM_DESC(rds_buf, "RDS buffer entries");
45281
45282 /* Radio Nr */
45283-static u32 radio_nr = -1;
45284+static int radio_nr = -1;
45285 module_param(radio_nr, int, 0444);
45286 MODULE_PARM_DESC(radio_nr, "Radio Nr");
45287
45288diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45289index 9fd1527..8927230 100644
45290--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
45291+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45292@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
45293
45294 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
45295 {
45296- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
45297- char result[64];
45298- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
45299- sizeof(result), 0);
45300+ char *buf;
45301+ char *result;
45302+ int retval;
45303+
45304+ buf = kmalloc(2, GFP_KERNEL);
45305+ if (buf == NULL)
45306+ return -ENOMEM;
45307+ result = kmalloc(64, GFP_KERNEL);
45308+ if (result == NULL) {
45309+ kfree(buf);
45310+ return -ENOMEM;
45311+ }
45312+
45313+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
45314+ buf[1] = enable ? 1 : 0;
45315+
45316+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
45317+
45318+ kfree(buf);
45319+ kfree(result);
45320+ return retval;
45321 }
45322
45323 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
45324 {
45325- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
45326- char state[3];
45327- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
45328+ char *buf;
45329+ char *state;
45330+ int retval;
45331+
45332+ buf = kmalloc(2, GFP_KERNEL);
45333+ if (buf == NULL)
45334+ return -ENOMEM;
45335+ state = kmalloc(3, GFP_KERNEL);
45336+ if (state == NULL) {
45337+ kfree(buf);
45338+ return -ENOMEM;
45339+ }
45340+
45341+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
45342+ buf[1] = enable ? 0 : 1;
45343+
45344+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
45345+
45346+ kfree(buf);
45347+ kfree(state);
45348+ return retval;
45349 }
45350
45351 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45352 {
45353- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
45354- char state[3];
45355+ char *query;
45356+ char *state;
45357 int ret;
45358+ query = kmalloc(1, GFP_KERNEL);
45359+ if (query == NULL)
45360+ return -ENOMEM;
45361+ state = kmalloc(3, GFP_KERNEL);
45362+ if (state == NULL) {
45363+ kfree(query);
45364+ return -ENOMEM;
45365+ }
45366+
45367+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
45368
45369 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
45370
45371- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
45372- sizeof(state), 0);
45373+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
45374 if (ret < 0) {
45375 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
45376 "state info\n");
45377@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45378
45379 /* Copy this pointer as we are gonna need it in the release phase */
45380 cinergyt2_usb_device = adap->dev;
45381-
45382+ kfree(query);
45383+ kfree(state);
45384 return 0;
45385 }
45386
45387@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
45388 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45389 {
45390 struct cinergyt2_state *st = d->priv;
45391- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
45392+ u8 *key, *cmd;
45393 int i;
45394
45395+ cmd = kmalloc(1, GFP_KERNEL);
45396+ if (cmd == NULL)
45397+ return -ENOMEM;
45398+ key = kzalloc(5, GFP_KERNEL);
45399+ if (key == NULL) {
45400+ kfree(cmd);
45401+ return -ENOMEM;
45402+ }
45403+
45404+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
45405+
45406 *state = REMOTE_NO_KEY_PRESSED;
45407
45408- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
45409+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
45410 if (key[4] == 0xff) {
45411 /* key repeat */
45412 st->rc_counter++;
45413@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45414 *event = d->last_event;
45415 deb_rc("repeat key, event %x\n",
45416 *event);
45417- return 0;
45418+ goto out;
45419 }
45420 }
45421 deb_rc("repeated key (non repeatable)\n");
45422 }
45423- return 0;
45424+ goto out;
45425 }
45426
45427 /* hack to pass checksum on the custom field */
45428@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45429
45430 deb_rc("key: %*ph\n", 5, key);
45431 }
45432+out:
45433+ kfree(cmd);
45434+ kfree(key);
45435 return 0;
45436 }
45437
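[annotation] Buffers handed to dvb_usb_generic_rw() end up in USB transfers, and on-stack buffers are not DMA-safe (they also trip grsecurity's stack-object checks), so the cinergyT2 hunks move every command/result buffer to kmalloc()ed memory and free it on all exit paths. The recurring shape, as a sketch:

	u8 *cmd = kmalloc(1, GFP_KERNEL);	/* was: u8 cmd[1]; on the stack */
	if (cmd == NULL)
		return -ENOMEM;
	cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
	ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status, sizeof(*status), 0);
	kfree(cmd);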
45438diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45439index c890fe4..f9b2ae6 100644
45440--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45441+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45442@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
45443 fe_status_t *status)
45444 {
45445 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45446- struct dvbt_get_status_msg result;
45447- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45448+ struct dvbt_get_status_msg *result;
45449+ u8 *cmd;
45450 int ret;
45451
45452- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
45453- sizeof(result), 0);
45454+ cmd = kmalloc(1, GFP_KERNEL);
45455+ if (cmd == NULL)
45456+ return -ENOMEM;
45457+ result = kmalloc(sizeof(*result), GFP_KERNEL);
45458+ if (result == NULL) {
45459+ kfree(cmd);
45460+ return -ENOMEM;
45461+ }
45462+
45463+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45464+
45465+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
45466+ sizeof(*result), 0);
45467 if (ret < 0)
45468- return ret;
45469+ goto out;
45470
45471 *status = 0;
45472
45473- if (0xffff - le16_to_cpu(result.gain) > 30)
45474+ if (0xffff - le16_to_cpu(result->gain) > 30)
45475 *status |= FE_HAS_SIGNAL;
45476- if (result.lock_bits & (1 << 6))
45477+ if (result->lock_bits & (1 << 6))
45478 *status |= FE_HAS_LOCK;
45479- if (result.lock_bits & (1 << 5))
45480+ if (result->lock_bits & (1 << 5))
45481 *status |= FE_HAS_SYNC;
45482- if (result.lock_bits & (1 << 4))
45483+ if (result->lock_bits & (1 << 4))
45484 *status |= FE_HAS_CARRIER;
45485- if (result.lock_bits & (1 << 1))
45486+ if (result->lock_bits & (1 << 1))
45487 *status |= FE_HAS_VITERBI;
45488
45489 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
45490 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
45491 *status &= ~FE_HAS_LOCK;
45492
45493- return 0;
45494+out:
45495+ kfree(cmd);
45496+ kfree(result);
45497+ return ret;
45498 }
45499
45500 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
45501 {
45502 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45503- struct dvbt_get_status_msg status;
45504- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45505+ struct dvbt_get_status_msg *status;
45506+ char *cmd;
45507 int ret;
45508
45509- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45510- sizeof(status), 0);
45511+ cmd = kmalloc(1, GFP_KERNEL);
45512+ if (cmd == NULL)
45513+ return -ENOMEM;
45514+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45515+ if (status == NULL) {
45516+ kfree(cmd);
45517+ return -ENOMEM;
45518+ }
45519+
45520+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45521+
45522+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45523+ sizeof(*status), 0);
45524 if (ret < 0)
45525- return ret;
45526+ goto out;
45527
45528- *ber = le32_to_cpu(status.viterbi_error_rate);
45529+ *ber = le32_to_cpu(status->viterbi_error_rate);
45530+out:
45531+ kfree(cmd);
45532+ kfree(status);
45533 return 0;
45534 }
45535
45536 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
45537 {
45538 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45539- struct dvbt_get_status_msg status;
45540- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45541+ struct dvbt_get_status_msg *status;
45542+ u8 *cmd;
45543 int ret;
45544
45545- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
45546- sizeof(status), 0);
45547+ cmd = kmalloc(1, GFP_KERNEL);
45548+ if (cmd == NULL)
45549+ return -ENOMEM;
45550+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45551+ if (status == NULL) {
45552+ kfree(cmd);
45553+ return -ENOMEM;
45554+ }
45555+
45556+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45557+
45558+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
45559+ sizeof(*status), 0);
45560 if (ret < 0) {
45561 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
45562 ret);
45563- return ret;
45564+ goto out;
45565 }
45566- *unc = le32_to_cpu(status.uncorrected_block_count);
45567- return 0;
45568+ *unc = le32_to_cpu(status->uncorrected_block_count);
45569+
45570+out:
45571+ kfree(cmd);
45572+ kfree(status);
45573+ return ret;
45574 }
45575
45576 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
45577 u16 *strength)
45578 {
45579 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45580- struct dvbt_get_status_msg status;
45581- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45582+ struct dvbt_get_status_msg *status;
45583+ char *cmd;
45584 int ret;
45585
45586- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45587- sizeof(status), 0);
45588+ cmd = kmalloc(1, GFP_KERNEL);
45589+ if (cmd == NULL)
45590+ return -ENOMEM;
45591+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45592+ if (status == NULL) {
45593+ kfree(cmd);
45594+ return -ENOMEM;
45595+ }
45596+
45597+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45598+
45599+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45600+ sizeof(*status), 0);
45601 if (ret < 0) {
45602 err("cinergyt2_fe_read_signal_strength() Failed!"
45603 " (Error=%d)\n", ret);
45604- return ret;
45605+ goto out;
45606 }
45607- *strength = (0xffff - le16_to_cpu(status.gain));
45608+ *strength = (0xffff - le16_to_cpu(status->gain));
45609+
45610+out:
45611+ kfree(cmd);
45612+ kfree(status);
45613 return 0;
45614 }
45615
45616 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
45617 {
45618 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45619- struct dvbt_get_status_msg status;
45620- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45621+ struct dvbt_get_status_msg *status;
45622+ char *cmd;
45623 int ret;
45624
45625- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45626- sizeof(status), 0);
45627+ cmd = kmalloc(1, GFP_KERNEL);
45628+ if (cmd == NULL)
45629+ return -ENOMEM;
45630+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45631+ if (status == NULL) {
45632+ kfree(cmd);
45633+ return -ENOMEM;
45634+ }
45635+
45636+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45637+
45638+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45639+ sizeof(*status), 0);
45640 if (ret < 0) {
45641 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
45642- return ret;
45643+ goto out;
45644 }
45645- *snr = (status.snr << 8) | status.snr;
45646- return 0;
45647+ *snr = (status->snr << 8) | status->snr;
45648+
45649+out:
45650+ kfree(cmd);
45651+ kfree(status);
45652+ return ret;
45653 }
45654
45655 static int cinergyt2_fe_init(struct dvb_frontend *fe)
45656@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
45657 {
45658 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
45659 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45660- struct dvbt_set_parameters_msg param;
45661- char result[2];
45662+ struct dvbt_set_parameters_msg *param;
45663+ char *result;
45664 int err;
45665
45666- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
45667- param.tps = cpu_to_le16(compute_tps(fep));
45668- param.freq = cpu_to_le32(fep->frequency / 1000);
45669- param.flags = 0;
45670+ result = kmalloc(2, GFP_KERNEL);
45671+ if (result == NULL)
45672+ return -ENOMEM;
45673+ param = kmalloc(sizeof(*param), GFP_KERNEL);
45674+ if (param == NULL) {
45675+ kfree(result);
45676+ return -ENOMEM;
45677+ }
45678+
45679+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
45680+ param->tps = cpu_to_le16(compute_tps(fep));
45681+ param->freq = cpu_to_le32(fep->frequency / 1000);
45682+ param->flags = 0;
45683
45684 switch (fep->bandwidth_hz) {
45685 default:
45686 case 8000000:
45687- param.bandwidth = 8;
45688+ param->bandwidth = 8;
45689 break;
45690 case 7000000:
45691- param.bandwidth = 7;
45692+ param->bandwidth = 7;
45693 break;
45694 case 6000000:
45695- param.bandwidth = 6;
45696+ param->bandwidth = 6;
45697 break;
45698 }
45699
45700 err = dvb_usb_generic_rw(state->d,
45701- (char *)&param, sizeof(param),
45702- result, sizeof(result), 0);
45703+ (char *)param, sizeof(*param),
45704+ result, 2, 0);
45705 if (err < 0)
45706 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
45707
45708- return (err < 0) ? err : 0;
45709+ kfree(result);
45710+ kfree(param);
45711+ return err;
45712 }
45713
45714 static void cinergyt2_fe_release(struct dvb_frontend *fe)
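The five converted read_* callbacks above funnel their exits through a single out: label so that both allocations are released no matter where the function bails. Reduced to one buffer (do_transfer() is hypothetical), the idiom is:

#include <linux/slab.h>

static int do_transfer(void *dev, u8 *buf, int len);	/* hypothetical */

static int read_value(void *dev, u32 *val)
{
	u8 *buf;
	int ret;

	buf = kmalloc(16, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	ret = do_transfer(dev, buf, 16);
	if (ret < 0)
		goto out;	/* a bare return here would leak buf */

	*val = buf[0];
out:
	kfree(buf);
	return ret;		/* propagates the transfer error, or >= 0 */
}

Worth noting: cinergyt2_fe_read_ber() and cinergyt2_fe_read_signal_strength() above still end in return 0, so those two swallow a failed transfer, while read_status, read_unc_blocks and read_snr propagate ret as this sketch does.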
45715diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45716index 733a7ff..f8b52e3 100644
45717--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45718+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
45719@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
45720
45721 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
45722 {
45723- struct hexline hx;
45724- u8 reset;
45725+ struct hexline *hx;
45726+ u8 *reset;
45727 int ret,pos=0;
45728
45729+ reset = kmalloc(1, GFP_KERNEL);
45730+ if (reset == NULL)
45731+ return -ENOMEM;
45732+
45733+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
45734+ if (hx == NULL) {
45735+ kfree(reset);
45736+ return -ENOMEM;
45737+ }
45738+
45739 /* stop the CPU */
45740- reset = 1;
45741- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
45742+ reset[0] = 1;
45743+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
45744 err("could not stop the USB controller CPU.");
45745
45746- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
45747- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
45748- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
45749+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
45750+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
45751+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
45752
45753- if (ret != hx.len) {
45754+ if (ret != hx->len) {
45755 err("error while transferring firmware "
45756 "(transferred size: %d, block size: %d)",
45757- ret,hx.len);
45758+ ret,hx->len);
45759 ret = -EINVAL;
45760 break;
45761 }
45762 }
45763 if (ret < 0) {
45764 err("firmware download failed at %d with %d",pos,ret);
45765+ kfree(reset);
45766+ kfree(hx);
45767 return ret;
45768 }
45769
45770 if (ret == 0) {
45771 /* restart the CPU */
45772- reset = 0;
45773- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
45774+ reset[0] = 0;
45775+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
45776 err("could not restart the USB controller CPU.");
45777 ret = -EINVAL;
45778 }
45779 } else
45780 ret = -EIO;
45781
45782+ kfree(reset);
45783+ kfree(hx);
45784+
45785 return ret;
45786 }
45787 EXPORT_SYMBOL(usb_cypress_load_firmware);
45788diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
45789index 1a3df10..57997a5 100644
45790--- a/drivers/media/usb/dvb-usb/dw2102.c
45791+++ b/drivers/media/usb/dvb-usb/dw2102.c
45792@@ -118,7 +118,7 @@ struct su3000_state {
45793
45794 struct s6x0_state {
45795 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
45796-};
45797+} __no_const;
45798
45799 /* debug */
45800 static int dvb_usb_dw2102_debug;
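The one-line dw2102.c change tags struct s6x0_state with __no_const. grsecurity's constify GCC plugin turns structures consisting solely of function pointers into const objects; __no_const opts a structure out when one of its pointers is legitimately reassigned at runtime, as old_set_voltage is when the driver saves and wraps the frontend's set_voltage op. A plausible wiring of the annotation (the real definition lives in this patch's compiler.h changes, outside this excerpt):

#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))
#else
#define __no_const
#endif

struct s6x0_state {
	int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
} __no_const;	/* written at runtime, so must stay writable */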
45801diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
45802index 5801ae7..83f71fa 100644
45803--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
45804+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
45805@@ -87,8 +87,11 @@ struct technisat_usb2_state {
45806 static int technisat_usb2_i2c_access(struct usb_device *udev,
45807 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
45808 {
45809- u8 b[64];
45810- int ret, actual_length;
45811+ u8 *b = kmalloc(64, GFP_KERNEL);
45812+ int ret, actual_length, error = 0;
45813+
45814+ if (b == NULL)
45815+ return -ENOMEM;
45816
45817 deb_i2c("i2c-access: %02x, tx: ", device_addr);
45818 debug_dump(tx, txlen, deb_i2c);
45819@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45820
45821 if (ret < 0) {
45822 err("i2c-error: out failed %02x = %d", device_addr, ret);
45823- return -ENODEV;
45824+ error = -ENODEV;
45825+ goto out;
45826 }
45827
45828 ret = usb_bulk_msg(udev,
45829@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45830 b, 64, &actual_length, 1000);
45831 if (ret < 0) {
45832 err("i2c-error: in failed %02x = %d", device_addr, ret);
45833- return -ENODEV;
45834+ error = -ENODEV;
45835+ goto out;
45836 }
45837
45838 if (b[0] != I2C_STATUS_OK) {
45839@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45840 /* handle tuner-i2c-nak */
45841 if (!(b[0] == I2C_STATUS_NAK &&
45842 device_addr == 0x60
45843- /* && device_is_technisat_usb2 */))
45844- return -ENODEV;
45845+ /* && device_is_technisat_usb2 */)) {
45846+ error = -ENODEV;
45847+ goto out;
45848+ }
45849 }
45850
45851 deb_i2c("status: %d, ", b[0]);
45852@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
45853
45854 deb_i2c("\n");
45855
45856- return 0;
45857+out:
45858+ kfree(b);
45859+ return error;
45860 }
45861
45862 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
45863@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
45864 {
45865 int ret;
45866
45867- u8 led[8] = {
45868- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
45869- 0
45870- };
45871+ u8 *led = kzalloc(8, GFP_KERNEL);
45872+
45873+ if (led == NULL)
45874+ return -ENOMEM;
45875
45876 if (disable_led_control && state != TECH_LED_OFF)
45877 return 0;
45878
45879+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
45880+
45881 switch (state) {
45882 case TECH_LED_ON:
45883 led[1] = 0x82;
45884@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
45885 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
45886 USB_TYPE_VENDOR | USB_DIR_OUT,
45887 0, 0,
45888- led, sizeof(led), 500);
45889+ led, 8, 500);
45890
45891 mutex_unlock(&d->i2c_mutex);
45892+
45893+ kfree(led);
45894+
45895 return ret;
45896 }
45897
45898 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
45899 {
45900 int ret;
45901- u8 b = 0;
45902+ u8 *b = kzalloc(1, GFP_KERNEL);
45903+
45904+ if (b == NULL)
45905+ return -ENOMEM;
45906
45907 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
45908 return -EAGAIN;
45909@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
45910 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
45911 USB_TYPE_VENDOR | USB_DIR_OUT,
45912 (red << 8) | green, 0,
45913- &b, 1, 500);
45914+ b, 1, 500);
45915
45916 mutex_unlock(&d->i2c_mutex);
45917
45918+ kfree(b);
45919+
45920 return ret;
45921 }
45922
45923@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45924 struct dvb_usb_device_description **desc, int *cold)
45925 {
45926 int ret;
45927- u8 version[3];
45928+ u8 *version = kmalloc(3, GFP_KERNEL);
45929
45930 /* first select the interface */
45931 if (usb_set_interface(udev, 0, 1) != 0)
45932@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45933
45934 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
45935
45936+ if (version == NULL)
45937+ return 0;
45938+
45939 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
45940 GET_VERSION_INFO_VENDOR_REQUEST,
45941 USB_TYPE_VENDOR | USB_DIR_IN,
45942 0, 0,
45943- version, sizeof(version), 500);
45944+ version, 3, 500);
45945
45946 if (ret < 0)
45947 *cold = 1;
45948@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
45949 *cold = 0;
45950 }
45951
45952+ kfree(version);
45953+
45954 return 0;
45955 }
45956
45957@@ -594,10 +618,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
45958
45959 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
45960 {
45961- u8 buf[62], *b;
45962+ u8 *buf, *b;
45963 int ret;
45964 struct ir_raw_event ev;
45965
45966+ buf = kmalloc(62, GFP_KERNEL);
45967+
45968+ if (buf == NULL)
45969+ return -ENOMEM;
45970+
45971 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
45972 buf[1] = 0x08;
45973 buf[2] = 0x8f;
45974@@ -620,16 +649,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
45975 GET_IR_DATA_VENDOR_REQUEST,
45976 USB_TYPE_VENDOR | USB_DIR_IN,
45977 0x8080, 0,
45978- buf, sizeof(buf), 500);
45979+ buf, 62, 500);
45980
45981 unlock:
45982 mutex_unlock(&d->i2c_mutex);
45983
45984- if (ret < 0)
45985+ if (ret < 0) {
45986+ kfree(buf);
45987 return ret;
45988+ }
45989
45990- if (ret == 1)
45991+ if (ret == 1) {
45992+ kfree(buf);
45993 return 0; /* no key pressed */
45994+ }
45995
45996 /* decoding */
45997 b = buf+1;
45998@@ -656,6 +689,8 @@ unlock:
45999
46000 ir_raw_event_handle(d->rc_dev);
46001
46002+ kfree(buf);
46003+
46004 return 1;
46005 }
46006
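Two details of the technisat-usb2.c conversion above are easy to miss. First, u8 led[8] = { ..., 0 } zero-filled the array tail, so its heap replacement has to be kzalloc(), not kmalloc(). Second, every former sizeof(buf) becomes an explicit length (62, 8, 3, 1), because once buf is a pointer, sizeof(buf) yields the pointer width rather than the buffer size. For example:

#include <linux/slab.h>
#include <linux/string.h>

static void demo_sizes(void)
{
	u8 stack_buf[62];
	u8 *heap_buf = kzalloc(62, GFP_KERNEL);	/* zero-filled, like = { 0 } */

	if (heap_buf == NULL)
		return;

	memset(stack_buf, 0, sizeof(stack_buf));	/* sizeof is 62 here */
	memset(heap_buf, 0, 62);	/* sizeof(heap_buf) is only 4 or 8 */

	kfree(heap_buf);
}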
46007diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46008index af63543..0436f20 100644
46009--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46010+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46011@@ -429,7 +429,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
46012 * by passing a very big num_planes value */
46013 uplane = compat_alloc_user_space(num_planes *
46014 sizeof(struct v4l2_plane));
46015- kp->m.planes = (__force struct v4l2_plane *)uplane;
46016+ kp->m.planes = (__force_kernel struct v4l2_plane *)uplane;
46017
46018 while (--num_planes >= 0) {
46019 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
46020@@ -500,7 +500,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
46021 if (num_planes == 0)
46022 return 0;
46023
46024- uplane = (__force struct v4l2_plane __user *)kp->m.planes;
46025+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
46026 if (get_user(p, &up->m.planes))
46027 return -EFAULT;
46028 uplane32 = compat_ptr(p);
46029@@ -564,7 +564,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
46030 get_user(kp->flags, &up->flags) ||
46031 copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
46032 return -EFAULT;
46033- kp->base = (__force void *)compat_ptr(tmp);
46034+ kp->base = (__force_kernel void *)compat_ptr(tmp);
46035 return 0;
46036 }
46037
46038@@ -669,7 +669,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
46039 n * sizeof(struct v4l2_ext_control32)))
46040 return -EFAULT;
46041 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
46042- kp->controls = (__force struct v4l2_ext_control *)kcontrols;
46043+ kp->controls = (__force_kernel struct v4l2_ext_control *)kcontrols;
46044 while (--n >= 0) {
46045 u32 id;
46046
46047@@ -696,7 +696,7 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
46048 {
46049 struct v4l2_ext_control32 __user *ucontrols;
46050 struct v4l2_ext_control __user *kcontrols =
46051- (__force struct v4l2_ext_control __user *)kp->controls;
46052+ (struct v4l2_ext_control __force_user *)kp->controls;
46053 int n = kp->count;
46054 compat_caddr_t p;
46055
46056@@ -780,7 +780,7 @@ static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
46057 get_user(tmp, &up->edid) ||
46058 copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
46059 return -EFAULT;
46060- kp->edid = (__force u8 *)compat_ptr(tmp);
46061+ kp->edid = (__force_kernel u8 *)compat_ptr(tmp);
46062 return 0;
46063 }
46064
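The v4l2 compat hunks above refine sparse address-space casts. Upstream uses a blanket __force to silence a deliberate cast between the __user and kernel address spaces; this patch's __force_user/__force_kernel variants keep the direction explicit, so static checking (and the PaX usercopy instrumentation) still knows which side of the boundary the resulting pointer belongs to. An illustrative definition, modeled on the pattern used here:

#ifdef __CHECKER__
# define __force_user	__force __user
# define __force_kernel	__force __kernel
#else
# define __force_user
# define __force_kernel
#endif

static void __user *to_user_ptr(unsigned long uaddr)
{
	return (void __force_user *)uaddr;	/* result carries __user */
}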
46065diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
46066index 015f92a..59e311e 100644
46067--- a/drivers/media/v4l2-core/v4l2-device.c
46068+++ b/drivers/media/v4l2-core/v4l2-device.c
46069@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
46070 EXPORT_SYMBOL_GPL(v4l2_device_put);
46071
46072 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
46073- atomic_t *instance)
46074+ atomic_unchecked_t *instance)
46075 {
46076- int num = atomic_inc_return(instance) - 1;
46077+ int num = atomic_inc_return_unchecked(instance) - 1;
46078 int len = strlen(basename);
46079
46080 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
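The v4l2-device.c hunk shows the other recurring conversion in this patch: atomic_t to atomic_unchecked_t. PAX_REFCOUNT traps on atomic overflow to stop reference-count overflow exploits; counters whose wraparound is harmless, like this instance-numbering counter or the statistics fields later in this patch, are switched to the unchecked variants so they keep plain wrapping semantics and avoid false positives. On a kernel without the feature the unchecked API simply aliases the normal one, roughly:

/* Fallback mapping when PAX_REFCOUNT is not enabled (illustrative). */
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)		atomic_read(v)
#define atomic_set_unchecked(v, i)		atomic_set((v), (i))
#define atomic_inc_unchecked(v)			atomic_inc(v)
#define atomic_inc_return_unchecked(v)		atomic_inc_return(v)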
46081diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
46082index faac2f4..e39dcd9 100644
46083--- a/drivers/media/v4l2-core/v4l2-ioctl.c
46084+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
46085@@ -2151,7 +2151,8 @@ struct v4l2_ioctl_info {
46086 struct file *file, void *fh, void *p);
46087 } u;
46088 void (*debug)(const void *arg, bool write_only);
46089-};
46090+} __do_const;
46091+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
46092
46093 /* This control needs a priority check */
46094 #define INFO_FL_PRIO (1 << 0)
46095@@ -2335,7 +2336,7 @@ static long __video_do_ioctl(struct file *file,
46096 struct video_device *vfd = video_devdata(file);
46097 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
46098 bool write_only = false;
46099- struct v4l2_ioctl_info default_info;
46100+ v4l2_ioctl_info_no_const default_info;
46101 const struct v4l2_ioctl_info *info;
46102 void *fh = file->private_data;
46103 struct v4l2_fh *vfh = NULL;
46104@@ -2422,7 +2423,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
46105 ret = -EINVAL;
46106 break;
46107 }
46108- *user_ptr = (void __user *)buf->m.planes;
46109+ *user_ptr = (void __force_user *)buf->m.planes;
46110 *kernel_ptr = (void **)&buf->m.planes;
46111 *array_size = sizeof(struct v4l2_plane) * buf->length;
46112 ret = 1;
46113@@ -2439,7 +2440,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
46114 ret = -EINVAL;
46115 break;
46116 }
46117- *user_ptr = (void __user *)edid->edid;
46118+ *user_ptr = (void __force_user *)edid->edid;
46119 *kernel_ptr = (void **)&edid->edid;
46120 *array_size = edid->blocks * 128;
46121 ret = 1;
46122@@ -2457,7 +2458,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
46123 ret = -EINVAL;
46124 break;
46125 }
46126- *user_ptr = (void __user *)ctrls->controls;
46127+ *user_ptr = (void __force_user *)ctrls->controls;
46128 *kernel_ptr = (void **)&ctrls->controls;
46129 *array_size = sizeof(struct v4l2_ext_control)
46130 * ctrls->count;
46131@@ -2558,7 +2559,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
46132 }
46133
46134 if (has_array_args) {
46135- *kernel_ptr = (void __force *)user_ptr;
46136+ *kernel_ptr = (void __force_kernel *)user_ptr;
46137 if (copy_to_user(user_ptr, mbuf, array_size))
46138 err = -EFAULT;
46139 goto out_array_args;
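The v4l2-ioctl.c change pairs the two constify annotations: __do_const asks the plugin to force the ioctl table into read-only memory even though it mixes data and function pointers, while the typedef with __no_const creates a writable alias type for the single on-stack copy (default_info) that must still be built at runtime. Reduced to essentials, with illustrative names:

#include <linux/fs.h>

struct demo_ioctl_info {
	unsigned int ioctl;
	int (*func)(struct file *file, void *fh, void *p);
} __do_const;
typedef struct demo_ioctl_info __no_const demo_ioctl_info_no_const;

static int demo_handler(struct file *file, void *fh, void *p)
{
	return 0;
}

/* read-only after load under the constify plugin */
static const struct demo_ioctl_info demo_table[] = {
	{ .ioctl = 0, .func = demo_handler },
};

/* the one scratch copy assembled at runtime stays writable */
static void build_default(demo_ioctl_info_no_const *info)
{
	info->ioctl = 42;
	info->func  = demo_table[0].func;
}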
46140diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
46141index 24696f5..3637780 100644
46142--- a/drivers/memory/omap-gpmc.c
46143+++ b/drivers/memory/omap-gpmc.c
46144@@ -211,7 +211,6 @@ struct omap3_gpmc_regs {
46145 };
46146
46147 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
46148-static struct irq_chip gpmc_irq_chip;
46149 static int gpmc_irq_start;
46150
46151 static struct resource gpmc_mem_root;
46152@@ -939,6 +938,17 @@ static void gpmc_irq_noop(struct irq_data *data) { }
46153
46154 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
46155
46156+static struct irq_chip gpmc_irq_chip = {
46157+ .name = "gpmc",
46158+ .irq_startup = gpmc_irq_noop_ret,
46159+ .irq_enable = gpmc_irq_enable,
46160+ .irq_disable = gpmc_irq_disable,
46161+ .irq_shutdown = gpmc_irq_noop,
46162+ .irq_ack = gpmc_irq_noop,
46163+ .irq_mask = gpmc_irq_noop,
46164+ .irq_unmask = gpmc_irq_noop,
46165+};
46166+
46167 static int gpmc_setup_irq(void)
46168 {
46169 int i;
46170@@ -953,15 +963,6 @@ static int gpmc_setup_irq(void)
46171 return gpmc_irq_start;
46172 }
46173
46174- gpmc_irq_chip.name = "gpmc";
46175- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
46176- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
46177- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
46178- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
46179- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
46180- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
46181- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
46182-
46183 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
46184 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
46185
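The omap-gpmc.c change is the mirror image of the pax_open_kernel() edits elsewhere in this patch: rather than permitting runtime writes to the irq_chip, it removes them, hoisting every field into a static initializer so the object never has to be written after compile time and can be constified. The general shape:

#include <linux/irq.h>

static void demo_noop(struct irq_data *data) { }

/* Before: writable object, patched field by field in setup code.
 *
 *	static struct irq_chip demo_chip;
 *	...
 *	demo_chip.name = "demo";
 *	demo_chip.irq_mask = demo_noop;
 *
 * After: fully built at compile time, eligible for read-only data. */
static struct irq_chip demo_chip = {
	.name     = "demo",
	.irq_mask = demo_noop,
};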
46186diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
46187index 187f836..679544b 100644
46188--- a/drivers/message/fusion/mptbase.c
46189+++ b/drivers/message/fusion/mptbase.c
46190@@ -6746,8 +6746,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
46191 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
46192 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
46193
46194+#ifdef CONFIG_GRKERNSEC_HIDESYM
46195+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
46196+#else
46197 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
46198 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
46199+#endif
46200+
46201 /*
46202 * Rounding UP to nearest 4-kB boundary here...
46203 */
46204@@ -6760,7 +6765,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
46205 ioc->facts.GlobalCredits);
46206
46207 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
46208+#ifdef CONFIG_GRKERNSEC_HIDESYM
46209+ NULL, NULL);
46210+#else
46211 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
46212+#endif
46213 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
46214 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
46215 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
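The mptbase.c hunks are CONFIG_GRKERNSEC_HIDESYM at work: kernel and DMA addresses in world-readable /proc output are useful leak primitives against KASLR and heap layouts, so under that option the driver prints NULL instead. Mainline addresses the same problem with the %pK format specifier and the kptr_restrict sysctl; an equivalent rendering of the first hunk would be roughly:

/* %pK prints zeros unless the reader is privileged enough under the
 * current kptr_restrict setting. */
seq_printf(m, "  RequestFrames @ %pK (Dma @ %pK)\n",
	   (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);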
46216diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
46217index 5bdaae1..eced16f 100644
46218--- a/drivers/message/fusion/mptsas.c
46219+++ b/drivers/message/fusion/mptsas.c
46220@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
46221 return 0;
46222 }
46223
46224+static inline void
46225+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
46226+{
46227+ if (phy_info->port_details) {
46228+ phy_info->port_details->rphy = rphy;
46229+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
46230+ ioc->name, rphy));
46231+ }
46232+
46233+ if (rphy) {
46234+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
46235+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
46236+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
46237+ ioc->name, rphy, rphy->dev.release));
46238+ }
46239+}
46240+
46241 /* no mutex */
46242 static void
46243 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
46244@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
46245 return NULL;
46246 }
46247
46248-static inline void
46249-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
46250-{
46251- if (phy_info->port_details) {
46252- phy_info->port_details->rphy = rphy;
46253- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
46254- ioc->name, rphy));
46255- }
46256-
46257- if (rphy) {
46258- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
46259- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
46260- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
46261- ioc->name, rphy, rphy->dev.release));
46262- }
46263-}
46264-
46265 static inline struct sas_port *
46266 mptsas_get_port(struct mptsas_phyinfo *phy_info)
46267 {
46268diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
46269index b7d87cd..3fb36da 100644
46270--- a/drivers/message/i2o/i2o_proc.c
46271+++ b/drivers/message/i2o/i2o_proc.c
46272@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
46273 "Array Controller Device"
46274 };
46275
46276-static char *chtostr(char *tmp, u8 *chars, int n)
46277-{
46278- tmp[0] = 0;
46279- return strncat(tmp, (char *)chars, n);
46280-}
46281-
46282 static int i2o_report_query_status(struct seq_file *seq, int block_status,
46283 char *group)
46284 {
46285@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
46286 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
46287 {
46288 struct i2o_controller *c = (struct i2o_controller *)seq->private;
46289- static u32 work32[5];
46290- static u8 *work8 = (u8 *) work32;
46291- static u16 *work16 = (u16 *) work32;
46292+ u32 work32[5];
46293+ u8 *work8 = (u8 *) work32;
46294+ u16 *work16 = (u16 *) work32;
46295 int token;
46296 u32 hwcap;
46297
46298@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
46299 } *result;
46300
46301 i2o_exec_execute_ddm_table ddm_table;
46302- char tmp[28 + 1];
46303
46304 result = kmalloc(sizeof(*result), GFP_KERNEL);
46305 if (!result)
46306@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
46307
46308 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
46309 seq_printf(seq, "%-#8x", ddm_table.module_id);
46310- seq_printf(seq, "%-29s",
46311- chtostr(tmp, ddm_table.module_name_version, 28));
46312+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
46313 seq_printf(seq, "%9d ", ddm_table.data_size);
46314 seq_printf(seq, "%8d", ddm_table.code_size);
46315
46316@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
46317
46318 i2o_driver_result_table *result;
46319 i2o_driver_store_table *dst;
46320- char tmp[28 + 1];
46321
46322 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
46323 if (result == NULL)
46324@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
46325
46326 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
46327 seq_printf(seq, "%-#8x", dst->module_id);
46328- seq_printf(seq, "%-29s",
46329- chtostr(tmp, dst->module_name_version, 28));
46330- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
46331+ seq_printf(seq, "%-.28s", dst->module_name_version);
46332+ seq_printf(seq, "%-.8s", dst->date);
46333 seq_printf(seq, "%8d ", dst->module_size);
46334 seq_printf(seq, "%8d ", dst->mpb_size);
46335 seq_printf(seq, "0x%04x", dst->module_flags);
46336@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
46337 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
46338 {
46339 struct i2o_device *d = (struct i2o_device *)seq->private;
46340- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
46341+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
46342 // == (allow) 512d bytes (max)
46343- static u16 *work16 = (u16 *) work32;
46344+ u16 *work16 = (u16 *) work32;
46345 int token;
46346- char tmp[16 + 1];
46347
46348 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
46349
46350@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
46351 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
46352 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
46353 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
46354- seq_printf(seq, "Vendor info : %s\n",
46355- chtostr(tmp, (u8 *) (work32 + 2), 16));
46356- seq_printf(seq, "Product info : %s\n",
46357- chtostr(tmp, (u8 *) (work32 + 6), 16));
46358- seq_printf(seq, "Description : %s\n",
46359- chtostr(tmp, (u8 *) (work32 + 10), 16));
46360- seq_printf(seq, "Product rev. : %s\n",
46361- chtostr(tmp, (u8 *) (work32 + 14), 8));
46362+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
46363+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
46364+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
46365+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
46366
46367 seq_printf(seq, "Serial number : ");
46368 print_serial_number(seq, (u8 *) (work32 + 16),
46369@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
46370 u8 pad[256]; // allow up to 256 byte (max) serial number
46371 } result;
46372
46373- char tmp[24 + 1];
46374-
46375 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
46376
46377 if (token < 0) {
46378@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
46379 }
46380
46381 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
46382- seq_printf(seq, "Module name : %s\n",
46383- chtostr(tmp, result.module_name, 24));
46384- seq_printf(seq, "Module revision : %s\n",
46385- chtostr(tmp, result.module_rev, 8));
46386+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
46387+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
46388
46389 seq_printf(seq, "Serial number : ");
46390 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
46391@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46392 u8 instance_number[4];
46393 } result;
46394
46395- char tmp[64 + 1];
46396-
46397 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
46398
46399 if (token < 0) {
46400@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46401 return 0;
46402 }
46403
46404- seq_printf(seq, "Device name : %s\n",
46405- chtostr(tmp, result.device_name, 64));
46406- seq_printf(seq, "Service name : %s\n",
46407- chtostr(tmp, result.service_name, 64));
46408- seq_printf(seq, "Physical name : %s\n",
46409- chtostr(tmp, result.physical_location, 64));
46410- seq_printf(seq, "Instance number : %s\n",
46411- chtostr(tmp, result.instance_number, 4));
46412+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
46413+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
46414+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
46415+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
46416
46417 return 0;
46418 }
46419@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46420 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
46421 {
46422 struct i2o_device *d = (struct i2o_device *)seq->private;
46423- static u32 work32[12];
46424- static u16 *work16 = (u16 *) work32;
46425- static u8 *work8 = (u8 *) work32;
46426+ u32 work32[12];
46427+ u16 *work16 = (u16 *) work32;
46428+ u8 *work8 = (u8 *) work32;
46429 int token;
46430
46431 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
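The i2o_proc.c hunks make two independent fixes. The chtostr() helper existed only to bound-copy fixed-width, possibly non-NUL-terminated firmware strings into a temporary before printing; a printf precision does the same job with no copy, since %.Ns reads at most N bytes whether or not a NUL appears. Separately, the static qualifiers on the scratch arrays (work32 and friends) are dropped, because a static buffer is shared by every concurrent reader of the proc file and races; per-call stack storage is the correct scope. The precision trick in isolation:

#include <linux/seq_file.h>

/* name24 is a fixed 24-byte field that need not be NUL-terminated,
 * as in the IOP parameter blocks above. */
static void print_module_name(struct seq_file *seq, const u8 *name24)
{
	seq_printf(seq, "Module name : %.24s\n", name24);	/* reads <= 24 bytes */
}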
46432diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
46433index 92752fb..a7494f6 100644
46434--- a/drivers/message/i2o/iop.c
46435+++ b/drivers/message/i2o/iop.c
46436@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
46437
46438 spin_lock_irqsave(&c->context_list_lock, flags);
46439
46440- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
46441- atomic_inc(&c->context_list_counter);
46442+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
46443+ atomic_inc_unchecked(&c->context_list_counter);
46444
46445- entry->context = atomic_read(&c->context_list_counter);
46446+ entry->context = atomic_read_unchecked(&c->context_list_counter);
46447
46448 list_add(&entry->list, &c->context_list);
46449
46450@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
46451
46452 #if BITS_PER_LONG == 64
46453 spin_lock_init(&c->context_list_lock);
46454- atomic_set(&c->context_list_counter, 0);
46455+ atomic_set_unchecked(&c->context_list_counter, 0);
46456 INIT_LIST_HEAD(&c->context_list);
46457 #endif
46458
46459diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
46460index 9a8e185..27ff17d 100644
46461--- a/drivers/mfd/ab8500-debugfs.c
46462+++ b/drivers/mfd/ab8500-debugfs.c
46463@@ -100,7 +100,7 @@ static int irq_last;
46464 static u32 *irq_count;
46465 static int num_irqs;
46466
46467-static struct device_attribute **dev_attr;
46468+static device_attribute_no_const **dev_attr;
46469 static char **event_name;
46470
46471 static u8 avg_sample = SAMPLE_16;
46472diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
46473index c880c89..45a7c68 100644
46474--- a/drivers/mfd/max8925-i2c.c
46475+++ b/drivers/mfd/max8925-i2c.c
46476@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
46477 const struct i2c_device_id *id)
46478 {
46479 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
46480- static struct max8925_chip *chip;
46481+ struct max8925_chip *chip;
46482 struct device_node *node = client->dev.of_node;
46483
46484 if (node && !pdata) {
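The max8925-i2c.c one-liner (tps65910.c just below and the cfi_cmdset_0020.c writev path later get the same class of fix) repairs a latent bug rather than hardening anything: a static local in a probe routine means all probed devices share one variable, which only works while exactly one device exists. An ordinary automatic variable gives each probe call its own, e.g.:

#include <linux/i2c.h>
#include <linux/slab.h>

struct demo_chip { int id; };	/* stand-in for max8925_chip */

static int demo_probe(struct i2c_client *client,
		      const struct i2c_device_id *id)
{
	struct demo_chip *chip;	/* automatic: one per probed device */

	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
	if (chip == NULL)
		return -ENOMEM;

	i2c_set_clientdata(client, chip);
	return 0;
}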
46485diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
46486index 7612d89..70549c2 100644
46487--- a/drivers/mfd/tps65910.c
46488+++ b/drivers/mfd/tps65910.c
46489@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
46490 struct tps65910_platform_data *pdata)
46491 {
46492 int ret = 0;
46493- static struct regmap_irq_chip *tps6591x_irqs_chip;
46494+ struct regmap_irq_chip *tps6591x_irqs_chip;
46495
46496 if (!irq) {
46497 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
46498diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
46499index 1b772ef..01e77d33 100644
46500--- a/drivers/mfd/twl4030-irq.c
46501+++ b/drivers/mfd/twl4030-irq.c
46502@@ -34,6 +34,7 @@
46503 #include <linux/of.h>
46504 #include <linux/irqdomain.h>
46505 #include <linux/i2c/twl.h>
46506+#include <asm/pgtable.h>
46507
46508 #include "twl-core.h"
46509
46510@@ -729,10 +730,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
46511 * Install an irq handler for each of the SIH modules;
46512 * clone dummy irq_chip since PIH can't *do* anything
46513 */
46514- twl4030_irq_chip = dummy_irq_chip;
46515- twl4030_irq_chip.name = "twl4030";
46516+ pax_open_kernel();
46517+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
46518+ *(const char **)&twl4030_irq_chip.name = "twl4030";
46519
46520- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46521+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46522+ pax_close_kernel();
46523
46524 for (i = irq_base; i < irq_end; i++) {
46525 irq_set_chip_and_handler(i, &twl4030_irq_chip,
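twl4030-irq.c shows the standard pattern this patch uses when a constified object genuinely must be written once at init time: bracket the writes with pax_open_kernel()/pax_close_kernel(), which temporarily lift kernel write protection (on x86, the CR0.WP bit), and cast through *(void **)& to defeat the const qualifier. On a non-PaX build both helpers are no-ops. In general form, assuming the helpers this patch introduces:

#include <linux/irq.h>

static struct irq_chip my_chip;		/* const under constification */

static void patch_ops(void)
{
	pax_open_kernel();
	*(const char **)&my_chip.name = "demo";
	*(void **)&my_chip.irq_ack    = dummy_irq_chip.irq_ack;
	pax_close_kernel();
}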
46526diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
46527index 464419b..64bae8d 100644
46528--- a/drivers/misc/c2port/core.c
46529+++ b/drivers/misc/c2port/core.c
46530@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
46531 goto error_idr_alloc;
46532 c2dev->id = ret;
46533
46534- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46535+ pax_open_kernel();
46536+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46537+ pax_close_kernel();
46538
46539 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
46540 "c2port%d", c2dev->id);
46541diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
46542index 8385177..2f54635 100644
46543--- a/drivers/misc/eeprom/sunxi_sid.c
46544+++ b/drivers/misc/eeprom/sunxi_sid.c
46545@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
46546
46547 platform_set_drvdata(pdev, sid_data);
46548
46549- sid_bin_attr.size = sid_data->keysize;
46550+ pax_open_kernel();
46551+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
46552+ pax_close_kernel();
46553 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
46554 return -ENODEV;
46555
46556diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
46557index 36f5d52..32311c3 100644
46558--- a/drivers/misc/kgdbts.c
46559+++ b/drivers/misc/kgdbts.c
46560@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
46561 char before[BREAK_INSTR_SIZE];
46562 char after[BREAK_INSTR_SIZE];
46563
46564- probe_kernel_read(before, (char *)kgdbts_break_test,
46565+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
46566 BREAK_INSTR_SIZE);
46567 init_simple_test();
46568 ts.tst = plant_and_detach_test;
46569@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
46570 /* Activate test with initial breakpoint */
46571 if (!is_early)
46572 kgdb_breakpoint();
46573- probe_kernel_read(after, (char *)kgdbts_break_test,
46574+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
46575 BREAK_INSTR_SIZE);
46576 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
46577 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
46578diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
46579index 3ef4627..8d00486 100644
46580--- a/drivers/misc/lis3lv02d/lis3lv02d.c
46581+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
46582@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
46583 * the lid is closed. This leads to interrupts as soon as a little move
46584 * is done.
46585 */
46586- atomic_inc(&lis3->count);
46587+ atomic_inc_unchecked(&lis3->count);
46588
46589 wake_up_interruptible(&lis3->misc_wait);
46590 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
46591@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
46592 if (lis3->pm_dev)
46593 pm_runtime_get_sync(lis3->pm_dev);
46594
46595- atomic_set(&lis3->count, 0);
46596+ atomic_set_unchecked(&lis3->count, 0);
46597 return 0;
46598 }
46599
46600@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
46601 add_wait_queue(&lis3->misc_wait, &wait);
46602 while (true) {
46603 set_current_state(TASK_INTERRUPTIBLE);
46604- data = atomic_xchg(&lis3->count, 0);
46605+ data = atomic_xchg_unchecked(&lis3->count, 0);
46606 if (data)
46607 break;
46608
46609@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
46610 struct lis3lv02d, miscdev);
46611
46612 poll_wait(file, &lis3->misc_wait, wait);
46613- if (atomic_read(&lis3->count))
46614+ if (atomic_read_unchecked(&lis3->count))
46615 return POLLIN | POLLRDNORM;
46616 return 0;
46617 }
46618diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
46619index c439c82..1f20f57 100644
46620--- a/drivers/misc/lis3lv02d/lis3lv02d.h
46621+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
46622@@ -297,7 +297,7 @@ struct lis3lv02d {
46623 struct input_polled_dev *idev; /* input device */
46624 struct platform_device *pdev; /* platform device */
46625 struct regulator_bulk_data regulators[2];
46626- atomic_t count; /* interrupt count after last read */
46627+ atomic_unchecked_t count; /* interrupt count after last read */
46628 union axis_conversion ac; /* hw -> logical axis */
46629 int mapped_btns[3];
46630
46631diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
46632index 2f30bad..c4c13d0 100644
46633--- a/drivers/misc/sgi-gru/gruhandles.c
46634+++ b/drivers/misc/sgi-gru/gruhandles.c
46635@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
46636 unsigned long nsec;
46637
46638 nsec = CLKS2NSEC(clks);
46639- atomic_long_inc(&mcs_op_statistics[op].count);
46640- atomic_long_add(nsec, &mcs_op_statistics[op].total);
46641+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
46642+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
46643 if (mcs_op_statistics[op].max < nsec)
46644 mcs_op_statistics[op].max = nsec;
46645 }
46646diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
46647index 4f76359..cdfcb2e 100644
46648--- a/drivers/misc/sgi-gru/gruprocfs.c
46649+++ b/drivers/misc/sgi-gru/gruprocfs.c
46650@@ -32,9 +32,9 @@
46651
46652 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
46653
46654-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
46655+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
46656 {
46657- unsigned long val = atomic_long_read(v);
46658+ unsigned long val = atomic_long_read_unchecked(v);
46659
46660 seq_printf(s, "%16lu %s\n", val, id);
46661 }
46662@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
46663
46664 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
46665 for (op = 0; op < mcsop_last; op++) {
46666- count = atomic_long_read(&mcs_op_statistics[op].count);
46667- total = atomic_long_read(&mcs_op_statistics[op].total);
46668+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
46669+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
46670 max = mcs_op_statistics[op].max;
46671 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
46672 count ? total / count : 0, max);
46673diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
46674index 5c3ce24..4915ccb 100644
46675--- a/drivers/misc/sgi-gru/grutables.h
46676+++ b/drivers/misc/sgi-gru/grutables.h
46677@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
46678 * GRU statistics.
46679 */
46680 struct gru_stats_s {
46681- atomic_long_t vdata_alloc;
46682- atomic_long_t vdata_free;
46683- atomic_long_t gts_alloc;
46684- atomic_long_t gts_free;
46685- atomic_long_t gms_alloc;
46686- atomic_long_t gms_free;
46687- atomic_long_t gts_double_allocate;
46688- atomic_long_t assign_context;
46689- atomic_long_t assign_context_failed;
46690- atomic_long_t free_context;
46691- atomic_long_t load_user_context;
46692- atomic_long_t load_kernel_context;
46693- atomic_long_t lock_kernel_context;
46694- atomic_long_t unlock_kernel_context;
46695- atomic_long_t steal_user_context;
46696- atomic_long_t steal_kernel_context;
46697- atomic_long_t steal_context_failed;
46698- atomic_long_t nopfn;
46699- atomic_long_t asid_new;
46700- atomic_long_t asid_next;
46701- atomic_long_t asid_wrap;
46702- atomic_long_t asid_reuse;
46703- atomic_long_t intr;
46704- atomic_long_t intr_cbr;
46705- atomic_long_t intr_tfh;
46706- atomic_long_t intr_spurious;
46707- atomic_long_t intr_mm_lock_failed;
46708- atomic_long_t call_os;
46709- atomic_long_t call_os_wait_queue;
46710- atomic_long_t user_flush_tlb;
46711- atomic_long_t user_unload_context;
46712- atomic_long_t user_exception;
46713- atomic_long_t set_context_option;
46714- atomic_long_t check_context_retarget_intr;
46715- atomic_long_t check_context_unload;
46716- atomic_long_t tlb_dropin;
46717- atomic_long_t tlb_preload_page;
46718- atomic_long_t tlb_dropin_fail_no_asid;
46719- atomic_long_t tlb_dropin_fail_upm;
46720- atomic_long_t tlb_dropin_fail_invalid;
46721- atomic_long_t tlb_dropin_fail_range_active;
46722- atomic_long_t tlb_dropin_fail_idle;
46723- atomic_long_t tlb_dropin_fail_fmm;
46724- atomic_long_t tlb_dropin_fail_no_exception;
46725- atomic_long_t tfh_stale_on_fault;
46726- atomic_long_t mmu_invalidate_range;
46727- atomic_long_t mmu_invalidate_page;
46728- atomic_long_t flush_tlb;
46729- atomic_long_t flush_tlb_gru;
46730- atomic_long_t flush_tlb_gru_tgh;
46731- atomic_long_t flush_tlb_gru_zero_asid;
46732+ atomic_long_unchecked_t vdata_alloc;
46733+ atomic_long_unchecked_t vdata_free;
46734+ atomic_long_unchecked_t gts_alloc;
46735+ atomic_long_unchecked_t gts_free;
46736+ atomic_long_unchecked_t gms_alloc;
46737+ atomic_long_unchecked_t gms_free;
46738+ atomic_long_unchecked_t gts_double_allocate;
46739+ atomic_long_unchecked_t assign_context;
46740+ atomic_long_unchecked_t assign_context_failed;
46741+ atomic_long_unchecked_t free_context;
46742+ atomic_long_unchecked_t load_user_context;
46743+ atomic_long_unchecked_t load_kernel_context;
46744+ atomic_long_unchecked_t lock_kernel_context;
46745+ atomic_long_unchecked_t unlock_kernel_context;
46746+ atomic_long_unchecked_t steal_user_context;
46747+ atomic_long_unchecked_t steal_kernel_context;
46748+ atomic_long_unchecked_t steal_context_failed;
46749+ atomic_long_unchecked_t nopfn;
46750+ atomic_long_unchecked_t asid_new;
46751+ atomic_long_unchecked_t asid_next;
46752+ atomic_long_unchecked_t asid_wrap;
46753+ atomic_long_unchecked_t asid_reuse;
46754+ atomic_long_unchecked_t intr;
46755+ atomic_long_unchecked_t intr_cbr;
46756+ atomic_long_unchecked_t intr_tfh;
46757+ atomic_long_unchecked_t intr_spurious;
46758+ atomic_long_unchecked_t intr_mm_lock_failed;
46759+ atomic_long_unchecked_t call_os;
46760+ atomic_long_unchecked_t call_os_wait_queue;
46761+ atomic_long_unchecked_t user_flush_tlb;
46762+ atomic_long_unchecked_t user_unload_context;
46763+ atomic_long_unchecked_t user_exception;
46764+ atomic_long_unchecked_t set_context_option;
46765+ atomic_long_unchecked_t check_context_retarget_intr;
46766+ atomic_long_unchecked_t check_context_unload;
46767+ atomic_long_unchecked_t tlb_dropin;
46768+ atomic_long_unchecked_t tlb_preload_page;
46769+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
46770+ atomic_long_unchecked_t tlb_dropin_fail_upm;
46771+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
46772+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
46773+ atomic_long_unchecked_t tlb_dropin_fail_idle;
46774+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
46775+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
46776+ atomic_long_unchecked_t tfh_stale_on_fault;
46777+ atomic_long_unchecked_t mmu_invalidate_range;
46778+ atomic_long_unchecked_t mmu_invalidate_page;
46779+ atomic_long_unchecked_t flush_tlb;
46780+ atomic_long_unchecked_t flush_tlb_gru;
46781+ atomic_long_unchecked_t flush_tlb_gru_tgh;
46782+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
46783
46784- atomic_long_t copy_gpa;
46785- atomic_long_t read_gpa;
46786+ atomic_long_unchecked_t copy_gpa;
46787+ atomic_long_unchecked_t read_gpa;
46788
46789- atomic_long_t mesq_receive;
46790- atomic_long_t mesq_receive_none;
46791- atomic_long_t mesq_send;
46792- atomic_long_t mesq_send_failed;
46793- atomic_long_t mesq_noop;
46794- atomic_long_t mesq_send_unexpected_error;
46795- atomic_long_t mesq_send_lb_overflow;
46796- atomic_long_t mesq_send_qlimit_reached;
46797- atomic_long_t mesq_send_amo_nacked;
46798- atomic_long_t mesq_send_put_nacked;
46799- atomic_long_t mesq_page_overflow;
46800- atomic_long_t mesq_qf_locked;
46801- atomic_long_t mesq_qf_noop_not_full;
46802- atomic_long_t mesq_qf_switch_head_failed;
46803- atomic_long_t mesq_qf_unexpected_error;
46804- atomic_long_t mesq_noop_unexpected_error;
46805- atomic_long_t mesq_noop_lb_overflow;
46806- atomic_long_t mesq_noop_qlimit_reached;
46807- atomic_long_t mesq_noop_amo_nacked;
46808- atomic_long_t mesq_noop_put_nacked;
46809- atomic_long_t mesq_noop_page_overflow;
46810+ atomic_long_unchecked_t mesq_receive;
46811+ atomic_long_unchecked_t mesq_receive_none;
46812+ atomic_long_unchecked_t mesq_send;
46813+ atomic_long_unchecked_t mesq_send_failed;
46814+ atomic_long_unchecked_t mesq_noop;
46815+ atomic_long_unchecked_t mesq_send_unexpected_error;
46816+ atomic_long_unchecked_t mesq_send_lb_overflow;
46817+ atomic_long_unchecked_t mesq_send_qlimit_reached;
46818+ atomic_long_unchecked_t mesq_send_amo_nacked;
46819+ atomic_long_unchecked_t mesq_send_put_nacked;
46820+ atomic_long_unchecked_t mesq_page_overflow;
46821+ atomic_long_unchecked_t mesq_qf_locked;
46822+ atomic_long_unchecked_t mesq_qf_noop_not_full;
46823+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
46824+ atomic_long_unchecked_t mesq_qf_unexpected_error;
46825+ atomic_long_unchecked_t mesq_noop_unexpected_error;
46826+ atomic_long_unchecked_t mesq_noop_lb_overflow;
46827+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
46828+ atomic_long_unchecked_t mesq_noop_amo_nacked;
46829+ atomic_long_unchecked_t mesq_noop_put_nacked;
46830+ atomic_long_unchecked_t mesq_noop_page_overflow;
46831
46832 };
46833
46834@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
46835 tghop_invalidate, mcsop_last};
46836
46837 struct mcs_op_statistic {
46838- atomic_long_t count;
46839- atomic_long_t total;
46840+ atomic_long_unchecked_t count;
46841+ atomic_long_unchecked_t total;
46842 unsigned long max;
46843 };
46844
46845@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
46846
46847 #define STAT(id) do { \
46848 if (gru_options & OPT_STATS) \
46849- atomic_long_inc(&gru_stats.id); \
46850+ atomic_long_inc_unchecked(&gru_stats.id); \
46851 } while (0)
46852
46853 #ifdef CONFIG_SGI_GRU_DEBUG
46854diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
46855index c862cd4..0d176fe 100644
46856--- a/drivers/misc/sgi-xp/xp.h
46857+++ b/drivers/misc/sgi-xp/xp.h
46858@@ -288,7 +288,7 @@ struct xpc_interface {
46859 xpc_notify_func, void *);
46860 void (*received) (short, int, void *);
46861 enum xp_retval (*partid_to_nasids) (short, void *);
46862-};
46863+} __no_const;
46864
46865 extern struct xpc_interface xpc_interface;
46866
46867diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
46868index 01be66d..e3a0c7e 100644
46869--- a/drivers/misc/sgi-xp/xp_main.c
46870+++ b/drivers/misc/sgi-xp/xp_main.c
46871@@ -78,13 +78,13 @@ xpc_notloaded(void)
46872 }
46873
46874 struct xpc_interface xpc_interface = {
46875- (void (*)(int))xpc_notloaded,
46876- (void (*)(int))xpc_notloaded,
46877- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
46878- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
46879+ .connect = (void (*)(int))xpc_notloaded,
46880+ .disconnect = (void (*)(int))xpc_notloaded,
46881+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
46882+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
46883 void *))xpc_notloaded,
46884- (void (*)(short, int, void *))xpc_notloaded,
46885- (enum xp_retval(*)(short, void *))xpc_notloaded
46886+ .received = (void (*)(short, int, void *))xpc_notloaded,
46887+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
46888 };
46889 EXPORT_SYMBOL_GPL(xpc_interface);
46890
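The xp_main.c hunk converts a positional initializer to designated form. Beyond readability, this matters under grsecurity because the randomize-layout plugin shuffles structure fields, and a positional initializer binds values to the wrong members (or stops compiling) once the order changes. Compare, with an illustrative structure:

struct demo_ops {
	void (*connect)(int ch);
	void (*disconnect)(int ch);
};

static void on_connect(int ch)    { }
static void on_disconnect(int ch) { }

/* positional: breaks if the fields are ever reordered or randomized */
static struct demo_ops a = { on_connect, on_disconnect };

/* designated: robust against reordering and layout randomization */
static struct demo_ops b = {
	.connect    = on_connect,
	.disconnect = on_disconnect,
};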
46891diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
46892index b94d5f7..7f494c5 100644
46893--- a/drivers/misc/sgi-xp/xpc.h
46894+++ b/drivers/misc/sgi-xp/xpc.h
46895@@ -835,6 +835,7 @@ struct xpc_arch_operations {
46896 void (*received_payload) (struct xpc_channel *, void *);
46897 void (*notify_senders_of_disconnect) (struct xpc_channel *);
46898 };
46899+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
46900
46901 /* struct xpc_partition act_state values (for XPC HB) */
46902
46903@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
46904 /* found in xpc_main.c */
46905 extern struct device *xpc_part;
46906 extern struct device *xpc_chan;
46907-extern struct xpc_arch_operations xpc_arch_ops;
46908+extern xpc_arch_operations_no_const xpc_arch_ops;
46909 extern int xpc_disengage_timelimit;
46910 extern int xpc_disengage_timedout;
46911 extern int xpc_activate_IRQ_rcvd;
46912diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
46913index 82dc574..8539ab2 100644
46914--- a/drivers/misc/sgi-xp/xpc_main.c
46915+++ b/drivers/misc/sgi-xp/xpc_main.c
46916@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
46917 .notifier_call = xpc_system_die,
46918 };
46919
46920-struct xpc_arch_operations xpc_arch_ops;
46921+xpc_arch_operations_no_const xpc_arch_ops;
46922
46923 /*
46924 * Timer function to enforce the timelimit on the partition disengage.
46925@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
46926
46927 if (((die_args->trapnr == X86_TRAP_MF) ||
46928 (die_args->trapnr == X86_TRAP_XF)) &&
46929- !user_mode_vm(die_args->regs))
46930+ !user_mode(die_args->regs))
46931 xpc_die_deactivate();
46932
46933 break;
46934diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
46935index 4409d79..d7766d0 100644
46936--- a/drivers/mmc/card/block.c
46937+++ b/drivers/mmc/card/block.c
46938@@ -577,7 +577,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
46939 if (idata->ic.postsleep_min_us)
46940 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
46941
46942- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
46943+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
46944 err = -EFAULT;
46945 goto cmd_rel_host;
46946 }
46947diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
46948index 0d0f7a2..45b8d60 100644
46949--- a/drivers/mmc/host/dw_mmc.h
46950+++ b/drivers/mmc/host/dw_mmc.h
46951@@ -276,5 +276,5 @@ struct dw_mci_drv_data {
46952 int (*parse_dt)(struct dw_mci *host);
46953 int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
46954 struct dw_mci_tuning_data *tuning_data);
46955-};
46956+} __do_const;
46957 #endif /* _DW_MMC_H_ */
46958diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
46959index 8232e9a..7776006 100644
46960--- a/drivers/mmc/host/mmci.c
46961+++ b/drivers/mmc/host/mmci.c
46962@@ -1635,7 +1635,9 @@ static int mmci_probe(struct amba_device *dev,
46963 mmc->caps |= MMC_CAP_CMD23;
46964
46965 if (variant->busy_detect) {
46966- mmci_ops.card_busy = mmci_card_busy;
46967+ pax_open_kernel();
46968+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
46969+ pax_close_kernel();
46970 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
46971 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
46972 mmc->max_busy_timeout = 0;
46973diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
46974index 7c71dcd..74cb746 100644
46975--- a/drivers/mmc/host/omap_hsmmc.c
46976+++ b/drivers/mmc/host/omap_hsmmc.c
46977@@ -2120,7 +2120,9 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
46978
46979 if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) {
46980 dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n");
46981- omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
46982+ pax_open_kernel();
46983+ *(void **)&omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
46984+ pax_close_kernel();
46985 }
46986
46987 pm_runtime_enable(host->dev);
46988diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
46989index af1f7c0..00d368a 100644
46990--- a/drivers/mmc/host/sdhci-esdhc-imx.c
46991+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
46992@@ -989,9 +989,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
46993 host->mmc->caps |= MMC_CAP_1_8V_DDR;
46994 }
46995
46996- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
46997- sdhci_esdhc_ops.platform_execute_tuning =
46998+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
46999+ pax_open_kernel();
47000+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
47001 esdhc_executing_tuning;
47002+ pax_close_kernel();
47003+ }
47004
47005 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
47006 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
47007diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
47008index c45b893..fba0144 100644
47009--- a/drivers/mmc/host/sdhci-s3c.c
47010+++ b/drivers/mmc/host/sdhci-s3c.c
47011@@ -590,9 +590,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
47012 * we can use overriding functions instead of default.
47013 */
47014 if (sc->no_divider) {
47015- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
47016- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
47017- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
47018+ pax_open_kernel();
47019+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
47020+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
47021+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
47022+ pax_close_kernel();
47023 }
47024
47025 /* It supports additional host capabilities if needed */
47026diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
47027index 423666b..81ff5eb 100644
47028--- a/drivers/mtd/chips/cfi_cmdset_0020.c
47029+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
47030@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
47031 size_t totlen = 0, thislen;
47032 int ret = 0;
47033 size_t buflen = 0;
47034- static char *buffer;
47035+ char *buffer;
47036
47037 if (!ECCBUF_SIZE) {
47038 /* We should fall back to a general writev implementation.
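Dropping `static` from the local above matters because a function-scope static is shared by every caller: two concurrent cfi_staa_writev() invocations would hand out the same buffer. A small sketch of the hazard and of the fix's shape (an array stands in for the driver's allocated pointer; names are hypothetical):

	#include <stddef.h>
	#include <stdio.h>

	/* Non-reentrant: every caller shares the one static buffer. */
	static const char *format_id_shared(int id)
	{
		static char buf[32];

		snprintf(buf, sizeof(buf), "id-%d", id);
		return buf;		/* overwritten by the next caller */
	}

	/* Reentrant, as in the fixed hunk: storage is per-call. */
	static const char *format_id(int id, char *buf, size_t len)
	{
		snprintf(buf, len, "id-%d", id);
		return buf;
	}

	int main(void)
	{
		const char *a = format_id_shared(1);
		const char *b = format_id_shared(2);	/* clobbers a */

		printf("shared: a=%s b=%s (aliased)\n", a, b);

		char ba[32], bb[32];
		printf("local:  a=%s b=%s\n",
		       format_id(1, ba, sizeof(ba)),
		       format_id(2, bb, sizeof(bb)));
		return 0;
	}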
47039diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
47040index b3b7ca1..5dd4634 100644
47041--- a/drivers/mtd/nand/denali.c
47042+++ b/drivers/mtd/nand/denali.c
47043@@ -24,6 +24,7 @@
47044 #include <linux/slab.h>
47045 #include <linux/mtd/mtd.h>
47046 #include <linux/module.h>
47047+#include <linux/slab.h>
47048
47049 #include "denali.h"
47050
47051diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
47052index 4f3851a..f477a23 100644
47053--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
47054+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
47055@@ -386,7 +386,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
47056
47057 /* first try to map the upper buffer directly */
47058 if (virt_addr_valid(this->upper_buf) &&
47059- !object_is_on_stack(this->upper_buf)) {
47060+ !object_starts_on_stack(this->upper_buf)) {
47061 sg_init_one(sgl, this->upper_buf, this->upper_len);
47062 ret = dma_map_sg(this->dev, sgl, 1, dr);
47063 if (ret == 0)
47064diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
47065index 51b9d6a..52af9a7 100644
47066--- a/drivers/mtd/nftlmount.c
47067+++ b/drivers/mtd/nftlmount.c
47068@@ -24,6 +24,7 @@
47069 #include <asm/errno.h>
47070 #include <linux/delay.h>
47071 #include <linux/slab.h>
47072+#include <linux/sched.h>
47073 #include <linux/mtd/mtd.h>
47074 #include <linux/mtd/nand.h>
47075 #include <linux/mtd/nftl.h>
47076diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
47077index c23184a..4115c41 100644
47078--- a/drivers/mtd/sm_ftl.c
47079+++ b/drivers/mtd/sm_ftl.c
47080@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
47081 #define SM_CIS_VENDOR_OFFSET 0x59
47082 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
47083 {
47084- struct attribute_group *attr_group;
47085+ attribute_group_no_const *attr_group;
47086 struct attribute **attributes;
47087 struct sm_sysfs_attribute *vendor_attribute;
47088 char *vendor;
47089diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
47090index 7b11243..b3278a3 100644
47091--- a/drivers/net/bonding/bond_netlink.c
47092+++ b/drivers/net/bonding/bond_netlink.c
47093@@ -585,7 +585,7 @@ nla_put_failure:
47094 return -EMSGSIZE;
47095 }
47096
47097-struct rtnl_link_ops bond_link_ops __read_mostly = {
47098+struct rtnl_link_ops bond_link_ops = {
47099 .kind = "bond",
47100 .priv_size = sizeof(struct bonding),
47101 .setup = bond_setup,
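Every rtnl_link_ops definition in this patch loses its `__read_mostly` tag for the same reason: once the constify plugin makes the object const it belongs in .rodata, and pinning it into the writable .data..read_mostly section would be a section conflict. Sketch of the end state (names illustrative):

	#include <stdio.h>

	struct link_ops {
		const char *kind;
		void (*setup)(void);
	};

	static void demo_setup(void) { }

	/* Constified: lives in .rodata, so the writable read-mostly
	 * placement is dropped rather than fought. */
	static const struct link_ops demo_link_ops /* __read_mostly gone */ = {
		.kind  = "demo",
		.setup = demo_setup,
	};

	int main(void)
	{
		printf("%s\n", demo_link_ops.kind);
		return 0;
	}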
47102diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
47103index b3b922a..80bba38 100644
47104--- a/drivers/net/caif/caif_hsi.c
47105+++ b/drivers/net/caif/caif_hsi.c
47106@@ -1444,7 +1444,7 @@ err:
47107 return -ENODEV;
47108 }
47109
47110-static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
47111+static struct rtnl_link_ops caif_hsi_link_ops = {
47112 .kind = "cfhsi",
47113 .priv_size = sizeof(struct cfhsi),
47114 .setup = cfhsi_setup,
47115diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
47116index 98d73aa..63ef9da 100644
47117--- a/drivers/net/can/Kconfig
47118+++ b/drivers/net/can/Kconfig
47119@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
47120
47121 config CAN_FLEXCAN
47122 tristate "Support for Freescale FLEXCAN based chips"
47123- depends on ARM || PPC
47124+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
47125 ---help---
47126 Say Y here if you want support for Freescale FlexCAN.
47127
47128diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
47129index 62ca0e8..3bed607 100644
47130--- a/drivers/net/can/dev.c
47131+++ b/drivers/net/can/dev.c
47132@@ -958,7 +958,7 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
47133 return -EOPNOTSUPP;
47134 }
47135
47136-static struct rtnl_link_ops can_link_ops __read_mostly = {
47137+static struct rtnl_link_ops can_link_ops = {
47138 .kind = "can",
47139 .maxtype = IFLA_CAN_MAX,
47140 .policy = can_policy,
47141diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
47142index 674f367..ec3a31f 100644
47143--- a/drivers/net/can/vcan.c
47144+++ b/drivers/net/can/vcan.c
47145@@ -163,7 +163,7 @@ static void vcan_setup(struct net_device *dev)
47146 dev->destructor = free_netdev;
47147 }
47148
47149-static struct rtnl_link_ops vcan_link_ops __read_mostly = {
47150+static struct rtnl_link_ops vcan_link_ops = {
47151 .kind = "vcan",
47152 .setup = vcan_setup,
47153 };
47154diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
47155index 49adbf1..fff7ff8 100644
47156--- a/drivers/net/dummy.c
47157+++ b/drivers/net/dummy.c
47158@@ -164,7 +164,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
47159 return 0;
47160 }
47161
47162-static struct rtnl_link_ops dummy_link_ops __read_mostly = {
47163+static struct rtnl_link_ops dummy_link_ops = {
47164 .kind = DRV_NAME,
47165 .setup = dummy_setup,
47166 .validate = dummy_validate,
47167diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
47168index 0443654..4f0aa18 100644
47169--- a/drivers/net/ethernet/8390/ax88796.c
47170+++ b/drivers/net/ethernet/8390/ax88796.c
47171@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
47172 if (ax->plat->reg_offsets)
47173 ei_local->reg_offset = ax->plat->reg_offsets;
47174 else {
47175+ resource_size_t _mem_size = mem_size;
47176+ do_div(_mem_size, 0x18);
47177 ei_local->reg_offset = ax->reg_offsets;
47178 for (ret = 0; ret < 0x18; ret++)
47179- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
47180+ ax->reg_offsets[ret] = _mem_size * ret;
47181 }
47182
47183 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
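The hunk above avoids a plain 64-bit division: resource_size_t can be 64 bits wide on 32-bit ARM, where `mem_size / 0x18` would need a libgcc helper the kernel does not link, so the value is divided once with do_div() and the quotient reused. A userspace model of do_div()'s contract (the macro is a stand-in, not the kernel implementation, and uses the GCC statement-expression extension):

	#include <stdint.h>
	#include <stdio.h>

	/* Same contract as the kernel's do_div(): divide the 64-bit
	 * lvalue n by a 32-bit divisor in place, return the remainder. */
	#define do_div_demo(n, base) ({				\
		uint32_t __rem = (uint32_t)((n) % (base));	\
		(n) /= (base);					\
		__rem;						\
	})

	int main(void)
	{
		uint64_t mem_size = 0x1200;	/* resource_size_t stand-in */
		uint64_t step = mem_size;	/* _mem_size in the hunk */
		uint32_t rem = do_div_demo(step, 0x18);

		for (int i = 0; i < 3; i++)
			printf("reg_offset[%d] = %llu\n", i,
			       (unsigned long long)(step * i));
		printf("remainder = %u\n", rem);
		return 0;
	}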
47184diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
47185index 760c72c..a99728c 100644
47186--- a/drivers/net/ethernet/altera/altera_tse_main.c
47187+++ b/drivers/net/ethernet/altera/altera_tse_main.c
47188@@ -1217,7 +1217,7 @@ static int tse_shutdown(struct net_device *dev)
47189 return 0;
47190 }
47191
47192-static struct net_device_ops altera_tse_netdev_ops = {
47193+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
47194 .ndo_open = tse_open,
47195 .ndo_stop = tse_shutdown,
47196 .ndo_start_xmit = tse_start_xmit,
47197@@ -1454,11 +1454,13 @@ static int altera_tse_probe(struct platform_device *pdev)
47198 ndev->netdev_ops = &altera_tse_netdev_ops;
47199 altera_tse_set_ethtool_ops(ndev);
47200
47201+ pax_open_kernel();
47202 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
47203
47204 if (priv->hash_filter)
47205 altera_tse_netdev_ops.ndo_set_rx_mode =
47206 tse_set_rx_mode_hashfilter;
47207+ pax_close_kernel();
47208
47209 /* Scatter/gather IO is not supported,
47210 * so it is turned off
47211diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47212index 29a0927..5a348e24 100644
47213--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47214+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47215@@ -1122,14 +1122,14 @@ do { \
47216 * operations, everything works on mask values.
47217 */
47218 #define XMDIO_READ(_pdata, _mmd, _reg) \
47219- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
47220+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
47221 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
47222
47223 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
47224 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
47225
47226 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
47227- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
47228+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
47229 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
47230
47231 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
47232diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47233index 8a50b01..39c1ad0 100644
47234--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47235+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47236@@ -187,7 +187,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
47237
47238 memcpy(pdata->ets, ets, sizeof(*pdata->ets));
47239
47240- pdata->hw_if.config_dcb_tc(pdata);
47241+ pdata->hw_if->config_dcb_tc(pdata);
47242
47243 return 0;
47244 }
47245@@ -226,7 +226,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
47246
47247 memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
47248
47249- pdata->hw_if.config_dcb_pfc(pdata);
47250+ pdata->hw_if->config_dcb_pfc(pdata);
47251
47252 return 0;
47253 }
47254diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47255index a50891f..b26fe24 100644
47256--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47257+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47258@@ -347,7 +347,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
47259
47260 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
47261 {
47262- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47263+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47264 struct xgbe_channel *channel;
47265 struct xgbe_ring *ring;
47266 struct xgbe_ring_data *rdata;
47267@@ -388,7 +388,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
47268
47269 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
47270 {
47271- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47272+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47273 struct xgbe_channel *channel;
47274 struct xgbe_ring *ring;
47275 struct xgbe_ring_desc *rdesc;
47276@@ -624,7 +624,7 @@ err_out:
47277 static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
47278 {
47279 struct xgbe_prv_data *pdata = channel->pdata;
47280- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47281+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47282 struct xgbe_ring *ring = channel->rx_ring;
47283 struct xgbe_ring_data *rdata;
47284 int i;
47285@@ -650,17 +650,12 @@ static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
47286 DBGPR("<--xgbe_realloc_rx_buffer\n");
47287 }
47288
47289-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
47290-{
47291- DBGPR("-->xgbe_init_function_ptrs_desc\n");
47292-
47293- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
47294- desc_if->free_ring_resources = xgbe_free_ring_resources;
47295- desc_if->map_tx_skb = xgbe_map_tx_skb;
47296- desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
47297- desc_if->unmap_rdata = xgbe_unmap_rdata;
47298- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
47299- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
47300-
47301- DBGPR("<--xgbe_init_function_ptrs_desc\n");
47302-}
47303+const struct xgbe_desc_if default_xgbe_desc_if = {
47304+ .alloc_ring_resources = xgbe_alloc_ring_resources,
47305+ .free_ring_resources = xgbe_free_ring_resources,
47306+ .map_tx_skb = xgbe_map_tx_skb,
47307+ .realloc_rx_buffer = xgbe_realloc_rx_buffer,
47308+ .unmap_rdata = xgbe_unmap_rdata,
47309+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
47310+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
47311+};
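The rewrite above is the patch's standard recipe for ops tables that used to be filled in at probe time: define one immutable designated-initializer instance and reference it through a pointer-to-const (the xgbe.h hunk later in this file switches the embedded structs to pointers accordingly). Minimal sketch, names illustrative:

	#include <stdio.h>

	struct desc_if {
		int (*alloc)(int n);
	};

	static int alloc_rings(int n) { return n * 2; }

	/* One immutable implementation table ... */
	static const struct desc_if default_desc_if = {
		.alloc = alloc_rings,
	};

	struct prv_data {
		/* ... reached through a pointer-to-const, replacing the
		 * embedded writable struct the old init function filled. */
		const struct desc_if *desc_if;
	};

	int main(void)
	{
		struct prv_data pdata = { .desc_if = &default_desc_if };

		printf("%d\n", pdata.desc_if->alloc(4));
		return 0;
	}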
47312diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47313index 4c66cd1..1a20aab 100644
47314--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47315+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47316@@ -2703,7 +2703,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
47317
47318 static int xgbe_init(struct xgbe_prv_data *pdata)
47319 {
47320- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47321+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47322 int ret;
47323
47324 DBGPR("-->xgbe_init\n");
47325@@ -2767,108 +2767,103 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
47326 return 0;
47327 }
47328
47329-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
47330-{
47331- DBGPR("-->xgbe_init_function_ptrs\n");
47332-
47333- hw_if->tx_complete = xgbe_tx_complete;
47334-
47335- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
47336- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
47337- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
47338- hw_if->set_mac_address = xgbe_set_mac_address;
47339-
47340- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
47341- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
47342-
47343- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
47344- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
47345- hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
47346- hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
47347- hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
47348-
47349- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
47350- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
47351-
47352- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
47353- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
47354- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
47355-
47356- hw_if->enable_tx = xgbe_enable_tx;
47357- hw_if->disable_tx = xgbe_disable_tx;
47358- hw_if->enable_rx = xgbe_enable_rx;
47359- hw_if->disable_rx = xgbe_disable_rx;
47360-
47361- hw_if->powerup_tx = xgbe_powerup_tx;
47362- hw_if->powerdown_tx = xgbe_powerdown_tx;
47363- hw_if->powerup_rx = xgbe_powerup_rx;
47364- hw_if->powerdown_rx = xgbe_powerdown_rx;
47365-
47366- hw_if->dev_xmit = xgbe_dev_xmit;
47367- hw_if->dev_read = xgbe_dev_read;
47368- hw_if->enable_int = xgbe_enable_int;
47369- hw_if->disable_int = xgbe_disable_int;
47370- hw_if->init = xgbe_init;
47371- hw_if->exit = xgbe_exit;
47372+const struct xgbe_hw_if default_xgbe_hw_if = {
47373+ .tx_complete = xgbe_tx_complete,
47374+
47375+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
47376+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
47377+ .add_mac_addresses = xgbe_add_mac_addresses,
47378+ .set_mac_address = xgbe_set_mac_address,
47379+
47380+ .enable_rx_csum = xgbe_enable_rx_csum,
47381+ .disable_rx_csum = xgbe_disable_rx_csum,
47382+
47383+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
47384+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
47385+ .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering,
47386+ .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering,
47387+ .update_vlan_hash_table = xgbe_update_vlan_hash_table,
47388+
47389+ .read_mmd_regs = xgbe_read_mmd_regs,
47390+ .write_mmd_regs = xgbe_write_mmd_regs,
47391+
47392+ .set_gmii_speed = xgbe_set_gmii_speed,
47393+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
47394+ .set_xgmii_speed = xgbe_set_xgmii_speed,
47395+
47396+ .enable_tx = xgbe_enable_tx,
47397+ .disable_tx = xgbe_disable_tx,
47398+ .enable_rx = xgbe_enable_rx,
47399+ .disable_rx = xgbe_disable_rx,
47400+
47401+ .powerup_tx = xgbe_powerup_tx,
47402+ .powerdown_tx = xgbe_powerdown_tx,
47403+ .powerup_rx = xgbe_powerup_rx,
47404+ .powerdown_rx = xgbe_powerdown_rx,
47405+
47406+ .dev_xmit = xgbe_dev_xmit,
47407+ .dev_read = xgbe_dev_read,
47408+ .enable_int = xgbe_enable_int,
47409+ .disable_int = xgbe_disable_int,
47410+ .init = xgbe_init,
47411+ .exit = xgbe_exit,
47412
47413 /* Descriptor related Sequences have to be initialized here */
47414- hw_if->tx_desc_init = xgbe_tx_desc_init;
47415- hw_if->rx_desc_init = xgbe_rx_desc_init;
47416- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
47417- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
47418- hw_if->is_last_desc = xgbe_is_last_desc;
47419- hw_if->is_context_desc = xgbe_is_context_desc;
47420- hw_if->tx_start_xmit = xgbe_tx_start_xmit;
47421+ .tx_desc_init = xgbe_tx_desc_init,
47422+ .rx_desc_init = xgbe_rx_desc_init,
47423+ .tx_desc_reset = xgbe_tx_desc_reset,
47424+ .rx_desc_reset = xgbe_rx_desc_reset,
47425+ .is_last_desc = xgbe_is_last_desc,
47426+ .is_context_desc = xgbe_is_context_desc,
47427+ .tx_start_xmit = xgbe_tx_start_xmit,
47428
47429 /* For FLOW ctrl */
47430- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
47431- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
47432+ .config_tx_flow_control = xgbe_config_tx_flow_control,
47433+ .config_rx_flow_control = xgbe_config_rx_flow_control,
47434
47435 /* For RX coalescing */
47436- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
47437- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
47438- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
47439- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
47440+ .config_rx_coalesce = xgbe_config_rx_coalesce,
47441+ .config_tx_coalesce = xgbe_config_tx_coalesce,
47442+ .usec_to_riwt = xgbe_usec_to_riwt,
47443+ .riwt_to_usec = xgbe_riwt_to_usec,
47444
47445 /* For RX and TX threshold config */
47446- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
47447- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
47448+ .config_rx_threshold = xgbe_config_rx_threshold,
47449+ .config_tx_threshold = xgbe_config_tx_threshold,
47450
47451 /* For RX and TX Store and Forward Mode config */
47452- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
47453- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
47454+ .config_rsf_mode = xgbe_config_rsf_mode,
47455+ .config_tsf_mode = xgbe_config_tsf_mode,
47456
47457 /* For TX DMA Operating on Second Frame config */
47458- hw_if->config_osp_mode = xgbe_config_osp_mode;
47459+ .config_osp_mode = xgbe_config_osp_mode,
47460
47461 /* For RX and TX PBL config */
47462- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
47463- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
47464- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
47465- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
47466- hw_if->config_pblx8 = xgbe_config_pblx8;
47467+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
47468+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
47469+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
47470+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
47471+ .config_pblx8 = xgbe_config_pblx8,
47472
47473 /* For MMC statistics support */
47474- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
47475- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
47476- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
47477+ .tx_mmc_int = xgbe_tx_mmc_int,
47478+ .rx_mmc_int = xgbe_rx_mmc_int,
47479+ .read_mmc_stats = xgbe_read_mmc_stats,
47480
47481 /* For PTP config */
47482- hw_if->config_tstamp = xgbe_config_tstamp;
47483- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
47484- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
47485- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
47486- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
47487+ .config_tstamp = xgbe_config_tstamp,
47488+ .update_tstamp_addend = xgbe_update_tstamp_addend,
47489+ .set_tstamp_time = xgbe_set_tstamp_time,
47490+ .get_tstamp_time = xgbe_get_tstamp_time,
47491+ .get_tx_tstamp = xgbe_get_tx_tstamp,
47492
47493 /* For Data Center Bridging config */
47494- hw_if->config_dcb_tc = xgbe_config_dcb_tc;
47495- hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
47496+ .config_dcb_tc = xgbe_config_dcb_tc,
47497+ .config_dcb_pfc = xgbe_config_dcb_pfc,
47498
47499 /* For Receive Side Scaling */
47500- hw_if->enable_rss = xgbe_enable_rss;
47501- hw_if->disable_rss = xgbe_disable_rss;
47502- hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
47503- hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
47504-
47505- DBGPR("<--xgbe_init_function_ptrs\n");
47506-}
47507+ .enable_rss = xgbe_enable_rss,
47508+ .disable_rss = xgbe_disable_rss,
47509+ .set_rss_hash_key = xgbe_set_rss_hash_key,
47510+ .set_rss_lookup_table = xgbe_set_rss_lookup_table,
47511+};
47512diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47513index e5ffb2c..e56d30b 100644
47514--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47515+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47516@@ -239,7 +239,7 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
47517 * support, tell it now
47518 */
47519 if (ring->tx.xmit_more)
47520- pdata->hw_if.tx_start_xmit(channel, ring);
47521+ pdata->hw_if->tx_start_xmit(channel, ring);
47522
47523 return NETDEV_TX_BUSY;
47524 }
47525@@ -267,7 +267,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
47526
47527 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
47528 {
47529- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47530+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47531 struct xgbe_channel *channel;
47532 enum xgbe_int int_id;
47533 unsigned int i;
47534@@ -289,7 +289,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
47535
47536 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
47537 {
47538- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47539+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47540 struct xgbe_channel *channel;
47541 enum xgbe_int int_id;
47542 unsigned int i;
47543@@ -312,7 +312,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
47544 static irqreturn_t xgbe_isr(int irq, void *data)
47545 {
47546 struct xgbe_prv_data *pdata = data;
47547- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47548+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47549 struct xgbe_channel *channel;
47550 unsigned int dma_isr, dma_ch_isr;
47551 unsigned int mac_isr, mac_tssr;
47552@@ -611,7 +611,7 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
47553
47554 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
47555 {
47556- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47557+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47558
47559 DBGPR("-->xgbe_init_tx_coalesce\n");
47560
47561@@ -625,7 +625,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
47562
47563 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
47564 {
47565- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47566+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47567
47568 DBGPR("-->xgbe_init_rx_coalesce\n");
47569
47570@@ -639,7 +639,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
47571
47572 static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
47573 {
47574- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47575+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47576 struct xgbe_channel *channel;
47577 struct xgbe_ring *ring;
47578 struct xgbe_ring_data *rdata;
47579@@ -664,7 +664,7 @@ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
47580
47581 static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
47582 {
47583- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47584+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47585 struct xgbe_channel *channel;
47586 struct xgbe_ring *ring;
47587 struct xgbe_ring_data *rdata;
47588@@ -690,7 +690,7 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
47589 static void xgbe_adjust_link(struct net_device *netdev)
47590 {
47591 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47592- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47593+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47594 struct phy_device *phydev = pdata->phydev;
47595 int new_state = 0;
47596
47597@@ -798,7 +798,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
47598 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
47599 {
47600 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47601- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47602+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47603 unsigned long flags;
47604
47605 DBGPR("-->xgbe_powerdown\n");
47606@@ -836,7 +836,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
47607 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
47608 {
47609 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47610- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47611+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47612 unsigned long flags;
47613
47614 DBGPR("-->xgbe_powerup\n");
47615@@ -873,7 +873,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
47616
47617 static int xgbe_start(struct xgbe_prv_data *pdata)
47618 {
47619- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47620+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47621 struct net_device *netdev = pdata->netdev;
47622
47623 DBGPR("-->xgbe_start\n");
47624@@ -899,7 +899,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
47625
47626 static void xgbe_stop(struct xgbe_prv_data *pdata)
47627 {
47628- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47629+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47630 struct xgbe_channel *channel;
47631 struct net_device *netdev = pdata->netdev;
47632 struct netdev_queue *txq;
47633@@ -932,7 +932,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
47634 static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
47635 {
47636 struct xgbe_channel *channel;
47637- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47638+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47639 unsigned int i;
47640
47641 DBGPR("-->xgbe_restart_dev\n");
47642@@ -1135,7 +1135,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
47643 return -ERANGE;
47644 }
47645
47646- pdata->hw_if.config_tstamp(pdata, mac_tscr);
47647+ pdata->hw_if->config_tstamp(pdata, mac_tscr);
47648
47649 memcpy(&pdata->tstamp_config, &config, sizeof(config));
47650
47651@@ -1284,8 +1284,8 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
47652 static int xgbe_open(struct net_device *netdev)
47653 {
47654 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47655- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47656- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47657+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47658+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47659 struct xgbe_channel *channel = NULL;
47660 unsigned int i = 0;
47661 int ret;
47662@@ -1400,8 +1400,8 @@ err_phy_init:
47663 static int xgbe_close(struct net_device *netdev)
47664 {
47665 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47666- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47667- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47668+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47669+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47670 struct xgbe_channel *channel;
47671 unsigned int i;
47672
47673@@ -1442,8 +1442,8 @@ static int xgbe_close(struct net_device *netdev)
47674 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
47675 {
47676 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47677- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47678- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47679+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47680+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47681 struct xgbe_channel *channel;
47682 struct xgbe_ring *ring;
47683 struct xgbe_packet_data *packet;
47684@@ -1518,7 +1518,7 @@ tx_netdev_return:
47685 static void xgbe_set_rx_mode(struct net_device *netdev)
47686 {
47687 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47688- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47689+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47690 unsigned int pr_mode, am_mode;
47691
47692 DBGPR("-->xgbe_set_rx_mode\n");
47693@@ -1537,7 +1537,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
47694 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
47695 {
47696 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47697- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47698+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47699 struct sockaddr *saddr = addr;
47700
47701 DBGPR("-->xgbe_set_mac_address\n");
47702@@ -1604,7 +1604,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
47703
47704 DBGPR("-->%s\n", __func__);
47705
47706- pdata->hw_if.read_mmc_stats(pdata);
47707+ pdata->hw_if->read_mmc_stats(pdata);
47708
47709 s->rx_packets = pstats->rxframecount_gb;
47710 s->rx_bytes = pstats->rxoctetcount_gb;
47711@@ -1631,7 +1631,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
47712 u16 vid)
47713 {
47714 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47715- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47716+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47717
47718 DBGPR("-->%s\n", __func__);
47719
47720@@ -1647,7 +1647,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
47721 u16 vid)
47722 {
47723 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47724- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47725+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47726
47727 DBGPR("-->%s\n", __func__);
47728
47729@@ -1713,7 +1713,7 @@ static int xgbe_set_features(struct net_device *netdev,
47730 netdev_features_t features)
47731 {
47732 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47733- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47734+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47735 netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
47736 int ret = 0;
47737
47738@@ -1778,7 +1778,7 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
47739 static void xgbe_rx_refresh(struct xgbe_channel *channel)
47740 {
47741 struct xgbe_prv_data *pdata = channel->pdata;
47742- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47743+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47744 struct xgbe_ring *ring = channel->rx_ring;
47745 struct xgbe_ring_data *rdata;
47746
47747@@ -1819,8 +1819,8 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
47748 static int xgbe_tx_poll(struct xgbe_channel *channel)
47749 {
47750 struct xgbe_prv_data *pdata = channel->pdata;
47751- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47752- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47753+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47754+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47755 struct xgbe_ring *ring = channel->tx_ring;
47756 struct xgbe_ring_data *rdata;
47757 struct xgbe_ring_desc *rdesc;
47758@@ -1891,7 +1891,7 @@ unlock:
47759 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
47760 {
47761 struct xgbe_prv_data *pdata = channel->pdata;
47762- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47763+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47764 struct xgbe_ring *ring = channel->rx_ring;
47765 struct xgbe_ring_data *rdata;
47766 struct xgbe_packet_data *packet;
47767diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47768index ebf4893..28108c7 100644
47769--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47770+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
47771@@ -203,7 +203,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
47772
47773 DBGPR("-->%s\n", __func__);
47774
47775- pdata->hw_if.read_mmc_stats(pdata);
47776+ pdata->hw_if->read_mmc_stats(pdata);
47777 for (i = 0; i < XGBE_STATS_COUNT; i++) {
47778 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
47779 *data++ = *(u64 *)stat;
47780@@ -378,7 +378,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
47781 struct ethtool_coalesce *ec)
47782 {
47783 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47784- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47785+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47786 unsigned int riwt;
47787
47788 DBGPR("-->xgbe_get_coalesce\n");
47789@@ -401,7 +401,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
47790 struct ethtool_coalesce *ec)
47791 {
47792 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47793- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47794+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47795 unsigned int rx_frames, rx_riwt, rx_usecs;
47796 unsigned int tx_frames, tx_usecs;
47797
47798diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47799index dbd3850..4e31b38 100644
47800--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47801+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
47802@@ -155,12 +155,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
47803 DBGPR("<--xgbe_default_config\n");
47804 }
47805
47806-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
47807-{
47808- xgbe_init_function_ptrs_dev(&pdata->hw_if);
47809- xgbe_init_function_ptrs_desc(&pdata->desc_if);
47810-}
47811-
47812 static int xgbe_probe(struct platform_device *pdev)
47813 {
47814 struct xgbe_prv_data *pdata;
47815@@ -281,9 +275,8 @@ static int xgbe_probe(struct platform_device *pdev)
47816 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
47817
47818 /* Set all the function pointers */
47819- xgbe_init_all_fptrs(pdata);
47820- hw_if = &pdata->hw_if;
47821- desc_if = &pdata->desc_if;
47822+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
47823+ desc_if = pdata->desc_if = &default_xgbe_desc_if;
47824
47825 /* Issue software reset to device */
47826 hw_if->exit(pdata);
47827diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47828index 363b210..b241389 100644
47829--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47830+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
47831@@ -126,7 +126,7 @@
47832 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
47833 {
47834 struct xgbe_prv_data *pdata = mii->priv;
47835- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47836+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47837 int mmd_data;
47838
47839 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
47840@@ -143,7 +143,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
47841 u16 mmd_val)
47842 {
47843 struct xgbe_prv_data *pdata = mii->priv;
47844- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47845+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47846 int mmd_data = mmd_val;
47847
47848 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
47849diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47850index a1bf9d1c..84adcab 100644
47851--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47852+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
47853@@ -129,7 +129,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
47854 tstamp_cc);
47855 u64 nsec;
47856
47857- nsec = pdata->hw_if.get_tstamp_time(pdata);
47858+ nsec = pdata->hw_if->get_tstamp_time(pdata);
47859
47860 return nsec;
47861 }
47862@@ -158,7 +158,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
47863
47864 spin_lock_irqsave(&pdata->tstamp_lock, flags);
47865
47866- pdata->hw_if.update_tstamp_addend(pdata, addend);
47867+ pdata->hw_if->update_tstamp_addend(pdata, addend);
47868
47869 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
47870
47871diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
47872index f9ec762..988c969 100644
47873--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
47874+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
47875@@ -668,8 +668,8 @@ struct xgbe_prv_data {
47876 int dev_irq;
47877 unsigned int per_channel_irq;
47878
47879- struct xgbe_hw_if hw_if;
47880- struct xgbe_desc_if desc_if;
47881+ const struct xgbe_hw_if *hw_if;
47882+ const struct xgbe_desc_if *desc_if;
47883
47884 /* AXI DMA settings */
47885 unsigned int axdomain;
47886@@ -787,6 +787,9 @@ struct xgbe_prv_data {
47887 #endif
47888 };
47889
47890+extern const struct xgbe_hw_if default_xgbe_hw_if;
47891+extern const struct xgbe_desc_if default_xgbe_desc_if;
47892+
47893 /* Function prototypes*/
47894
47895 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
47896diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47897index adcacda..fa6e0ae 100644
47898--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47899+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
47900@@ -1065,7 +1065,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
47901 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
47902 {
47903 /* RX_MODE controlling object */
47904- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
47905+ bnx2x_init_rx_mode_obj(bp);
47906
47907 /* multicast configuration controlling object */
47908 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
47909diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47910index 07cdf9b..b08ecc7 100644
47911--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47912+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
47913@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
47914 return rc;
47915 }
47916
47917-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
47918- struct bnx2x_rx_mode_obj *o)
47919+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
47920 {
47921 if (CHIP_IS_E1x(bp)) {
47922- o->wait_comp = bnx2x_empty_rx_mode_wait;
47923- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
47924+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
47925+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
47926 } else {
47927- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
47928- o->config_rx_mode = bnx2x_set_rx_mode_e2;
47929+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
47930+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
47931 }
47932 }
47933
47934diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47935index 86baecb..ff3bb46 100644
47936--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47937+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
47938@@ -1411,8 +1411,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
47939
47940 /********************* RX MODE ****************/
47941
47942-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
47943- struct bnx2x_rx_mode_obj *o);
47944+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
47945
47946 /**
47947 * bnx2x_config_rx_mode - Send an RX_MODE ramrod according to the provided parameters.
47948diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
47949index 31c9f82..e65e986 100644
47950--- a/drivers/net/ethernet/broadcom/tg3.h
47951+++ b/drivers/net/ethernet/broadcom/tg3.h
47952@@ -150,6 +150,7 @@
47953 #define CHIPREV_ID_5750_A0 0x4000
47954 #define CHIPREV_ID_5750_A1 0x4001
47955 #define CHIPREV_ID_5750_A3 0x4003
47956+#define CHIPREV_ID_5750_C1 0x4201
47957 #define CHIPREV_ID_5750_C2 0x4202
47958 #define CHIPREV_ID_5752_A0_HW 0x5000
47959 #define CHIPREV_ID_5752_A0 0x6000
47960diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
47961index 903466e..b285864 100644
47962--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
47963+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
47964@@ -1693,10 +1693,10 @@ bna_cb_ioceth_reset(void *arg)
47965 }
47966
47967 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
47968- bna_cb_ioceth_enable,
47969- bna_cb_ioceth_disable,
47970- bna_cb_ioceth_hbfail,
47971- bna_cb_ioceth_reset
47972+ .enable_cbfn = bna_cb_ioceth_enable,
47973+ .disable_cbfn = bna_cb_ioceth_disable,
47974+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
47975+ .reset_cbfn = bna_cb_ioceth_reset
47976 };
47977
47978 static void bna_attr_init(struct bna_ioceth *ioceth)
47979diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47980index 8cffcdf..aadf043 100644
47981--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47982+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
47983@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
47984 */
47985 struct l2t_skb_cb {
47986 arp_failure_handler_func arp_failure_handler;
47987-};
47988+} __no_const;
47989
47990 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
47991
47992diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47993index ccf3436..b720d77 100644
47994--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47995+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
47996@@ -2277,7 +2277,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
47997
47998 int i;
47999 struct adapter *ap = netdev2adap(dev);
48000- static const unsigned int *reg_ranges;
48001+ const unsigned int *reg_ranges;
48002 int arr_size = 0, buf_size = 0;
48003
48004 if (is_t4(ap->params.chip)) {
48005diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
48006index badff18..e15c4ec 100644
48007--- a/drivers/net/ethernet/dec/tulip/de4x5.c
48008+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
48009@@ -5373,7 +5373,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
48010 for (i=0; i<ETH_ALEN; i++) {
48011 tmp.addr[i] = dev->dev_addr[i];
48012 }
48013- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
48014+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
48015 break;
48016
48017 case DE4X5_SET_HWADDR: /* Set the hardware address */
48018@@ -5413,7 +5413,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
48019 spin_lock_irqsave(&lp->lock, flags);
48020 memcpy(&statbuf, &lp->pktStats, ioc->len);
48021 spin_unlock_irqrestore(&lp->lock, flags);
48022- if (copy_to_user(ioc->data, &statbuf, ioc->len))
48023+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
48024 return -EFAULT;
48025 break;
48026 }
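Both de4x5 hunks add the same guard: ioc->len is user-controlled, and without the `ioc->len > sizeof ...` check copy_to_user() would read past the end of the kernel-side object. A userspace model of the guard, with memcpy standing in for copy_to_user() and all names hypothetical:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	/* Reject a user-supplied length larger than the source object
	 * before copying -- the shape of the added check. */
	static int copy_out(void *dst, const void *src,
			    size_t src_size, size_t user_len)
	{
		if (user_len > src_size)	/* the added guard */
			return -EFAULT;
		memcpy(dst, src, user_len);	/* copy_to_user() stand-in */
		return 0;
	}

	int main(void)
	{
		unsigned char addr[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };
		unsigned char out[64];

		printf("len=6:  %d\n", copy_out(out, addr, sizeof(addr), 6));
		printf("len=64: %d (overread rejected)\n",
		       copy_out(out, addr, sizeof(addr), 64));
		return 0;
	}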
48027diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
48028index d48806b..41cd80f 100644
48029--- a/drivers/net/ethernet/emulex/benet/be_main.c
48030+++ b/drivers/net/ethernet/emulex/benet/be_main.c
48031@@ -537,7 +537,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
48032
48033 if (wrapped)
48034 newacc += 65536;
48035- ACCESS_ONCE(*acc) = newacc;
48036+ ACCESS_ONCE_RW(*acc) = newacc;
48037 }
48038
48039 static void populate_erx_stats(struct be_adapter *adapter,
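ACCESS_ONCE_RW shows up throughout the patch because grsecurity defines ACCESS_ONCE() with a pointer-to-const-volatile cast, making the plain form read-only; stores must use the RW variant. A sketch of the split (the definitions below illustrate the approach and are not copied from the patch; __typeof__ is a GCC extension):

	#include <stdio.h>

	#define ACCESS_ONCE_DEMO(x)	(*(const volatile __typeof__(x) *)&(x))
	#define ACCESS_ONCE_RW_DEMO(x)	(*(volatile __typeof__(x) *)&(x))

	int main(void)
	{
		unsigned int acc = 1;

		ACCESS_ONCE_RW_DEMO(acc) = 65536;	/* write: RW flavor */
		unsigned int v = ACCESS_ONCE_DEMO(acc);	/* read-only flavor */

		/* ACCESS_ONCE_DEMO(acc) = 2;  <- "assignment of read-only
		 * location", exactly the misuse the split catches. */
		printf("%u\n", v);
		return 0;
	}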
48040diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
48041index 6d0c5d5..55be363 100644
48042--- a/drivers/net/ethernet/faraday/ftgmac100.c
48043+++ b/drivers/net/ethernet/faraday/ftgmac100.c
48044@@ -30,6 +30,8 @@
48045 #include <linux/netdevice.h>
48046 #include <linux/phy.h>
48047 #include <linux/platform_device.h>
48048+#include <linux/interrupt.h>
48049+#include <linux/irqreturn.h>
48050 #include <net/ip.h>
48051
48052 #include "ftgmac100.h"
48053diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
48054index dce5f7b..2433466 100644
48055--- a/drivers/net/ethernet/faraday/ftmac100.c
48056+++ b/drivers/net/ethernet/faraday/ftmac100.c
48057@@ -31,6 +31,8 @@
48058 #include <linux/module.h>
48059 #include <linux/netdevice.h>
48060 #include <linux/platform_device.h>
48061+#include <linux/interrupt.h>
48062+#include <linux/irqreturn.h>
48063
48064 #include "ftmac100.h"
48065
48066diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48067index 6d1ec92..4d5d97d 100644
48068--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48069+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48070@@ -407,7 +407,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
48071 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
48072
48073 /* Update the base adjustment value. */
48074- ACCESS_ONCE(pf->ptp_base_adj) = incval;
48075+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
48076 smp_mb(); /* Force the above update. */
48077 }
48078
48079diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48080index 5fd4b52..87aa34b 100644
48081--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48082+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48083@@ -794,7 +794,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
48084 }
48085
48086 /* update the base incval used to calculate frequency adjustment */
48087- ACCESS_ONCE(adapter->base_incval) = incval;
48088+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
48089 smp_mb();
48090
48091 /* need lock to prevent incorrect read while modifying cyclecounter */
48092diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
48093index e3357bf..d4d5348 100644
48094--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
48095+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
48096@@ -466,8 +466,8 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
48097 wmb();
48098
48099 /* we want to dirty this cache line once */
48100- ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
48101- ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
48102+ ACCESS_ONCE_RW(ring->last_nr_txbb) = last_nr_txbb;
48103+ ACCESS_ONCE_RW(ring->cons) = ring_cons + txbbs_skipped;
48104
48105 netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
48106
48107diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
48108index 2bbd01f..e8baa64 100644
48109--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
48110+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
48111@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
48112 struct __vxge_hw_fifo *fifo;
48113 struct vxge_hw_fifo_config *config;
48114 u32 txdl_size, txdl_per_memblock;
48115- struct vxge_hw_mempool_cbs fifo_mp_callback;
48116+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
48117+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
48118+ };
48119+
48120 struct __vxge_hw_virtualpath *vpath;
48121
48122 if ((vp == NULL) || (attr == NULL)) {
48123@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
48124 goto exit;
48125 }
48126
48127- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
48128-
48129 fifo->mempool =
48130 __vxge_hw_mempool_create(vpath->hldev,
48131 fifo->config->memblock_size,
48132diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48133index 2bb48d5..d1a865d 100644
48134--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48135+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48136@@ -2324,7 +2324,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
48137 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
48138 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
48139 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
48140- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
48141+ pax_open_kernel();
48142+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
48143+ pax_close_kernel();
48144 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48145 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
48146 max_tx_rings = QLCNIC_MAX_TX_RINGS;
48147diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48148index be7d7a6..a8983f8 100644
48149--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48150+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48151@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
48152 case QLCNIC_NON_PRIV_FUNC:
48153 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
48154 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48155- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
48156+ pax_open_kernel();
48157+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
48158+ pax_close_kernel();
48159 break;
48160 case QLCNIC_PRIV_FUNC:
48161 ahw->op_mode = QLCNIC_PRIV_FUNC;
48162 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
48163- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48164+ pax_open_kernel();
48165+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48166+ pax_close_kernel();
48167 break;
48168 case QLCNIC_MGMT_FUNC:
48169 ahw->op_mode = QLCNIC_MGMT_FUNC;
48170 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48171- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48172+ pax_open_kernel();
48173+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48174+ pax_close_kernel();
48175 break;
48176 default:
48177 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
48178diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48179index c9f57fb..208bdc1 100644
48180--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48181+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48182@@ -1285,7 +1285,7 @@ flash_temp:
48183 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
48184 {
48185 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
48186- static const struct qlcnic_dump_operations *fw_dump_ops;
48187+ const struct qlcnic_dump_operations *fw_dump_ops;
48188 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
48189 u32 entry_offset, dump, no_entries, buf_offset = 0;
48190 int i, k, ops_cnt, ops_index, dump_size = 0;
48191diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
48192index 2e2cf80..ebc796d 100644
48193--- a/drivers/net/ethernet/realtek/r8169.c
48194+++ b/drivers/net/ethernet/realtek/r8169.c
48195@@ -788,22 +788,22 @@ struct rtl8169_private {
48196 struct mdio_ops {
48197 void (*write)(struct rtl8169_private *, int, int);
48198 int (*read)(struct rtl8169_private *, int);
48199- } mdio_ops;
48200+ } __no_const mdio_ops;
48201
48202 struct pll_power_ops {
48203 void (*down)(struct rtl8169_private *);
48204 void (*up)(struct rtl8169_private *);
48205- } pll_power_ops;
48206+ } __no_const pll_power_ops;
48207
48208 struct jumbo_ops {
48209 void (*enable)(struct rtl8169_private *);
48210 void (*disable)(struct rtl8169_private *);
48211- } jumbo_ops;
48212+ } __no_const jumbo_ops;
48213
48214 struct csi_ops {
48215 void (*write)(struct rtl8169_private *, int, int);
48216 u32 (*read)(struct rtl8169_private *, int);
48217- } csi_ops;
48218+ } __no_const csi_ops;
48219
48220 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
48221 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
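`__no_const` is the opt-out from constification: these r8169 ops structs are legitimately re-pointed at probe time once the chip variant is known, so the plugin must leave them writable. Sketch of the pattern the annotation preserves (the demo macro expands to nothing; in the patch __no_const is an attribute the plugin recognizes; names illustrative):

	#include <stdio.h>

	#define __no_const_demo		/* plugin opt-out stand-in */

	struct mdio_ops {
		void (*write)(int reg, int val);
		int  (*read)(int reg);
	} __no_const_demo;

	static int read_v1(int reg) { return reg ^ 1; }
	static int read_v2(int reg) { return reg ^ 2; }
	static void write_noop(int reg, int val) { (void)reg; (void)val; }

	int main(void)
	{
		struct mdio_ops ops = { .write = write_noop };
		int chip_version = 2;	/* discovered at probe time */

		/* Variant selection needs a writable struct, hence the
		 * opt-out instead of constification. */
		ops.read = (chip_version == 1) ? read_v1 : read_v2;
		printf("%d\n", ops.read(8));
		return 0;
	}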
48222diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
48223index 6b861e3..204ac86 100644
48224--- a/drivers/net/ethernet/sfc/ptp.c
48225+++ b/drivers/net/ethernet/sfc/ptp.c
48226@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
48227 ptp->start.dma_addr);
48228
48229 /* Clear flag that signals MC ready */
48230- ACCESS_ONCE(*start) = 0;
48231+ ACCESS_ONCE_RW(*start) = 0;
48232 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
48233 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
48234 EFX_BUG_ON_PARANOID(rc);
48235diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48236index 08c483b..2c4a553 100644
48237--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48238+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48239@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
48240
48241 writel(value, ioaddr + MMC_CNTRL);
48242
48243- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
48244- MMC_CNTRL, value);
48245+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
48246+// MMC_CNTRL, value);
48247 }
48248
48249 /* To mask all interrupts. */
48250diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
48251index 384ca4f..dd7d4f9 100644
48252--- a/drivers/net/hyperv/hyperv_net.h
48253+++ b/drivers/net/hyperv/hyperv_net.h
48254@@ -171,7 +171,7 @@ struct rndis_device {
48255 enum rndis_device_state state;
48256 bool link_state;
48257 bool link_change;
48258- atomic_t new_req_id;
48259+ atomic_unchecked_t new_req_id;
48260
48261 spinlock_t request_lock;
48262 struct list_head req_list;
48263diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
48264index ec0c40a..c9e42eb 100644
48265--- a/drivers/net/hyperv/rndis_filter.c
48266+++ b/drivers/net/hyperv/rndis_filter.c
48267@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
48268 * template
48269 */
48270 set = &rndis_msg->msg.set_req;
48271- set->req_id = atomic_inc_return(&dev->new_req_id);
48272+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
48273
48274 /* Add to the request list */
48275 spin_lock_irqsave(&dev->request_lock, flags);
48276@@ -912,7 +912,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
48277
48278 /* Setup the rndis set */
48279 halt = &request->request_msg.msg.halt_req;
48280- halt->req_id = atomic_inc_return(&dev->new_req_id);
48281+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
48282
48283 /* Ignore return since this msg is optional. */
48284 rndis_filter_send_request(dev, request);
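atomic_unchecked_t is the escape hatch from PaX's REFCOUNT hardening: plain atomic_t increments trap on signed overflow to catch reference-count bugs, while counters such as this RNDIS request id may wrap harmlessly and are moved to the unchecked type. A userspace sketch of the distinction, assuming __builtin_add_overflow (GCC/Clang; names illustrative):

	#include <stdint.h>
	#include <stdio.h>

	/* Checked flavor: what REFCOUNT does to atomic_t increments. */
	static int32_t inc_checked(int32_t *v)
	{
		int32_t out;

		if (__builtin_add_overflow(*v, 1, &out)) {
			fprintf(stderr, "refcount overflow detected\n");
			return *v;	/* the kernel saturates and logs */
		}
		return *v = out;
	}

	/* Unchecked flavor: wraparound is harmless for a request id. */
	static int32_t inc_unchecked(int32_t *v)
	{
		return *v = (int32_t)((uint32_t)*v + 1);
	}

	int main(void)
	{
		int32_t id = INT32_MAX;

		printf("checked:   %d\n", inc_checked(&id));
		printf("unchecked: %d\n", inc_unchecked(&id));
		return 0;
	}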
48285diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
48286index 34f846b..4a0d5b1 100644
48287--- a/drivers/net/ifb.c
48288+++ b/drivers/net/ifb.c
48289@@ -253,7 +253,7 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
48290 return 0;
48291 }
48292
48293-static struct rtnl_link_ops ifb_link_ops __read_mostly = {
48294+static struct rtnl_link_ops ifb_link_ops = {
48295 .kind = "ifb",
48296 .priv_size = sizeof(struct ifb_private),
48297 .setup = ifb_setup,
48298diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
48299index 612e073..a9f5eda 100644
48300--- a/drivers/net/macvlan.c
48301+++ b/drivers/net/macvlan.c
48302@@ -335,7 +335,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
48303 free_nskb:
48304 kfree_skb(nskb);
48305 err:
48306- atomic_long_inc(&skb->dev->rx_dropped);
48307+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
48308 }
48309
48310 static void macvlan_flush_sources(struct macvlan_port *port,
48311@@ -1459,13 +1459,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
48312 int macvlan_link_register(struct rtnl_link_ops *ops)
48313 {
48314 /* common fields */
48315- ops->priv_size = sizeof(struct macvlan_dev);
48316- ops->validate = macvlan_validate;
48317- ops->maxtype = IFLA_MACVLAN_MAX;
48318- ops->policy = macvlan_policy;
48319- ops->changelink = macvlan_changelink;
48320- ops->get_size = macvlan_get_size;
48321- ops->fill_info = macvlan_fill_info;
48322+ pax_open_kernel();
48323+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
48324+ *(void **)&ops->validate = macvlan_validate;
48325+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
48326+ *(const void **)&ops->policy = macvlan_policy;
48327+ *(void **)&ops->changelink = macvlan_changelink;
48328+ *(void **)&ops->get_size = macvlan_get_size;
48329+ *(void **)&ops->fill_info = macvlan_fill_info;
48330+ pax_close_kernel();
48331
48332 return rtnl_link_register(ops);
48333 };
48334@@ -1545,7 +1547,7 @@ static int macvlan_device_event(struct notifier_block *unused,
48335 return NOTIFY_DONE;
48336 }
48337
48338-static struct notifier_block macvlan_notifier_block __read_mostly = {
48339+static struct notifier_block macvlan_notifier_block = {
48340 .notifier_call = macvlan_device_event,
48341 };
48342
48343diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
48344index 4d050ee..012f6dd 100644
48345--- a/drivers/net/macvtap.c
48346+++ b/drivers/net/macvtap.c
48347@@ -436,7 +436,7 @@ static void macvtap_setup(struct net_device *dev)
48348 dev->tx_queue_len = TUN_READQ_SIZE;
48349 }
48350
48351-static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
48352+static struct rtnl_link_ops macvtap_link_ops = {
48353 .kind = "macvtap",
48354 .setup = macvtap_setup,
48355 .newlink = macvtap_newlink,
48356@@ -1033,7 +1033,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
48357
48358 ret = 0;
48359 u = q->flags;
48360- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48361+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48362 put_user(u, &ifr->ifr_flags))
48363 ret = -EFAULT;
48364 macvtap_put_vlan(vlan);
48365@@ -1217,7 +1217,7 @@ static int macvtap_device_event(struct notifier_block *unused,
48366 return NOTIFY_DONE;
48367 }
48368
48369-static struct notifier_block macvtap_notifier_block __read_mostly = {
48370+static struct notifier_block macvtap_notifier_block = {
48371 .notifier_call = macvtap_device_event,
48372 };
48373
48374diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
48375index 34924df..a747360 100644
48376--- a/drivers/net/nlmon.c
48377+++ b/drivers/net/nlmon.c
48378@@ -154,7 +154,7 @@ static int nlmon_validate(struct nlattr *tb[], struct nlattr *data[])
48379 return 0;
48380 }
48381
48382-static struct rtnl_link_ops nlmon_link_ops __read_mostly = {
48383+static struct rtnl_link_ops nlmon_link_ops = {
48384 .kind = "nlmon",
48385 .priv_size = sizeof(struct nlmon),
48386 .setup = nlmon_setup,
48387diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
48388index 3fc91e8..6c36337 100644
48389--- a/drivers/net/phy/phy_device.c
48390+++ b/drivers/net/phy/phy_device.c
48391@@ -218,7 +218,7 @@ EXPORT_SYMBOL(phy_device_create);
48392 * zero on success.
48393 *
48394 */
48395-static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
48396+static int get_phy_c45_ids(struct mii_bus *bus, int addr, int *phy_id,
48397 struct phy_c45_device_ids *c45_ids) {
48398 int phy_reg;
48399 int i, reg_addr;
48400@@ -288,7 +288,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
48401 * its return value is in turn returned.
48402 *
48403 */
48404-static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
48405+static int get_phy_id(struct mii_bus *bus, int addr, int *phy_id,
48406 bool is_c45, struct phy_c45_device_ids *c45_ids)
48407 {
48408 int phy_reg;
48409@@ -326,7 +326,7 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
48410 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
48411 {
48412 struct phy_c45_device_ids c45_ids = {0};
48413- u32 phy_id = 0;
48414+ int phy_id = 0;
48415 int r;
48416
48417 r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
48418diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
48419index af034db..1611c0b2 100644
48420--- a/drivers/net/ppp/ppp_generic.c
48421+++ b/drivers/net/ppp/ppp_generic.c
48422@@ -1022,7 +1022,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48423 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
48424 struct ppp_stats stats;
48425 struct ppp_comp_stats cstats;
48426- char *vers;
48427
48428 switch (cmd) {
48429 case SIOCGPPPSTATS:
48430@@ -1044,8 +1043,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48431 break;
48432
48433 case SIOCGPPPVER:
48434- vers = PPP_VERSION;
48435- if (copy_to_user(addr, vers, strlen(vers) + 1))
48436+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
48437 break;
48438 err = 0;
48439 break;
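The SIOCGPPPVER change above trades a runtime strlen() for a compile-time constant: for a string literal, sizeof already includes the terminating NUL, so the intermediate vers pointer is unnecessary. A standalone illustration (the PPP_VERSION value below is an assumption; the real definition comes from the ppp headers):

#include <stdio.h>
#include <string.h>

#define PPP_VERSION "2.4.2"	/* assumed value for illustration */

int main(void)
{
	/* both print 6: sizeof counts the NUL and is evaluated at compile time */
	printf("%zu %zu\n", sizeof(PPP_VERSION), strlen(PPP_VERSION) + 1);
	return 0;
}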
48440diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
48441index 079f7ad..b2a2bfa7 100644
48442--- a/drivers/net/slip/slhc.c
48443+++ b/drivers/net/slip/slhc.c
48444@@ -487,7 +487,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
48445 register struct tcphdr *thp;
48446 register struct iphdr *ip;
48447 register struct cstate *cs;
48448- int len, hdrlen;
48449+ long len, hdrlen;
48450 unsigned char *cp = icp;
48451
48452 /* We've got a compressed packet; read the change byte */
48453diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
48454index 2c087ef..4859007 100644
48455--- a/drivers/net/team/team.c
48456+++ b/drivers/net/team/team.c
48457@@ -2103,7 +2103,7 @@ static unsigned int team_get_num_rx_queues(void)
48458 return TEAM_DEFAULT_NUM_RX_QUEUES;
48459 }
48460
48461-static struct rtnl_link_ops team_link_ops __read_mostly = {
48462+static struct rtnl_link_ops team_link_ops = {
48463 .kind = DRV_NAME,
48464 .priv_size = sizeof(struct team),
48465 .setup = team_setup,
48466@@ -2893,7 +2893,7 @@ static int team_device_event(struct notifier_block *unused,
48467 return NOTIFY_DONE;
48468 }
48469
48470-static struct notifier_block team_notifier_block __read_mostly = {
48471+static struct notifier_block team_notifier_block = {
48472 .notifier_call = team_device_event,
48473 };
48474
48475diff --git a/drivers/net/tun.c b/drivers/net/tun.c
48476index 10f9e40..3515e7e 100644
48477--- a/drivers/net/tun.c
48478+++ b/drivers/net/tun.c
48479@@ -1425,7 +1425,7 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
48480 return -EINVAL;
48481 }
48482
48483-static struct rtnl_link_ops tun_link_ops __read_mostly = {
48484+static struct rtnl_link_ops tun_link_ops = {
48485 .kind = DRV_NAME,
48486 .priv_size = sizeof(struct tun_struct),
48487 .setup = tun_setup,
48488@@ -1827,7 +1827,7 @@ unlock:
48489 }
48490
48491 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48492- unsigned long arg, int ifreq_len)
48493+ unsigned long arg, size_t ifreq_len)
48494 {
48495 struct tun_file *tfile = file->private_data;
48496 struct tun_struct *tun;
48497@@ -1841,6 +1841,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48498 int le;
48499 int ret;
48500
48501+ if (ifreq_len > sizeof ifr)
48502+ return -EFAULT;
48503+
48504 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
48505 if (copy_from_user(&ifr, argp, ifreq_len))
48506 return -EFAULT;
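The tun hunk above hardens __tun_chr_ioctl() in two steps: ifreq_len becomes size_t so it can never go negative, and an explicit bound check runs before any copy_from_user() into the on-stack struct ifreq. A cut-down sketch of the resulting shape (kernel context assumed, names hypothetical):

static long demo_ioctl(void __user *argp, size_t ifreq_len)
{
	struct ifreq ifr;

	if (ifreq_len > sizeof(ifr))	/* reject oversized lengths up front */
		return -EFAULT;
	if (copy_from_user(&ifr, argp, ifreq_len))
		return -EFAULT;
	return 0;
}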
48507diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
48508index 9c5aa92..8cd0405 100644
48509--- a/drivers/net/usb/hso.c
48510+++ b/drivers/net/usb/hso.c
48511@@ -71,7 +71,7 @@
48512 #include <asm/byteorder.h>
48513 #include <linux/serial_core.h>
48514 #include <linux/serial.h>
48515-
48516+#include <asm/local.h>
48517
48518 #define MOD_AUTHOR "Option Wireless"
48519 #define MOD_DESCRIPTION "USB High Speed Option driver"
48520@@ -1178,7 +1178,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
48521 struct urb *urb;
48522
48523 urb = serial->rx_urb[0];
48524- if (serial->port.count > 0) {
48525+ if (atomic_read(&serial->port.count) > 0) {
48526 count = put_rxbuf_data(urb, serial);
48527 if (count == -1)
48528 return;
48529@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
48530 DUMP1(urb->transfer_buffer, urb->actual_length);
48531
48532 /* Anyone listening? */
48533- if (serial->port.count == 0)
48534+ if (atomic_read(&serial->port.count) == 0)
48535 return;
48536
48537 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
48538@@ -1278,8 +1278,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48539 tty_port_tty_set(&serial->port, tty);
48540
48541 /* check for port already opened, if not set the termios */
48542- serial->port.count++;
48543- if (serial->port.count == 1) {
48544+ if (atomic_inc_return(&serial->port.count) == 1) {
48545 serial->rx_state = RX_IDLE;
48546 /* Force default termio settings */
48547 _hso_serial_set_termios(tty, NULL);
48548@@ -1289,7 +1288,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48549 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
48550 if (result) {
48551 hso_stop_serial_device(serial->parent);
48552- serial->port.count--;
48553+ atomic_dec(&serial->port.count);
48554 kref_put(&serial->parent->ref, hso_serial_ref_free);
48555 }
48556 } else {
48557@@ -1326,10 +1325,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
48558
48559 /* reset the rts and dtr */
48560 /* do the actual close */
48561- serial->port.count--;
48562+ atomic_dec(&serial->port.count);
48563
48564- if (serial->port.count <= 0) {
48565- serial->port.count = 0;
48566+ if (atomic_read(&serial->port.count) <= 0) {
48567+ atomic_set(&serial->port.count, 0);
48568 tty_port_tty_set(&serial->port, NULL);
48569 if (!usb_gone)
48570 hso_stop_serial_device(serial->parent);
48571@@ -1404,7 +1403,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
48572
48573 /* the actual setup */
48574 spin_lock_irqsave(&serial->serial_lock, flags);
48575- if (serial->port.count)
48576+ if (atomic_read(&serial->port.count))
48577 _hso_serial_set_termios(tty, old);
48578 else
48579 tty->termios = *old;
48580@@ -1873,7 +1872,7 @@ static void intr_callback(struct urb *urb)
48581 D1("Pending read interrupt on port %d\n", i);
48582 spin_lock(&serial->serial_lock);
48583 if (serial->rx_state == RX_IDLE &&
48584- serial->port.count > 0) {
48585+ atomic_read(&serial->port.count) > 0) {
48586 /* Setup and send a ctrl req read on
48587 * port i */
48588 if (!serial->rx_urb_filled[0]) {
48589@@ -3046,7 +3045,7 @@ static int hso_resume(struct usb_interface *iface)
48590 /* Start all serial ports */
48591 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
48592 if (serial_table[i] && (serial_table[i]->interface == iface)) {
48593- if (dev2ser(serial_table[i])->port.count) {
48594+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
48595 result =
48596 hso_start_serial_device(serial_table[i], GFP_NOIO);
48597 hso_kick_transmit(dev2ser(serial_table[i]));
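The hso conversion above replaces a plain int open count with an atomic_t because "count++; if (count == 1)" is a non-atomic read-modify-write: two concurrent openers can both observe 1 and both run the first-open setup. atomic_inc_return() folds the increment and the test into one atomic step. Sketch with hypothetical names:

static atomic_t open_count = ATOMIC_INIT(0);

static int demo_open(void)
{
	if (atomic_inc_return(&open_count) == 1)
		return 1;	/* exactly one caller takes the first-open path */
	return 0;
}

static void demo_close(void)
{
	atomic_dec(&open_count);
}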
48598diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
48599index bf405f1..fd847ee 100644
48600--- a/drivers/net/usb/r8152.c
48601+++ b/drivers/net/usb/r8152.c
48602@@ -571,7 +571,7 @@ struct r8152 {
48603 void (*unload)(struct r8152 *);
48604 int (*eee_get)(struct r8152 *, struct ethtool_eee *);
48605 int (*eee_set)(struct r8152 *, struct ethtool_eee *);
48606- } rtl_ops;
48607+ } __no_const rtl_ops;
48608
48609 int intr_interval;
48610 u32 saved_wolopts;
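__no_const, applied to rtl_ops above, is the escape hatch from the constify plugin: rtl_ops is filled in at probe time depending on which chip revision is detected, so it must stay writable rather than being forced into read-only memory. Hypothetical shape:

struct demo_chip_ops {
	void (*init)(void);
	void (*unload)(void);
} __no_const;	/* opt this structure out of automatic constification */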
48611diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
48612index a2515887..6d13233 100644
48613--- a/drivers/net/usb/sierra_net.c
48614+++ b/drivers/net/usb/sierra_net.c
48615@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
48616 /* atomic counter partially included in MAC address to make sure 2 devices
48617 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
48618 */
48619-static atomic_t iface_counter = ATOMIC_INIT(0);
48620+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
48621
48622 /*
48623 * SYNC Timer Delay definition used to set the expiry time
48624@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
48625 dev->net->netdev_ops = &sierra_net_device_ops;
48626
48627 /* change MAC addr to include, ifacenum, and to be unique */
48628- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
48629+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
48630 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
48631
48632 /* we will have to manufacture ethernet headers, prepare template */
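atomic_unchecked_t, as used for iface_counter above, is the other half of the PaX REFCOUNT feature: ordinary atomic_t operations are instrumented to detect overflow, while the *_unchecked variants opt out for counters whose wraparound is harmless by design, like this MAC-address suffix. A sketch, assuming the PaX definitions used throughout the patch:

static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);

static u8 next_iface_suffix(void)
{
	/* wraps silently from 255 back to 0, which is fine for a MAC byte */
	return (u8)atomic_inc_return_unchecked(&iface_counter);
}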
48633diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
48634index 0ad6c0c..4013638 100644
48635--- a/drivers/net/virtio_net.c
48636+++ b/drivers/net/virtio_net.c
48637@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
48638 #define RECEIVE_AVG_WEIGHT 64
48639
48640 /* Minimum alignment for mergeable packet buffers. */
48641-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
48642+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
48643
48644 #define VIRTNET_DRIVER_VERSION "1.0.0"
48645
48646diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
48647index a8c755d..a988b71 100644
48648--- a/drivers/net/vxlan.c
48649+++ b/drivers/net/vxlan.c
48650@@ -2702,7 +2702,7 @@ nla_put_failure:
48651 return -EMSGSIZE;
48652 }
48653
48654-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
48655+static struct rtnl_link_ops vxlan_link_ops = {
48656 .kind = "vxlan",
48657 .maxtype = IFLA_VXLAN_MAX,
48658 .policy = vxlan_policy,
48659@@ -2749,7 +2749,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
48660 return NOTIFY_DONE;
48661 }
48662
48663-static struct notifier_block vxlan_notifier_block __read_mostly = {
48664+static struct notifier_block vxlan_notifier_block = {
48665 .notifier_call = vxlan_lowerdev_event,
48666 };
48667
48668diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
48669index 5920c99..ff2e4a5 100644
48670--- a/drivers/net/wan/lmc/lmc_media.c
48671+++ b/drivers/net/wan/lmc/lmc_media.c
48672@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
48673 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
48674
48675 lmc_media_t lmc_ds3_media = {
48676- lmc_ds3_init, /* special media init stuff */
48677- lmc_ds3_default, /* reset to default state */
48678- lmc_ds3_set_status, /* reset status to state provided */
48679- lmc_dummy_set_1, /* set clock source */
48680- lmc_dummy_set2_1, /* set line speed */
48681- lmc_ds3_set_100ft, /* set cable length */
48682- lmc_ds3_set_scram, /* set scrambler */
48683- lmc_ds3_get_link_status, /* get link status */
48684- lmc_dummy_set_1, /* set link status */
48685- lmc_ds3_set_crc_length, /* set CRC length */
48686- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48687- lmc_ds3_watchdog
48688+ .init = lmc_ds3_init, /* special media init stuff */
48689+ .defaults = lmc_ds3_default, /* reset to default state */
48690+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
48691+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
48692+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48693+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
48694+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
48695+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
48696+ .set_link_status = lmc_dummy_set_1, /* set link status */
48697+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
48698+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48699+ .watchdog = lmc_ds3_watchdog
48700 };
48701
48702 lmc_media_t lmc_hssi_media = {
48703- lmc_hssi_init, /* special media init stuff */
48704- lmc_hssi_default, /* reset to default state */
48705- lmc_hssi_set_status, /* reset status to state provided */
48706- lmc_hssi_set_clock, /* set clock source */
48707- lmc_dummy_set2_1, /* set line speed */
48708- lmc_dummy_set_1, /* set cable length */
48709- lmc_dummy_set_1, /* set scrambler */
48710- lmc_hssi_get_link_status, /* get link status */
48711- lmc_hssi_set_link_status, /* set link status */
48712- lmc_hssi_set_crc_length, /* set CRC length */
48713- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48714- lmc_hssi_watchdog
48715+ .init = lmc_hssi_init, /* special media init stuff */
48716+ .defaults = lmc_hssi_default, /* reset to default state */
48717+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
48718+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
48719+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48720+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48721+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48722+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
48723+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
48724+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
48725+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48726+ .watchdog = lmc_hssi_watchdog
48727 };
48728
48729-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
48730- lmc_ssi_default, /* reset to default state */
48731- lmc_ssi_set_status, /* reset status to state provided */
48732- lmc_ssi_set_clock, /* set clock source */
48733- lmc_ssi_set_speed, /* set line speed */
48734- lmc_dummy_set_1, /* set cable length */
48735- lmc_dummy_set_1, /* set scrambler */
48736- lmc_ssi_get_link_status, /* get link status */
48737- lmc_ssi_set_link_status, /* set link status */
48738- lmc_ssi_set_crc_length, /* set CRC length */
48739- lmc_dummy_set_1, /* set T1 or E1 circuit type */
48740- lmc_ssi_watchdog
48741+lmc_media_t lmc_ssi_media = {
48742+ .init = lmc_ssi_init, /* special media init stuff */
48743+ .defaults = lmc_ssi_default, /* reset to default state */
48744+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
48745+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
48746+ .set_speed = lmc_ssi_set_speed, /* set line speed */
48747+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48748+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48749+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
48750+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
48751+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
48752+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
48753+ .watchdog = lmc_ssi_watchdog
48754 };
48755
48756 lmc_media_t lmc_t1_media = {
48757- lmc_t1_init, /* special media init stuff */
48758- lmc_t1_default, /* reset to default state */
48759- lmc_t1_set_status, /* reset status to state provided */
48760- lmc_t1_set_clock, /* set clock source */
48761- lmc_dummy_set2_1, /* set line speed */
48762- lmc_dummy_set_1, /* set cable length */
48763- lmc_dummy_set_1, /* set scrambler */
48764- lmc_t1_get_link_status, /* get link status */
48765- lmc_dummy_set_1, /* set link status */
48766- lmc_t1_set_crc_length, /* set CRC length */
48767- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
48768- lmc_t1_watchdog
48769+ .init = lmc_t1_init, /* special media init stuff */
48770+ .defaults = lmc_t1_default, /* reset to default state */
48771+ .set_status = lmc_t1_set_status, /* reset status to state provided */
48772+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
48773+ .set_speed = lmc_dummy_set2_1, /* set line speed */
48774+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
48775+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
48776+ .get_link_status = lmc_t1_get_link_status, /* get link status */
48777+ .set_link_status = lmc_dummy_set_1, /* set link status */
48778+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
48779+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
48780+ .watchdog = lmc_t1_watchdog
48781 };
48782
48783 static void
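The lmc_media_t rewrite above converts positional initializers to C99 designated initializers: each function pointer is bound to a named member, so the tables can no longer silently mis-associate entries if fields are added to or reordered in lmc_media_t (the z8530_irqhandler tables below get the same treatment). A cut-down standalone example with hypothetical names:

typedef struct {
	void (*init)(void);
	void (*watchdog)(void);
} demo_media_t;

static void demo_init(void)     { }
static void demo_watchdog(void) { }

static demo_media_t demo_media = {
	.init     = demo_init,		/* order-independent, self-documenting */
	.watchdog = demo_watchdog,
};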
48784diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
48785index feacc3b..5bac0de 100644
48786--- a/drivers/net/wan/z85230.c
48787+++ b/drivers/net/wan/z85230.c
48788@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
48789
48790 struct z8530_irqhandler z8530_sync =
48791 {
48792- z8530_rx,
48793- z8530_tx,
48794- z8530_status
48795+ .rx = z8530_rx,
48796+ .tx = z8530_tx,
48797+ .status = z8530_status
48798 };
48799
48800 EXPORT_SYMBOL(z8530_sync);
48801@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
48802 }
48803
48804 static struct z8530_irqhandler z8530_dma_sync = {
48805- z8530_dma_rx,
48806- z8530_dma_tx,
48807- z8530_dma_status
48808+ .rx = z8530_dma_rx,
48809+ .tx = z8530_dma_tx,
48810+ .status = z8530_dma_status
48811 };
48812
48813 static struct z8530_irqhandler z8530_txdma_sync = {
48814- z8530_rx,
48815- z8530_dma_tx,
48816- z8530_dma_status
48817+ .rx = z8530_rx,
48818+ .tx = z8530_dma_tx,
48819+ .status = z8530_dma_status
48820 };
48821
48822 /**
48823@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
48824
48825 struct z8530_irqhandler z8530_nop=
48826 {
48827- z8530_rx_clear,
48828- z8530_tx_clear,
48829- z8530_status_clear
48830+ .rx = z8530_rx_clear,
48831+ .tx = z8530_tx_clear,
48832+ .status = z8530_status_clear
48833 };
48834
48835
48836diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
48837index 0b60295..b8bfa5b 100644
48838--- a/drivers/net/wimax/i2400m/rx.c
48839+++ b/drivers/net/wimax/i2400m/rx.c
48840@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
48841 if (i2400m->rx_roq == NULL)
48842 goto error_roq_alloc;
48843
48844- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
48845+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
48846 GFP_KERNEL);
48847 if (rd == NULL) {
48848 result = -ENOMEM;
48849diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
48850index e71a2ce..2268d61 100644
48851--- a/drivers/net/wireless/airo.c
48852+++ b/drivers/net/wireless/airo.c
48853@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
48854 struct airo_info *ai = dev->ml_priv;
48855 int ridcode;
48856 int enabled;
48857- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
48858+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
48859 unsigned char *iobuf;
48860
48861 /* Only super-user can write RIDs */
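Dropping `static` from the writer function pointer above is a correctness fix, not style: a static local is a single slot shared by every caller, so two tasks inside writerids() at once can overwrite each other's selected writer between assignment and call. An automatic local lives in each caller's stack frame. Minimal sketch of the difference:

int demo_writerids(int ridcode)
{
	int (*writer)(int) = NULL;	/* was: static int (*writer)(int); */

	(void)ridcode;			/* selection logic elided */
	return writer == NULL;		/* per-call state, no cross-task sharing */
}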
48862diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
48863index da92bfa..5a9001a 100644
48864--- a/drivers/net/wireless/at76c50x-usb.c
48865+++ b/drivers/net/wireless/at76c50x-usb.c
48866@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
48867 }
48868
48869 /* Convert timeout from the DFU status to jiffies */
48870-static inline unsigned long at76_get_timeout(struct dfu_status *s)
48871+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
48872 {
48873 return msecs_to_jiffies((s->poll_timeout[2] << 16)
48874 | (s->poll_timeout[1] << 8)
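__intentional_overflow(-1) above is an annotation for the size_overflow GCC plugin shipped with this patch: it marks a function whose arithmetic may legitimately wrap so the plugin does not instrument or report it; judging by its use here, the -1 argument appears to designate the return value. A hypothetical reconstruction of such an annotated helper:

static inline unsigned long __intentional_overflow(-1)
demo_get_timeout(const u8 poll_timeout[3])
{
	/* assembles a 24-bit little-endian value into jiffies */
	return msecs_to_jiffies((poll_timeout[2] << 16)
				| (poll_timeout[1] << 8)
				| poll_timeout[0]);
}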
48875diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
48876index f1946a6..cd367fb 100644
48877--- a/drivers/net/wireless/ath/ath10k/htc.c
48878+++ b/drivers/net/wireless/ath/ath10k/htc.c
48879@@ -851,7 +851,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
48880 /* registered target arrival callback from the HIF layer */
48881 int ath10k_htc_init(struct ath10k *ar)
48882 {
48883- struct ath10k_hif_cb htc_callbacks;
48884+ static struct ath10k_hif_cb htc_callbacks = {
48885+ .rx_completion = ath10k_htc_rx_completion_handler,
48886+ .tx_completion = ath10k_htc_tx_completion_handler,
48887+ };
48888 struct ath10k_htc_ep *ep = NULL;
48889 struct ath10k_htc *htc = &ar->htc;
48890
48891@@ -860,8 +863,6 @@ int ath10k_htc_init(struct ath10k *ar)
48892 ath10k_htc_reset_endpoint_states(htc);
48893
48894 /* setup HIF layer callbacks */
48895- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
48896- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
48897 htc->ar = ar;
48898
48899 /* Get HIF default pipe for HTC message exchange */
48900diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
48901index 527179c..a890150 100644
48902--- a/drivers/net/wireless/ath/ath10k/htc.h
48903+++ b/drivers/net/wireless/ath/ath10k/htc.h
48904@@ -270,13 +270,13 @@ enum ath10k_htc_ep_id {
48905
48906 struct ath10k_htc_ops {
48907 void (*target_send_suspend_complete)(struct ath10k *ar);
48908-};
48909+} __no_const;
48910
48911 struct ath10k_htc_ep_ops {
48912 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
48913 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
48914 void (*ep_tx_credits)(struct ath10k *);
48915-};
48916+} __no_const;
48917
48918 /* service connection information */
48919 struct ath10k_htc_svc_conn_req {
48920diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48921index f816909..e56cd8b 100644
48922--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48923+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
48924@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48925 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
48926 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
48927
48928- ACCESS_ONCE(ads->ds_link) = i->link;
48929- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
48930+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
48931+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
48932
48933 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
48934 ctl6 = SM(i->keytype, AR_EncrType);
48935@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48936
48937 if ((i->is_first || i->is_last) &&
48938 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
48939- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
48940+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
48941 | set11nTries(i->rates, 1)
48942 | set11nTries(i->rates, 2)
48943 | set11nTries(i->rates, 3)
48944 | (i->dur_update ? AR_DurUpdateEna : 0)
48945 | SM(0, AR_BurstDur);
48946
48947- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
48948+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
48949 | set11nRate(i->rates, 1)
48950 | set11nRate(i->rates, 2)
48951 | set11nRate(i->rates, 3);
48952 } else {
48953- ACCESS_ONCE(ads->ds_ctl2) = 0;
48954- ACCESS_ONCE(ads->ds_ctl3) = 0;
48955+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
48956+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
48957 }
48958
48959 if (!i->is_first) {
48960- ACCESS_ONCE(ads->ds_ctl0) = 0;
48961- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
48962- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
48963+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
48964+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
48965+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
48966 return;
48967 }
48968
48969@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48970 break;
48971 }
48972
48973- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
48974+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
48975 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
48976 | SM(i->txpower[0], AR_XmitPower0)
48977 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
48978@@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
48979 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
48980 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
48981
48982- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
48983- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
48984+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
48985+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
48986
48987 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
48988 return;
48989
48990- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
48991+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
48992 | set11nPktDurRTSCTS(i->rates, 1);
48993
48994- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
48995+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
48996 | set11nPktDurRTSCTS(i->rates, 3);
48997
48998- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
48999+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
49000 | set11nRateFlags(i->rates, 1)
49001 | set11nRateFlags(i->rates, 2)
49002 | set11nRateFlags(i->rates, 3)
49003 | SM(i->rtscts_rate, AR_RTSCTSRate);
49004
49005- ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
49006- ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
49007- ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
49008+ ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
49009+ ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
49010+ ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
49011 }
49012
49013 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
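ACCESS_ONCE_RW exists because PaX redefines ACCESS_ONCE to be read-only: its volatile cast gains a const qualifier so accidental writes fail to compile, and intentional stores to hardware descriptors, as in this hot path, must say so explicitly. A sketch of the assumed definitions:

#define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x))	/* reads only */
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))		/* reads and writes */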
49014diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
49015index da84b70..83e4978 100644
49016--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
49017+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
49018@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49019 (i->qcu << AR_TxQcuNum_S) | desc_len;
49020
49021 checksum += val;
49022- ACCESS_ONCE(ads->info) = val;
49023+ ACCESS_ONCE_RW(ads->info) = val;
49024
49025 checksum += i->link;
49026- ACCESS_ONCE(ads->link) = i->link;
49027+ ACCESS_ONCE_RW(ads->link) = i->link;
49028
49029 checksum += i->buf_addr[0];
49030- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
49031+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
49032 checksum += i->buf_addr[1];
49033- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
49034+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
49035 checksum += i->buf_addr[2];
49036- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
49037+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
49038 checksum += i->buf_addr[3];
49039- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
49040+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
49041
49042 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
49043- ACCESS_ONCE(ads->ctl3) = val;
49044+ ACCESS_ONCE_RW(ads->ctl3) = val;
49045 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
49046- ACCESS_ONCE(ads->ctl5) = val;
49047+ ACCESS_ONCE_RW(ads->ctl5) = val;
49048 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
49049- ACCESS_ONCE(ads->ctl7) = val;
49050+ ACCESS_ONCE_RW(ads->ctl7) = val;
49051 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
49052- ACCESS_ONCE(ads->ctl9) = val;
49053+ ACCESS_ONCE_RW(ads->ctl9) = val;
49054
49055 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
49056- ACCESS_ONCE(ads->ctl10) = checksum;
49057+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
49058
49059 if (i->is_first || i->is_last) {
49060- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
49061+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
49062 | set11nTries(i->rates, 1)
49063 | set11nTries(i->rates, 2)
49064 | set11nTries(i->rates, 3)
49065 | (i->dur_update ? AR_DurUpdateEna : 0)
49066 | SM(0, AR_BurstDur);
49067
49068- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
49069+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
49070 | set11nRate(i->rates, 1)
49071 | set11nRate(i->rates, 2)
49072 | set11nRate(i->rates, 3);
49073 } else {
49074- ACCESS_ONCE(ads->ctl13) = 0;
49075- ACCESS_ONCE(ads->ctl14) = 0;
49076+ ACCESS_ONCE_RW(ads->ctl13) = 0;
49077+ ACCESS_ONCE_RW(ads->ctl14) = 0;
49078 }
49079
49080 ads->ctl20 = 0;
49081@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49082
49083 ctl17 = SM(i->keytype, AR_EncrType);
49084 if (!i->is_first) {
49085- ACCESS_ONCE(ads->ctl11) = 0;
49086- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
49087- ACCESS_ONCE(ads->ctl15) = 0;
49088- ACCESS_ONCE(ads->ctl16) = 0;
49089- ACCESS_ONCE(ads->ctl17) = ctl17;
49090- ACCESS_ONCE(ads->ctl18) = 0;
49091- ACCESS_ONCE(ads->ctl19) = 0;
49092+ ACCESS_ONCE_RW(ads->ctl11) = 0;
49093+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
49094+ ACCESS_ONCE_RW(ads->ctl15) = 0;
49095+ ACCESS_ONCE_RW(ads->ctl16) = 0;
49096+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
49097+ ACCESS_ONCE_RW(ads->ctl18) = 0;
49098+ ACCESS_ONCE_RW(ads->ctl19) = 0;
49099 return;
49100 }
49101
49102- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
49103+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
49104 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
49105 | SM(i->txpower[0], AR_XmitPower0)
49106 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
49107@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49108 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
49109 ctl12 |= SM(val, AR_PAPRDChainMask);
49110
49111- ACCESS_ONCE(ads->ctl12) = ctl12;
49112- ACCESS_ONCE(ads->ctl17) = ctl17;
49113+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
49114+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
49115
49116- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
49117+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
49118 | set11nPktDurRTSCTS(i->rates, 1);
49119
49120- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
49121+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
49122 | set11nPktDurRTSCTS(i->rates, 3);
49123
49124- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
49125+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
49126 | set11nRateFlags(i->rates, 1)
49127 | set11nRateFlags(i->rates, 2)
49128 | set11nRateFlags(i->rates, 3)
49129 | SM(i->rtscts_rate, AR_RTSCTSRate);
49130
49131- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
49132+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
49133
49134- ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
49135- ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
49136- ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
49137+ ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
49138+ ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
49139+ ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
49140 }
49141
49142 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
49143diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
49144index 1cbd335..27dfb40 100644
49145--- a/drivers/net/wireless/ath/ath9k/hw.h
49146+++ b/drivers/net/wireless/ath/ath9k/hw.h
49147@@ -640,7 +640,7 @@ struct ath_hw_private_ops {
49148
49149 /* ANI */
49150 void (*ani_cache_ini_regs)(struct ath_hw *ah);
49151-};
49152+} __no_const;
49153
49154 /**
49155 * struct ath_spec_scan - parameters for Atheros spectral scan
49156@@ -716,7 +716,7 @@ struct ath_hw_ops {
49157 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
49158 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
49159 #endif
49160-};
49161+} __no_const;
49162
49163 struct ath_nf_limits {
49164 s16 max;
49165diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
49166index 62b0bf4..4ae094c 100644
49167--- a/drivers/net/wireless/ath/ath9k/main.c
49168+++ b/drivers/net/wireless/ath/ath9k/main.c
49169@@ -2546,16 +2546,18 @@ void ath9k_fill_chanctx_ops(void)
49170 if (!ath9k_is_chanctx_enabled())
49171 return;
49172
49173- ath9k_ops.hw_scan = ath9k_hw_scan;
49174- ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
49175- ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
49176- ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
49177- ath9k_ops.add_chanctx = ath9k_add_chanctx;
49178- ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
49179- ath9k_ops.change_chanctx = ath9k_change_chanctx;
49180- ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
49181- ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
49182- ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
49183+ pax_open_kernel();
49184+ *(void **)&ath9k_ops.hw_scan = ath9k_hw_scan;
49185+ *(void **)&ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
49186+ *(void **)&ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
49187+ *(void **)&ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
49188+ *(void **)&ath9k_ops.add_chanctx = ath9k_add_chanctx;
49189+ *(void **)&ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
49190+ *(void **)&ath9k_ops.change_chanctx = ath9k_change_chanctx;
49191+ *(void **)&ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
49192+ *(void **)&ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
49193+ *(void **)&ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx;
49194+ pax_close_kernel();
49195 }
49196
49197 #endif
49198diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
49199index 058a9f2..d5cb1ba 100644
49200--- a/drivers/net/wireless/b43/phy_lp.c
49201+++ b/drivers/net/wireless/b43/phy_lp.c
49202@@ -2502,7 +2502,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
49203 {
49204 struct ssb_bus *bus = dev->dev->sdev->bus;
49205
49206- static const struct b206x_channel *chandata = NULL;
49207+ const struct b206x_channel *chandata = NULL;
49208 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
49209 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
49210 u16 old_comm15, scale;
49211diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
49212index dc1d20c..f7a4f06 100644
49213--- a/drivers/net/wireless/iwlegacy/3945-mac.c
49214+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
49215@@ -3633,7 +3633,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
49216 */
49217 if (il3945_mod_params.disable_hw_scan) {
49218 D_INFO("Disabling hw_scan\n");
49219- il3945_mac_ops.hw_scan = NULL;
49220+ pax_open_kernel();
49221+ *(void **)&il3945_mac_ops.hw_scan = NULL;
49222+ pax_close_kernel();
49223 }
49224
49225 D_INFO("*** LOAD DRIVER ***\n");
49226diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49227index 0ffb6ff..c0b7f0e 100644
49228--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49229+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49230@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
49231 {
49232 struct iwl_priv *priv = file->private_data;
49233 char buf[64];
49234- int buf_size;
49235+ size_t buf_size;
49236 u32 offset, len;
49237
49238 memset(buf, 0, sizeof(buf));
49239@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
49240 struct iwl_priv *priv = file->private_data;
49241
49242 char buf[8];
49243- int buf_size;
49244+ size_t buf_size;
49245 u32 reset_flag;
49246
49247 memset(buf, 0, sizeof(buf));
49248@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
49249 {
49250 struct iwl_priv *priv = file->private_data;
49251 char buf[8];
49252- int buf_size;
49253+ size_t buf_size;
49254 int ht40;
49255
49256 memset(buf, 0, sizeof(buf));
49257@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
49258 {
49259 struct iwl_priv *priv = file->private_data;
49260 char buf[8];
49261- int buf_size;
49262+ size_t buf_size;
49263 int value;
49264
49265 memset(buf, 0, sizeof(buf));
49266@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
49267 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
49268 DEBUGFS_READ_FILE_OPS(current_sleep_command);
49269
49270-static const char *fmt_value = " %-30s %10u\n";
49271-static const char *fmt_hex = " %-30s 0x%02X\n";
49272-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
49273-static const char *fmt_header =
49274+static const char fmt_value[] = " %-30s %10u\n";
49275+static const char fmt_hex[] = " %-30s 0x%02X\n";
49276+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
49277+static const char fmt_header[] =
49278 "%-32s current cumulative delta max\n";
49279
49280 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
49281@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
49282 {
49283 struct iwl_priv *priv = file->private_data;
49284 char buf[8];
49285- int buf_size;
49286+ size_t buf_size;
49287 int clear;
49288
49289 memset(buf, 0, sizeof(buf));
49290@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
49291 {
49292 struct iwl_priv *priv = file->private_data;
49293 char buf[8];
49294- int buf_size;
49295+ size_t buf_size;
49296 int trace;
49297
49298 memset(buf, 0, sizeof(buf));
49299@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
49300 {
49301 struct iwl_priv *priv = file->private_data;
49302 char buf[8];
49303- int buf_size;
49304+ size_t buf_size;
49305 int missed;
49306
49307 memset(buf, 0, sizeof(buf));
49308@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
49309
49310 struct iwl_priv *priv = file->private_data;
49311 char buf[8];
49312- int buf_size;
49313+ size_t buf_size;
49314 int plcp;
49315
49316 memset(buf, 0, sizeof(buf));
49317@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
49318
49319 struct iwl_priv *priv = file->private_data;
49320 char buf[8];
49321- int buf_size;
49322+ size_t buf_size;
49323 int flush;
49324
49325 memset(buf, 0, sizeof(buf));
49326@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
49327
49328 struct iwl_priv *priv = file->private_data;
49329 char buf[8];
49330- int buf_size;
49331+ size_t buf_size;
49332 int rts;
49333
49334 if (!priv->cfg->ht_params)
49335@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
49336 {
49337 struct iwl_priv *priv = file->private_data;
49338 char buf[8];
49339- int buf_size;
49340+ size_t buf_size;
49341
49342 memset(buf, 0, sizeof(buf));
49343 buf_size = min(count, sizeof(buf) - 1);
49344@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
49345 struct iwl_priv *priv = file->private_data;
49346 u32 event_log_flag;
49347 char buf[8];
49348- int buf_size;
49349+ size_t buf_size;
49350
49351 /* check that the interface is up */
49352 if (!iwl_is_ready(priv))
49353@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
49354 struct iwl_priv *priv = file->private_data;
49355 char buf[8];
49356 u32 calib_disabled;
49357- int buf_size;
49358+ size_t buf_size;
49359
49360 memset(buf, 0, sizeof(buf));
49361 buf_size = min(count, sizeof(buf) - 1);
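The debugfs hunks above are all the same type-hygiene fix: count arrives as size_t and sizeof() yields size_t, so buf_size stays unsigned all the way into copy_from_user() instead of being funneled through a signed int; the fmt_* change in the same file additionally turns writable pointers to string constants into immutable const arrays. The resulting write-handler shape (kernel context assumed):

static ssize_t demo_write(const char __user *user_buf, size_t count)
{
	char buf[8];
	size_t buf_size;	/* was: int buf_size; */

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);	/* size_t throughout */
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	return buf_size;
}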
49362diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
49363index 523fe0c..0d9473b 100644
49364--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
49365+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
49366@@ -1781,7 +1781,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
49367 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
49368
49369 char buf[8];
49370- int buf_size;
49371+ size_t buf_size;
49372 u32 reset_flag;
49373
49374 memset(buf, 0, sizeof(buf));
49375@@ -1802,7 +1802,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
49376 {
49377 struct iwl_trans *trans = file->private_data;
49378 char buf[8];
49379- int buf_size;
49380+ size_t buf_size;
49381 int csr;
49382
49383 memset(buf, 0, sizeof(buf));
49384diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
49385index ef58a88..fafa731 100644
49386--- a/drivers/net/wireless/mac80211_hwsim.c
49387+++ b/drivers/net/wireless/mac80211_hwsim.c
49388@@ -3066,20 +3066,20 @@ static int __init init_mac80211_hwsim(void)
49389 if (channels < 1)
49390 return -EINVAL;
49391
49392- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
49393- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49394- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49395- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49396- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49397- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49398- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49399- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49400- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49401- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49402- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
49403- mac80211_hwsim_assign_vif_chanctx;
49404- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
49405- mac80211_hwsim_unassign_vif_chanctx;
49406+ pax_open_kernel();
49407+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
49408+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49409+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49410+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49411+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49412+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49413+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49414+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49415+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49416+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49417+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
49418+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
49419+ pax_close_kernel();
49420
49421 spin_lock_init(&hwsim_radio_lock);
49422 INIT_LIST_HEAD(&hwsim_radios);
49423diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
49424index 1a4facd..a2ecbbd 100644
49425--- a/drivers/net/wireless/rndis_wlan.c
49426+++ b/drivers/net/wireless/rndis_wlan.c
49427@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
49428
49429 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
49430
49431- if (rts_threshold < 0 || rts_threshold > 2347)
49432+ if (rts_threshold > 2347)
49433 rts_threshold = 2347;
49434
49435 tmp = cpu_to_le32(rts_threshold);
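The dropped comparison above was dead code: rts_threshold is a u32, so `rts_threshold < 0` is always false and only draws a tautological-compare warning; clamping the upper bound is sufficient. Standalone equivalent:

static unsigned int clamp_rts(unsigned int rts_threshold)
{
	if (rts_threshold > 2347)	/* 2347 is the 802.11 RTS maximum */
		rts_threshold = 2347;
	return rts_threshold;
}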
49436diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
49437index 9bb398b..b0cc047 100644
49438--- a/drivers/net/wireless/rt2x00/rt2x00.h
49439+++ b/drivers/net/wireless/rt2x00/rt2x00.h
49440@@ -375,7 +375,7 @@ struct rt2x00_intf {
49441 * for hardware which doesn't support hardware
49442 * sequence counting.
49443 */
49444- atomic_t seqno;
49445+ atomic_unchecked_t seqno;
49446 };
49447
49448 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
49449diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
49450index 66ff364..3ce34f7 100644
49451--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
49452+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
49453@@ -224,9 +224,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
49454 * sequence counter given by mac80211.
49455 */
49456 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
49457- seqno = atomic_add_return(0x10, &intf->seqno);
49458+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
49459 else
49460- seqno = atomic_read(&intf->seqno);
49461+ seqno = atomic_read_unchecked(&intf->seqno);
49462
49463 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
49464 hdr->seq_ctrl |= cpu_to_le16(seqno);
49465diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
49466index b661f896..ddf7d2b 100644
49467--- a/drivers/net/wireless/ti/wl1251/sdio.c
49468+++ b/drivers/net/wireless/ti/wl1251/sdio.c
49469@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
49470
49471 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
49472
49473- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49474- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49475+ pax_open_kernel();
49476+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49477+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49478+ pax_close_kernel();
49479
49480 wl1251_info("using dedicated interrupt line");
49481 } else {
49482- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49483- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49484+ pax_open_kernel();
49485+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49486+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49487+ pax_close_kernel();
49488
49489 wl1251_info("using SDIO interrupt");
49490 }
49491diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
49492index d6d0d6d..60c23a0 100644
49493--- a/drivers/net/wireless/ti/wl12xx/main.c
49494+++ b/drivers/net/wireless/ti/wl12xx/main.c
49495@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49496 sizeof(wl->conf.mem));
49497
49498 /* read data preparation is only needed by wl127x */
49499- wl->ops->prepare_read = wl127x_prepare_read;
49500+ pax_open_kernel();
49501+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49502+ pax_close_kernel();
49503
49504 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49505 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49506@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49507 sizeof(wl->conf.mem));
49508
49509 /* read data preparation is only needed by wl127x */
49510- wl->ops->prepare_read = wl127x_prepare_read;
49511+ pax_open_kernel();
49512+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49513+ pax_close_kernel();
49514
49515 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49516 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49517diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
49518index 8e56261..9140678 100644
49519--- a/drivers/net/wireless/ti/wl18xx/main.c
49520+++ b/drivers/net/wireless/ti/wl18xx/main.c
49521@@ -1916,8 +1916,10 @@ static int wl18xx_setup(struct wl1271 *wl)
49522 }
49523
49524 if (!checksum_param) {
49525- wl18xx_ops.set_rx_csum = NULL;
49526- wl18xx_ops.init_vif = NULL;
49527+ pax_open_kernel();
49528+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
49529+ *(void **)&wl18xx_ops.init_vif = NULL;
49530+ pax_close_kernel();
49531 }
49532
49533 /* Enable 11a Band only if we have 5G antennas */
49534diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
49535index a912dc0..a8225ba 100644
49536--- a/drivers/net/wireless/zd1211rw/zd_usb.c
49537+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
49538@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
49539 {
49540 struct zd_usb *usb = urb->context;
49541 struct zd_usb_interrupt *intr = &usb->intr;
49542- int len;
49543+ unsigned int len;
49544 u16 int_num;
49545
49546 ZD_ASSERT(in_interrupt());
49547diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
49548index ce2e2cf..f81e500 100644
49549--- a/drivers/nfc/nfcwilink.c
49550+++ b/drivers/nfc/nfcwilink.c
49551@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
49552
49553 static int nfcwilink_probe(struct platform_device *pdev)
49554 {
49555- static struct nfcwilink *drv;
49556+ struct nfcwilink *drv;
49557 int rc;
49558 __u32 protocols;
49559
49560diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
49561index f2596c8..50d53af 100644
49562--- a/drivers/nfc/st21nfca/st21nfca.c
49563+++ b/drivers/nfc/st21nfca/st21nfca.c
49564@@ -559,7 +559,7 @@ static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *gate,
49565 goto exit;
49566 }
49567
49568- gate = uid_skb->data;
49569+ memcpy(gate, uid_skb->data, uid_skb->len);
49570 *len = uid_skb->len;
49571 exit:
49572 kfree_skb(uid_skb);
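The st21nfca memcpy above fixes a real data-loss bug: assigning uid_skb->data to the `gate` parameter only updated a local copy of the pointer, so the caller's buffer was never filled, and uid_skb is freed on exit so even a returned pointer would dangle. Copying the bytes into the caller-supplied buffer before the kfree_skb() is the only safe option. Sketch with simplified types:

static void demo_copy_uid(u8 *out, size_t *out_len,
			  const u8 *skb_data, size_t skb_len)
{
	memcpy(out, skb_data, skb_len);	/* data now outlives the freed skb */
	*out_len = skb_len;
}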
49573diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
49574index 5100742..6ad4e6d 100644
49575--- a/drivers/of/fdt.c
49576+++ b/drivers/of/fdt.c
49577@@ -1118,7 +1118,9 @@ static int __init of_fdt_raw_init(void)
49578 pr_warn("fdt: not creating '/sys/firmware/fdt': CRC check failed\n");
49579 return 0;
49580 }
49581- of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
49582+ pax_open_kernel();
49583+ *(size_t *)&of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
49584+ pax_close_kernel();
49585 return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
49586 }
49587 late_initcall(of_fdt_raw_init);
49588diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
49589index d93b2b6..ae50401 100644
49590--- a/drivers/oprofile/buffer_sync.c
49591+++ b/drivers/oprofile/buffer_sync.c
49592@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
49593 if (cookie == NO_COOKIE)
49594 offset = pc;
49595 if (cookie == INVALID_COOKIE) {
49596- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49597+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49598 offset = pc;
49599 }
49600 if (cookie != last_cookie) {
49601@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
49602 /* add userspace sample */
49603
49604 if (!mm) {
49605- atomic_inc(&oprofile_stats.sample_lost_no_mm);
49606+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
49607 return 0;
49608 }
49609
49610 cookie = lookup_dcookie(mm, s->eip, &offset);
49611
49612 if (cookie == INVALID_COOKIE) {
49613- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49614+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49615 return 0;
49616 }
49617
49618@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
49619 /* ignore backtraces if failed to add a sample */
49620 if (state == sb_bt_start) {
49621 state = sb_bt_ignore;
49622- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
49623+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
49624 }
49625 }
49626 release_mm(mm);
49627diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
49628index c0cc4e7..44d4e54 100644
49629--- a/drivers/oprofile/event_buffer.c
49630+++ b/drivers/oprofile/event_buffer.c
49631@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
49632 }
49633
49634 if (buffer_pos == buffer_size) {
49635- atomic_inc(&oprofile_stats.event_lost_overflow);
49636+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
49637 return;
49638 }
49639
49640diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
49641index ed2c3ec..deda85a 100644
49642--- a/drivers/oprofile/oprof.c
49643+++ b/drivers/oprofile/oprof.c
49644@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
49645 if (oprofile_ops.switch_events())
49646 return;
49647
49648- atomic_inc(&oprofile_stats.multiplex_counter);
49649+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
49650 start_switch_worker();
49651 }
49652
49653diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
49654index ee2cfce..7f8f699 100644
49655--- a/drivers/oprofile/oprofile_files.c
49656+++ b/drivers/oprofile/oprofile_files.c
49657@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
49658
49659 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
49660
49661-static ssize_t timeout_read(struct file *file, char __user *buf,
49662+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
49663 size_t count, loff_t *offset)
49664 {
49665 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
49666diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
49667index 59659ce..6c860a0 100644
49668--- a/drivers/oprofile/oprofile_stats.c
49669+++ b/drivers/oprofile/oprofile_stats.c
49670@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
49671 cpu_buf->sample_invalid_eip = 0;
49672 }
49673
49674- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
49675- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
49676- atomic_set(&oprofile_stats.event_lost_overflow, 0);
49677- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
49678- atomic_set(&oprofile_stats.multiplex_counter, 0);
49679+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
49680+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
49681+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
49682+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
49683+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
49684 }
49685
49686
49687diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
49688index 1fc622b..8c48fc3 100644
49689--- a/drivers/oprofile/oprofile_stats.h
49690+++ b/drivers/oprofile/oprofile_stats.h
49691@@ -13,11 +13,11 @@
49692 #include <linux/atomic.h>
49693
49694 struct oprofile_stat_struct {
49695- atomic_t sample_lost_no_mm;
49696- atomic_t sample_lost_no_mapping;
49697- atomic_t bt_lost_no_mapping;
49698- atomic_t event_lost_overflow;
49699- atomic_t multiplex_counter;
49700+ atomic_unchecked_t sample_lost_no_mm;
49701+ atomic_unchecked_t sample_lost_no_mapping;
49702+ atomic_unchecked_t bt_lost_no_mapping;
49703+ atomic_unchecked_t event_lost_overflow;
49704+ atomic_unchecked_t multiplex_counter;
49705 };
49706
49707 extern struct oprofile_stat_struct oprofile_stats;
49708diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
49709index 3f49345..c750d0b 100644
49710--- a/drivers/oprofile/oprofilefs.c
49711+++ b/drivers/oprofile/oprofilefs.c
49712@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
49713
49714 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
49715 {
49716- atomic_t *val = file->private_data;
49717- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
49718+ atomic_unchecked_t *val = file->private_data;
49719+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
49720 }
49721
49722
49723@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
49724
49725
49726 int oprofilefs_create_ro_atomic(struct dentry *root,
49727- char const *name, atomic_t *val)
49728+ char const *name, atomic_unchecked_t *val)
49729 {
49730 return __oprofilefs_create_file(root, name,
49731 &atomic_ro_fops, 0444, val);
49732diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
49733index bdef916..88c7dee 100644
49734--- a/drivers/oprofile/timer_int.c
49735+++ b/drivers/oprofile/timer_int.c
49736@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
49737 return NOTIFY_OK;
49738 }
49739
49740-static struct notifier_block __refdata oprofile_cpu_notifier = {
49741+static struct notifier_block oprofile_cpu_notifier = {
49742 .notifier_call = oprofile_cpu_notify,
49743 };
49744
49745diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
49746index 3b47080..6cd05dd 100644
49747--- a/drivers/parport/procfs.c
49748+++ b/drivers/parport/procfs.c
49749@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
49750
49751 *ppos += len;
49752
49753- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
49754+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
49755 }
49756
49757 #ifdef CONFIG_PARPORT_1284
49758@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
49759
49760 *ppos += len;
49761
49762- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
49763+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
49764 }
49765 #endif /* IEEE1284.3 support. */
49766
49767diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
49768index 6ca2399..68d866b 100644
49769--- a/drivers/pci/hotplug/acpiphp_ibm.c
49770+++ b/drivers/pci/hotplug/acpiphp_ibm.c
49771@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
49772 goto init_cleanup;
49773 }
49774
49775- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
49776+ pax_open_kernel();
49777+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
49778+ pax_close_kernel();
49779 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
49780
49781 return retval;
49782diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
49783index 66b7bbe..26bee78 100644
49784--- a/drivers/pci/hotplug/cpcihp_generic.c
49785+++ b/drivers/pci/hotplug/cpcihp_generic.c
49786@@ -73,7 +73,6 @@ static u16 port;
49787 static unsigned int enum_bit;
49788 static u8 enum_mask;
49789
49790-static struct cpci_hp_controller_ops generic_hpc_ops;
49791 static struct cpci_hp_controller generic_hpc;
49792
49793 static int __init validate_parameters(void)
49794@@ -139,6 +138,10 @@ static int query_enum(void)
49795 return ((value & enum_mask) == enum_mask);
49796 }
49797
49798+static struct cpci_hp_controller_ops generic_hpc_ops = {
49799+ .query_enum = query_enum,
49800+};
49801+
49802 static int __init cpcihp_generic_init(void)
49803 {
49804 int status;
49805@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
49806 pci_dev_put(dev);
49807
49808 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
49809- generic_hpc_ops.query_enum = query_enum;
49810 generic_hpc.ops = &generic_hpc_ops;
49811
49812 status = cpci_hp_register_controller(&generic_hpc);
49813diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
49814index 7ecf34e..effed62 100644
49815--- a/drivers/pci/hotplug/cpcihp_zt5550.c
49816+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
49817@@ -59,7 +59,6 @@
49818 /* local variables */
49819 static bool debug;
49820 static bool poll;
49821-static struct cpci_hp_controller_ops zt5550_hpc_ops;
49822 static struct cpci_hp_controller zt5550_hpc;
49823
49824 /* Primary cPCI bus bridge device */
49825@@ -204,6 +203,10 @@ static int zt5550_hc_disable_irq(void)
49826 return 0;
49827 }
49828
49829+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
49830+ .query_enum = zt5550_hc_query_enum,
49831+};
49832+
49833 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
49834 {
49835 int status;
49836@@ -215,16 +218,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
49837 dbg("returned from zt5550_hc_config");
49838
49839 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
49840- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
49841 zt5550_hpc.ops = &zt5550_hpc_ops;
49842 if (!poll) {
49843 zt5550_hpc.irq = hc_dev->irq;
49844 zt5550_hpc.irq_flags = IRQF_SHARED;
49845 zt5550_hpc.dev_id = hc_dev;
49846
49847- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
49848- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
49849- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
49850+ pax_open_kernel();
49851+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
49852+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
49853+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
49854+ pax_close_kernel();
49855 } else {
49856 info("using ENUM# polling mode");
49857 }
49858diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
49859index 1e08ff8c..3cd145f 100644
49860--- a/drivers/pci/hotplug/cpqphp_nvram.c
49861+++ b/drivers/pci/hotplug/cpqphp_nvram.c
49862@@ -425,8 +425,10 @@ static u32 store_HRT (void __iomem *rom_start)
49863
49864 void compaq_nvram_init (void __iomem *rom_start)
49865 {
49866+#ifndef CONFIG_PAX_KERNEXEC
49867 if (rom_start)
49868 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
49869+#endif
49870
49871 dbg("int15 entry = %p\n", compaq_int15_entry_point);
49872
49873diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
49874index 56d8486..f26113f 100644
49875--- a/drivers/pci/hotplug/pci_hotplug_core.c
49876+++ b/drivers/pci/hotplug/pci_hotplug_core.c
49877@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
49878 return -EINVAL;
49879 }
49880
49881- slot->ops->owner = owner;
49882- slot->ops->mod_name = mod_name;
49883+ pax_open_kernel();
49884+ *(struct module **)&slot->ops->owner = owner;
49885+ *(const char **)&slot->ops->mod_name = mod_name;
49886+ pax_close_kernel();
49887
49888 mutex_lock(&pci_hp_mutex);
49889 /*
49890diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
49891index 07aa722..84514b4 100644
49892--- a/drivers/pci/hotplug/pciehp_core.c
49893+++ b/drivers/pci/hotplug/pciehp_core.c
49894@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
49895 struct slot *slot = ctrl->slot;
49896 struct hotplug_slot *hotplug = NULL;
49897 struct hotplug_slot_info *info = NULL;
49898- struct hotplug_slot_ops *ops = NULL;
49899+ hotplug_slot_ops_no_const *ops = NULL;
49900 char name[SLOT_NAME_SIZE];
49901 int retval = -ENOMEM;
49902
49903diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
49904index fd60806..ab6c565 100644
49905--- a/drivers/pci/msi.c
49906+++ b/drivers/pci/msi.c
49907@@ -513,8 +513,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
49908 {
49909 struct attribute **msi_attrs;
49910 struct attribute *msi_attr;
49911- struct device_attribute *msi_dev_attr;
49912- struct attribute_group *msi_irq_group;
49913+ device_attribute_no_const *msi_dev_attr;
49914+ attribute_group_no_const *msi_irq_group;
49915 const struct attribute_group **msi_irq_groups;
49916 struct msi_desc *entry;
49917 int ret = -ENOMEM;
49918@@ -573,7 +573,7 @@ error_attrs:
49919 count = 0;
49920 msi_attr = msi_attrs[count];
49921 while (msi_attr) {
49922- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
49923+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
49924 kfree(msi_attr->name);
49925 kfree(msi_dev_attr);
49926 ++count;
49927diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
49928index 312f23a..d21181c 100644
49929--- a/drivers/pci/pci-sysfs.c
49930+++ b/drivers/pci/pci-sysfs.c
49931@@ -1140,7 +1140,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
49932 {
49933 /* allocate attribute structure, piggyback attribute name */
49934 int name_len = write_combine ? 13 : 10;
49935- struct bin_attribute *res_attr;
49936+ bin_attribute_no_const *res_attr;
49937 int retval;
49938
49939 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
49940@@ -1317,7 +1317,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
49941 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
49942 {
49943 int retval;
49944- struct bin_attribute *attr;
49945+ bin_attribute_no_const *attr;
49946
49947 /* If the device has VPD, try to expose it in sysfs. */
49948 if (dev->vpd) {
49949@@ -1364,7 +1364,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
49950 {
49951 int retval;
49952 int rom_size = 0;
49953- struct bin_attribute *attr;
49954+ bin_attribute_no_const *attr;
49955
49956 if (!sysfs_initialized)
49957 return -EACCES;
49958diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
49959index d54632a..198c84d 100644
49960--- a/drivers/pci/pci.h
49961+++ b/drivers/pci/pci.h
49962@@ -93,7 +93,7 @@ struct pci_vpd_ops {
49963 struct pci_vpd {
49964 unsigned int len;
49965 const struct pci_vpd_ops *ops;
49966- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
49967+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
49968 };
49969
49970 int pci_vpd_pci22_init(struct pci_dev *dev);
49971diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
49972index e1e7026..d28dd33 100644
49973--- a/drivers/pci/pcie/aspm.c
49974+++ b/drivers/pci/pcie/aspm.c
49975@@ -27,9 +27,9 @@
49976 #define MODULE_PARAM_PREFIX "pcie_aspm."
49977
49978 /* Note: those are not register definitions */
49979-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
49980-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
49981-#define ASPM_STATE_L1 (4) /* L1 state */
49982+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
49983+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
49984+#define ASPM_STATE_L1 (4U) /* L1 state */
49985 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
49986 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
49987
49988diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
49989index 23212f8..65e945b 100644
49990--- a/drivers/pci/probe.c
49991+++ b/drivers/pci/probe.c
49992@@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
49993 u16 orig_cmd;
49994 struct pci_bus_region region, inverted_region;
49995
49996- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
49997+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
49998
49999 /* No printks while decoding is disabled! */
50000 if (!dev->mmio_always_on) {
50001diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
50002index 3f155e7..0f4b1f0 100644
50003--- a/drivers/pci/proc.c
50004+++ b/drivers/pci/proc.c
50005@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
50006 static int __init pci_proc_init(void)
50007 {
50008 struct pci_dev *dev = NULL;
50009+
50010+#ifdef CONFIG_GRKERNSEC_PROC_ADD
50011+#ifdef CONFIG_GRKERNSEC_PROC_USER
50012+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
50013+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50014+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
50015+#endif
50016+#else
50017 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
50018+#endif
50019 proc_create("devices", 0, proc_bus_pci_dir,
50020 &proc_bus_pci_dev_operations);
50021 proc_initialized = 1;
50022diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
50023index b84fdd6..b89d829 100644
50024--- a/drivers/platform/chrome/chromeos_laptop.c
50025+++ b/drivers/platform/chrome/chromeos_laptop.c
50026@@ -479,7 +479,7 @@ static struct chromeos_laptop cr48 = {
50027 .callback = chromeos_laptop_dmi_matched, \
50028 .driver_data = (void *)&board_
50029
50030-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
50031+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
50032 {
50033 .ident = "Samsung Series 5 550",
50034 .matches = {
50035diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
50036index 1e1e594..8fe59c5 100644
50037--- a/drivers/platform/x86/alienware-wmi.c
50038+++ b/drivers/platform/x86/alienware-wmi.c
50039@@ -150,7 +150,7 @@ struct wmax_led_args {
50040 } __packed;
50041
50042 static struct platform_device *platform_device;
50043-static struct device_attribute *zone_dev_attrs;
50044+static device_attribute_no_const *zone_dev_attrs;
50045 static struct attribute **zone_attrs;
50046 static struct platform_zone *zone_data;
50047
50048@@ -160,7 +160,7 @@ static struct platform_driver platform_driver = {
50049 }
50050 };
50051
50052-static struct attribute_group zone_attribute_group = {
50053+static attribute_group_no_const zone_attribute_group = {
50054 .name = "rgb_zones",
50055 };
50056
50057diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
50058index 7543a56..367ca8ed 100644
50059--- a/drivers/platform/x86/asus-wmi.c
50060+++ b/drivers/platform/x86/asus-wmi.c
50061@@ -1589,6 +1589,10 @@ static int show_dsts(struct seq_file *m, void *data)
50062 int err;
50063 u32 retval = -1;
50064
50065+#ifdef CONFIG_GRKERNSEC_KMEM
50066+ return -EPERM;
50067+#endif
50068+
50069 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
50070
50071 if (err < 0)
50072@@ -1605,6 +1609,10 @@ static int show_devs(struct seq_file *m, void *data)
50073 int err;
50074 u32 retval = -1;
50075
50076+#ifdef CONFIG_GRKERNSEC_KMEM
50077+ return -EPERM;
50078+#endif
50079+
50080 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
50081 &retval);
50082
50083@@ -1629,6 +1637,10 @@ static int show_call(struct seq_file *m, void *data)
50084 union acpi_object *obj;
50085 acpi_status status;
50086
50087+#ifdef CONFIG_GRKERNSEC_KMEM
50088+ return -EPERM;
50089+#endif
50090+
50091 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
50092 1, asus->debug.method_id,
50093 &input, &output);
50094diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
50095index 0859877..1cf7d08 100644
50096--- a/drivers/platform/x86/msi-laptop.c
50097+++ b/drivers/platform/x86/msi-laptop.c
50098@@ -999,12 +999,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
50099
50100 if (!quirks->ec_read_only) {
50101 /* allow userland write sysfs file */
50102- dev_attr_bluetooth.store = store_bluetooth;
50103- dev_attr_wlan.store = store_wlan;
50104- dev_attr_threeg.store = store_threeg;
50105- dev_attr_bluetooth.attr.mode |= S_IWUSR;
50106- dev_attr_wlan.attr.mode |= S_IWUSR;
50107- dev_attr_threeg.attr.mode |= S_IWUSR;
50108+ pax_open_kernel();
50109+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
50110+ *(void **)&dev_attr_wlan.store = store_wlan;
50111+ *(void **)&dev_attr_threeg.store = store_threeg;
50112+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
50113+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
50114+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
50115+ pax_close_kernel();
50116 }
50117
50118 /* disable hardware control by fn key */
50119diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
50120index 6d2bac0..ec2b029 100644
50121--- a/drivers/platform/x86/msi-wmi.c
50122+++ b/drivers/platform/x86/msi-wmi.c
50123@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
50124 static void msi_wmi_notify(u32 value, void *context)
50125 {
50126 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
50127- static struct key_entry *key;
50128+ struct key_entry *key;
50129 union acpi_object *obj;
50130 acpi_status status;
50131
50132diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
50133index 6dd1c0e..5d602c7 100644
50134--- a/drivers/platform/x86/sony-laptop.c
50135+++ b/drivers/platform/x86/sony-laptop.c
50136@@ -2526,7 +2526,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
50137 }
50138
50139 /* High speed charging function */
50140-static struct device_attribute *hsc_handle;
50141+static device_attribute_no_const *hsc_handle;
50142
50143 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
50144 struct device_attribute *attr,
50145@@ -2600,7 +2600,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
50146 }
50147
50148 /* low battery function */
50149-static struct device_attribute *lowbatt_handle;
50150+static device_attribute_no_const *lowbatt_handle;
50151
50152 static ssize_t sony_nc_lowbatt_store(struct device *dev,
50153 struct device_attribute *attr,
50154@@ -2666,7 +2666,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
50155 }
50156
50157 /* fan speed function */
50158-static struct device_attribute *fan_handle, *hsf_handle;
50159+static device_attribute_no_const *fan_handle, *hsf_handle;
50160
50161 static ssize_t sony_nc_hsfan_store(struct device *dev,
50162 struct device_attribute *attr,
50163@@ -2773,7 +2773,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
50164 }
50165
50166 /* USB charge function */
50167-static struct device_attribute *uc_handle;
50168+static device_attribute_no_const *uc_handle;
50169
50170 static ssize_t sony_nc_usb_charge_store(struct device *dev,
50171 struct device_attribute *attr,
50172@@ -2847,7 +2847,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
50173 }
50174
50175 /* Panel ID function */
50176-static struct device_attribute *panel_handle;
50177+static device_attribute_no_const *panel_handle;
50178
50179 static ssize_t sony_nc_panelid_show(struct device *dev,
50180 struct device_attribute *attr, char *buffer)
50181@@ -2894,7 +2894,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
50182 }
50183
50184 /* smart connect function */
50185-static struct device_attribute *sc_handle;
50186+static device_attribute_no_const *sc_handle;
50187
50188 static ssize_t sony_nc_smart_conn_store(struct device *dev,
50189 struct device_attribute *attr,
50190diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
50191index c3d11fa..f83cded 100644
50192--- a/drivers/platform/x86/thinkpad_acpi.c
50193+++ b/drivers/platform/x86/thinkpad_acpi.c
50194@@ -2092,7 +2092,7 @@ static int hotkey_mask_get(void)
50195 return 0;
50196 }
50197
50198-void static hotkey_mask_warn_incomplete_mask(void)
50199+static void hotkey_mask_warn_incomplete_mask(void)
50200 {
50201 /* log only what the user can fix... */
50202 const u32 wantedmask = hotkey_driver_mask &
50203@@ -2436,10 +2436,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
50204 && !tp_features.bright_unkfw)
50205 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
50206 }
50207+}
50208
50209 #undef TPACPI_COMPARE_KEY
50210 #undef TPACPI_MAY_SEND_KEY
50211-}
50212
50213 /*
50214 * Polling driver
50215diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
50216index 438d4c7..ca8a2fb 100644
50217--- a/drivers/pnp/pnpbios/bioscalls.c
50218+++ b/drivers/pnp/pnpbios/bioscalls.c
50219@@ -59,7 +59,7 @@ do { \
50220 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
50221 } while(0)
50222
50223-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
50224+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
50225 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
50226
50227 /*
50228@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
50229
50230 cpu = get_cpu();
50231 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
50232+
50233+ pax_open_kernel();
50234 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
50235+ pax_close_kernel();
50236
50237 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
50238 spin_lock_irqsave(&pnp_bios_lock, flags);
50239@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
50240 :"memory");
50241 spin_unlock_irqrestore(&pnp_bios_lock, flags);
50242
50243+ pax_open_kernel();
50244 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
50245+ pax_close_kernel();
50246+
50247 put_cpu();
50248
50249 /* If we get here and this is set then the PnP BIOS faulted on us. */
50250@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
50251 return status;
50252 }
50253
50254-void pnpbios_calls_init(union pnp_bios_install_struct *header)
50255+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
50256 {
50257 int i;
50258
50259@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50260 pnp_bios_callpoint.offset = header->fields.pm16offset;
50261 pnp_bios_callpoint.segment = PNP_CS16;
50262
50263+ pax_open_kernel();
50264+
50265 for_each_possible_cpu(i) {
50266 struct desc_struct *gdt = get_cpu_gdt_table(i);
50267 if (!gdt)
50268@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50269 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
50270 (unsigned long)__va(header->fields.pm16dseg));
50271 }
50272+
50273+ pax_close_kernel();
50274 }
50275diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
50276index 0c52e2a..3421ab7 100644
50277--- a/drivers/power/pda_power.c
50278+++ b/drivers/power/pda_power.c
50279@@ -37,7 +37,11 @@ static int polling;
50280
50281 #if IS_ENABLED(CONFIG_USB_PHY)
50282 static struct usb_phy *transceiver;
50283-static struct notifier_block otg_nb;
50284+static int otg_handle_notification(struct notifier_block *nb,
50285+ unsigned long event, void *unused);
50286+static struct notifier_block otg_nb = {
50287+ .notifier_call = otg_handle_notification
50288+};
50289 #endif
50290
50291 static struct regulator *ac_draw;
50292@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
50293
50294 #if IS_ENABLED(CONFIG_USB_PHY)
50295 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
50296- otg_nb.notifier_call = otg_handle_notification;
50297 ret = usb_register_notifier(transceiver, &otg_nb);
50298 if (ret) {
50299 dev_err(dev, "failure to register otg notifier\n");
50300diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
50301index cc439fd..8fa30df 100644
50302--- a/drivers/power/power_supply.h
50303+++ b/drivers/power/power_supply.h
50304@@ -16,12 +16,12 @@ struct power_supply;
50305
50306 #ifdef CONFIG_SYSFS
50307
50308-extern void power_supply_init_attrs(struct device_type *dev_type);
50309+extern void power_supply_init_attrs(void);
50310 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
50311
50312 #else
50313
50314-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
50315+static inline void power_supply_init_attrs(void) {}
50316 #define power_supply_uevent NULL
50317
50318 #endif /* CONFIG_SYSFS */
50319diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
50320index 694e8cd..9f03483 100644
50321--- a/drivers/power/power_supply_core.c
50322+++ b/drivers/power/power_supply_core.c
50323@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
50324 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
50325 EXPORT_SYMBOL_GPL(power_supply_notifier);
50326
50327-static struct device_type power_supply_dev_type;
50328+extern const struct attribute_group *power_supply_attr_groups[];
50329+static struct device_type power_supply_dev_type = {
50330+ .groups = power_supply_attr_groups,
50331+};
50332
50333 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
50334 struct power_supply *supply)
50335@@ -637,7 +640,7 @@ static int __init power_supply_class_init(void)
50336 return PTR_ERR(power_supply_class);
50337
50338 power_supply_class->dev_uevent = power_supply_uevent;
50339- power_supply_init_attrs(&power_supply_dev_type);
50340+ power_supply_init_attrs();
50341
50342 return 0;
50343 }
50344diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
50345index 62653f5..d0bb485 100644
50346--- a/drivers/power/power_supply_sysfs.c
50347+++ b/drivers/power/power_supply_sysfs.c
50348@@ -238,17 +238,15 @@ static struct attribute_group power_supply_attr_group = {
50349 .is_visible = power_supply_attr_is_visible,
50350 };
50351
50352-static const struct attribute_group *power_supply_attr_groups[] = {
50353+const struct attribute_group *power_supply_attr_groups[] = {
50354 &power_supply_attr_group,
50355 NULL,
50356 };
50357
50358-void power_supply_init_attrs(struct device_type *dev_type)
50359+void power_supply_init_attrs(void)
50360 {
50361 int i;
50362
50363- dev_type->groups = power_supply_attr_groups;
50364-
50365 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
50366 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
50367 }
50368diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
50369index 84419af..268ede8 100644
50370--- a/drivers/powercap/powercap_sys.c
50371+++ b/drivers/powercap/powercap_sys.c
50372@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
50373 struct device_attribute name_attr;
50374 };
50375
50376+static ssize_t show_constraint_name(struct device *dev,
50377+ struct device_attribute *dev_attr,
50378+ char *buf);
50379+
50380 static struct powercap_constraint_attr
50381- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
50382+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
50383+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
50384+ .power_limit_attr = {
50385+ .attr = {
50386+ .name = NULL,
50387+ .mode = S_IWUSR | S_IRUGO
50388+ },
50389+ .show = show_constraint_power_limit_uw,
50390+ .store = store_constraint_power_limit_uw
50391+ },
50392+
50393+ .time_window_attr = {
50394+ .attr = {
50395+ .name = NULL,
50396+ .mode = S_IWUSR | S_IRUGO
50397+ },
50398+ .show = show_constraint_time_window_us,
50399+ .store = store_constraint_time_window_us
50400+ },
50401+
50402+ .max_power_attr = {
50403+ .attr = {
50404+ .name = NULL,
50405+ .mode = S_IRUGO
50406+ },
50407+ .show = show_constraint_max_power_uw,
50408+ .store = NULL
50409+ },
50410+
50411+ .min_power_attr = {
50412+ .attr = {
50413+ .name = NULL,
50414+ .mode = S_IRUGO
50415+ },
50416+ .show = show_constraint_min_power_uw,
50417+ .store = NULL
50418+ },
50419+
50420+ .max_time_window_attr = {
50421+ .attr = {
50422+ .name = NULL,
50423+ .mode = S_IRUGO
50424+ },
50425+ .show = show_constraint_max_time_window_us,
50426+ .store = NULL
50427+ },
50428+
50429+ .min_time_window_attr = {
50430+ .attr = {
50431+ .name = NULL,
50432+ .mode = S_IRUGO
50433+ },
50434+ .show = show_constraint_min_time_window_us,
50435+ .store = NULL
50436+ },
50437+
50438+ .name_attr = {
50439+ .attr = {
50440+ .name = NULL,
50441+ .mode = S_IRUGO
50442+ },
50443+ .show = show_constraint_name,
50444+ .store = NULL
50445+ }
50446+ }
50447+};
50448
50449 /* A list of powercap control_types */
50450 static LIST_HEAD(powercap_cntrl_list);
50451@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
50452 }
50453
50454 static int create_constraint_attribute(int id, const char *name,
50455- int mode,
50456- struct device_attribute *dev_attr,
50457- ssize_t (*show)(struct device *,
50458- struct device_attribute *, char *),
50459- ssize_t (*store)(struct device *,
50460- struct device_attribute *,
50461- const char *, size_t)
50462- )
50463+ struct device_attribute *dev_attr)
50464 {
50465+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
50466
50467- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
50468- id, name);
50469- if (!dev_attr->attr.name)
50470+ if (!name)
50471 return -ENOMEM;
50472- dev_attr->attr.mode = mode;
50473- dev_attr->show = show;
50474- dev_attr->store = store;
50475+
50476+ pax_open_kernel();
50477+ *(const char **)&dev_attr->attr.name = name;
50478+ pax_close_kernel();
50479
50480 return 0;
50481 }
50482@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
50483
50484 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
50485 ret = create_constraint_attribute(i, "power_limit_uw",
50486- S_IWUSR | S_IRUGO,
50487- &constraint_attrs[i].power_limit_attr,
50488- show_constraint_power_limit_uw,
50489- store_constraint_power_limit_uw);
50490+ &constraint_attrs[i].power_limit_attr);
50491 if (ret)
50492 goto err_alloc;
50493 ret = create_constraint_attribute(i, "time_window_us",
50494- S_IWUSR | S_IRUGO,
50495- &constraint_attrs[i].time_window_attr,
50496- show_constraint_time_window_us,
50497- store_constraint_time_window_us);
50498+ &constraint_attrs[i].time_window_attr);
50499 if (ret)
50500 goto err_alloc;
50501- ret = create_constraint_attribute(i, "name", S_IRUGO,
50502- &constraint_attrs[i].name_attr,
50503- show_constraint_name,
50504- NULL);
50505+ ret = create_constraint_attribute(i, "name",
50506+ &constraint_attrs[i].name_attr);
50507 if (ret)
50508 goto err_alloc;
50509- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
50510- &constraint_attrs[i].max_power_attr,
50511- show_constraint_max_power_uw,
50512- NULL);
50513+ ret = create_constraint_attribute(i, "max_power_uw",
50514+ &constraint_attrs[i].max_power_attr);
50515 if (ret)
50516 goto err_alloc;
50517- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
50518- &constraint_attrs[i].min_power_attr,
50519- show_constraint_min_power_uw,
50520- NULL);
50521+ ret = create_constraint_attribute(i, "min_power_uw",
50522+ &constraint_attrs[i].min_power_attr);
50523 if (ret)
50524 goto err_alloc;
50525 ret = create_constraint_attribute(i, "max_time_window_us",
50526- S_IRUGO,
50527- &constraint_attrs[i].max_time_window_attr,
50528- show_constraint_max_time_window_us,
50529- NULL);
50530+ &constraint_attrs[i].max_time_window_attr);
50531 if (ret)
50532 goto err_alloc;
50533 ret = create_constraint_attribute(i, "min_time_window_us",
50534- S_IRUGO,
50535- &constraint_attrs[i].min_time_window_attr,
50536- show_constraint_min_time_window_us,
50537- NULL);
50538+ &constraint_attrs[i].min_time_window_attr);
50539 if (ret)
50540 goto err_alloc;
50541
50542@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
50543 power_zone->zone_dev_attrs[count++] =
50544 &dev_attr_max_energy_range_uj.attr;
50545 if (power_zone->ops->get_energy_uj) {
50546+ pax_open_kernel();
50547 if (power_zone->ops->reset_energy_uj)
50548- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50549+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50550 else
50551- dev_attr_energy_uj.attr.mode = S_IRUGO;
50552+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
50553+ pax_close_kernel();
50554 power_zone->zone_dev_attrs[count++] =
50555 &dev_attr_energy_uj.attr;
50556 }
50557diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
50558index 9c5d414..c7900ce 100644
50559--- a/drivers/ptp/ptp_private.h
50560+++ b/drivers/ptp/ptp_private.h
50561@@ -51,7 +51,7 @@ struct ptp_clock {
50562 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
50563 wait_queue_head_t tsev_wq;
50564 int defunct; /* tells readers to go away when clock is being removed */
50565- struct device_attribute *pin_dev_attr;
50566+ device_attribute_no_const *pin_dev_attr;
50567 struct attribute **pin_attr;
50568 struct attribute_group pin_attr_group;
50569 };
50570diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
50571index 302e626..12579af 100644
50572--- a/drivers/ptp/ptp_sysfs.c
50573+++ b/drivers/ptp/ptp_sysfs.c
50574@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
50575 goto no_pin_attr;
50576
50577 for (i = 0; i < n_pins; i++) {
50578- struct device_attribute *da = &ptp->pin_dev_attr[i];
50579+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
50580 sysfs_attr_init(&da->attr);
50581 da->attr.name = info->pin_config[i].name;
50582 da->attr.mode = 0644;
50583diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
50584index a5761d0..a2a4540 100644
50585--- a/drivers/regulator/core.c
50586+++ b/drivers/regulator/core.c
50587@@ -3591,7 +3591,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50588 {
50589 const struct regulation_constraints *constraints = NULL;
50590 const struct regulator_init_data *init_data;
50591- static atomic_t regulator_no = ATOMIC_INIT(0);
50592+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
50593 struct regulator_dev *rdev;
50594 struct device *dev;
50595 int ret, i;
50596@@ -3665,7 +3665,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50597 rdev->dev.class = &regulator_class;
50598 rdev->dev.parent = dev;
50599 dev_set_name(&rdev->dev, "regulator.%d",
50600- atomic_inc_return(&regulator_no) - 1);
50601+ atomic_inc_return_unchecked(&regulator_no) - 1);
50602 ret = device_register(&rdev->dev);
50603 if (ret != 0) {
50604 put_device(&rdev->dev);
50605diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
50606index 7eee2ca..4024513 100644
50607--- a/drivers/regulator/max8660.c
50608+++ b/drivers/regulator/max8660.c
50609@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
50610 max8660->shadow_regs[MAX8660_OVER1] = 5;
50611 } else {
50612 /* Otherwise devices can be toggled via software */
50613- max8660_dcdc_ops.enable = max8660_dcdc_enable;
50614- max8660_dcdc_ops.disable = max8660_dcdc_disable;
50615+ pax_open_kernel();
50616+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
50617+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
50618+ pax_close_kernel();
50619 }
50620
50621 /*
50622diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
50623index c3d55c2..0dddfe6 100644
50624--- a/drivers/regulator/max8973-regulator.c
50625+++ b/drivers/regulator/max8973-regulator.c
50626@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
50627 if (!pdata || !pdata->enable_ext_control) {
50628 max->desc.enable_reg = MAX8973_VOUT;
50629 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
50630- max->ops.enable = regulator_enable_regmap;
50631- max->ops.disable = regulator_disable_regmap;
50632- max->ops.is_enabled = regulator_is_enabled_regmap;
50633+ pax_open_kernel();
50634+ *(void **)&max->ops.enable = regulator_enable_regmap;
50635+ *(void **)&max->ops.disable = regulator_disable_regmap;
50636+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
50637+ pax_close_kernel();
50638 }
50639
50640 if (pdata) {
50641diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
50642index 0d17c92..a29f627 100644
50643--- a/drivers/regulator/mc13892-regulator.c
50644+++ b/drivers/regulator/mc13892-regulator.c
50645@@ -584,10 +584,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
50646 mc13xxx_unlock(mc13892);
50647
50648 /* update mc13892_vcam ops */
50649- memcpy(&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
50650+ pax_open_kernel();
50651+ memcpy((void *)&mc13892_vcam_ops, mc13892_regulators[MC13892_VCAM].desc.ops,
50652 sizeof(struct regulator_ops));
50653- mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
50654- mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
50655+ *(void **)&mc13892_vcam_ops.set_mode = mc13892_vcam_set_mode,
50656+ *(void **)&mc13892_vcam_ops.get_mode = mc13892_vcam_get_mode,
50657+ pax_close_kernel();
50658 mc13892_regulators[MC13892_VCAM].desc.ops = &mc13892_vcam_ops;
50659
50660 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
50661diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
50662index 5b2e761..c8c8a4a 100644
50663--- a/drivers/rtc/rtc-cmos.c
50664+++ b/drivers/rtc/rtc-cmos.c
50665@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
50666 hpet_rtc_timer_init();
50667
50668 /* export at least the first block of NVRAM */
50669- nvram.size = address_space - NVRAM_OFFSET;
50670+ pax_open_kernel();
50671+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
50672+ pax_close_kernel();
50673 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
50674 if (retval < 0) {
50675 dev_dbg(dev, "can't create nvram file? %d\n", retval);
50676diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
50677index d049393..bb20be0 100644
50678--- a/drivers/rtc/rtc-dev.c
50679+++ b/drivers/rtc/rtc-dev.c
50680@@ -16,6 +16,7 @@
50681 #include <linux/module.h>
50682 #include <linux/rtc.h>
50683 #include <linux/sched.h>
50684+#include <linux/grsecurity.h>
50685 #include "rtc-core.h"
50686
50687 static dev_t rtc_devt;
50688@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
50689 if (copy_from_user(&tm, uarg, sizeof(tm)))
50690 return -EFAULT;
50691
50692+ gr_log_timechange();
50693+
50694 return rtc_set_time(rtc, &tm);
50695
50696 case RTC_PIE_ON:
50697diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
50698index 4ffabb3..1f87fca 100644
50699--- a/drivers/rtc/rtc-ds1307.c
50700+++ b/drivers/rtc/rtc-ds1307.c
50701@@ -107,7 +107,7 @@ struct ds1307 {
50702 u8 offset; /* register's offset */
50703 u8 regs[11];
50704 u16 nvram_offset;
50705- struct bin_attribute *nvram;
50706+ bin_attribute_no_const *nvram;
50707 enum ds_type type;
50708 unsigned long flags;
50709 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
50710diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
50711index 90abb5b..e0bf6dd 100644
50712--- a/drivers/rtc/rtc-m48t59.c
50713+++ b/drivers/rtc/rtc-m48t59.c
50714@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
50715 if (IS_ERR(m48t59->rtc))
50716 return PTR_ERR(m48t59->rtc);
50717
50718- m48t59_nvram_attr.size = pdata->offset;
50719+ pax_open_kernel();
50720+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
50721+ pax_close_kernel();
50722
50723 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
50724 if (ret)
50725diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
50726index e693af6..2e525b6 100644
50727--- a/drivers/scsi/bfa/bfa_fcpim.h
50728+++ b/drivers/scsi/bfa/bfa_fcpim.h
50729@@ -36,7 +36,7 @@ struct bfa_iotag_s {
50730
50731 struct bfa_itn_s {
50732 bfa_isr_func_t isr;
50733-};
50734+} __no_const;
50735
50736 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
50737 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
50738diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
50739index 0f19455..ef7adb5 100644
50740--- a/drivers/scsi/bfa/bfa_fcs.c
50741+++ b/drivers/scsi/bfa/bfa_fcs.c
50742@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
50743 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
50744
50745 static struct bfa_fcs_mod_s fcs_modules[] = {
50746- { bfa_fcs_port_attach, NULL, NULL },
50747- { bfa_fcs_uf_attach, NULL, NULL },
50748- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
50749- bfa_fcs_fabric_modexit },
50750+ {
50751+ .attach = bfa_fcs_port_attach,
50752+ .modinit = NULL,
50753+ .modexit = NULL
50754+ },
50755+ {
50756+ .attach = bfa_fcs_uf_attach,
50757+ .modinit = NULL,
50758+ .modexit = NULL
50759+ },
50760+ {
50761+ .attach = bfa_fcs_fabric_attach,
50762+ .modinit = bfa_fcs_fabric_modinit,
50763+ .modexit = bfa_fcs_fabric_modexit
50764+ },
50765 };
50766
50767 /*
50768diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
50769index ff75ef8..2dfe00a 100644
50770--- a/drivers/scsi/bfa/bfa_fcs_lport.c
50771+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
50772@@ -89,15 +89,26 @@ static struct {
50773 void (*offline) (struct bfa_fcs_lport_s *port);
50774 } __port_action[] = {
50775 {
50776- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
50777- bfa_fcs_lport_unknown_offline}, {
50778- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
50779- bfa_fcs_lport_fab_offline}, {
50780- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
50781- bfa_fcs_lport_n2n_offline}, {
50782- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
50783- bfa_fcs_lport_loop_offline},
50784- };
50785+ .init = bfa_fcs_lport_unknown_init,
50786+ .online = bfa_fcs_lport_unknown_online,
50787+ .offline = bfa_fcs_lport_unknown_offline
50788+ },
50789+ {
50790+ .init = bfa_fcs_lport_fab_init,
50791+ .online = bfa_fcs_lport_fab_online,
50792+ .offline = bfa_fcs_lport_fab_offline
50793+ },
50794+ {
50795+ .init = bfa_fcs_lport_n2n_init,
50796+ .online = bfa_fcs_lport_n2n_online,
50797+ .offline = bfa_fcs_lport_n2n_offline
50798+ },
50799+ {
50800+ .init = bfa_fcs_lport_loop_init,
50801+ .online = bfa_fcs_lport_loop_online,
50802+ .offline = bfa_fcs_lport_loop_offline
50803+ },
50804+};
50805
50806 /*
50807 * fcs_port_sm FCS logical port state machine
50808diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
50809index a38aafa0..fe8f03b 100644
50810--- a/drivers/scsi/bfa/bfa_ioc.h
50811+++ b/drivers/scsi/bfa/bfa_ioc.h
50812@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
50813 bfa_ioc_disable_cbfn_t disable_cbfn;
50814 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
50815 bfa_ioc_reset_cbfn_t reset_cbfn;
50816-};
50817+} __no_const;
50818
50819 /*
50820 * IOC event notification mechanism.
50821@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
50822 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
50823 enum bfi_ioc_state fwstate);
50824 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
50825-};
50826+} __no_const;
50827
50828 /*
50829 * Queue element to wait for room in request queue. FIFO order is
50830diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
50831index a14c784..6de6790 100644
50832--- a/drivers/scsi/bfa/bfa_modules.h
50833+++ b/drivers/scsi/bfa/bfa_modules.h
50834@@ -78,12 +78,12 @@ enum {
50835 \
50836 extern struct bfa_module_s hal_mod_ ## __mod; \
50837 struct bfa_module_s hal_mod_ ## __mod = { \
50838- bfa_ ## __mod ## _meminfo, \
50839- bfa_ ## __mod ## _attach, \
50840- bfa_ ## __mod ## _detach, \
50841- bfa_ ## __mod ## _start, \
50842- bfa_ ## __mod ## _stop, \
50843- bfa_ ## __mod ## _iocdisable, \
50844+ .meminfo = bfa_ ## __mod ## _meminfo, \
50845+ .attach = bfa_ ## __mod ## _attach, \
50846+ .detach = bfa_ ## __mod ## _detach, \
50847+ .start = bfa_ ## __mod ## _start, \
50848+ .stop = bfa_ ## __mod ## _stop, \
50849+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
50850 }
50851
50852 #define BFA_CACHELINE_SZ (256)
50853diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
50854index 045c4e1..13de803 100644
50855--- a/drivers/scsi/fcoe/fcoe_sysfs.c
50856+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
50857@@ -33,8 +33,8 @@
50858 */
50859 #include "libfcoe.h"
50860
50861-static atomic_t ctlr_num;
50862-static atomic_t fcf_num;
50863+static atomic_unchecked_t ctlr_num;
50864+static atomic_unchecked_t fcf_num;
50865
50866 /*
50867 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
50868@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
50869 if (!ctlr)
50870 goto out;
50871
50872- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
50873+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
50874 ctlr->f = f;
50875 ctlr->mode = FIP_CONN_TYPE_FABRIC;
50876 INIT_LIST_HEAD(&ctlr->fcfs);
50877@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
50878 fcf->dev.parent = &ctlr->dev;
50879 fcf->dev.bus = &fcoe_bus_type;
50880 fcf->dev.type = &fcoe_fcf_device_type;
50881- fcf->id = atomic_inc_return(&fcf_num) - 1;
50882+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
50883 fcf->state = FCOE_FCF_STATE_UNKNOWN;
50884
50885 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
50886@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
50887 {
50888 int error;
50889
50890- atomic_set(&ctlr_num, 0);
50891- atomic_set(&fcf_num, 0);
50892+ atomic_set_unchecked(&ctlr_num, 0);
50893+ atomic_set_unchecked(&fcf_num, 0);
50894
50895 error = bus_register(&fcoe_bus_type);
50896 if (error)
50897diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
50898index 8bb173e..20236b4 100644
50899--- a/drivers/scsi/hosts.c
50900+++ b/drivers/scsi/hosts.c
50901@@ -42,7 +42,7 @@
50902 #include "scsi_logging.h"
50903
50904
50905-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
50906+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
50907
50908
50909 static void scsi_host_cls_release(struct device *dev)
50910@@ -392,7 +392,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
50911 * subtract one because we increment first then return, but we need to
50912 * know what the next host number was before increment
50913 */
50914- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
50915+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
50916 shost->dma_channel = 0xff;
50917
50918 /* These three are default values which can be overridden */
50919diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
50920index 6bb4611..0203251 100644
50921--- a/drivers/scsi/hpsa.c
50922+++ b/drivers/scsi/hpsa.c
50923@@ -701,10 +701,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
50924 struct reply_queue_buffer *rq = &h->reply_queue[q];
50925
50926 if (h->transMethod & CFGTBL_Trans_io_accel1)
50927- return h->access.command_completed(h, q);
50928+ return h->access->command_completed(h, q);
50929
50930 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
50931- return h->access.command_completed(h, q);
50932+ return h->access->command_completed(h, q);
50933
50934 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
50935 a = rq->head[rq->current_entry];
50936@@ -5360,7 +5360,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
50937 while (!list_empty(&h->reqQ)) {
50938 c = list_entry(h->reqQ.next, struct CommandList, list);
50939 /* can't do anything if fifo is full */
50940- if ((h->access.fifo_full(h))) {
50941+ if ((h->access->fifo_full(h))) {
50942 h->fifo_recently_full = 1;
50943 dev_warn(&h->pdev->dev, "fifo full\n");
50944 break;
50945@@ -5376,7 +5376,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
50946 atomic_inc(&h->commands_outstanding);
50947 spin_unlock_irqrestore(&h->lock, *flags);
50948 /* Tell the controller execute command */
50949- h->access.submit_command(h, c);
50950+ h->access->submit_command(h, c);
50951 spin_lock_irqsave(&h->lock, *flags);
50952 }
50953 }
50954@@ -5392,17 +5392,17 @@ static void lock_and_start_io(struct ctlr_info *h)
50955
50956 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
50957 {
50958- return h->access.command_completed(h, q);
50959+ return h->access->command_completed(h, q);
50960 }
50961
50962 static inline bool interrupt_pending(struct ctlr_info *h)
50963 {
50964- return h->access.intr_pending(h);
50965+ return h->access->intr_pending(h);
50966 }
50967
50968 static inline long interrupt_not_for_us(struct ctlr_info *h)
50969 {
50970- return (h->access.intr_pending(h) == 0) ||
50971+ return (h->access->intr_pending(h) == 0) ||
50972 (h->interrupts_enabled == 0);
50973 }
50974
50975@@ -6343,7 +6343,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
50976 if (prod_index < 0)
50977 return -ENODEV;
50978 h->product_name = products[prod_index].product_name;
50979- h->access = *(products[prod_index].access);
50980+ h->access = products[prod_index].access;
50981
50982 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
50983 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
50984@@ -6690,7 +6690,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
50985 unsigned long flags;
50986 u32 lockup_detected;
50987
50988- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50989+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50990 spin_lock_irqsave(&h->lock, flags);
50991 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
50992 if (!lockup_detected) {
50993@@ -6937,7 +6937,7 @@ reinit_after_soft_reset:
50994 }
50995
50996 /* make sure the board interrupts are off */
50997- h->access.set_intr_mask(h, HPSA_INTR_OFF);
50998+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
50999
51000 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
51001 goto clean2;
51002@@ -6972,7 +6972,7 @@ reinit_after_soft_reset:
51003 * fake ones to scoop up any residual completions.
51004 */
51005 spin_lock_irqsave(&h->lock, flags);
51006- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51007+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51008 spin_unlock_irqrestore(&h->lock, flags);
51009 free_irqs(h);
51010 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
51011@@ -6991,9 +6991,9 @@ reinit_after_soft_reset:
51012 dev_info(&h->pdev->dev, "Board READY.\n");
51013 dev_info(&h->pdev->dev,
51014 "Waiting for stale completions to drain.\n");
51015- h->access.set_intr_mask(h, HPSA_INTR_ON);
51016+ h->access->set_intr_mask(h, HPSA_INTR_ON);
51017 msleep(10000);
51018- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51019+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51020
51021 rc = controller_reset_failed(h->cfgtable);
51022 if (rc)
51023@@ -7019,7 +7019,7 @@ reinit_after_soft_reset:
51024 h->drv_req_rescan = 0;
51025
51026 /* Turn the interrupts on so we can service requests */
51027- h->access.set_intr_mask(h, HPSA_INTR_ON);
51028+ h->access->set_intr_mask(h, HPSA_INTR_ON);
51029
51030 hpsa_hba_inquiry(h);
51031 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
51032@@ -7084,7 +7084,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
51033 * To write all data in the battery backed cache to disks
51034 */
51035 hpsa_flush_cache(h);
51036- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51037+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51038 hpsa_free_irqs_and_disable_msix(h);
51039 }
51040
51041@@ -7202,7 +7202,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
51042 CFGTBL_Trans_enable_directed_msix |
51043 (trans_support & (CFGTBL_Trans_io_accel1 |
51044 CFGTBL_Trans_io_accel2));
51045- struct access_method access = SA5_performant_access;
51046+ struct access_method *access = &SA5_performant_access;
51047
51048 /* This is a bit complicated. There are 8 registers on
51049 * the controller which we write to to tell it 8 different
51050@@ -7244,7 +7244,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
51051 * perform the superfluous readl() after each command submission.
51052 */
51053 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
51054- access = SA5_performant_access_no_read;
51055+ access = &SA5_performant_access_no_read;
51056
51057 /* Controller spec: zero out this buffer. */
51058 for (i = 0; i < h->nreply_queues; i++)
51059@@ -7274,12 +7274,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
51060 * enable outbound interrupt coalescing in accelerator mode;
51061 */
51062 if (trans_support & CFGTBL_Trans_io_accel1) {
51063- access = SA5_ioaccel_mode1_access;
51064+ access = &SA5_ioaccel_mode1_access;
51065 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
51066 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
51067 } else {
51068 if (trans_support & CFGTBL_Trans_io_accel2) {
51069- access = SA5_ioaccel_mode2_access;
51070+ access = &SA5_ioaccel_mode2_access;
51071 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
51072 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
51073 }
51074diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
51075index 8e06d9e..396e0a1 100644
51076--- a/drivers/scsi/hpsa.h
51077+++ b/drivers/scsi/hpsa.h
51078@@ -127,7 +127,7 @@ struct ctlr_info {
51079 unsigned int msix_vector;
51080 unsigned int msi_vector;
51081 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
51082- struct access_method access;
51083+ struct access_method *access;
51084 char hba_mode_enabled;
51085
51086 /* queue and queue Info */
51087@@ -523,43 +523,43 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
51088 }
51089
51090 static struct access_method SA5_access = {
51091- SA5_submit_command,
51092- SA5_intr_mask,
51093- SA5_fifo_full,
51094- SA5_intr_pending,
51095- SA5_completed,
51096+ .submit_command = SA5_submit_command,
51097+ .set_intr_mask = SA5_intr_mask,
51098+ .fifo_full = SA5_fifo_full,
51099+ .intr_pending = SA5_intr_pending,
51100+ .command_completed = SA5_completed,
51101 };
51102
51103 static struct access_method SA5_ioaccel_mode1_access = {
51104- SA5_submit_command,
51105- SA5_performant_intr_mask,
51106- SA5_fifo_full,
51107- SA5_ioaccel_mode1_intr_pending,
51108- SA5_ioaccel_mode1_completed,
51109+ .submit_command = SA5_submit_command,
51110+ .set_intr_mask = SA5_performant_intr_mask,
51111+ .fifo_full = SA5_fifo_full,
51112+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
51113+ .command_completed = SA5_ioaccel_mode1_completed,
51114 };
51115
51116 static struct access_method SA5_ioaccel_mode2_access = {
51117- SA5_submit_command_ioaccel2,
51118- SA5_performant_intr_mask,
51119- SA5_fifo_full,
51120- SA5_performant_intr_pending,
51121- SA5_performant_completed,
51122+ .submit_command = SA5_submit_command_ioaccel2,
51123+ .set_intr_mask = SA5_performant_intr_mask,
51124+ .fifo_full = SA5_fifo_full,
51125+ .intr_pending = SA5_performant_intr_pending,
51126+ .command_completed = SA5_performant_completed,
51127 };
51128
51129 static struct access_method SA5_performant_access = {
51130- SA5_submit_command,
51131- SA5_performant_intr_mask,
51132- SA5_fifo_full,
51133- SA5_performant_intr_pending,
51134- SA5_performant_completed,
51135+ .submit_command = SA5_submit_command,
51136+ .set_intr_mask = SA5_performant_intr_mask,
51137+ .fifo_full = SA5_fifo_full,
51138+ .intr_pending = SA5_performant_intr_pending,
51139+ .command_completed = SA5_performant_completed,
51140 };
51141
51142 static struct access_method SA5_performant_access_no_read = {
51143- SA5_submit_command_no_read,
51144- SA5_performant_intr_mask,
51145- SA5_fifo_full,
51146- SA5_performant_intr_pending,
51147- SA5_performant_completed,
51148+ .submit_command = SA5_submit_command_no_read,
51149+ .set_intr_mask = SA5_performant_intr_mask,
51150+ .fifo_full = SA5_fifo_full,
51151+ .intr_pending = SA5_performant_intr_pending,
51152+ .command_completed = SA5_performant_completed,
51153 };
51154
51155 struct board_type {
51156diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
51157index 1b3a094..068e683 100644
51158--- a/drivers/scsi/libfc/fc_exch.c
51159+++ b/drivers/scsi/libfc/fc_exch.c
51160@@ -101,12 +101,12 @@ struct fc_exch_mgr {
51161 u16 pool_max_index;
51162
51163 struct {
51164- atomic_t no_free_exch;
51165- atomic_t no_free_exch_xid;
51166- atomic_t xid_not_found;
51167- atomic_t xid_busy;
51168- atomic_t seq_not_found;
51169- atomic_t non_bls_resp;
51170+ atomic_unchecked_t no_free_exch;
51171+ atomic_unchecked_t no_free_exch_xid;
51172+ atomic_unchecked_t xid_not_found;
51173+ atomic_unchecked_t xid_busy;
51174+ atomic_unchecked_t seq_not_found;
51175+ atomic_unchecked_t non_bls_resp;
51176 } stats;
51177 };
51178
51179@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
51180 /* allocate memory for exchange */
51181 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
51182 if (!ep) {
51183- atomic_inc(&mp->stats.no_free_exch);
51184+ atomic_inc_unchecked(&mp->stats.no_free_exch);
51185 goto out;
51186 }
51187 memset(ep, 0, sizeof(*ep));
51188@@ -874,7 +874,7 @@ out:
51189 return ep;
51190 err:
51191 spin_unlock_bh(&pool->lock);
51192- atomic_inc(&mp->stats.no_free_exch_xid);
51193+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
51194 mempool_free(ep, mp->ep_pool);
51195 return NULL;
51196 }
51197@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51198 xid = ntohs(fh->fh_ox_id); /* we originated exch */
51199 ep = fc_exch_find(mp, xid);
51200 if (!ep) {
51201- atomic_inc(&mp->stats.xid_not_found);
51202+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51203 reject = FC_RJT_OX_ID;
51204 goto out;
51205 }
51206@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51207 ep = fc_exch_find(mp, xid);
51208 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
51209 if (ep) {
51210- atomic_inc(&mp->stats.xid_busy);
51211+ atomic_inc_unchecked(&mp->stats.xid_busy);
51212 reject = FC_RJT_RX_ID;
51213 goto rel;
51214 }
51215@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51216 }
51217 xid = ep->xid; /* get our XID */
51218 } else if (!ep) {
51219- atomic_inc(&mp->stats.xid_not_found);
51220+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51221 reject = FC_RJT_RX_ID; /* XID not found */
51222 goto out;
51223 }
51224@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51225 } else {
51226 sp = &ep->seq;
51227 if (sp->id != fh->fh_seq_id) {
51228- atomic_inc(&mp->stats.seq_not_found);
51229+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51230 if (f_ctl & FC_FC_END_SEQ) {
51231 /*
51232 * Update sequence_id based on incoming last
51233@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51234
51235 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
51236 if (!ep) {
51237- atomic_inc(&mp->stats.xid_not_found);
51238+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51239 goto out;
51240 }
51241 if (ep->esb_stat & ESB_ST_COMPLETE) {
51242- atomic_inc(&mp->stats.xid_not_found);
51243+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51244 goto rel;
51245 }
51246 if (ep->rxid == FC_XID_UNKNOWN)
51247 ep->rxid = ntohs(fh->fh_rx_id);
51248 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
51249- atomic_inc(&mp->stats.xid_not_found);
51250+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51251 goto rel;
51252 }
51253 if (ep->did != ntoh24(fh->fh_s_id) &&
51254 ep->did != FC_FID_FLOGI) {
51255- atomic_inc(&mp->stats.xid_not_found);
51256+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51257 goto rel;
51258 }
51259 sof = fr_sof(fp);
51260@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51261 sp->ssb_stat |= SSB_ST_RESP;
51262 sp->id = fh->fh_seq_id;
51263 } else if (sp->id != fh->fh_seq_id) {
51264- atomic_inc(&mp->stats.seq_not_found);
51265+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51266 goto rel;
51267 }
51268
51269@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51270 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
51271
51272 if (!sp)
51273- atomic_inc(&mp->stats.xid_not_found);
51274+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51275 else
51276- atomic_inc(&mp->stats.non_bls_resp);
51277+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
51278
51279 fc_frame_free(fp);
51280 }
51281@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
51282
51283 list_for_each_entry(ema, &lport->ema_list, ema_list) {
51284 mp = ema->mp;
51285- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
51286+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
51287 st->fc_no_free_exch_xid +=
51288- atomic_read(&mp->stats.no_free_exch_xid);
51289- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
51290- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
51291- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
51292- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
51293+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
51294+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
51295+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
51296+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
51297+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
51298 }
51299 }
51300 EXPORT_SYMBOL(fc_exch_update_stats);
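
The libfc hunks above are the most common pattern in this patch: statistics
counters that may legitimately wrap are converted from atomic_t to
atomic_unchecked_t, so the PaX REFCOUNT overflow trap only guards real
reference counts. The unchecked types and accessors are defined elsewhere in
the patch; what follows is a minimal sketch of the idea, not the literal
definitions -- without the REFCOUNT instrumentation these are expected to
collapse to the plain atomic ops:

typedef struct { int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        /* plain wrapping add, no overflow trap */
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
        return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
{
        __atomic_store_n(&v->counter, i, __ATOMIC_RELAXED);
}

The same conversion recurs below for lpfc, pmcraid, qla4xxx, the SCSI
transport classes, sbp, the target core and hvsi.
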
51301diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
51302index 932d9cc..50c7ee9 100644
51303--- a/drivers/scsi/libsas/sas_ata.c
51304+++ b/drivers/scsi/libsas/sas_ata.c
51305@@ -535,7 +535,7 @@ static struct ata_port_operations sas_sata_ops = {
51306 .postreset = ata_std_postreset,
51307 .error_handler = ata_std_error_handler,
51308 .post_internal_cmd = sas_ata_post_internal,
51309- .qc_defer = ata_std_qc_defer,
51310+ .qc_defer = ata_std_qc_defer,
51311 .qc_prep = ata_noop_qc_prep,
51312 .qc_issue = sas_ata_qc_issue,
51313 .qc_fill_rtf = sas_ata_qc_fill_rtf,
51314diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
51315index 434e903..5a4a79b 100644
51316--- a/drivers/scsi/lpfc/lpfc.h
51317+++ b/drivers/scsi/lpfc/lpfc.h
51318@@ -430,7 +430,7 @@ struct lpfc_vport {
51319 struct dentry *debug_nodelist;
51320 struct dentry *vport_debugfs_root;
51321 struct lpfc_debugfs_trc *disc_trc;
51322- atomic_t disc_trc_cnt;
51323+ atomic_unchecked_t disc_trc_cnt;
51324 #endif
51325 uint8_t stat_data_enabled;
51326 uint8_t stat_data_blocked;
51327@@ -880,8 +880,8 @@ struct lpfc_hba {
51328 struct timer_list fabric_block_timer;
51329 unsigned long bit_flags;
51330 #define FABRIC_COMANDS_BLOCKED 0
51331- atomic_t num_rsrc_err;
51332- atomic_t num_cmd_success;
51333+ atomic_unchecked_t num_rsrc_err;
51334+ atomic_unchecked_t num_cmd_success;
51335 unsigned long last_rsrc_error_time;
51336 unsigned long last_ramp_down_time;
51337 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
51338@@ -916,7 +916,7 @@ struct lpfc_hba {
51339
51340 struct dentry *debug_slow_ring_trc;
51341 struct lpfc_debugfs_trc *slow_ring_trc;
51342- atomic_t slow_ring_trc_cnt;
51343+ atomic_unchecked_t slow_ring_trc_cnt;
51344 /* iDiag debugfs sub-directory */
51345 struct dentry *idiag_root;
51346 struct dentry *idiag_pci_cfg;
51347diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
51348index 5633e7d..8272114 100644
51349--- a/drivers/scsi/lpfc/lpfc_debugfs.c
51350+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
51351@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
51352
51353 #include <linux/debugfs.h>
51354
51355-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51356+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51357 static unsigned long lpfc_debugfs_start_time = 0L;
51358
51359 /* iDiag */
51360@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
51361 lpfc_debugfs_enable = 0;
51362
51363 len = 0;
51364- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
51365+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
51366 (lpfc_debugfs_max_disc_trc - 1);
51367 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
51368 dtp = vport->disc_trc + i;
51369@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
51370 lpfc_debugfs_enable = 0;
51371
51372 len = 0;
51373- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
51374+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
51375 (lpfc_debugfs_max_slow_ring_trc - 1);
51376 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
51377 dtp = phba->slow_ring_trc + i;
51378@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
51379 !vport || !vport->disc_trc)
51380 return;
51381
51382- index = atomic_inc_return(&vport->disc_trc_cnt) &
51383+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
51384 (lpfc_debugfs_max_disc_trc - 1);
51385 dtp = vport->disc_trc + index;
51386 dtp->fmt = fmt;
51387 dtp->data1 = data1;
51388 dtp->data2 = data2;
51389 dtp->data3 = data3;
51390- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51391+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51392 dtp->jif = jiffies;
51393 #endif
51394 return;
51395@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
51396 !phba || !phba->slow_ring_trc)
51397 return;
51398
51399- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
51400+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
51401 (lpfc_debugfs_max_slow_ring_trc - 1);
51402 dtp = phba->slow_ring_trc + index;
51403 dtp->fmt = fmt;
51404 dtp->data1 = data1;
51405 dtp->data2 = data2;
51406 dtp->data3 = data3;
51407- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51408+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51409 dtp->jif = jiffies;
51410 #endif
51411 return;
51412@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51413 "slow_ring buffer\n");
51414 goto debug_failed;
51415 }
51416- atomic_set(&phba->slow_ring_trc_cnt, 0);
51417+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
51418 memset(phba->slow_ring_trc, 0,
51419 (sizeof(struct lpfc_debugfs_trc) *
51420 lpfc_debugfs_max_slow_ring_trc));
51421@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51422 "buffer\n");
51423 goto debug_failed;
51424 }
51425- atomic_set(&vport->disc_trc_cnt, 0);
51426+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
51427
51428 snprintf(name, sizeof(name), "discovery_trace");
51429 vport->debug_disc_trc =
51430diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
51431index 0b2c53a..aec2b45 100644
51432--- a/drivers/scsi/lpfc/lpfc_init.c
51433+++ b/drivers/scsi/lpfc/lpfc_init.c
51434@@ -11290,8 +11290,10 @@ lpfc_init(void)
51435 "misc_register returned with status %d", error);
51436
51437 if (lpfc_enable_npiv) {
51438- lpfc_transport_functions.vport_create = lpfc_vport_create;
51439- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51440+ pax_open_kernel();
51441+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
51442+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51443+ pax_close_kernel();
51444 }
51445 lpfc_transport_template =
51446 fc_attach_transport(&lpfc_transport_functions);
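
The lpfc_init hunk is the second recurring pattern: the patch's constify
plugin makes ops/template structures read-only, so the two runtime
assignments neither type-check nor have a writable page to land in. The fix
writes through a void ** cast inside a pax_open_kernel()/pax_close_kernel()
window, which briefly re-enables kernel writes to read-only data (on x86,
roughly, by toggling the CR0 write-protect bit). A compile-level sketch of
why the cast is needed, with the constified member shape assumed:

struct transport_tmpl {
        int (* const vport_create)(void *);     /* assumed constify effect */
};

static struct transport_tmpl tmpl;

static int my_vport_create(void *arg) { return 0; }

static void patch_tmpl(void)
{
        /* tmpl.vport_create = my_vport_create;  -- rejected: const member */
        *(void **)&tmpl.vport_create = my_vport_create; /* mirrors the diff */
}

Outside the open/close window the write would fault; the page protection,
not the C type system, is the real guard. qla2xxx, int340x and of-thermal
get the same treatment below.
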
51447diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
51448index 4f9222e..f1850e3 100644
51449--- a/drivers/scsi/lpfc/lpfc_scsi.c
51450+++ b/drivers/scsi/lpfc/lpfc_scsi.c
51451@@ -261,7 +261,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
51452 unsigned long expires;
51453
51454 spin_lock_irqsave(&phba->hbalock, flags);
51455- atomic_inc(&phba->num_rsrc_err);
51456+ atomic_inc_unchecked(&phba->num_rsrc_err);
51457 phba->last_rsrc_error_time = jiffies;
51458
51459 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
51460@@ -303,8 +303,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51461 unsigned long num_rsrc_err, num_cmd_success;
51462 int i;
51463
51464- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
51465- num_cmd_success = atomic_read(&phba->num_cmd_success);
51466+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
51467+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
51468
51469 /*
51470 * The error and success command counters are global per
51471@@ -331,8 +331,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51472 }
51473 }
51474 lpfc_destroy_vport_work_array(phba, vports);
51475- atomic_set(&phba->num_rsrc_err, 0);
51476- atomic_set(&phba->num_cmd_success, 0);
51477+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
51478+ atomic_set_unchecked(&phba->num_cmd_success, 0);
51479 }
51480
51481 /**
51482diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51483index 6a1c036..38e0e8d 100644
51484--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51485+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51486@@ -1508,7 +1508,7 @@ _scsih_get_resync(struct device *dev)
51487 {
51488 struct scsi_device *sdev = to_scsi_device(dev);
51489 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51490- static struct _raid_device *raid_device;
51491+ struct _raid_device *raid_device;
51492 unsigned long flags;
51493 Mpi2RaidVolPage0_t vol_pg0;
51494 Mpi2ConfigReply_t mpi_reply;
51495@@ -1560,7 +1560,7 @@ _scsih_get_state(struct device *dev)
51496 {
51497 struct scsi_device *sdev = to_scsi_device(dev);
51498 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51499- static struct _raid_device *raid_device;
51500+ struct _raid_device *raid_device;
51501 unsigned long flags;
51502 Mpi2RaidVolPage0_t vol_pg0;
51503 Mpi2ConfigReply_t mpi_reply;
51504@@ -6602,7 +6602,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
51505 Mpi2EventDataIrOperationStatus_t *event_data =
51506 (Mpi2EventDataIrOperationStatus_t *)
51507 fw_event->event_data;
51508- static struct _raid_device *raid_device;
51509+ struct _raid_device *raid_device;
51510 unsigned long flags;
51511 u16 handle;
51512
51513@@ -7073,7 +7073,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
51514 u64 sas_address;
51515 struct _sas_device *sas_device;
51516 struct _sas_node *expander_device;
51517- static struct _raid_device *raid_device;
51518+ struct _raid_device *raid_device;
51519 u8 retry_count;
51520 unsigned long flags;
51521
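
The mpt2sas change is a plain bug fix riding along with the hardening: a
function-scope static pointer is a single variable shared by every caller,
so two CPUs running the lookup concurrently clobber each other's result.
Dropping static gives each invocation its own stack slot. A reduced
illustration (names hypothetical):

struct raid_dev { int handle; };

struct raid_dev *find_buggy(struct raid_dev *tbl, int i)
{
        static struct raid_dev *rd;     /* one slot, shared by all CPUs */
        rd = &tbl[i];                   /* concurrent callers race here */
        return rd;
}

struct raid_dev *find_fixed(struct raid_dev *tbl, int i)
{
        struct raid_dev *rd = &tbl[i];  /* per-call, race-free */
        return rd;
}
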
51522diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
51523index 8c27b6a..607f56e 100644
51524--- a/drivers/scsi/pmcraid.c
51525+++ b/drivers/scsi/pmcraid.c
51526@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
51527 res->scsi_dev = scsi_dev;
51528 scsi_dev->hostdata = res;
51529 res->change_detected = 0;
51530- atomic_set(&res->read_failures, 0);
51531- atomic_set(&res->write_failures, 0);
51532+ atomic_set_unchecked(&res->read_failures, 0);
51533+ atomic_set_unchecked(&res->write_failures, 0);
51534 rc = 0;
51535 }
51536 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
51537@@ -2646,9 +2646,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
51538
51539 /* If this was a SCSI read/write command keep count of errors */
51540 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
51541- atomic_inc(&res->read_failures);
51542+ atomic_inc_unchecked(&res->read_failures);
51543 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
51544- atomic_inc(&res->write_failures);
51545+ atomic_inc_unchecked(&res->write_failures);
51546
51547 if (!RES_IS_GSCSI(res->cfg_entry) &&
51548 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
51549@@ -3474,7 +3474,7 @@ static int pmcraid_queuecommand_lck(
51550 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51551 * hrrq_id assigned here in queuecommand
51552 */
51553- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51554+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51555 pinstance->num_hrrq;
51556 cmd->cmd_done = pmcraid_io_done;
51557
51558@@ -3788,7 +3788,7 @@ static long pmcraid_ioctl_passthrough(
51559 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51560 * hrrq_id assigned here in queuecommand
51561 */
51562- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51563+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51564 pinstance->num_hrrq;
51565
51566 if (request_size) {
51567@@ -4426,7 +4426,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
51568
51569 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
51570 /* add resources only after host is added into system */
51571- if (!atomic_read(&pinstance->expose_resources))
51572+ if (!atomic_read_unchecked(&pinstance->expose_resources))
51573 return;
51574
51575 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
51576@@ -5243,8 +5243,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
51577 init_waitqueue_head(&pinstance->reset_wait_q);
51578
51579 atomic_set(&pinstance->outstanding_cmds, 0);
51580- atomic_set(&pinstance->last_message_id, 0);
51581- atomic_set(&pinstance->expose_resources, 0);
51582+ atomic_set_unchecked(&pinstance->last_message_id, 0);
51583+ atomic_set_unchecked(&pinstance->expose_resources, 0);
51584
51585 INIT_LIST_HEAD(&pinstance->free_res_q);
51586 INIT_LIST_HEAD(&pinstance->used_res_q);
51587@@ -5957,7 +5957,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
51588 /* Schedule worker thread to handle CCN and take care of adding and
51589 * removing devices to OS
51590 */
51591- atomic_set(&pinstance->expose_resources, 1);
51592+ atomic_set_unchecked(&pinstance->expose_resources, 1);
51593 schedule_work(&pinstance->worker_q);
51594 return rc;
51595
51596diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
51597index e1d150f..6c6df44 100644
51598--- a/drivers/scsi/pmcraid.h
51599+++ b/drivers/scsi/pmcraid.h
51600@@ -748,7 +748,7 @@ struct pmcraid_instance {
51601 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
51602
51603 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
51604- atomic_t last_message_id;
51605+ atomic_unchecked_t last_message_id;
51606
51607 /* configuration table */
51608 struct pmcraid_config_table *cfg_table;
51609@@ -777,7 +777,7 @@ struct pmcraid_instance {
51610 atomic_t outstanding_cmds;
51611
51612 /* should add/delete resources to mid-layer now ?*/
51613- atomic_t expose_resources;
51614+ atomic_unchecked_t expose_resources;
51615
51616
51617
51618@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
51619 struct pmcraid_config_table_entry_ext cfg_entry_ext;
51620 };
51621 struct scsi_device *scsi_dev; /* Link scsi_device structure */
51622- atomic_t read_failures; /* count of failed READ commands */
51623- atomic_t write_failures; /* count of failed WRITE commands */
51624+ atomic_unchecked_t read_failures; /* count of failed READ commands */
51625+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
51626
51627 /* To indicate add/delete/modify during CCN */
51628 u8 change_detected;
51629diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
51630index 82b92c4..3178171 100644
51631--- a/drivers/scsi/qla2xxx/qla_attr.c
51632+++ b/drivers/scsi/qla2xxx/qla_attr.c
51633@@ -2192,7 +2192,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
51634 return 0;
51635 }
51636
51637-struct fc_function_template qla2xxx_transport_functions = {
51638+fc_function_template_no_const qla2xxx_transport_functions = {
51639
51640 .show_host_node_name = 1,
51641 .show_host_port_name = 1,
51642@@ -2240,7 +2240,7 @@ struct fc_function_template qla2xxx_transport_functions = {
51643 .bsg_timeout = qla24xx_bsg_timeout,
51644 };
51645
51646-struct fc_function_template qla2xxx_transport_vport_functions = {
51647+fc_function_template_no_const qla2xxx_transport_vport_functions = {
51648
51649 .show_host_node_name = 1,
51650 .show_host_port_name = 1,
51651diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
51652index 7686bfe..4710893 100644
51653--- a/drivers/scsi/qla2xxx/qla_gbl.h
51654+++ b/drivers/scsi/qla2xxx/qla_gbl.h
51655@@ -571,8 +571,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
51656 struct device_attribute;
51657 extern struct device_attribute *qla2x00_host_attrs[];
51658 struct fc_function_template;
51659-extern struct fc_function_template qla2xxx_transport_functions;
51660-extern struct fc_function_template qla2xxx_transport_vport_functions;
51661+extern fc_function_template_no_const qla2xxx_transport_functions;
51662+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
51663 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
51664 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
51665 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
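
qla2xxx keeps these transport templates writable, presumably because they
are modified at runtime elsewhere in the driver, so a plain const-ification
would break them. The patch's convention for such exceptions is a _no_const
twin type that the constify plugin leaves mutable; the typedef lives
elsewhere in the patch, but its assumed shape is:

#define __no_const      /* plugin-recognized attribute in the real build */

struct fc_function_template {
        int show_host_node_name;
        /* ... */
};
typedef struct fc_function_template __no_const fc_function_template_no_const;

The same convention appears below as bin_attribute_no_const (tegra fuse)
and ctl_table_no_const (lustre libcfs).
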
51666diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
51667index cce1cbc..5b9f0fe 100644
51668--- a/drivers/scsi/qla2xxx/qla_os.c
51669+++ b/drivers/scsi/qla2xxx/qla_os.c
51670@@ -1435,8 +1435,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
51671 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
51672 /* Ok, a 64bit DMA mask is applicable. */
51673 ha->flags.enable_64bit_addressing = 1;
51674- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
51675- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
51676+ pax_open_kernel();
51677+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
51678+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
51679+ pax_close_kernel();
51680 return;
51681 }
51682 }
51683diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
51684index 8f6d0fb..1b21097 100644
51685--- a/drivers/scsi/qla4xxx/ql4_def.h
51686+++ b/drivers/scsi/qla4xxx/ql4_def.h
51687@@ -305,7 +305,7 @@ struct ddb_entry {
51688 * (4000 only) */
51689 atomic_t relogin_timer; /* Max Time to wait for
51690 * relogin to complete */
51691- atomic_t relogin_retry_count; /* Num of times relogin has been
51692+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
51693 * retried */
51694 uint32_t default_time2wait; /* Default Min time between
51695 * relogins (+aens) */
51696diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
51697index 6d25879..3031a9f 100644
51698--- a/drivers/scsi/qla4xxx/ql4_os.c
51699+++ b/drivers/scsi/qla4xxx/ql4_os.c
51700@@ -4491,12 +4491,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
51701 */
51702 if (!iscsi_is_session_online(cls_sess)) {
51703 /* Reset retry relogin timer */
51704- atomic_inc(&ddb_entry->relogin_retry_count);
51705+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
51706 DEBUG2(ql4_printk(KERN_INFO, ha,
51707 "%s: index[%d] relogin timed out-retrying"
51708 " relogin (%d), retry (%d)\n", __func__,
51709 ddb_entry->fw_ddb_index,
51710- atomic_read(&ddb_entry->relogin_retry_count),
51711+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
51712 ddb_entry->default_time2wait + 4));
51713 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
51714 atomic_set(&ddb_entry->retry_relogin_timer,
51715@@ -6604,7 +6604,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
51716
51717 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
51718 atomic_set(&ddb_entry->relogin_timer, 0);
51719- atomic_set(&ddb_entry->relogin_retry_count, 0);
51720+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
51721 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
51722 ddb_entry->default_relogin_timeout =
51723 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
51724diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
51725index 7129701..b49c4e5 100644
51726--- a/drivers/scsi/scsi_lib.c
51727+++ b/drivers/scsi/scsi_lib.c
51728@@ -1597,7 +1597,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
51729 shost = sdev->host;
51730 scsi_init_cmd_errh(cmd);
51731 cmd->result = DID_NO_CONNECT << 16;
51732- atomic_inc(&cmd->device->iorequest_cnt);
51733+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
51734
51735 /*
51736 * SCSI request completion path will do scsi_device_unbusy(),
51737@@ -1620,9 +1620,9 @@ static void scsi_softirq_done(struct request *rq)
51738
51739 INIT_LIST_HEAD(&cmd->eh_entry);
51740
51741- atomic_inc(&cmd->device->iodone_cnt);
51742+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
51743 if (cmd->result)
51744- atomic_inc(&cmd->device->ioerr_cnt);
51745+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
51746
51747 disposition = scsi_decide_disposition(cmd);
51748 if (disposition != SUCCESS &&
51749@@ -1663,7 +1663,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
51750 struct Scsi_Host *host = cmd->device->host;
51751 int rtn = 0;
51752
51753- atomic_inc(&cmd->device->iorequest_cnt);
51754+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
51755
51756 /* check if the device is still usable */
51757 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
51758diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
51759index 1ac38e7..6acc656 100644
51760--- a/drivers/scsi/scsi_sysfs.c
51761+++ b/drivers/scsi/scsi_sysfs.c
51762@@ -788,7 +788,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
51763 char *buf) \
51764 { \
51765 struct scsi_device *sdev = to_scsi_device(dev); \
51766- unsigned long long count = atomic_read(&sdev->field); \
51767+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
51768 return snprintf(buf, 20, "0x%llx\n", count); \
51769 } \
51770 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
51771diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
51772index 5d6f348..18778a6b 100644
51773--- a/drivers/scsi/scsi_transport_fc.c
51774+++ b/drivers/scsi/scsi_transport_fc.c
51775@@ -501,7 +501,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
51776 * Netlink Infrastructure
51777 */
51778
51779-static atomic_t fc_event_seq;
51780+static atomic_unchecked_t fc_event_seq;
51781
51782 /**
51783 * fc_get_event_number - Obtain the next sequential FC event number
51784@@ -514,7 +514,7 @@ static atomic_t fc_event_seq;
51785 u32
51786 fc_get_event_number(void)
51787 {
51788- return atomic_add_return(1, &fc_event_seq);
51789+ return atomic_add_return_unchecked(1, &fc_event_seq);
51790 }
51791 EXPORT_SYMBOL(fc_get_event_number);
51792
51793@@ -658,7 +658,7 @@ static __init int fc_transport_init(void)
51794 {
51795 int error;
51796
51797- atomic_set(&fc_event_seq, 0);
51798+ atomic_set_unchecked(&fc_event_seq, 0);
51799
51800 error = transport_class_register(&fc_host_class);
51801 if (error)
51802@@ -848,7 +848,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
51803 char *cp;
51804
51805 *val = simple_strtoul(buf, &cp, 0);
51806- if ((*cp && (*cp != '\n')) || (*val < 0))
51807+ if (*cp && (*cp != '\n'))
51808 return -EINVAL;
51809 /*
51810 * Check for overflow; dev_loss_tmo is u32
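
The fc_str_to_dev_loss change deletes dead code rather than behavior: *val
is an unsigned long, so (*val < 0) can never be true, and gcc's
-Wtype-limits warns about exactly this comparison. A standalone userspace
reproduction of the fixed function:

#include <errno.h>
#include <stdlib.h>

static int str_to_dev_loss(const char *buf, unsigned long *val)
{
        char *cp;

        *val = strtoul(buf, &cp, 0);    /* kernel uses simple_strtoul() */
        if (*cp && *cp != '\n')         /* "|| (*val < 0)" was always false */
                return -EINVAL;
        return 0;
}
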
51811diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
51812index 67d43e3..8cee73c 100644
51813--- a/drivers/scsi/scsi_transport_iscsi.c
51814+++ b/drivers/scsi/scsi_transport_iscsi.c
51815@@ -79,7 +79,7 @@ struct iscsi_internal {
51816 struct transport_container session_cont;
51817 };
51818
51819-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
51820+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
51821 static struct workqueue_struct *iscsi_eh_timer_workq;
51822
51823 static DEFINE_IDA(iscsi_sess_ida);
51824@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
51825 int err;
51826
51827 ihost = shost->shost_data;
51828- session->sid = atomic_add_return(1, &iscsi_session_nr);
51829+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
51830
51831 if (target_id == ISCSI_MAX_TARGET) {
51832 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
51833@@ -4515,7 +4515,7 @@ static __init int iscsi_transport_init(void)
51834 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
51835 ISCSI_TRANSPORT_VERSION);
51836
51837- atomic_set(&iscsi_session_nr, 0);
51838+ atomic_set_unchecked(&iscsi_session_nr, 0);
51839
51840 err = class_register(&iscsi_transport_class);
51841 if (err)
51842diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
51843index ae45bd9..c32a586 100644
51844--- a/drivers/scsi/scsi_transport_srp.c
51845+++ b/drivers/scsi/scsi_transport_srp.c
51846@@ -35,7 +35,7 @@
51847 #include "scsi_priv.h"
51848
51849 struct srp_host_attrs {
51850- atomic_t next_port_id;
51851+ atomic_unchecked_t next_port_id;
51852 };
51853 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
51854
51855@@ -100,7 +100,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
51856 struct Scsi_Host *shost = dev_to_shost(dev);
51857 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
51858
51859- atomic_set(&srp_host->next_port_id, 0);
51860+ atomic_set_unchecked(&srp_host->next_port_id, 0);
51861 return 0;
51862 }
51863
51864@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
51865 rport_fast_io_fail_timedout);
51866 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
51867
51868- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
51869+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
51870 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
51871
51872 transport_setup_device(&rport->dev);
51873diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
51874index 05ea0d4..5af8049 100644
51875--- a/drivers/scsi/sd.c
51876+++ b/drivers/scsi/sd.c
51877@@ -3006,7 +3006,7 @@ static int sd_probe(struct device *dev)
51878 sdkp->disk = gd;
51879 sdkp->index = index;
51880 atomic_set(&sdkp->openers, 0);
51881- atomic_set(&sdkp->device->ioerr_cnt, 0);
51882+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
51883
51884 if (!sdp->request_queue->rq_timeout) {
51885 if (sdp->type != TYPE_MOD)
51886diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
51887index dbf8e77..0d565c7 100644
51888--- a/drivers/scsi/sg.c
51889+++ b/drivers/scsi/sg.c
51890@@ -1098,7 +1098,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
51891 sdp->disk->disk_name,
51892 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
51893 NULL,
51894- (char *)arg);
51895+ (char __user *)arg);
51896 case BLKTRACESTART:
51897 return blk_trace_startstop(sdp->device->request_queue, 1);
51898 case BLKTRACESTOP:
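
The sg.c cast fixes an annotation, not behavior: the BLKTRACESETUP argument
originates in userspace and blk_trace_setup() takes a char __user *, so the
ioctl must not launder it through a plain char *. __user compiles away
under gcc and only bites under sparse ("make C=1"), where it is an
address-space attribute that flags kernel/user pointer mixups; from the
kernel's compiler.h:

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif
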
51899diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
51900index 011a336..fb2b7a0 100644
51901--- a/drivers/soc/tegra/fuse/fuse-tegra.c
51902+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
51903@@ -71,7 +71,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
51904 return i;
51905 }
51906
51907-static struct bin_attribute fuse_bin_attr = {
51908+static bin_attribute_no_const fuse_bin_attr = {
51909 .attr = { .name = "fuse", .mode = S_IRUGO, },
51910 .read = fuse_read,
51911 };
51912diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
51913index a17f533..a2ff039 100644
51914--- a/drivers/spi/spi.c
51915+++ b/drivers/spi/spi.c
51916@@ -2239,7 +2239,7 @@ int spi_bus_unlock(struct spi_master *master)
51917 EXPORT_SYMBOL_GPL(spi_bus_unlock);
51918
51919 /* portable code must never pass more than 32 bytes */
51920-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
51921+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
51922
51923 static u8 *buf;
51924
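
The SPI_BUFSIZ tweak exists because the value feeds the kernel's
type-checked max(): both operands must share a type, and comparing the int
literal 32 against SMP_CACHE_BYTES trips the check on configurations where
the latter expands to an unsigned long, hence 32UL. The macro doing the
enforcement, as in kernel.h of this era:

#define max(x, y) ({                            \
        typeof(x) _max1 = (x);                  \
        typeof(y) _max2 = (y);                  \
        (void) (&_max1 == &_max2);              \
        _max1 > _max2 ? _max1 : _max2; })

The pointer comparison on the third line is the trick: if the two typeof
types differ, gcc emits a -Wcompare-distinct-pointer-types warning.
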
51925diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
51926index b41429f..2de5373 100644
51927--- a/drivers/staging/android/timed_output.c
51928+++ b/drivers/staging/android/timed_output.c
51929@@ -25,7 +25,7 @@
51930 #include "timed_output.h"
51931
51932 static struct class *timed_output_class;
51933-static atomic_t device_count;
51934+static atomic_unchecked_t device_count;
51935
51936 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
51937 char *buf)
51938@@ -65,7 +65,7 @@ static int create_timed_output_class(void)
51939 timed_output_class = class_create(THIS_MODULE, "timed_output");
51940 if (IS_ERR(timed_output_class))
51941 return PTR_ERR(timed_output_class);
51942- atomic_set(&device_count, 0);
51943+ atomic_set_unchecked(&device_count, 0);
51944 timed_output_class->dev_groups = timed_output_groups;
51945 }
51946
51947@@ -83,7 +83,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
51948 if (ret < 0)
51949 return ret;
51950
51951- tdev->index = atomic_inc_return(&device_count);
51952+ tdev->index = atomic_inc_return_unchecked(&device_count);
51953 tdev->dev = device_create(timed_output_class, NULL,
51954 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
51955 if (IS_ERR(tdev->dev))
51956diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
51957index f143cb6..6fb8255 100644
51958--- a/drivers/staging/comedi/comedi_fops.c
51959+++ b/drivers/staging/comedi/comedi_fops.c
51960@@ -273,8 +273,8 @@ static void comedi_file_reset(struct file *file)
51961 }
51962 cfp->last_attached = dev->attached;
51963 cfp->last_detach_count = dev->detach_count;
51964- ACCESS_ONCE(cfp->read_subdev) = read_s;
51965- ACCESS_ONCE(cfp->write_subdev) = write_s;
51966+ ACCESS_ONCE_RW(cfp->read_subdev) = read_s;
51967+ ACCESS_ONCE_RW(cfp->write_subdev) = write_s;
51968 }
51969
51970 static void comedi_file_check(struct file *file)
51971@@ -1885,7 +1885,7 @@ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
51972 !(s_old->async->cmd.flags & CMDF_WRITE))
51973 return -EBUSY;
51974
51975- ACCESS_ONCE(cfp->read_subdev) = s_new;
51976+ ACCESS_ONCE_RW(cfp->read_subdev) = s_new;
51977 return 0;
51978 }
51979
51980@@ -1927,7 +1927,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
51981 (s_old->async->cmd.flags & CMDF_WRITE))
51982 return -EBUSY;
51983
51984- ACCESS_ONCE(cfp->write_subdev) = s_new;
51985+ ACCESS_ONCE_RW(cfp->write_subdev) = s_new;
51986 return 0;
51987 }
51988
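
ACCESS_ONCE_RW is a PaX addition. The split is defined elsewhere in the
patch; the assumed shape is that ACCESS_ONCE gains a const qualifier so
accidental writes through it stop compiling, and code that genuinely needs
a write -- like the comedi subdevice pointers here -- must opt in
explicitly:

#define ACCESS_ONCE(x)          (*(const volatile typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x)       (*(volatile typeof(x) *)&(x))
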
51989diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
51990index 001348c..cfaac8a 100644
51991--- a/drivers/staging/gdm724x/gdm_tty.c
51992+++ b/drivers/staging/gdm724x/gdm_tty.c
51993@@ -44,7 +44,7 @@
51994 #define gdm_tty_send_control(n, r, v, d, l) (\
51995 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
51996
51997-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
51998+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
51999
52000 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
52001 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
52002diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
52003index 503b2d7..c904931 100644
52004--- a/drivers/staging/line6/driver.c
52005+++ b/drivers/staging/line6/driver.c
52006@@ -463,7 +463,7 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
52007 {
52008 struct usb_device *usbdev = line6->usbdev;
52009 int ret;
52010- unsigned char len;
52011+ unsigned char *plen;
52012
52013 /* query the serial number: */
52014 ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
52015@@ -476,27 +476,34 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
52016 return ret;
52017 }
52018
52019+ plen = kmalloc(1, GFP_KERNEL);
52020+ if (plen == NULL)
52021+ return -ENOMEM;
52022+
52023 /* Wait for data length. We'll get 0xff until length arrives. */
52024 do {
52025 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
52026 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
52027 USB_DIR_IN,
52028- 0x0012, 0x0000, &len, 1,
52029+ 0x0012, 0x0000, plen, 1,
52030 LINE6_TIMEOUT * HZ);
52031 if (ret < 0) {
52032 dev_err(line6->ifcdev,
52033 "receive length failed (error %d)\n", ret);
52034+ kfree(plen);
52035 return ret;
52036 }
52037- } while (len == 0xff);
52038+ } while (*plen == 0xff);
52039
52040- if (len != datalen) {
52041+ if (*plen != datalen) {
52042 /* should be equal or something went wrong */
52043 dev_err(line6->ifcdev,
52044 "length mismatch (expected %d, got %d)\n",
52045- (int)datalen, (int)len);
52046+ (int)datalen, (int)*plen);
52047+ kfree(plen);
52048 return -EINVAL;
52049 }
52050+ kfree(plen);
52051
52052 /* receive the result: */
52053 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
52054@@ -520,7 +527,7 @@ int line6_write_data(struct usb_line6 *line6, int address, void *data,
52055 {
52056 struct usb_device *usbdev = line6->usbdev;
52057 int ret;
52058- unsigned char status;
52059+ unsigned char *status;
52060
52061 ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
52062 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
52063@@ -533,26 +540,34 @@ int line6_write_data(struct usb_line6 *line6, int address, void *data,
52064 return ret;
52065 }
52066
52067+ status = kmalloc(1, GFP_KERNEL);
52068+ if (status == NULL)
52069+ return -ENOMEM;
52070+
52071 do {
52072 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
52073 0x67,
52074 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
52075 USB_DIR_IN,
52076 0x0012, 0x0000,
52077- &status, 1, LINE6_TIMEOUT * HZ);
52078+ status, 1, LINE6_TIMEOUT * HZ);
52079
52080 if (ret < 0) {
52081 dev_err(line6->ifcdev,
52082 "receiving status failed (error %d)\n", ret);
52083+ kfree(status);
52084 return ret;
52085 }
52086- } while (status == 0xff);
52087+ } while (*status == 0xff);
52088
52089- if (status != 0) {
52090+ if (*status != 0) {
52091 dev_err(line6->ifcdev, "write failed (error %d)\n", ret);
52092+ kfree(status);
52093 return -EINVAL;
52094 }
52095
52096+ kfree(status);
52097+
52098 return 0;
52099 }
52100
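
The line6 conversions move single-byte USB transfer buffers off the stack.
The USB core DMA-maps the buffer handed to usb_control_msg(), and stack
memory is not DMA-safe (it can also share cache lines with live locals),
so transfer buffers must be heap allocations, freed on every exit path as
the hunks above do. The fixed idiom, reduced to one read (function name
hypothetical):

#include <linux/slab.h>
#include <linux/usb.h>

static int read_len_byte(struct usb_device *usbdev, u8 *out)
{
        u8 *buf = kmalloc(1, GFP_KERNEL);       /* DMA-able, unlike the stack */
        int ret;

        if (!buf)
                return -ENOMEM;

        ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
                              USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
                              0x0012, 0x0000, buf, 1, HZ);
        if (ret >= 0)
                *out = *buf;
        kfree(buf);                             /* freed on every path */
        return ret < 0 ? ret : 0;
}

The toneport.c hunk below applies the same rule to the tick buffer, and
also replaces the hardcoded length 4 with sizeof(int).
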
52101diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
52102index 6943715..0a93632 100644
52103--- a/drivers/staging/line6/toneport.c
52104+++ b/drivers/staging/line6/toneport.c
52105@@ -11,6 +11,7 @@
52106 */
52107
52108 #include <linux/wait.h>
52109+#include <linux/slab.h>
52110 #include <sound/control.h>
52111
52112 #include "audio.h"
52113@@ -307,14 +308,20 @@ static void toneport_destruct(struct usb_interface *interface)
52114 */
52115 static void toneport_setup(struct usb_line6_toneport *toneport)
52116 {
52117- int ticks;
52118+ int *ticks;
52119 struct usb_line6 *line6 = &toneport->line6;
52120 struct usb_device *usbdev = line6->usbdev;
52121 u16 idProduct = le16_to_cpu(usbdev->descriptor.idProduct);
52122
52123+ ticks = kmalloc(sizeof(int), GFP_KERNEL);
52124+ if (ticks == NULL)
52125+ return;
52126+
52127 /* sync time on device with host: */
52128- ticks = (int)get_seconds();
52129- line6_write_data(line6, 0x80c6, &ticks, 4);
52130+ *ticks = (int)get_seconds();
52131+ line6_write_data(line6, 0x80c6, ticks, sizeof(int));
52132+
52133+ kfree(ticks);
52134
52135 /* enable device: */
52136 toneport_send_cmd(usbdev, 0x0301, 0x0000);
52137diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
52138index 463da07..e791ce9 100644
52139--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
52140+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
52141@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
52142 return 0;
52143 }
52144
52145-sfw_test_client_ops_t brw_test_client;
52146-void brw_init_test_client(void)
52147-{
52148- brw_test_client.tso_init = brw_client_init;
52149- brw_test_client.tso_fini = brw_client_fini;
52150- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
52151- brw_test_client.tso_done_rpc = brw_client_done_rpc;
52152+sfw_test_client_ops_t brw_test_client = {
52153+ .tso_init = brw_client_init,
52154+ .tso_fini = brw_client_fini,
52155+ .tso_prep_rpc = brw_client_prep_rpc,
52156+ .tso_done_rpc = brw_client_done_rpc,
52157 };
52158
52159 srpc_service_t brw_test_service;
52160diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
52161index cc9d182..8fabce3 100644
52162--- a/drivers/staging/lustre/lnet/selftest/framework.c
52163+++ b/drivers/staging/lustre/lnet/selftest/framework.c
52164@@ -1628,12 +1628,10 @@ static srpc_service_t sfw_services[] = {
52165
52166 extern sfw_test_client_ops_t ping_test_client;
52167 extern srpc_service_t ping_test_service;
52168-extern void ping_init_test_client(void);
52169 extern void ping_init_test_service(void);
52170
52171 extern sfw_test_client_ops_t brw_test_client;
52172 extern srpc_service_t brw_test_service;
52173-extern void brw_init_test_client(void);
52174 extern void brw_init_test_service(void);
52175
52176
52177@@ -1675,12 +1673,10 @@ sfw_startup (void)
52178 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
52179 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
52180
52181- brw_init_test_client();
52182 brw_init_test_service();
52183 rc = sfw_register_test(&brw_test_service, &brw_test_client);
52184 LASSERT (rc == 0);
52185
52186- ping_init_test_client();
52187 ping_init_test_service();
52188 rc = sfw_register_test(&ping_test_service, &ping_test_client);
52189 LASSERT (rc == 0);
52190diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
52191index d8c0df6..5041cbb 100644
52192--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
52193+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
52194@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
52195 return 0;
52196 }
52197
52198-sfw_test_client_ops_t ping_test_client;
52199-void ping_init_test_client(void)
52200-{
52201- ping_test_client.tso_init = ping_client_init;
52202- ping_test_client.tso_fini = ping_client_fini;
52203- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
52204- ping_test_client.tso_done_rpc = ping_client_done_rpc;
52205-}
52206+sfw_test_client_ops_t ping_test_client = {
52207+ .tso_init = ping_client_init,
52208+ .tso_fini = ping_client_fini,
52209+ .tso_prep_rpc = ping_client_prep_rpc,
52210+ .tso_done_rpc = ping_client_done_rpc,
52211+};
52212
52213 srpc_service_t ping_test_service;
52214 void ping_init_test_service(void)
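
Both lustre selftest conversions (brw above, ping here) replace a runtime
init function with a static designated initializer. Besides deleting the
init calls from sfw_startup(), this makes the ops object fully initialized
at build time and therefore eligible for constification into read-only
data. The shape of the change:

struct test_ops {
        int  (*tso_init)(void);
        void (*tso_fini)(void);
};

static int  my_init(void) { return 0; }
static void my_fini(void) { }

/* before: a writable object plus an init function filling it in at boot;
 * after: initialized at build time, never written again */
static struct test_ops test_client = {
        .tso_init = my_init,
        .tso_fini = my_fini,
};
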
52215diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
52216index 83bc0a9..12ba00a 100644
52217--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
52218+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
52219@@ -1139,7 +1139,7 @@ struct ldlm_callback_suite {
52220 ldlm_completion_callback lcs_completion;
52221 ldlm_blocking_callback lcs_blocking;
52222 ldlm_glimpse_callback lcs_glimpse;
52223-};
52224+} __no_const;
52225
52226 /* ldlm_lockd.c */
52227 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
52228diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
52229index 2a88b80..62e7e5f 100644
52230--- a/drivers/staging/lustre/lustre/include/obd.h
52231+++ b/drivers/staging/lustre/lustre/include/obd.h
52232@@ -1362,7 +1362,7 @@ struct md_ops {
52233 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
52234 * wrapper function in include/linux/obd_class.h.
52235 */
52236-};
52237+} __no_const;
52238
52239 struct lsm_operations {
52240 void (*lsm_free)(struct lov_stripe_md *);
52241diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52242index a4c252f..b21acac 100644
52243--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52244+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52245@@ -258,7 +258,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
52246 int added = (mode == LCK_NL);
52247 int overlaps = 0;
52248 int splitted = 0;
52249- const struct ldlm_callback_suite null_cbs = { NULL };
52250+ const struct ldlm_callback_suite null_cbs = { };
52251
52252 CDEBUG(D_DLMTRACE,
52253 "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
52254diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52255index 83d3f08..b03adad 100644
52256--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52257+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52258@@ -236,7 +236,7 @@ int proc_console_max_delay_cs(struct ctl_table *table, int write,
52259 void __user *buffer, size_t *lenp, loff_t *ppos)
52260 {
52261 int rc, max_delay_cs;
52262- struct ctl_table dummy = *table;
52263+ ctl_table_no_const dummy = *table;
52264 long d;
52265
52266 dummy.data = &max_delay_cs;
52267@@ -268,7 +268,7 @@ int proc_console_min_delay_cs(struct ctl_table *table, int write,
52268 void __user *buffer, size_t *lenp, loff_t *ppos)
52269 {
52270 int rc, min_delay_cs;
52271- struct ctl_table dummy = *table;
52272+ ctl_table_no_const dummy = *table;
52273 long d;
52274
52275 dummy.data = &min_delay_cs;
52276@@ -300,7 +300,7 @@ int proc_console_backoff(struct ctl_table *table, int write,
52277 void __user *buffer, size_t *lenp, loff_t *ppos)
52278 {
52279 int rc, backoff;
52280- struct ctl_table dummy = *table;
52281+ ctl_table_no_const dummy = *table;
52282
52283 dummy.data = &backoff;
52284 dummy.proc_handler = &proc_dointvec;
52285diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
52286index 2c4fc74..b04ca79 100644
52287--- a/drivers/staging/lustre/lustre/libcfs/module.c
52288+++ b/drivers/staging/lustre/lustre/libcfs/module.c
52289@@ -315,11 +315,11 @@ out:
52290
52291
52292 struct cfs_psdev_ops libcfs_psdev_ops = {
52293- libcfs_psdev_open,
52294- libcfs_psdev_release,
52295- NULL,
52296- NULL,
52297- libcfs_ioctl
52298+ .p_open = libcfs_psdev_open,
52299+ .p_close = libcfs_psdev_release,
52300+ .p_read = NULL,
52301+ .p_write = NULL,
52302+ .p_ioctl = libcfs_ioctl
52303 };
52304
52305 extern int insert_proc(void);
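
The libcfs hunk is a smaller variant of the same cleanup: the initializer
was already static but positional, and positional initialization binds by
member order, so it silently misassigns if a member is ever added or
reordered. Designated initializers bind by name; for instance (layout
assumed from the member names in the diff):

struct cfs_psdev_ops {
        int (*p_open)(void);
        int (*p_close)(void);
        void *p_read;
        void *p_write;
        int (*p_ioctl)(void);
};

static int my_open(void)  { return 0; }
static int my_close(void) { return 0; }
static int my_ioctl(void) { return 0; }

static struct cfs_psdev_ops ops = {
        .p_open  = my_open,
        .p_close = my_close,
        .p_ioctl = my_ioctl,    /* unnamed members default to zero/NULL */
};
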
52306diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
52307index fcbe836..8a7ada4 100644
52308--- a/drivers/staging/octeon/ethernet-rx.c
52309+++ b/drivers/staging/octeon/ethernet-rx.c
52310@@ -352,14 +352,14 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52311 /* Increment RX stats for virtual ports */
52312 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
52313 #ifdef CONFIG_64BIT
52314- atomic64_add(1,
52315+ atomic64_add_unchecked(1,
52316 (atomic64_t *)&priv->stats.rx_packets);
52317- atomic64_add(skb->len,
52318+ atomic64_add_unchecked(skb->len,
52319 (atomic64_t *)&priv->stats.rx_bytes);
52320 #else
52321- atomic_add(1,
52322+ atomic_add_unchecked(1,
52323 (atomic_t *)&priv->stats.rx_packets);
52324- atomic_add(skb->len,
52325+ atomic_add_unchecked(skb->len,
52326 (atomic_t *)&priv->stats.rx_bytes);
52327 #endif
52328 }
52329@@ -371,10 +371,10 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52330 dev->name);
52331 */
52332 #ifdef CONFIG_64BIT
52333- atomic64_add(1,
52334+ atomic64_add_unchecked(1,
52335 (atomic64_t *)&priv->stats.rx_dropped);
52336 #else
52337- atomic_add(1,
52338+ atomic_add_unchecked(1,
52339 (atomic_t *)&priv->stats.rx_dropped);
52340 #endif
52341 dev_kfree_skb_irq(skb);
52342diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
52343index ee32149..052d1836 100644
52344--- a/drivers/staging/octeon/ethernet.c
52345+++ b/drivers/staging/octeon/ethernet.c
52346@@ -241,11 +241,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
52347 * since the RX tasklet also increments it.
52348 */
52349 #ifdef CONFIG_64BIT
52350- atomic64_add(rx_status.dropped_packets,
52351- (atomic64_t *)&priv->stats.rx_dropped);
52352+ atomic64_add_unchecked(rx_status.dropped_packets,
52353+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
52354 #else
52355- atomic_add(rx_status.dropped_packets,
52356- (atomic_t *)&priv->stats.rx_dropped);
52357+ atomic_add_unchecked(rx_status.dropped_packets,
52358+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
52359 #endif
52360 }
52361
52362diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
52363index 3b476d8..f522d68 100644
52364--- a/drivers/staging/rtl8188eu/include/hal_intf.h
52365+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
52366@@ -225,7 +225,7 @@ struct hal_ops {
52367
52368 void (*hal_notch_filter)(struct adapter *adapter, bool enable);
52369 void (*hal_reset_security_engine)(struct adapter *adapter);
52370-};
52371+} __no_const;
52372
52373 enum rt_eeprom_type {
52374 EEPROM_93C46,
52375diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
52376index 070cc03..6806e37 100644
52377--- a/drivers/staging/rtl8712/rtl871x_io.h
52378+++ b/drivers/staging/rtl8712/rtl871x_io.h
52379@@ -108,7 +108,7 @@ struct _io_ops {
52380 u8 *pmem);
52381 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
52382 u8 *pmem);
52383-};
52384+} __no_const;
52385
52386 struct io_req {
52387 struct list_head list;
52388diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
52389index 46dad63..fe4acdc 100644
52390--- a/drivers/staging/unisys/visorchipset/visorchipset.h
52391+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
52392@@ -226,7 +226,7 @@ struct visorchipset_busdev_notifiers {
52393 void (*device_resume)(ulong bus_no, ulong dev_no);
52394 int (*get_channel_info)(uuid_le type_uuid, ulong *min_size,
52395 ulong *max_size);
52396-};
52397+} __no_const;
52398
52399 /* These functions live inside visorchipset, and will be called to indicate
52400 * responses to specific events (by code outside of visorchipset).
52401@@ -241,7 +241,7 @@ struct visorchipset_busdev_responders {
52402 void (*device_destroy)(ulong bus_no, ulong dev_no, int response);
52403 void (*device_pause)(ulong bus_no, ulong dev_no, int response);
52404 void (*device_resume)(ulong bus_no, ulong dev_no, int response);
52405-};
52406+} __no_const;
52407
52408 /** Register functions (in the bus driver) to get called by visorchipset
52409 * whenever a bus or device appears for which this service partition is
52410diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
52411index 9512af6..045bf5a 100644
52412--- a/drivers/target/sbp/sbp_target.c
52413+++ b/drivers/target/sbp/sbp_target.c
52414@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
52415
52416 #define SESSION_MAINTENANCE_INTERVAL HZ
52417
52418-static atomic_t login_id = ATOMIC_INIT(0);
52419+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
52420
52421 static void session_maintenance_work(struct work_struct *);
52422 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
52423@@ -444,7 +444,7 @@ static void sbp_management_request_login(
52424 login->lun = se_lun;
52425 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
52426 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
52427- login->login_id = atomic_inc_return(&login_id);
52428+ login->login_id = atomic_inc_return_unchecked(&login_id);
52429
52430 login->tgt_agt = sbp_target_agent_register(login);
52431 if (IS_ERR(login->tgt_agt)) {
52432diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
52433index 54da2a4..3dd6f57 100644
52434--- a/drivers/target/target_core_device.c
52435+++ b/drivers/target/target_core_device.c
52436@@ -1469,7 +1469,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
52437 spin_lock_init(&dev->se_tmr_lock);
52438 spin_lock_init(&dev->qf_cmd_lock);
52439 sema_init(&dev->caw_sem, 1);
52440- atomic_set(&dev->dev_ordered_id, 0);
52441+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
52442 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
52443 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
52444 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
52445diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
52446index ac3cbab..f0d1dd2 100644
52447--- a/drivers/target/target_core_transport.c
52448+++ b/drivers/target/target_core_transport.c
52449@@ -1168,7 +1168,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
52450 * Used to determine when ORDERED commands should go from
52451 * Dormant to Active status.
52452 */
52453- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
52454+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
52455 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
52456 cmd->se_ordered_id, cmd->sam_task_attr,
52457 dev->transport->name);
52458diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
52459index 65a98a9..d93d3a8 100644
52460--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
52461+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
52462@@ -277,8 +277,10 @@ static int int3400_thermal_probe(struct platform_device *pdev)
52463 platform_set_drvdata(pdev, priv);
52464
52465 if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
52466- int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
52467- int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
52468+ pax_open_kernel();
52469+ *(void **)&int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
52470+ *(void **)&int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
52471+ pax_close_kernel();
52472 }
52473 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
52474 priv, &int3400_thermal_ops,
52475diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
52476index d717f3d..cae1cc3e 100644
52477--- a/drivers/thermal/of-thermal.c
52478+++ b/drivers/thermal/of-thermal.c
52479@@ -31,6 +31,7 @@
52480 #include <linux/export.h>
52481 #include <linux/string.h>
52482 #include <linux/thermal.h>
52483+#include <linux/mm.h>
52484
52485 #include "thermal_core.h"
52486
52487@@ -412,9 +413,11 @@ thermal_zone_of_add_sensor(struct device_node *zone,
52488 tz->ops = ops;
52489 tz->sensor_data = data;
52490
52491- tzd->ops->get_temp = of_thermal_get_temp;
52492- tzd->ops->get_trend = of_thermal_get_trend;
52493- tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
52494+ pax_open_kernel();
52495+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
52496+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
52497+ *(void **)&tzd->ops->set_emul_temp = of_thermal_set_emul_temp;
52498+ pax_close_kernel();
52499 mutex_unlock(&tzd->lock);
52500
52501 return tzd;
52502@@ -541,9 +544,11 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
52503 return;
52504
52505 mutex_lock(&tzd->lock);
52506- tzd->ops->get_temp = NULL;
52507- tzd->ops->get_trend = NULL;
52508- tzd->ops->set_emul_temp = NULL;
52509+ pax_open_kernel();
52510+ *(void **)&tzd->ops->get_temp = NULL;
52511+ *(void **)&tzd->ops->get_trend = NULL;
52512+ *(void **)&tzd->ops->set_emul_temp = NULL;
52513+ pax_close_kernel();
52514
52515 tz->ops = NULL;
52516 tz->sensor_data = NULL;
52517diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
52518index fd66f57..48e6376 100644
52519--- a/drivers/tty/cyclades.c
52520+++ b/drivers/tty/cyclades.c
52521@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
52522 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
52523 info->port.count);
52524 #endif
52525- info->port.count++;
52526+ atomic_inc(&info->port.count);
52527 #ifdef CY_DEBUG_COUNT
52528 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
52529- current->pid, info->port.count);
52530+ current->pid, atomic_read(&info->port.count));
52531 #endif
52532
52533 /*
52534@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
52535 for (j = 0; j < cy_card[i].nports; j++) {
52536 info = &cy_card[i].ports[j];
52537
52538- if (info->port.count) {
52539+ if (atomic_read(&info->port.count)) {
52540 /* XXX is the ldisc num worth this? */
52541 struct tty_struct *tty;
52542 struct tty_ldisc *ld;
52543diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
52544index 4fcec1d..5a036f7 100644
52545--- a/drivers/tty/hvc/hvc_console.c
52546+++ b/drivers/tty/hvc/hvc_console.c
52547@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
52548
52549 spin_lock_irqsave(&hp->port.lock, flags);
52550 /* Check and then increment for fast path open. */
52551- if (hp->port.count++ > 0) {
52552+ if (atomic_inc_return(&hp->port.count) > 1) {
52553 spin_unlock_irqrestore(&hp->port.lock, flags);
52554 hvc_kick();
52555 return 0;
52556@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52557
52558 spin_lock_irqsave(&hp->port.lock, flags);
52559
52560- if (--hp->port.count == 0) {
52561+ if (atomic_dec_return(&hp->port.count) == 0) {
52562 spin_unlock_irqrestore(&hp->port.lock, flags);
52563 /* We are done with the tty pointer now. */
52564 tty_port_tty_set(&hp->port, NULL);
52565@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52566 */
52567 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
52568 } else {
52569- if (hp->port.count < 0)
52570+ if (atomic_read(&hp->port.count) < 0)
52571 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
52572- hp->vtermno, hp->port.count);
52573+ hp->vtermno, atomic_read(&hp->port.count));
52574 spin_unlock_irqrestore(&hp->port.lock, flags);
52575 }
52576 }
52577@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
52578 * open->hangup case this can be called after the final close so prevent
52579 * that from happening for now.
52580 */
52581- if (hp->port.count <= 0) {
52582+ if (atomic_read(&hp->port.count) <= 0) {
52583 spin_unlock_irqrestore(&hp->port.lock, flags);
52584 return;
52585 }
52586
52587- hp->port.count = 0;
52588+ atomic_set(&hp->port.count, 0);
52589 spin_unlock_irqrestore(&hp->port.lock, flags);
52590 tty_port_tty_set(&hp->port, NULL);
52591
52592@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
52593 return -EPIPE;
52594
52595 /* FIXME what's this (unprotected) check for? */
52596- if (hp->port.count <= 0)
52597+ if (atomic_read(&hp->port.count) <= 0)
52598 return -EIO;
52599
52600 spin_lock_irqsave(&hp->lock, flags);
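
The tty port conversions (cyclades above, hvc here, gdm_tty earlier, hvcs
and hvsi below) turn the port open count into an atomic, and the changed
constant in the hvc fast path is deliberate, not an off-by-one: the old
code tested the value before the post-increment, while atomic_inc_return()
yields the value after it. A small userspace demonstration that both forms
detect "already open" identically:

#include <assert.h>

int main(void)
{
        int c1 = 0, c2 = 0;
        int old1 = (c1++ > 0), new1 = (++c2 > 1);       /* first open */
        int old2 = (c1++ > 0), new2 = (++c2 > 1);       /* second open */

        assert(old1 == 0 && new1 == 0); /* fast path not taken */
        assert(old2 == 1 && new2 == 1); /* fast path taken */
        return 0;
}
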
52601diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
52602index 81ff7e1..dfb7b71 100644
52603--- a/drivers/tty/hvc/hvcs.c
52604+++ b/drivers/tty/hvc/hvcs.c
52605@@ -83,6 +83,7 @@
52606 #include <asm/hvcserver.h>
52607 #include <asm/uaccess.h>
52608 #include <asm/vio.h>
52609+#include <asm/local.h>
52610
52611 /*
52612 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
52613@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
52614
52615 spin_lock_irqsave(&hvcsd->lock, flags);
52616
52617- if (hvcsd->port.count > 0) {
52618+ if (atomic_read(&hvcsd->port.count) > 0) {
52619 spin_unlock_irqrestore(&hvcsd->lock, flags);
52620 printk(KERN_INFO "HVCS: vterm state unchanged. "
52621 "The hvcs device node is still in use.\n");
52622@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
52623 }
52624 }
52625
52626- hvcsd->port.count = 0;
52627+ atomic_set(&hvcsd->port.count, 0);
52628 hvcsd->port.tty = tty;
52629 tty->driver_data = hvcsd;
52630
52631@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
52632 unsigned long flags;
52633
52634 spin_lock_irqsave(&hvcsd->lock, flags);
52635- hvcsd->port.count++;
52636+ atomic_inc(&hvcsd->port.count);
52637 hvcsd->todo_mask |= HVCS_SCHED_READ;
52638 spin_unlock_irqrestore(&hvcsd->lock, flags);
52639
52640@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52641 hvcsd = tty->driver_data;
52642
52643 spin_lock_irqsave(&hvcsd->lock, flags);
52644- if (--hvcsd->port.count == 0) {
52645+ if (atomic_dec_and_test(&hvcsd->port.count)) {
52646
52647 vio_disable_interrupts(hvcsd->vdev);
52648
52649@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52650
52651 free_irq(irq, hvcsd);
52652 return;
52653- } else if (hvcsd->port.count < 0) {
52654+ } else if (atomic_read(&hvcsd->port.count) < 0) {
52655 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
52656 " is missmanaged.\n",
52657- hvcsd->vdev->unit_address, hvcsd->port.count);
52658+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
52659 }
52660
52661 spin_unlock_irqrestore(&hvcsd->lock, flags);
52662@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52663
52664 spin_lock_irqsave(&hvcsd->lock, flags);
52665 /* Preserve this so that we know how many kref refs to put */
52666- temp_open_count = hvcsd->port.count;
52667+ temp_open_count = atomic_read(&hvcsd->port.count);
52668
52669 /*
52670 * Don't kref put inside the spinlock because the destruction
52671@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52672 tty->driver_data = NULL;
52673 hvcsd->port.tty = NULL;
52674
52675- hvcsd->port.count = 0;
52676+ atomic_set(&hvcsd->port.count, 0);
52677
52678 /* This will drop any buffered data on the floor which is OK in a hangup
52679 * scenario. */
52680@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
52681 * the middle of a write operation? This is a crummy place to do this
52682 * but we want to keep it all in the spinlock.
52683 */
52684- if (hvcsd->port.count <= 0) {
52685+ if (atomic_read(&hvcsd->port.count) <= 0) {
52686 spin_unlock_irqrestore(&hvcsd->lock, flags);
52687 return -ENODEV;
52688 }
52689@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
52690 {
52691 struct hvcs_struct *hvcsd = tty->driver_data;
52692
52693- if (!hvcsd || hvcsd->port.count <= 0)
52694+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
52695 return 0;
52696
52697 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
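
In hvcs.c, `if (--hvcsd->port.count == 0)` becomes `atomic_dec_and_test()`: the decrement and the zero test happen as one indivisible read-modify-write, so exactly one of several racing closers is elected to free the IRQ. A hedged userspace sketch of that election (function and variable names are illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int open_count;

/* kernel analogue: atomic_dec_and_test() -- true for exactly one caller */
static bool dec_and_test(atomic_int *v)
{
	return atomic_fetch_sub(v, 1) == 1;    /* old value 1 -> now zero */
}

static void *closer(void *arg)
{
	(void)arg;
	if (dec_and_test(&open_count))
		puts("I am the last closer: free the IRQ");
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	atomic_init(&open_count, 2);           /* two outstanding opens */
	pthread_create(&a, NULL, closer, NULL);
	pthread_create(&b, NULL, closer, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
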
52698diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
52699index 4190199..06d5bfa 100644
52700--- a/drivers/tty/hvc/hvsi.c
52701+++ b/drivers/tty/hvc/hvsi.c
52702@@ -85,7 +85,7 @@ struct hvsi_struct {
52703 int n_outbuf;
52704 uint32_t vtermno;
52705 uint32_t virq;
52706- atomic_t seqno; /* HVSI packet sequence number */
52707+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
52708 uint16_t mctrl;
52709 uint8_t state; /* HVSI protocol state */
52710 uint8_t flags;
52711@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
52712
52713 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
52714 packet.hdr.len = sizeof(struct hvsi_query_response);
52715- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52716+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52717 packet.verb = VSV_SEND_VERSION_NUMBER;
52718 packet.u.version = HVSI_VERSION;
52719 packet.query_seqno = query_seqno+1;
52720@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
52721
52722 packet.hdr.type = VS_QUERY_PACKET_HEADER;
52723 packet.hdr.len = sizeof(struct hvsi_query);
52724- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52725+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52726 packet.verb = verb;
52727
52728 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
52729@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
52730 int wrote;
52731
52732 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
52733- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52734+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52735 packet.hdr.len = sizeof(struct hvsi_control);
52736 packet.verb = VSV_SET_MODEM_CTL;
52737 packet.mask = HVSI_TSDTR;
52738@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
52739 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
52740
52741 packet.hdr.type = VS_DATA_PACKET_HEADER;
52742- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52743+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52744 packet.hdr.len = count + sizeof(struct hvsi_header);
52745 memcpy(&packet.data, buf, count);
52746
52747@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
52748 struct hvsi_control packet __ALIGNED__;
52749
52750 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
52751- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52752+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52753 packet.hdr.len = 6;
52754 packet.verb = VSV_CLOSE_PROTOCOL;
52755
52756@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
52757
52758 tty_port_tty_set(&hp->port, tty);
52759 spin_lock_irqsave(&hp->lock, flags);
52760- hp->port.count++;
52761+ atomic_inc(&hp->port.count);
52762 atomic_set(&hp->seqno, 0);
52763 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
52764 spin_unlock_irqrestore(&hp->lock, flags);
52765@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
52766
52767 spin_lock_irqsave(&hp->lock, flags);
52768
52769- if (--hp->port.count == 0) {
52770+ if (atomic_dec_return(&hp->port.count) == 0) {
52771 tty_port_tty_set(&hp->port, NULL);
52772 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
52773
52774@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
52775
52776 spin_lock_irqsave(&hp->lock, flags);
52777 }
52778- } else if (hp->port.count < 0)
52779+ } else if (atomic_read(&hp->port.count) < 0)
52780 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
52781- hp - hvsi_ports, hp->port.count);
52782+ hp - hvsi_ports, atomic_read(&hp->port.count));
52783
52784 spin_unlock_irqrestore(&hp->lock, flags);
52785 }
52786@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
52787 tty_port_tty_set(&hp->port, NULL);
52788
52789 spin_lock_irqsave(&hp->lock, flags);
52790- hp->port.count = 0;
52791+ atomic_set(&hp->port.count, 0);
52792 hp->n_outbuf = 0;
52793 spin_unlock_irqrestore(&hp->lock, flags);
52794 }
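
hvsi.c switches the packet sequence number from `atomic_t` to `atomic_unchecked_t`. Under PaX's REFCOUNT hardening, ordinary `atomic_t` increments trap on overflow to catch reference-count bugs; a wire sequence number is expected to wrap, so it opts out via the `*_unchecked` variants. A userspace analogue of an intentionally wrapping counter (the type and names are illustrative):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint16_t seqno;

/* wraps from 65535 back to 0 -- intentional, hence "unchecked" */
static uint16_t next_seqno(void)
{
	return (uint16_t)(atomic_fetch_add(&seqno, 1) + 1);
}

int main(void)
{
	atomic_store(&seqno, (uint16_t)(UINT16_MAX - 1));
	printf("%u %u %u\n", (unsigned)next_seqno(),
	       (unsigned)next_seqno(), (unsigned)next_seqno());
	return 0;
}
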
52795diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
52796index a270f04..7c77b5d 100644
52797--- a/drivers/tty/hvc/hvsi_lib.c
52798+++ b/drivers/tty/hvc/hvsi_lib.c
52799@@ -8,7 +8,7 @@
52800
52801 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
52802 {
52803- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
52804+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
52805
52806 /* Assumes that always succeeds, works in practice */
52807 return pv->put_chars(pv->termno, (char *)packet, packet->len);
52808@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
52809
52810 /* Reset state */
52811 pv->established = 0;
52812- atomic_set(&pv->seqno, 0);
52813+ atomic_set_unchecked(&pv->seqno, 0);
52814
52815 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
52816
52817diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
52818index 345cebb..d5a1e9e 100644
52819--- a/drivers/tty/ipwireless/tty.c
52820+++ b/drivers/tty/ipwireless/tty.c
52821@@ -28,6 +28,7 @@
52822 #include <linux/tty_driver.h>
52823 #include <linux/tty_flip.h>
52824 #include <linux/uaccess.h>
52825+#include <asm/local.h>
52826
52827 #include "tty.h"
52828 #include "network.h"
52829@@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
52830 return -ENODEV;
52831
52832 mutex_lock(&tty->ipw_tty_mutex);
52833- if (tty->port.count == 0)
52834+ if (atomic_read(&tty->port.count) == 0)
52835 tty->tx_bytes_queued = 0;
52836
52837- tty->port.count++;
52838+ atomic_inc(&tty->port.count);
52839
52840 tty->port.tty = linux_tty;
52841 linux_tty->driver_data = tty;
52842@@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
52843
52844 static void do_ipw_close(struct ipw_tty *tty)
52845 {
52846- tty->port.count--;
52847-
52848- if (tty->port.count == 0) {
52849+ if (atomic_dec_return(&tty->port.count) == 0) {
52850 struct tty_struct *linux_tty = tty->port.tty;
52851
52852 if (linux_tty != NULL) {
52853@@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
52854 return;
52855
52856 mutex_lock(&tty->ipw_tty_mutex);
52857- if (tty->port.count == 0) {
52858+ if (atomic_read(&tty->port.count) == 0) {
52859 mutex_unlock(&tty->ipw_tty_mutex);
52860 return;
52861 }
52862@@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
52863
52864 mutex_lock(&tty->ipw_tty_mutex);
52865
52866- if (!tty->port.count) {
52867+ if (!atomic_read(&tty->port.count)) {
52868 mutex_unlock(&tty->ipw_tty_mutex);
52869 return;
52870 }
52871@@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty,
52872 return -ENODEV;
52873
52874 mutex_lock(&tty->ipw_tty_mutex);
52875- if (!tty->port.count) {
52876+ if (!atomic_read(&tty->port.count)) {
52877 mutex_unlock(&tty->ipw_tty_mutex);
52878 return -EINVAL;
52879 }
52880@@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
52881 if (!tty)
52882 return -ENODEV;
52883
52884- if (!tty->port.count)
52885+ if (!atomic_read(&tty->port.count))
52886 return -EINVAL;
52887
52888 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
52889@@ -279,7 +278,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
52890 if (!tty)
52891 return 0;
52892
52893- if (!tty->port.count)
52894+ if (!atomic_read(&tty->port.count))
52895 return 0;
52896
52897 return tty->tx_bytes_queued;
52898@@ -360,7 +359,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
52899 if (!tty)
52900 return -ENODEV;
52901
52902- if (!tty->port.count)
52903+ if (!atomic_read(&tty->port.count))
52904 return -EINVAL;
52905
52906 return get_control_lines(tty);
52907@@ -376,7 +375,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
52908 if (!tty)
52909 return -ENODEV;
52910
52911- if (!tty->port.count)
52912+ if (!atomic_read(&tty->port.count))
52913 return -EINVAL;
52914
52915 return set_control_lines(tty, set, clear);
52916@@ -390,7 +389,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
52917 if (!tty)
52918 return -ENODEV;
52919
52920- if (!tty->port.count)
52921+ if (!atomic_read(&tty->port.count))
52922 return -EINVAL;
52923
52924 /* FIXME: Exactly how is the tty object locked here .. */
52925@@ -546,7 +545,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
52926 * are gone */
52927 mutex_lock(&ttyj->ipw_tty_mutex);
52928 }
52929- while (ttyj->port.count)
52930+ while (atomic_read(&ttyj->port.count))
52931 do_ipw_close(ttyj);
52932 ipwireless_disassociate_network_ttys(network,
52933 ttyj->channel_idx);
52934diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
52935index 14c54e0..1efd4f2 100644
52936--- a/drivers/tty/moxa.c
52937+++ b/drivers/tty/moxa.c
52938@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
52939 }
52940
52941 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
52942- ch->port.count++;
52943+ atomic_inc(&ch->port.count);
52944 tty->driver_data = ch;
52945 tty_port_tty_set(&ch->port, tty);
52946 mutex_lock(&ch->port.mutex);
52947diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
52948index c434376..114ce13 100644
52949--- a/drivers/tty/n_gsm.c
52950+++ b/drivers/tty/n_gsm.c
52951@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
52952 spin_lock_init(&dlci->lock);
52953 mutex_init(&dlci->mutex);
52954 dlci->fifo = &dlci->_fifo;
52955- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
52956+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
52957 kfree(dlci);
52958 return NULL;
52959 }
52960@@ -2958,7 +2958,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
52961 struct gsm_dlci *dlci = tty->driver_data;
52962 struct tty_port *port = &dlci->port;
52963
52964- port->count++;
52965+ atomic_inc(&port->count);
52966 tty_port_tty_set(port, tty);
52967
52968 dlci->modem_rx = 0;
52969diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
52970index 6f8cf3a..c690dfb 100644
52971--- a/drivers/tty/n_tty.c
52972+++ b/drivers/tty/n_tty.c
52973@@ -115,7 +115,7 @@ struct n_tty_data {
52974 int minimum_to_wake;
52975
52976 /* consumer-published */
52977- size_t read_tail;
52978+ size_t read_tail __intentional_overflow(-1);
52979 size_t line_start;
52980
52981 /* protected by output lock */
52982@@ -2555,6 +2555,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
52983 {
52984 *ops = tty_ldisc_N_TTY;
52985 ops->owner = NULL;
52986- ops->refcount = ops->flags = 0;
52987+ atomic_set(&ops->refcount, 0);
52988+ ops->flags = 0;
52989 }
52990 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
52991diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
52992index 6e1f150..c3ba598 100644
52993--- a/drivers/tty/pty.c
52994+++ b/drivers/tty/pty.c
52995@@ -850,8 +850,10 @@ static void __init unix98_pty_init(void)
52996 panic("Couldn't register Unix98 pts driver");
52997
52998 /* Now create the /dev/ptmx special device */
52999+ pax_open_kernel();
53000 tty_default_fops(&ptmx_fops);
53001- ptmx_fops.open = ptmx_open;
53002+ *(void **)&ptmx_fops.open = ptmx_open;
53003+ pax_close_kernel();
53004
53005 cdev_init(&ptmx_cdev, &ptmx_fops);
53006 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
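
The pty.c hunk wraps the one sanctioned write to `ptmx_fops.open` in `pax_open_kernel()`/`pax_close_kernel()`, which under PaX temporarily lift write protection on otherwise read-only kernel data (on x86 by toggling CR0.WP). A rough userspace analogue of the open-write-close discipline using `mprotect()` on a page-backed ops table -- everything here is illustrative, not the kernel mechanism:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* a "constified" ops table living on its own page */
struct file_ops { int (*open)(void); };

static int default_open(void) { return -1; }
static int ptmx_open(void)    { return 0;  }

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	struct file_ops *fops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
				     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (fops == MAP_FAILED)
		return 1;
	fops->open = default_open;
	mprotect(fops, pagesz, PROT_READ);              /* normally read-only */

	mprotect(fops, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel()  */
	fops->open = ptmx_open;                         /* the one legal write */
	mprotect(fops, pagesz, PROT_READ);              /* pax_close_kernel() */

	printf("open() returns %d\n", fops->open());
	return 0;
}
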
53007diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
53008index 383c4c7..d408e21 100644
53009--- a/drivers/tty/rocket.c
53010+++ b/drivers/tty/rocket.c
53011@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
53012 tty->driver_data = info;
53013 tty_port_tty_set(port, tty);
53014
53015- if (port->count++ == 0) {
53016+ if (atomic_inc_return(&port->count) == 1) {
53017 atomic_inc(&rp_num_ports_open);
53018
53019 #ifdef ROCKET_DEBUG_OPEN
53020@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
53021 #endif
53022 }
53023 #ifdef ROCKET_DEBUG_OPEN
53024- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
53025+	printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
53026 #endif
53027
53028 /*
53029@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
53030 spin_unlock_irqrestore(&info->port.lock, flags);
53031 return;
53032 }
53033- if (info->port.count)
53034+ if (atomic_read(&info->port.count))
53035 atomic_dec(&rp_num_ports_open);
53036 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
53037 spin_unlock_irqrestore(&info->port.lock, flags);
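
rocket.c turns `if (port->count++ == 0)` into `if (atomic_inc_return(&port->count) == 1)`: the post-increment is no longer a separate read and write, so the "first opener" is chosen atomically. The equivalence in a userspace sketch (illustrative names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int port_count;

/* kernel analogue: atomic_inc_return(&port->count) == 1 */
static bool first_open(void)
{
	/* fetch_add returns the old value: old 0 means inc_return would be 1 */
	return atomic_fetch_add(&port_count, 1) == 0;
}

int main(void)
{
	printf("first? %d\n", first_open());   /* 1 */
	printf("first? %d\n", first_open());   /* 0 */
	return 0;
}
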
53038diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
53039index aa28209..e08fb85 100644
53040--- a/drivers/tty/serial/ioc4_serial.c
53041+++ b/drivers/tty/serial/ioc4_serial.c
53042@@ -437,7 +437,7 @@ struct ioc4_soft {
53043 } is_intr_info[MAX_IOC4_INTR_ENTS];
53044
53045 /* Number of entries active in the above array */
53046- atomic_t is_num_intrs;
53047+ atomic_unchecked_t is_num_intrs;
53048 } is_intr_type[IOC4_NUM_INTR_TYPES];
53049
53050 /* is_ir_lock must be held while
53051@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
53052 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
53053 || (type == IOC4_OTHER_INTR_TYPE)));
53054
53055- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
53056+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
53057 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
53058
53059 /* Save off the lower level interrupt handler */
53060@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
53061
53062 soft = arg;
53063 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
53064- num_intrs = (int)atomic_read(
53065+ num_intrs = (int)atomic_read_unchecked(
53066 &soft->is_intr_type[intr_type].is_num_intrs);
53067
53068 this_mir = this_ir = pending_intrs(soft, intr_type);
53069diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
53070index 129dc5b..1da5bb8 100644
53071--- a/drivers/tty/serial/kgdb_nmi.c
53072+++ b/drivers/tty/serial/kgdb_nmi.c
53073@@ -53,7 +53,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
53074 * I/O utilities that messages sent to the console will automatically
53075 * be displayed on the dbg_io.
53076 */
53077- dbg_io_ops->is_console = true;
53078+ pax_open_kernel();
53079+ *(int *)&dbg_io_ops->is_console = true;
53080+ pax_close_kernel();
53081
53082 return 0;
53083 }
53084diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
53085index a260cde..6b2b5ce 100644
53086--- a/drivers/tty/serial/kgdboc.c
53087+++ b/drivers/tty/serial/kgdboc.c
53088@@ -24,8 +24,9 @@
53089 #define MAX_CONFIG_LEN 40
53090
53091 static struct kgdb_io kgdboc_io_ops;
53092+static struct kgdb_io kgdboc_io_ops_console;
53093
53094-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
53095+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
53096 static int configured = -1;
53097
53098 static char config[MAX_CONFIG_LEN];
53099@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
53100 kgdboc_unregister_kbd();
53101 if (configured == 1)
53102 kgdb_unregister_io_module(&kgdboc_io_ops);
53103+ else if (configured == 2)
53104+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
53105 }
53106
53107 static int configure_kgdboc(void)
53108@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
53109 int err;
53110 char *cptr = config;
53111 struct console *cons;
53112+ int is_console = 0;
53113
53114 err = kgdboc_option_setup(config);
53115 if (err || !strlen(config) || isspace(config[0]))
53116 goto noconfig;
53117
53118 err = -ENODEV;
53119- kgdboc_io_ops.is_console = 0;
53120 kgdb_tty_driver = NULL;
53121
53122 kgdboc_use_kms = 0;
53123@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
53124 int idx;
53125 if (cons->device && cons->device(cons, &idx) == p &&
53126 idx == tty_line) {
53127- kgdboc_io_ops.is_console = 1;
53128+ is_console = 1;
53129 break;
53130 }
53131 cons = cons->next;
53132@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
53133 kgdb_tty_line = tty_line;
53134
53135 do_register:
53136- err = kgdb_register_io_module(&kgdboc_io_ops);
53137+ if (is_console) {
53138+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
53139+ configured = 2;
53140+ } else {
53141+ err = kgdb_register_io_module(&kgdboc_io_ops);
53142+ configured = 1;
53143+ }
53144 if (err)
53145 goto noconfig;
53146
53147@@ -205,8 +214,6 @@ do_register:
53148 if (err)
53149 goto nmi_con_failed;
53150
53151- configured = 1;
53152-
53153 return 0;
53154
53155 nmi_con_failed:
53156@@ -223,7 +230,7 @@ noconfig:
53157 static int __init init_kgdboc(void)
53158 {
53159 /* Already configured? */
53160- if (configured == 1)
53161+ if (configured >= 1)
53162 return 0;
53163
53164 return configure_kgdboc();
53165@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
53166 if (config[len - 1] == '\n')
53167 config[len - 1] = '\0';
53168
53169- if (configured == 1)
53170+ if (configured >= 1)
53171 cleanup_kgdboc();
53172
53173 /* Go and configure with the new params. */
53174@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
53175 .post_exception = kgdboc_post_exp_handler,
53176 };
53177
53178+static struct kgdb_io kgdboc_io_ops_console = {
53179+ .name = "kgdboc",
53180+ .read_char = kgdboc_get_char,
53181+ .write_char = kgdboc_put_char,
53182+ .pre_exception = kgdboc_pre_exp_handler,
53183+ .post_exception = kgdboc_post_exp_handler,
53184+ .is_console = 1
53185+};
53186+
53187 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
53188 /* This is only available if kgdboc is a built in for early debugging */
53189 static int __init kgdboc_early_init(char *opt)
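
Rather than flipping `kgdboc_io_ops.is_console` at runtime -- which would require the shared `struct kgdb_io` to stay writable -- the kgdboc hunks register one of two statically initialized structs and record which one in `configured` (1 vs 2) so cleanup unregisters the right one. The shape of the pattern in plain C (types and names are illustrative):

#include <stdio.h>

struct io_ops { const char *name; int is_console; };

/* two immutable variants instead of one mutable struct */
static const struct io_ops io_plain   = { "kgdboc", 0 };
static const struct io_ops io_console = { "kgdboc", 1 };

static int configured;               /* 0 = none, 1 = plain, 2 = console */

static void do_register(int is_console)
{
	const struct io_ops *ops = is_console ? &io_console : &io_plain;
	configured = is_console ? 2 : 1;
	printf("registered %s (is_console=%d)\n", ops->name, ops->is_console);
}

static void cleanup(void)
{
	if (configured == 1)
		printf("unregister plain ops\n");
	else if (configured == 2)
		printf("unregister console ops\n");
	configured = 0;
}

int main(void)
{
	do_register(1);
	cleanup();
	return 0;
}
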
53190diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
53191index c88b522..e763029 100644
53192--- a/drivers/tty/serial/msm_serial.c
53193+++ b/drivers/tty/serial/msm_serial.c
53194@@ -1028,7 +1028,7 @@ static struct uart_driver msm_uart_driver = {
53195 .cons = MSM_CONSOLE,
53196 };
53197
53198-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
53199+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
53200
53201 static const struct of_device_id msm_uartdm_table[] = {
53202 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
53203@@ -1052,7 +1052,7 @@ static int msm_serial_probe(struct platform_device *pdev)
53204 line = pdev->id;
53205
53206 if (line < 0)
53207- line = atomic_inc_return(&msm_uart_next_id) - 1;
53208+ line = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
53209
53210 if (unlikely(line < 0 || line >= UART_NR))
53211 return -ENXIO;
53212diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
53213index 107e807..d4a02fa 100644
53214--- a/drivers/tty/serial/samsung.c
53215+++ b/drivers/tty/serial/samsung.c
53216@@ -480,11 +480,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
53217 }
53218 }
53219
53220+static int s3c64xx_serial_startup(struct uart_port *port);
53221 static int s3c24xx_serial_startup(struct uart_port *port)
53222 {
53223 struct s3c24xx_uart_port *ourport = to_ourport(port);
53224 int ret;
53225
53226+ /* Startup sequence is different for s3c64xx and higher SoC's */
53227+ if (s3c24xx_serial_has_interrupt_mask(port))
53228+ return s3c64xx_serial_startup(port);
53229+
53230 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
53231 port, (unsigned long long)port->mapbase, port->membase);
53232
53233@@ -1169,10 +1174,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
53234 /* setup info for port */
53235 port->dev = &platdev->dev;
53236
53237- /* Startup sequence is different for s3c64xx and higher SoC's */
53238- if (s3c24xx_serial_has_interrupt_mask(port))
53239- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
53240-
53241 port->uartclk = 1;
53242
53243 if (cfg->uart_flags & UPF_CONS_FLOW) {
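
samsung.c previously patched the shared `s3c24xx_serial_ops.startup` pointer at init time; the hunk instead leaves the ops table untouched and branches inside `s3c24xx_serial_startup()`, dispatching to the s3c64xx variant when the port uses interrupt masks. The same idea in miniature (illustrative names):

#include <stdbool.h>
#include <stdio.h>

struct uart_port { bool has_interrupt_mask; };

static int s3c64xx_startup(struct uart_port *p)
{
	(void)p;
	puts("s3c64xx startup path");
	return 0;
}

/* one entry point; the variant is chosen per call, not by patching ops */
static int s3c24xx_startup(struct uart_port *p)
{
	if (p->has_interrupt_mask)
		return s3c64xx_startup(p);
	puts("s3c24xx startup path");
	return 0;
}

int main(void)
{
	struct uart_port older = { false }, newer = { true };
	s3c24xx_startup(&older);
	s3c24xx_startup(&newer);
	return 0;
}
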
53244diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
53245index 984605b..e538330 100644
53246--- a/drivers/tty/serial/serial_core.c
53247+++ b/drivers/tty/serial/serial_core.c
53248@@ -1396,7 +1396,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
53249 state = drv->state + tty->index;
53250 port = &state->port;
53251 spin_lock_irq(&port->lock);
53252- --port->count;
53253+ atomic_dec(&port->count);
53254 spin_unlock_irq(&port->lock);
53255 return;
53256 }
53257@@ -1406,7 +1406,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
53258
53259 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
53260
53261- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
53262+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
53263 return;
53264
53265 /*
53266@@ -1530,7 +1530,7 @@ static void uart_hangup(struct tty_struct *tty)
53267 uart_flush_buffer(tty);
53268 uart_shutdown(tty, state);
53269 spin_lock_irqsave(&port->lock, flags);
53270- port->count = 0;
53271+ atomic_set(&port->count, 0);
53272 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
53273 spin_unlock_irqrestore(&port->lock, flags);
53274 tty_port_tty_set(port, NULL);
53275@@ -1617,7 +1617,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
53276 pr_debug("uart_open(%d) called\n", line);
53277
53278 spin_lock_irq(&port->lock);
53279- ++port->count;
53280+ atomic_inc(&port->count);
53281 spin_unlock_irq(&port->lock);
53282
53283 /*
53284diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
53285index b799170..87dafd5 100644
53286--- a/drivers/tty/synclink.c
53287+++ b/drivers/tty/synclink.c
53288@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53289
53290 if (debug_level >= DEBUG_LEVEL_INFO)
53291 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
53292- __FILE__,__LINE__, info->device_name, info->port.count);
53293+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53294
53295 if (tty_port_close_start(&info->port, tty, filp) == 0)
53296 goto cleanup;
53297@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53298 cleanup:
53299 if (debug_level >= DEBUG_LEVEL_INFO)
53300 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
53301- tty->driver->name, info->port.count);
53302+ tty->driver->name, atomic_read(&info->port.count));
53303
53304 } /* end of mgsl_close() */
53305
53306@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
53307
53308 mgsl_flush_buffer(tty);
53309 shutdown(info);
53310-
53311- info->port.count = 0;
53312+
53313+ atomic_set(&info->port.count, 0);
53314 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53315 info->port.tty = NULL;
53316
53317@@ -3296,10 +3296,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53318
53319 if (debug_level >= DEBUG_LEVEL_INFO)
53320 printk("%s(%d):block_til_ready before block on %s count=%d\n",
53321- __FILE__,__LINE__, tty->driver->name, port->count );
53322+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53323
53324 spin_lock_irqsave(&info->irq_spinlock, flags);
53325- port->count--;
53326+ atomic_dec(&port->count);
53327 spin_unlock_irqrestore(&info->irq_spinlock, flags);
53328 port->blocked_open++;
53329
53330@@ -3327,7 +3327,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53331
53332 if (debug_level >= DEBUG_LEVEL_INFO)
53333 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
53334- __FILE__,__LINE__, tty->driver->name, port->count );
53335+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53336
53337 tty_unlock(tty);
53338 schedule();
53339@@ -3339,12 +3339,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53340
53341 /* FIXME: Racy on hangup during close wait */
53342 if (!tty_hung_up_p(filp))
53343- port->count++;
53344+ atomic_inc(&port->count);
53345 port->blocked_open--;
53346
53347 if (debug_level >= DEBUG_LEVEL_INFO)
53348 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
53349- __FILE__,__LINE__, tty->driver->name, port->count );
53350+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53351
53352 if (!retval)
53353 port->flags |= ASYNC_NORMAL_ACTIVE;
53354@@ -3396,7 +3396,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53355
53356 if (debug_level >= DEBUG_LEVEL_INFO)
53357 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
53358- __FILE__,__LINE__,tty->driver->name, info->port.count);
53359+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53360
53361 /* If port is closing, signal caller to try again */
53362 if (info->port.flags & ASYNC_CLOSING){
53363@@ -3415,10 +3415,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53364 spin_unlock_irqrestore(&info->netlock, flags);
53365 goto cleanup;
53366 }
53367- info->port.count++;
53368+ atomic_inc(&info->port.count);
53369 spin_unlock_irqrestore(&info->netlock, flags);
53370
53371- if (info->port.count == 1) {
53372+ if (atomic_read(&info->port.count) == 1) {
53373 /* 1st open on this device, init hardware */
53374 retval = startup(info);
53375 if (retval < 0)
53376@@ -3442,8 +3442,8 @@ cleanup:
53377 if (retval) {
53378 if (tty->count == 1)
53379 info->port.tty = NULL; /* tty layer will release tty struct */
53380- if(info->port.count)
53381- info->port.count--;
53382+ if (atomic_read(&info->port.count))
53383+ atomic_dec(&info->port.count);
53384 }
53385
53386 return retval;
53387@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53388 unsigned short new_crctype;
53389
53390 /* return error if TTY interface open */
53391- if (info->port.count)
53392+ if (atomic_read(&info->port.count))
53393 return -EBUSY;
53394
53395 switch (encoding)
53396@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
53397
53398 /* arbitrate between network and tty opens */
53399 spin_lock_irqsave(&info->netlock, flags);
53400- if (info->port.count != 0 || info->netcount != 0) {
53401+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53402 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53403 spin_unlock_irqrestore(&info->netlock, flags);
53404 return -EBUSY;
53405@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53406 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53407
53408 /* return error if TTY interface open */
53409- if (info->port.count)
53410+ if (atomic_read(&info->port.count))
53411 return -EBUSY;
53412
53413 if (cmd != SIOCWANDEV)
53414diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
53415index 0e8c39b..e0cb171 100644
53416--- a/drivers/tty/synclink_gt.c
53417+++ b/drivers/tty/synclink_gt.c
53418@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53419 tty->driver_data = info;
53420 info->port.tty = tty;
53421
53422- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
53423+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
53424
53425 /* If port is closing, signal caller to try again */
53426 if (info->port.flags & ASYNC_CLOSING){
53427@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53428 mutex_unlock(&info->port.mutex);
53429 goto cleanup;
53430 }
53431- info->port.count++;
53432+ atomic_inc(&info->port.count);
53433 spin_unlock_irqrestore(&info->netlock, flags);
53434
53435- if (info->port.count == 1) {
53436+ if (atomic_read(&info->port.count) == 1) {
53437 /* 1st open on this device, init hardware */
53438 retval = startup(info);
53439 if (retval < 0) {
53440@@ -715,8 +715,8 @@ cleanup:
53441 if (retval) {
53442 if (tty->count == 1)
53443 info->port.tty = NULL; /* tty layer will release tty struct */
53444- if(info->port.count)
53445- info->port.count--;
53446+ if(atomic_read(&info->port.count))
53447+ atomic_dec(&info->port.count);
53448 }
53449
53450 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
53451@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53452
53453 if (sanity_check(info, tty->name, "close"))
53454 return;
53455- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
53456+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
53457
53458 if (tty_port_close_start(&info->port, tty, filp) == 0)
53459 goto cleanup;
53460@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53461 tty_port_close_end(&info->port, tty);
53462 info->port.tty = NULL;
53463 cleanup:
53464- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
53465+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
53466 }
53467
53468 static void hangup(struct tty_struct *tty)
53469@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
53470 shutdown(info);
53471
53472 spin_lock_irqsave(&info->port.lock, flags);
53473- info->port.count = 0;
53474+ atomic_set(&info->port.count, 0);
53475 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53476 info->port.tty = NULL;
53477 spin_unlock_irqrestore(&info->port.lock, flags);
53478@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53479 unsigned short new_crctype;
53480
53481 /* return error if TTY interface open */
53482- if (info->port.count)
53483+ if (atomic_read(&info->port.count))
53484 return -EBUSY;
53485
53486 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
53487@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
53488
53489 /* arbitrate between network and tty opens */
53490 spin_lock_irqsave(&info->netlock, flags);
53491- if (info->port.count != 0 || info->netcount != 0) {
53492+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53493 DBGINFO(("%s hdlc_open busy\n", dev->name));
53494 spin_unlock_irqrestore(&info->netlock, flags);
53495 return -EBUSY;
53496@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53497 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
53498
53499 /* return error if TTY interface open */
53500- if (info->port.count)
53501+ if (atomic_read(&info->port.count))
53502 return -EBUSY;
53503
53504 if (cmd != SIOCWANDEV)
53505@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
53506 if (port == NULL)
53507 continue;
53508 spin_lock(&port->lock);
53509- if ((port->port.count || port->netcount) &&
53510+ if ((atomic_read(&port->port.count) || port->netcount) &&
53511 port->pending_bh && !port->bh_running &&
53512 !port->bh_requested) {
53513 DBGISR(("%s bh queued\n", port->device_name));
53514@@ -3299,7 +3299,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53515 add_wait_queue(&port->open_wait, &wait);
53516
53517 spin_lock_irqsave(&info->lock, flags);
53518- port->count--;
53519+ atomic_dec(&port->count);
53520 spin_unlock_irqrestore(&info->lock, flags);
53521 port->blocked_open++;
53522
53523@@ -3335,7 +3335,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53524 remove_wait_queue(&port->open_wait, &wait);
53525
53526 if (!tty_hung_up_p(filp))
53527- port->count++;
53528+ atomic_inc(&port->count);
53529 port->blocked_open--;
53530
53531 if (!retval)
53532diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
53533index c3f9091..abe4601 100644
53534--- a/drivers/tty/synclinkmp.c
53535+++ b/drivers/tty/synclinkmp.c
53536@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53537
53538 if (debug_level >= DEBUG_LEVEL_INFO)
53539 printk("%s(%d):%s open(), old ref count = %d\n",
53540- __FILE__,__LINE__,tty->driver->name, info->port.count);
53541+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53542
53543 /* If port is closing, signal caller to try again */
53544 if (info->port.flags & ASYNC_CLOSING){
53545@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53546 spin_unlock_irqrestore(&info->netlock, flags);
53547 goto cleanup;
53548 }
53549- info->port.count++;
53550+ atomic_inc(&info->port.count);
53551 spin_unlock_irqrestore(&info->netlock, flags);
53552
53553- if (info->port.count == 1) {
53554+ if (atomic_read(&info->port.count) == 1) {
53555 /* 1st open on this device, init hardware */
53556 retval = startup(info);
53557 if (retval < 0)
53558@@ -796,8 +796,8 @@ cleanup:
53559 if (retval) {
53560 if (tty->count == 1)
53561 info->port.tty = NULL; /* tty layer will release tty struct */
53562- if(info->port.count)
53563- info->port.count--;
53564+ if(atomic_read(&info->port.count))
53565+ atomic_dec(&info->port.count);
53566 }
53567
53568 return retval;
53569@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53570
53571 if (debug_level >= DEBUG_LEVEL_INFO)
53572 printk("%s(%d):%s close() entry, count=%d\n",
53573- __FILE__,__LINE__, info->device_name, info->port.count);
53574+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53575
53576 if (tty_port_close_start(&info->port, tty, filp) == 0)
53577 goto cleanup;
53578@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53579 cleanup:
53580 if (debug_level >= DEBUG_LEVEL_INFO)
53581 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
53582- tty->driver->name, info->port.count);
53583+ tty->driver->name, atomic_read(&info->port.count));
53584 }
53585
53586 /* Called by tty_hangup() when a hangup is signaled.
53587@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
53588 shutdown(info);
53589
53590 spin_lock_irqsave(&info->port.lock, flags);
53591- info->port.count = 0;
53592+ atomic_set(&info->port.count, 0);
53593 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53594 info->port.tty = NULL;
53595 spin_unlock_irqrestore(&info->port.lock, flags);
53596@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53597 unsigned short new_crctype;
53598
53599 /* return error if TTY interface open */
53600- if (info->port.count)
53601+ if (atomic_read(&info->port.count))
53602 return -EBUSY;
53603
53604 switch (encoding)
53605@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
53606
53607 /* arbitrate between network and tty opens */
53608 spin_lock_irqsave(&info->netlock, flags);
53609- if (info->port.count != 0 || info->netcount != 0) {
53610+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53611 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53612 spin_unlock_irqrestore(&info->netlock, flags);
53613 return -EBUSY;
53614@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53615 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53616
53617 /* return error if TTY interface open */
53618- if (info->port.count)
53619+ if (atomic_read(&info->port.count))
53620 return -EBUSY;
53621
53622 if (cmd != SIOCWANDEV)
53623@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
53624 * do not request bottom half processing if the
53625 * device is not open in a normal mode.
53626 */
53627- if ( port && (port->port.count || port->netcount) &&
53628+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
53629 port->pending_bh && !port->bh_running &&
53630 !port->bh_requested ) {
53631 if ( debug_level >= DEBUG_LEVEL_ISR )
53632@@ -3318,10 +3318,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53633
53634 if (debug_level >= DEBUG_LEVEL_INFO)
53635 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
53636- __FILE__,__LINE__, tty->driver->name, port->count );
53637+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53638
53639 spin_lock_irqsave(&info->lock, flags);
53640- port->count--;
53641+ atomic_dec(&port->count);
53642 spin_unlock_irqrestore(&info->lock, flags);
53643 port->blocked_open++;
53644
53645@@ -3349,7 +3349,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53646
53647 if (debug_level >= DEBUG_LEVEL_INFO)
53648 printk("%s(%d):%s block_til_ready() count=%d\n",
53649- __FILE__,__LINE__, tty->driver->name, port->count );
53650+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53651
53652 tty_unlock(tty);
53653 schedule();
53654@@ -3359,12 +3359,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53655 set_current_state(TASK_RUNNING);
53656 remove_wait_queue(&port->open_wait, &wait);
53657 if (!tty_hung_up_p(filp))
53658- port->count++;
53659+ atomic_inc(&port->count);
53660 port->blocked_open--;
53661
53662 if (debug_level >= DEBUG_LEVEL_INFO)
53663 printk("%s(%d):%s block_til_ready() after, count=%d\n",
53664- __FILE__,__LINE__, tty->driver->name, port->count );
53665+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53666
53667 if (!retval)
53668 port->flags |= ASYNC_NORMAL_ACTIVE;
53669diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
53670index 42bad18..447d7a2 100644
53671--- a/drivers/tty/sysrq.c
53672+++ b/drivers/tty/sysrq.c
53673@@ -1084,7 +1084,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
53674 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
53675 size_t count, loff_t *ppos)
53676 {
53677- if (count) {
53678+ if (count && capable(CAP_SYS_ADMIN)) {
53679 char c;
53680
53681 if (get_user(c, buf))
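
The sysrq hunk gates /proc/sysrq-trigger writes on `capable(CAP_SYS_ADMIN)`, so an unprivileged writer who obtains the open file cannot fire sysrq handlers. A userspace analogue of the guard-before-acting pattern -- the euid test merely stands in for `capable()`, and the names are illustrative:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int handle_trigger(char c)
{
	/* analogue of capable(CAP_SYS_ADMIN): refuse before acting */
	if (geteuid() != 0)
		return -EPERM;
	printf("sysrq '%c' accepted\n", c);
	return 0;
}

int main(void)
{
	if (handle_trigger('h') == -EPERM)
		fprintf(stderr, "not privileged: trigger ignored\n");
	return 0;
}
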
53682diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
53683index 2bb4dfc..a7f6e86 100644
53684--- a/drivers/tty/tty_io.c
53685+++ b/drivers/tty/tty_io.c
53686@@ -3503,7 +3503,7 @@ EXPORT_SYMBOL(tty_devnum);
53687
53688 void tty_default_fops(struct file_operations *fops)
53689 {
53690- *fops = tty_fops;
53691+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
53692 }
53693
53694 /*
53695diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
53696index 3737f55..7cef448 100644
53697--- a/drivers/tty/tty_ldisc.c
53698+++ b/drivers/tty/tty_ldisc.c
53699@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
53700 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53701 tty_ldiscs[disc] = new_ldisc;
53702 new_ldisc->num = disc;
53703- new_ldisc->refcount = 0;
53704+ atomic_set(&new_ldisc->refcount, 0);
53705 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53706
53707 return ret;
53708@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
53709 return -EINVAL;
53710
53711 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53712- if (tty_ldiscs[disc]->refcount)
53713+ if (atomic_read(&tty_ldiscs[disc]->refcount))
53714 ret = -EBUSY;
53715 else
53716 tty_ldiscs[disc] = NULL;
53717@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
53718 if (ldops) {
53719 ret = ERR_PTR(-EAGAIN);
53720 if (try_module_get(ldops->owner)) {
53721- ldops->refcount++;
53722+ atomic_inc(&ldops->refcount);
53723 ret = ldops;
53724 }
53725 }
53726@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
53727 unsigned long flags;
53728
53729 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53730- ldops->refcount--;
53731+ atomic_dec(&ldops->refcount);
53732 module_put(ldops->owner);
53733 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53734 }
53735diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
53736index 40b31835..94d92ae 100644
53737--- a/drivers/tty/tty_port.c
53738+++ b/drivers/tty/tty_port.c
53739@@ -236,7 +236,7 @@ void tty_port_hangup(struct tty_port *port)
53740 unsigned long flags;
53741
53742 spin_lock_irqsave(&port->lock, flags);
53743- port->count = 0;
53744+ atomic_set(&port->count, 0);
53745 port->flags &= ~ASYNC_NORMAL_ACTIVE;
53746 tty = port->tty;
53747 if (tty)
53748@@ -398,7 +398,7 @@ int tty_port_block_til_ready(struct tty_port *port,
53749
53750 /* The port lock protects the port counts */
53751 spin_lock_irqsave(&port->lock, flags);
53752- port->count--;
53753+ atomic_dec(&port->count);
53754 port->blocked_open++;
53755 spin_unlock_irqrestore(&port->lock, flags);
53756
53757@@ -440,7 +440,7 @@ int tty_port_block_til_ready(struct tty_port *port,
53758 we must not mess that up further */
53759 spin_lock_irqsave(&port->lock, flags);
53760 if (!tty_hung_up_p(filp))
53761- port->count++;
53762+ atomic_inc(&port->count);
53763 port->blocked_open--;
53764 if (retval == 0)
53765 port->flags |= ASYNC_NORMAL_ACTIVE;
53766@@ -476,19 +476,19 @@ int tty_port_close_start(struct tty_port *port,
53767 return 0;
53768
53769 spin_lock_irqsave(&port->lock, flags);
53770- if (tty->count == 1 && port->count != 1) {
53771+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
53772 printk(KERN_WARNING
53773 "tty_port_close_start: tty->count = 1 port count = %d.\n",
53774- port->count);
53775- port->count = 1;
53776+ atomic_read(&port->count));
53777+ atomic_set(&port->count, 1);
53778 }
53779- if (--port->count < 0) {
53780+ if (atomic_dec_return(&port->count) < 0) {
53781 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
53782- port->count);
53783- port->count = 0;
53784+ atomic_read(&port->count));
53785+ atomic_set(&port->count, 0);
53786 }
53787
53788- if (port->count) {
53789+ if (atomic_read(&port->count)) {
53790 spin_unlock_irqrestore(&port->lock, flags);
53791 return 0;
53792 }
53793@@ -590,7 +590,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
53794 struct file *filp)
53795 {
53796 spin_lock_irq(&port->lock);
53797- ++port->count;
53798+ atomic_inc(&port->count);
53799 spin_unlock_irq(&port->lock);
53800 tty_port_tty_set(port, tty);
53801
53802diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
53803index 8a89f6e..50b32af 100644
53804--- a/drivers/tty/vt/keyboard.c
53805+++ b/drivers/tty/vt/keyboard.c
53806@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
53807 kbd->kbdmode == VC_OFF) &&
53808 value != KVAL(K_SAK))
53809 return; /* SAK is allowed even in raw mode */
53810+
53811+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53812+ {
53813+ void *func = fn_handler[value];
53814+ if (func == fn_show_state || func == fn_show_ptregs ||
53815+ func == fn_show_mem)
53816+ return;
53817+ }
53818+#endif
53819+
53820 fn_handler[value](vc);
53821 }
53822
53823@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
53824 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
53825 return -EFAULT;
53826
53827- if (!capable(CAP_SYS_TTY_CONFIG))
53828- perm = 0;
53829-
53830 switch (cmd) {
53831 case KDGKBENT:
53832 /* Ensure another thread doesn't free it under us */
53833@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
53834 spin_unlock_irqrestore(&kbd_event_lock, flags);
53835 return put_user(val, &user_kbe->kb_value);
53836 case KDSKBENT:
53837+ if (!capable(CAP_SYS_TTY_CONFIG))
53838+ perm = 0;
53839+
53840 if (!perm)
53841 return -EPERM;
53842 if (!i && v == K_NOSUCHMAP) {
53843@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
53844 int i, j, k;
53845 int ret;
53846
53847- if (!capable(CAP_SYS_TTY_CONFIG))
53848- perm = 0;
53849-
53850 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
53851 if (!kbs) {
53852 ret = -ENOMEM;
53853@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
53854 kfree(kbs);
53855 return ((p && *p) ? -EOVERFLOW : 0);
53856 case KDSKBSENT:
53857+ if (!capable(CAP_SYS_TTY_CONFIG))
53858+ perm = 0;
53859+
53860 if (!perm) {
53861 ret = -EPERM;
53862 goto reterr;
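
keyboard.c moves the `capable(CAP_SYS_TTY_CONFIG)` demotion of `perm` out of the common path and into the KDSKBENT/KDSKBSENT (set) branches, so merely reading a key binding no longer requires the capability while writing one still does. The per-command shape (an illustrative sketch, with a stub standing in for `capable()`):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum { KDGKBENT, KDSKBENT };                 /* get vs. set */

static bool capable_tty_config(void) { return false; } /* illustrative stub */

static int kbd_ioctl(int cmd, bool perm)
{
	switch (cmd) {
	case KDGKBENT:
		return 0;                       /* reads need no capability */
	case KDSKBENT:
		if (!capable_tty_config())      /* demote only for writes */
			perm = false;
		if (!perm)
			return -EPERM;
		return 0;
	}
	return -EINVAL;
}

int main(void)
{
	printf("get: %d, set: %d\n", kbd_ioctl(KDGKBENT, true),
	       kbd_ioctl(KDSKBENT, true));
	return 0;
}
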
53863diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
53864index 6276f13..84f2449 100644
53865--- a/drivers/uio/uio.c
53866+++ b/drivers/uio/uio.c
53867@@ -25,6 +25,7 @@
53868 #include <linux/kobject.h>
53869 #include <linux/cdev.h>
53870 #include <linux/uio_driver.h>
53871+#include <asm/local.h>
53872
53873 #define UIO_MAX_DEVICES (1U << MINORBITS)
53874
53875@@ -231,7 +232,7 @@ static ssize_t event_show(struct device *dev,
53876 struct device_attribute *attr, char *buf)
53877 {
53878 struct uio_device *idev = dev_get_drvdata(dev);
53879- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
53880+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
53881 }
53882 static DEVICE_ATTR_RO(event);
53883
53884@@ -393,7 +394,7 @@ void uio_event_notify(struct uio_info *info)
53885 {
53886 struct uio_device *idev = info->uio_dev;
53887
53888- atomic_inc(&idev->event);
53889+ atomic_inc_unchecked(&idev->event);
53890 wake_up_interruptible(&idev->wait);
53891 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
53892 }
53893@@ -446,7 +447,7 @@ static int uio_open(struct inode *inode, struct file *filep)
53894 }
53895
53896 listener->dev = idev;
53897- listener->event_count = atomic_read(&idev->event);
53898+ listener->event_count = atomic_read_unchecked(&idev->event);
53899 filep->private_data = listener;
53900
53901 if (idev->info->open) {
53902@@ -497,7 +498,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
53903 return -EIO;
53904
53905 poll_wait(filep, &idev->wait, wait);
53906- if (listener->event_count != atomic_read(&idev->event))
53907+ if (listener->event_count != atomic_read_unchecked(&idev->event))
53908 return POLLIN | POLLRDNORM;
53909 return 0;
53910 }
53911@@ -522,7 +523,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
53912 do {
53913 set_current_state(TASK_INTERRUPTIBLE);
53914
53915- event_count = atomic_read(&idev->event);
53916+ event_count = atomic_read_unchecked(&idev->event);
53917 if (event_count != listener->event_count) {
53918 if (copy_to_user(buf, &event_count, count))
53919 retval = -EFAULT;
53920@@ -579,9 +580,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
53921 static int uio_find_mem_index(struct vm_area_struct *vma)
53922 {
53923 struct uio_device *idev = vma->vm_private_data;
53924+ unsigned long size;
53925
53926 if (vma->vm_pgoff < MAX_UIO_MAPS) {
53927- if (idev->info->mem[vma->vm_pgoff].size == 0)
53928+ size = idev->info->mem[vma->vm_pgoff].size;
53929+ if (size == 0)
53930+ return -1;
53931+ if (vma->vm_end - vma->vm_start > size)
53932 return -1;
53933 return (int)vma->vm_pgoff;
53934 }
53935@@ -813,7 +818,7 @@ int __uio_register_device(struct module *owner,
53936 idev->owner = owner;
53937 idev->info = info;
53938 init_waitqueue_head(&idev->wait);
53939- atomic_set(&idev->event, 0);
53940+ atomic_set_unchecked(&idev->event, 0);
53941
53942 ret = uio_get_minor(idev);
53943 if (ret)
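
The uio hunk adds a second check in `uio_find_mem_index()`: besides rejecting zero-size regions, it now rejects a vma that is larger than the backing memory region, preventing userspace from mapping past the end of the device window. The check in isolation (illustrative types and values):

#include <stdio.h>

struct mem_region { unsigned long size; };

/* returns the region index, or -1 if the requested mapping is invalid */
static int find_mem_index(const struct mem_region *mem,
			  unsigned long vm_start, unsigned long vm_end)
{
	unsigned long size = mem->size;

	if (size == 0)
		return -1;
	if (vm_end - vm_start > size)   /* mapping longer than the region */
		return -1;
	return 0;
}

int main(void)
{
	struct mem_region r = { 4096 };
	printf("%d %d\n",
	       find_mem_index(&r, 0, 4096),    /* ok: 0 */
	       find_mem_index(&r, 0, 8192));   /* too big: -1 */
	return 0;
}
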
53944diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
53945index 813d4d3..a71934f 100644
53946--- a/drivers/usb/atm/cxacru.c
53947+++ b/drivers/usb/atm/cxacru.c
53948@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
53949 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
53950 if (ret < 2)
53951 return -EINVAL;
53952- if (index < 0 || index > 0x7f)
53953+ if (index > 0x7f)
53954 return -EINVAL;
53955 pos += tmp;
53956
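
The cxacru.c hunk drops the `index < 0` arm of the range check: the value comes from a `%x` conversion into an unsigned variable, so the comparison is always false and only `index > 0x7f` can reject anything. A one-file demonstration that a `< 0` test on an unsigned is dead code (illustrative values; compilers typically warn about it):

#include <stdio.h>

int main(void)
{
	unsigned int index = 0xffffffffu;   /* "negative" bit pattern */

	/* always false for unsigned types: the dead code the patch removes */
	if (index < 0)
		puts("never printed");
	if (index > 0x7f)
		puts("rejected by the upper-bound check instead");
	return 0;
}
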
53957diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
53958index dada014..1d0d517 100644
53959--- a/drivers/usb/atm/usbatm.c
53960+++ b/drivers/usb/atm/usbatm.c
53961@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53962 if (printk_ratelimit())
53963 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
53964 __func__, vpi, vci);
53965- atomic_inc(&vcc->stats->rx_err);
53966+ atomic_inc_unchecked(&vcc->stats->rx_err);
53967 return;
53968 }
53969
53970@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53971 if (length > ATM_MAX_AAL5_PDU) {
53972 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
53973 __func__, length, vcc);
53974- atomic_inc(&vcc->stats->rx_err);
53975+ atomic_inc_unchecked(&vcc->stats->rx_err);
53976 goto out;
53977 }
53978
53979@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53980 if (sarb->len < pdu_length) {
53981 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
53982 __func__, pdu_length, sarb->len, vcc);
53983- atomic_inc(&vcc->stats->rx_err);
53984+ atomic_inc_unchecked(&vcc->stats->rx_err);
53985 goto out;
53986 }
53987
53988 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
53989 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
53990 __func__, vcc);
53991- atomic_inc(&vcc->stats->rx_err);
53992+ atomic_inc_unchecked(&vcc->stats->rx_err);
53993 goto out;
53994 }
53995
53996@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
53997 if (printk_ratelimit())
53998 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
53999 __func__, length);
54000- atomic_inc(&vcc->stats->rx_drop);
54001+ atomic_inc_unchecked(&vcc->stats->rx_drop);
54002 goto out;
54003 }
54004
54005@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54006
54007 vcc->push(vcc, skb);
54008
54009- atomic_inc(&vcc->stats->rx);
54010+ atomic_inc_unchecked(&vcc->stats->rx);
54011 out:
54012 skb_trim(sarb, 0);
54013 }
54014@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
54015 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
54016
54017 usbatm_pop(vcc, skb);
54018- atomic_inc(&vcc->stats->tx);
54019+ atomic_inc_unchecked(&vcc->stats->tx);
54020
54021 skb = skb_dequeue(&instance->sndqueue);
54022 }
54023@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
54024 if (!left--)
54025 return sprintf(page,
54026 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
54027- atomic_read(&atm_dev->stats.aal5.tx),
54028- atomic_read(&atm_dev->stats.aal5.tx_err),
54029- atomic_read(&atm_dev->stats.aal5.rx),
54030- atomic_read(&atm_dev->stats.aal5.rx_err),
54031- atomic_read(&atm_dev->stats.aal5.rx_drop));
54032+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
54033+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
54034+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
54035+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
54036+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
54037
54038 if (!left--) {
54039 if (instance->disconnected)
54040diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
54041index 2a3bbdf..91d72cf 100644
54042--- a/drivers/usb/core/devices.c
54043+++ b/drivers/usb/core/devices.c
54044@@ -126,7 +126,7 @@ static const char format_endpt[] =
54045 * time it gets called.
54046 */
54047 static struct device_connect_event {
54048- atomic_t count;
54049+ atomic_unchecked_t count;
54050 wait_queue_head_t wait;
54051 } device_event = {
54052 .count = ATOMIC_INIT(1),
54053@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
54054
54055 void usbfs_conn_disc_event(void)
54056 {
54057- atomic_add(2, &device_event.count);
54058+ atomic_add_unchecked(2, &device_event.count);
54059 wake_up(&device_event.wait);
54060 }
54061
54062@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
54063
54064 poll_wait(file, &device_event.wait, wait);
54065
54066- event_count = atomic_read(&device_event.count);
54067+ event_count = atomic_read_unchecked(&device_event.count);
54068 if (file->f_version != event_count) {
54069 file->f_version = event_count;
54070 return POLLIN | POLLRDNORM;
54071diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
54072index e500243..401300f 100644
54073--- a/drivers/usb/core/devio.c
54074+++ b/drivers/usb/core/devio.c
54075@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
54076 struct usb_dev_state *ps = file->private_data;
54077 struct usb_device *dev = ps->dev;
54078 ssize_t ret = 0;
54079- unsigned len;
54080+ size_t len;
54081 loff_t pos;
54082 int i;
54083
54084@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
54085 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
54086 struct usb_config_descriptor *config =
54087 (struct usb_config_descriptor *)dev->rawdescriptors[i];
54088- unsigned int length = le16_to_cpu(config->wTotalLength);
54089+ size_t length = le16_to_cpu(config->wTotalLength);
54090
54091 if (*ppos < pos + length) {
54092
54093 /* The descriptor may claim to be longer than it
54094 * really is. Here is the actual allocated length. */
54095- unsigned alloclen =
54096+ size_t alloclen =
54097 le16_to_cpu(dev->config[i].desc.wTotalLength);
54098
54099- len = length - (*ppos - pos);
54100+ len = length + pos - *ppos;
54101 if (len > nbytes)
54102 len = nbytes;
54103
54104 /* Simply don't write (skip over) unallocated parts */
54105 if (alloclen > (*ppos - pos)) {
54106- alloclen -= (*ppos - pos);
54107+ alloclen = alloclen + pos - *ppos;
54108 if (copy_to_user(buf,
54109 dev->rawdescriptors[i] + (*ppos - pos),
54110 min(len, alloclen))) {
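
The devio.c hunk widens `len` and `alloclen` from `unsigned` to `size_t` and reorders the offset arithmetic as `length + pos - *ppos`, keeping the computation inside the range already established by the `*ppos < pos + length` guard instead of forming an intermediate `*ppos - pos` difference against an unsigned length. A small demo of why such mixed signed/unsigned length math must stay guarded (illustrative values, not the descriptor layout):

#include <stdio.h>

int main(void)
{
	unsigned int length = 100;       /* bytes in this descriptor */
	long long pos = 0, ppos = 120;   /* read offset past the end */

	/* without the *ppos < pos + length guard, the subtraction goes
	 * negative and the unsigned assignment wraps: */
	unsigned int len = length - (ppos - pos);
	printf("unguarded len = %u\n", len);     /* 4294967276 */

	if (ppos < pos + (long long)length)      /* the guard in usbdev_read() */
		puts("in range: safe to compute len");
	else
		puts("out of range: skip this descriptor");
	return 0;
}
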
54111diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
54112index 45a915c..09f9735 100644
54113--- a/drivers/usb/core/hcd.c
54114+++ b/drivers/usb/core/hcd.c
54115@@ -1551,7 +1551,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
54116 */
54117 usb_get_urb(urb);
54118 atomic_inc(&urb->use_count);
54119- atomic_inc(&urb->dev->urbnum);
54120+ atomic_inc_unchecked(&urb->dev->urbnum);
54121 usbmon_urb_submit(&hcd->self, urb);
54122
54123 /* NOTE requirements on root-hub callers (usbfs and the hub
54124@@ -1578,7 +1578,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
54125 urb->hcpriv = NULL;
54126 INIT_LIST_HEAD(&urb->urb_list);
54127 atomic_dec(&urb->use_count);
54128- atomic_dec(&urb->dev->urbnum);
54129+ atomic_dec_unchecked(&urb->dev->urbnum);
54130 if (atomic_read(&urb->reject))
54131 wake_up(&usb_kill_urb_queue);
54132 usb_put_urb(urb);
54133diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
54134index b4bfa3a..008f926 100644
54135--- a/drivers/usb/core/hub.c
54136+++ b/drivers/usb/core/hub.c
54137@@ -26,6 +26,7 @@
54138 #include <linux/mutex.h>
54139 #include <linux/random.h>
54140 #include <linux/pm_qos.h>
54141+#include <linux/grsecurity.h>
54142
54143 #include <asm/uaccess.h>
54144 #include <asm/byteorder.h>
54145@@ -4664,6 +4665,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
54146 goto done;
54147 return;
54148 }
54149+
54150+ if (gr_handle_new_usb())
54151+ goto done;
54152+
54153 if (hub_is_superspeed(hub->hdev))
54154 unit_load = 150;
54155 else
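
hub.c gains a `gr_handle_new_usb()` hook: when grsecurity's deny-new-USB toggle is armed, `hub_port_connect()` bails out before the new device is enumerated. The hook reduces to a gated early return; a minimal sketch of the shape (the flag and function names here are illustrative, not the grsecurity implementation):

#include <stdbool.h>
#include <stdio.h>

static bool deny_new_usb;            /* analogue of the sysctl toggle */

static int gr_handle_new_usb_sketch(void)
{
	if (deny_new_usb) {
		fprintf(stderr, "denied insert of new USB device\n");
		return 1;
	}
	return 0;
}

static void hub_port_connect_sketch(void)
{
	if (gr_handle_new_usb_sketch())
		return;                      /* the "goto done" in the hunk */
	puts("enumerating new device");
}

int main(void)
{
	hub_port_connect_sketch();
	deny_new_usb = true;
	hub_port_connect_sketch();
	return 0;
}
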
54156diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
54157index f368d20..0c30ac5 100644
54158--- a/drivers/usb/core/message.c
54159+++ b/drivers/usb/core/message.c
54160@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
54161 * Return: If successful, the number of bytes transferred. Otherwise, a negative
54162 * error number.
54163 */
54164-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
54165+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
54166 __u8 requesttype, __u16 value, __u16 index, void *data,
54167 __u16 size, int timeout)
54168 {
54169@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
54170 * If successful, 0. Otherwise a negative error number. The number of actual
54171 * bytes transferred will be stored in the @actual_length parameter.
54172 */
54173-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
54174+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
54175 void *data, int len, int *actual_length, int timeout)
54176 {
54177 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
54178@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
54179 * bytes transferred will be stored in the @actual_length parameter.
54180 *
54181 */
54182-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
54183+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
54184 void *data, int len, int *actual_length, int timeout)
54185 {
54186 struct urb *urb;
54187diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
54188index d269738..7340cd7 100644
54189--- a/drivers/usb/core/sysfs.c
54190+++ b/drivers/usb/core/sysfs.c
54191@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
54192 struct usb_device *udev;
54193
54194 udev = to_usb_device(dev);
54195- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
54196+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
54197 }
54198 static DEVICE_ATTR_RO(urbnum);
54199
54200diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
54201index b1fb9ae..4224885 100644
54202--- a/drivers/usb/core/usb.c
54203+++ b/drivers/usb/core/usb.c
54204@@ -431,7 +431,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
54205 set_dev_node(&dev->dev, dev_to_node(bus->controller));
54206 dev->state = USB_STATE_ATTACHED;
54207 dev->lpm_disable_count = 1;
54208- atomic_set(&dev->urbnum, 0);
54209+ atomic_set_unchecked(&dev->urbnum, 0);
54210
54211 INIT_LIST_HEAD(&dev->ep0.urb_list);
54212 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
54213diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
54214index 8cfc319..4868255 100644
54215--- a/drivers/usb/early/ehci-dbgp.c
54216+++ b/drivers/usb/early/ehci-dbgp.c
54217@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
54218
54219 #ifdef CONFIG_KGDB
54220 static struct kgdb_io kgdbdbgp_io_ops;
54221-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
54222+static struct kgdb_io kgdbdbgp_io_ops_console;
54223+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
54224 #else
54225 #define dbgp_kgdb_mode (0)
54226 #endif
54227@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
54228 .write_char = kgdbdbgp_write_char,
54229 };
54230
54231+static struct kgdb_io kgdbdbgp_io_ops_console = {
54232+ .name = "kgdbdbgp",
54233+ .read_char = kgdbdbgp_read_char,
54234+ .write_char = kgdbdbgp_write_char,
54235+ .is_console = 1
54236+};
54237+
54238 static int kgdbdbgp_wait_time;
54239
54240 static int __init kgdbdbgp_parse_config(char *str)
54241@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
54242 ptr++;
54243 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
54244 }
54245- kgdb_register_io_module(&kgdbdbgp_io_ops);
54246- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
54247+ if (early_dbgp_console.index != -1)
54248+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
54249+ else
54250+ kgdb_register_io_module(&kgdbdbgp_io_ops);
54251
54252 return 0;
54253 }
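The ehci-dbgp change sidesteps a post-registration write: rather than registering kgdbdbgp_io_ops and then patching its .is_console field (which requires the structure to stay writable), it defines a second static variant with .is_console set and registers whichever one applies. The shape of that pattern, with stand-in names:

struct io_ops_sketch {
    const char *name;
    int is_console;
};

static struct io_ops_sketch dbgp_ops         = { .name = "dbgp", .is_console = 0 };
static struct io_ops_sketch dbgp_console_ops = { .name = "dbgp", .is_console = 1 };

static void register_ops(struct io_ops_sketch *ops) { (void)ops; }

static void parse_config(int console_index)
{
    /* pick the right constant object up front; neither is modified later */
    if (console_index != -1)
        register_ops(&dbgp_console_ops);
    else
        register_ops(&dbgp_ops);
}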
54254diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
54255index e971584..03495ab 100644
54256--- a/drivers/usb/gadget/function/f_uac1.c
54257+++ b/drivers/usb/gadget/function/f_uac1.c
54258@@ -14,6 +14,7 @@
54259 #include <linux/module.h>
54260 #include <linux/device.h>
54261 #include <linux/atomic.h>
54262+#include <linux/module.h>
54263
54264 #include "u_uac1.h"
54265
54266diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
54267index 491082a..dfd7d17 100644
54268--- a/drivers/usb/gadget/function/u_serial.c
54269+++ b/drivers/usb/gadget/function/u_serial.c
54270@@ -729,9 +729,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54271 spin_lock_irq(&port->port_lock);
54272
54273 /* already open? Great. */
54274- if (port->port.count) {
54275+ if (atomic_read(&port->port.count)) {
54276 status = 0;
54277- port->port.count++;
54278+ atomic_inc(&port->port.count);
54279
54280 /* currently opening/closing? wait ... */
54281 } else if (port->openclose) {
54282@@ -790,7 +790,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54283 tty->driver_data = port;
54284 port->port.tty = tty;
54285
54286- port->port.count = 1;
54287+ atomic_set(&port->port.count, 1);
54288 port->openclose = false;
54289
54290 /* if connected, start the I/O stream */
54291@@ -832,11 +832,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54292
54293 spin_lock_irq(&port->port_lock);
54294
54295- if (port->port.count != 1) {
54296- if (port->port.count == 0)
54297+ if (atomic_read(&port->port.count) != 1) {
54298+ if (atomic_read(&port->port.count) == 0)
54299 WARN_ON(1);
54300 else
54301- --port->port.count;
54302+ atomic_dec(&port->port.count);
54303 goto exit;
54304 }
54305
54306@@ -846,7 +846,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54307 * and sleep if necessary
54308 */
54309 port->openclose = true;
54310- port->port.count = 0;
54311+ atomic_set(&port->port.count, 0);
54312
54313 gser = port->port_usb;
54314 if (gser && gser->disconnect)
54315@@ -1062,7 +1062,7 @@ static int gs_closed(struct gs_port *port)
54316 int cond;
54317
54318 spin_lock_irq(&port->port_lock);
54319- cond = (port->port.count == 0) && !port->openclose;
54320+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
54321 spin_unlock_irq(&port->port_lock);
54322 return cond;
54323 }
54324@@ -1205,7 +1205,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
54325 /* if it's already open, start I/O ... and notify the serial
54326 * protocol about open/close status (connect/disconnect).
54327 */
54328- if (port->port.count) {
54329+ if (atomic_read(&port->port.count)) {
54330 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
54331 gs_start_io(port);
54332 if (gser->connect)
54333@@ -1252,7 +1252,7 @@ void gserial_disconnect(struct gserial *gser)
54334
54335 port->port_usb = NULL;
54336 gser->ioport = NULL;
54337- if (port->port.count > 0 || port->openclose) {
54338+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
54339 wake_up_interruptible(&port->drain_wait);
54340 if (port->port.tty)
54341 tty_hangup(port->port.tty);
54342@@ -1268,7 +1268,7 @@ void gserial_disconnect(struct gserial *gser)
54343
54344 /* finally, free any unused/unusable I/O buffers */
54345 spin_lock_irqsave(&port->port_lock, flags);
54346- if (port->port.count == 0 && !port->openclose)
54347+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
54348 gs_buf_free(&port->port_write_buf);
54349 gs_free_requests(gser->out, &port->read_pool, NULL);
54350 gs_free_requests(gser->out, &port->read_queue, NULL);
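Throughout u_serial.c the plain int port.count becomes an atomic counter, so every read and update goes through one accessor family that PaX REFCOUNT can instrument. A reduced C11 sketch of the open/closed logic on a stand-in port type:

#include <stdatomic.h>

struct port_sketch {
    atomic_int count;
    int openclose;
};

static int port_open(struct port_sketch *port)
{
    if (atomic_load(&port->count)) {        /* already open? just add a ref */
        atomic_fetch_add(&port->count, 1);
        return 0;
    }
    atomic_store(&port->count, 1);          /* first opener */
    return 0;
}

static int port_closed(struct port_sketch *port)
{
    return atomic_load(&port->count) == 0 && !port->openclose;
}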
54351diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
54352index 53842a1..2bef3b6 100644
54353--- a/drivers/usb/gadget/function/u_uac1.c
54354+++ b/drivers/usb/gadget/function/u_uac1.c
54355@@ -17,6 +17,7 @@
54356 #include <linux/ctype.h>
54357 #include <linux/random.h>
54358 #include <linux/syscalls.h>
54359+#include <linux/module.h>
54360
54361 #include "u_uac1.h"
54362
54363diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
54364index 118edb7..7a6415f 100644
54365--- a/drivers/usb/host/ehci-hub.c
54366+++ b/drivers/usb/host/ehci-hub.c
54367@@ -769,7 +769,7 @@ static struct urb *request_single_step_set_feature_urb(
54368 urb->transfer_flags = URB_DIR_IN;
54369 usb_get_urb(urb);
54370 atomic_inc(&urb->use_count);
54371- atomic_inc(&urb->dev->urbnum);
54372+ atomic_inc_unchecked(&urb->dev->urbnum);
54373 urb->setup_dma = dma_map_single(
54374 hcd->self.controller,
54375 urb->setup_packet,
54376@@ -836,7 +836,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
54377 urb->status = -EINPROGRESS;
54378 usb_get_urb(urb);
54379 atomic_inc(&urb->use_count);
54380- atomic_inc(&urb->dev->urbnum);
54381+ atomic_inc_unchecked(&urb->dev->urbnum);
54382 retval = submit_single_step_set_feature(hcd, urb, 0);
54383 if (!retval && !wait_for_completion_timeout(&done,
54384 msecs_to_jiffies(2000))) {
54385diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
54386index 1db0626..4948782 100644
54387--- a/drivers/usb/host/hwa-hc.c
54388+++ b/drivers/usb/host/hwa-hc.c
54389@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54390 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
54391 struct wahc *wa = &hwahc->wa;
54392 struct device *dev = &wa->usb_iface->dev;
54393- u8 mas_le[UWB_NUM_MAS/8];
54394+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
54395+
54396+ if (mas_le == NULL)
54397+ return -ENOMEM;
54398
54399 /* Set the stream index */
54400 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
54401@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54402 WUSB_REQ_SET_WUSB_MAS,
54403 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
54404 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
54405- mas_le, 32, USB_CTRL_SET_TIMEOUT);
54406+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
54407 if (result < 0)
54408 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
54409 out:
54410+ kfree(mas_le);
54411+
54412 return result;
54413 }
54414
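The hwa-hc.c hunk moves mas_le from the stack to kmalloc() and passes the allocation size instead of a hard-coded 32: buffers handed to usb_control_msg() may be DMA-mapped and must not live on the stack. A userspace-shaped sketch of the same allocate/use/free pattern (do_transfer() and the size constant are stand-ins):

#include <stdlib.h>
#include <string.h>
#include <errno.h>

#define UWB_NUM_MAS 256   /* stand-in for the kernel constant */

static int do_transfer(const unsigned char *buf, size_t len)
{
    (void)buf; (void)len;
    return 0;
}

int set_mas_allocation(void)
{
    int result;
    unsigned char *mas_le = malloc(UWB_NUM_MAS / 8);

    if (mas_le == NULL)
        return -ENOMEM;

    memset(mas_le, 0, UWB_NUM_MAS / 8);
    /* pass the real allocation size, not a hard-coded 32 */
    result = do_transfer(mas_le, UWB_NUM_MAS / 8);

    free(mas_le);
    return result;
}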
54415diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
54416index b3d245e..99549ed 100644
54417--- a/drivers/usb/misc/appledisplay.c
54418+++ b/drivers/usb/misc/appledisplay.c
54419@@ -84,7 +84,7 @@ struct appledisplay {
54420 struct mutex sysfslock; /* concurrent read and write */
54421 };
54422
54423-static atomic_t count_displays = ATOMIC_INIT(0);
54424+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
54425 static struct workqueue_struct *wq;
54426
54427 static void appledisplay_complete(struct urb *urb)
54428@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
54429
54430 /* Register backlight device */
54431 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
54432- atomic_inc_return(&count_displays) - 1);
54433+ atomic_inc_return_unchecked(&count_displays) - 1);
54434 memset(&props, 0, sizeof(struct backlight_properties));
54435 props.type = BACKLIGHT_RAW;
54436 props.max_brightness = 0xff;
54437diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
54438index 29fa1c3..a57b08e 100644
54439--- a/drivers/usb/serial/console.c
54440+++ b/drivers/usb/serial/console.c
54441@@ -125,7 +125,7 @@ static int usb_console_setup(struct console *co, char *options)
54442
54443 info->port = port;
54444
54445- ++port->port.count;
54446+ atomic_inc(&port->port.count);
54447 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
54448 if (serial->type->set_termios) {
54449 /*
54450@@ -173,7 +173,7 @@ static int usb_console_setup(struct console *co, char *options)
54451 }
54452 /* Now that any required fake tty operations are completed restore
54453 * the tty port count */
54454- --port->port.count;
54455+ atomic_dec(&port->port.count);
54456 /* The console is special in terms of closing the device so
54457 * indicate this port is now acting as a system console. */
54458 port->port.console = 1;
54459@@ -186,7 +186,7 @@ static int usb_console_setup(struct console *co, char *options)
54460 put_tty:
54461 tty_kref_put(tty);
54462 reset_open_count:
54463- port->port.count = 0;
54464+ atomic_set(&port->port.count, 0);
54465 usb_autopm_put_interface(serial->interface);
54466 error_get_interface:
54467 usb_serial_put(serial);
54468@@ -197,7 +197,7 @@ static int usb_console_setup(struct console *co, char *options)
54469 static void usb_console_write(struct console *co,
54470 const char *buf, unsigned count)
54471 {
54472- static struct usbcons_info *info = &usbcons_info;
54473+ struct usbcons_info *info = &usbcons_info;
54474 struct usb_serial_port *port = info->port;
54475 struct usb_serial *serial;
54476 int retval = -ENODEV;
54477diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
54478index 307e339..6aa97cb 100644
54479--- a/drivers/usb/storage/usb.h
54480+++ b/drivers/usb/storage/usb.h
54481@@ -63,7 +63,7 @@ struct us_unusual_dev {
54482 __u8 useProtocol;
54483 __u8 useTransport;
54484 int (*initFunction)(struct us_data *);
54485-};
54486+} __do_const;
54487
54488
54489 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
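__do_const is an annotation consumed by grsecurity's constify GCC plugin; it forces instances of ops-style structures, such as us_unusual_dev with its initFunction pointer, into read-only memory. The portable approximation of the effect is plain const, shown here on stand-in types:

struct unusual_dev_sketch {
    unsigned char useProtocol;
    int (*init)(void *us);
};

static int sketch_init(void *us) { (void)us; return 0; }

static const struct unusual_dev_sketch dev_entry = {
    .useProtocol = 0,
    .init        = sketch_init,
};
/* dev_entry.init = 0;  -- would now be rejected at compile time */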
54490diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
54491index a863a98..d272795 100644
54492--- a/drivers/usb/usbip/vhci.h
54493+++ b/drivers/usb/usbip/vhci.h
54494@@ -83,7 +83,7 @@ struct vhci_hcd {
54495 unsigned resuming:1;
54496 unsigned long re_timeout;
54497
54498- atomic_t seqnum;
54499+ atomic_unchecked_t seqnum;
54500
54501 /*
54502 * NOTE:
54503diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
54504index 1ae9d40..c62604b 100644
54505--- a/drivers/usb/usbip/vhci_hcd.c
54506+++ b/drivers/usb/usbip/vhci_hcd.c
54507@@ -439,7 +439,7 @@ static void vhci_tx_urb(struct urb *urb)
54508
54509 spin_lock(&vdev->priv_lock);
54510
54511- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
54512+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54513 if (priv->seqnum == 0xffff)
54514 dev_info(&urb->dev->dev, "seqnum max\n");
54515
54516@@ -684,7 +684,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
54517 return -ENOMEM;
54518 }
54519
54520- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
54521+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54522 if (unlink->seqnum == 0xffff)
54523 pr_info("seqnum max\n");
54524
54525@@ -888,7 +888,7 @@ static int vhci_start(struct usb_hcd *hcd)
54526 vdev->rhport = rhport;
54527 }
54528
54529- atomic_set(&vhci->seqnum, 0);
54530+ atomic_set_unchecked(&vhci->seqnum, 0);
54531 spin_lock_init(&vhci->lock);
54532
54533 hcd->power_budget = 0; /* no limit */
54534diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
54535index 00e4a54..d676f85 100644
54536--- a/drivers/usb/usbip/vhci_rx.c
54537+++ b/drivers/usb/usbip/vhci_rx.c
54538@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
54539 if (!urb) {
54540 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
54541 pr_info("max seqnum %d\n",
54542- atomic_read(&the_controller->seqnum));
54543+ atomic_read_unchecked(&the_controller->seqnum));
54544 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
54545 return;
54546 }
54547diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
54548index edc7267..9f65ce2 100644
54549--- a/drivers/usb/wusbcore/wa-hc.h
54550+++ b/drivers/usb/wusbcore/wa-hc.h
54551@@ -240,7 +240,7 @@ struct wahc {
54552 spinlock_t xfer_list_lock;
54553 struct work_struct xfer_enqueue_work;
54554 struct work_struct xfer_error_work;
54555- atomic_t xfer_id_count;
54556+ atomic_unchecked_t xfer_id_count;
54557
54558 kernel_ulong_t quirks;
54559 };
54560@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
54561 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
54562 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
54563 wa->dto_in_use = 0;
54564- atomic_set(&wa->xfer_id_count, 1);
54565+ atomic_set_unchecked(&wa->xfer_id_count, 1);
54566 /* init the buf in URBs */
54567 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
54568 usb_init_urb(&(wa->buf_in_urbs[index]));
54569diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
54570index 69af4fd..da390d7 100644
54571--- a/drivers/usb/wusbcore/wa-xfer.c
54572+++ b/drivers/usb/wusbcore/wa-xfer.c
54573@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
54574 */
54575 static void wa_xfer_id_init(struct wa_xfer *xfer)
54576 {
54577- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
54578+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
54579 }
54580
54581 /* Return the xfer's ID. */
54582diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
54583index f018d8d..ccab63f 100644
54584--- a/drivers/vfio/vfio.c
54585+++ b/drivers/vfio/vfio.c
54586@@ -481,7 +481,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
54587 return 0;
54588
54589 /* TODO Prevent device auto probing */
54590- WARN("Device %s added to live group %d!\n", dev_name(dev),
54591+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
54592 iommu_group_id(group->iommu_group));
54593
54594 return 0;
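The vfio hunk fixes a misuse of WARN(), whose first argument is the condition to test, not the format string; the original call evaluated the message pointer as the condition and shifted every later argument by one. A reduced model of the macro (the kernel's version does considerably more):

#include <stdio.h>

#define WARN(condition, fmt, ...) \
    ({ int __c = !!(condition); if (__c) fprintf(stderr, fmt, ##__VA_ARGS__); __c; })

int main(void)
{
    /* WARN("msg %s", name) would test the string (always true) and treat
     * the next argument as the format; WARN(1, "msg %s", name) warns
     * unconditionally with the intended message. */
    WARN(1, "Device %s added to live group %d!\n", "dev0", 3);
    return 0;
}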
54595diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
54596index 9484d56..d415d69 100644
54597--- a/drivers/vhost/net.c
54598+++ b/drivers/vhost/net.c
54599@@ -650,10 +650,8 @@ static void handle_rx(struct vhost_net *net)
54600 break;
54601 }
54602 /* TODO: Should check and handle checksum. */
54603-
54604- hdr.num_buffers = cpu_to_vhost16(vq, headcount);
54605 if (likely(mergeable) &&
54606- memcpy_toiovecend(nvq->hdr, (void *)&hdr.num_buffers,
54607+ memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
54608 offsetof(typeof(hdr), num_buffers),
54609 sizeof hdr.num_buffers)) {
54610 vq_err(vq, "Failed num_buffers write");
54611diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
54612index 3bb02c6..a01ff38 100644
54613--- a/drivers/vhost/vringh.c
54614+++ b/drivers/vhost/vringh.c
54615@@ -551,7 +551,7 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
54616 static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
54617 {
54618 __virtio16 v = 0;
54619- int rc = get_user(v, (__force __virtio16 __user *)p);
54620+ int rc = get_user(v, (__force_user __virtio16 *)p);
54621 *val = vringh16_to_cpu(vrh, v);
54622 return rc;
54623 }
54624@@ -559,12 +559,12 @@ static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio
54625 static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
54626 {
54627 __virtio16 v = cpu_to_vringh16(vrh, val);
54628- return put_user(v, (__force __virtio16 __user *)p);
54629+ return put_user(v, (__force_user __virtio16 *)p);
54630 }
54631
54632 static inline int copydesc_user(void *dst, const void *src, size_t len)
54633 {
54634- return copy_from_user(dst, (__force void __user *)src, len) ?
54635+ return copy_from_user(dst, (void __force_user *)src, len) ?
54636 -EFAULT : 0;
54637 }
54638
54639@@ -572,19 +572,19 @@ static inline int putused_user(struct vring_used_elem *dst,
54640 const struct vring_used_elem *src,
54641 unsigned int num)
54642 {
54643- return copy_to_user((__force void __user *)dst, src,
54644+ return copy_to_user((void __force_user *)dst, src,
54645 sizeof(*dst) * num) ? -EFAULT : 0;
54646 }
54647
54648 static inline int xfer_from_user(void *src, void *dst, size_t len)
54649 {
54650- return copy_from_user(dst, (__force void __user *)src, len) ?
54651+ return copy_from_user(dst, (void __force_user *)src, len) ?
54652 -EFAULT : 0;
54653 }
54654
54655 static inline int xfer_to_user(void *dst, void *src, size_t len)
54656 {
54657- return copy_to_user((__force void __user *)dst, src, len) ?
54658+ return copy_to_user((void __force_user *)dst, src, len) ?
54659 -EFAULT : 0;
54660 }
54661
54662@@ -621,9 +621,9 @@ int vringh_init_user(struct vringh *vrh, u64 features,
54663 vrh->last_used_idx = 0;
54664 vrh->vring.num = num;
54665 /* vring expects kernel addresses, but only used via accessors. */
54666- vrh->vring.desc = (__force struct vring_desc *)desc;
54667- vrh->vring.avail = (__force struct vring_avail *)avail;
54668- vrh->vring.used = (__force struct vring_used *)used;
54669+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
54670+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
54671+ vrh->vring.used = (__force_kernel struct vring_used *)used;
54672 return 0;
54673 }
54674 EXPORT_SYMBOL(vringh_init_user);
54675@@ -826,7 +826,7 @@ static inline int getu16_kern(const struct vringh *vrh,
54676
54677 static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
54678 {
54679- ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
54680+ ACCESS_ONCE_RW(*p) = cpu_to_vringh16(vrh, val);
54681 return 0;
54682 }
54683
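The vringh.c hunks replace mainline's "(__force void __user *)" casts with grsecurity's __force_user/__force_kernel spellings, which both silence sparse's address-space warning and re-tag the pointer with its real destination space. A compilable stand-in using simplified annotation macros (illustrative, not the kernel's exact definitions):

#include <string.h>
#include <errno.h>

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user __force __user

static unsigned long copy_to_user_sketch(void __user *to, const void *from,
                                         unsigned long n)
{
    memcpy((void __force *)to, from, n);   /* stand-in, not the real helper */
    return 0;
}

static int xfer_to_user(void *dst, void *src, unsigned long len)
{
    /* dst is really a user pointer stored in a kernel-typed field */
    return copy_to_user_sketch((void __force_user *)dst, src, len) ?
        -EFAULT : 0;
}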
54684diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
54685index 84a110a..96312c3 100644
54686--- a/drivers/video/backlight/kb3886_bl.c
54687+++ b/drivers/video/backlight/kb3886_bl.c
54688@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
54689 static unsigned long kb3886bl_flags;
54690 #define KB3886BL_SUSPENDED 0x01
54691
54692-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
54693+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
54694 {
54695 .ident = "Sahara Touch-iT",
54696 .matches = {
54697diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
54698index 1b0b233..6f34c2c 100644
54699--- a/drivers/video/fbdev/arcfb.c
54700+++ b/drivers/video/fbdev/arcfb.c
54701@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
54702 return -ENOSPC;
54703
54704 err = 0;
54705- if ((count + p) > fbmemlength) {
54706+ if (count > (fbmemlength - p)) {
54707 count = fbmemlength - p;
54708 err = -ENOSPC;
54709 }
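The arcfb bounds check is rewritten from "(count + p) > fbmemlength" to "count > (fbmemlength - p)": the addition can wrap for a huge count and slip past the test, while the subtraction cannot once p has been validated against fbmemlength, as the surrounding code already does. A small demonstration with hypothetical values:

#include <stdio.h>

int main(void)
{
    unsigned long fbmemlength = 4096, p = 100;
    unsigned long count = (unsigned long)-64;    /* attacker-sized request */

    printf("wrapping check: %d\n", (count + p) > fbmemlength);  /* 0: bypassed */
    printf("safe check:     %d\n", count > (fbmemlength - p));  /* 1: caught   */
    return 0;
}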
54710diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
54711index aedf2fb..47c9aca 100644
54712--- a/drivers/video/fbdev/aty/aty128fb.c
54713+++ b/drivers/video/fbdev/aty/aty128fb.c
54714@@ -149,7 +149,7 @@ enum {
54715 };
54716
54717 /* Must match above enum */
54718-static char * const r128_family[] = {
54719+static const char * const r128_family[] = {
54720 "AGP",
54721 "PCI",
54722 "PRO AGP",
54723diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
54724index 37ec09b..98f8862 100644
54725--- a/drivers/video/fbdev/aty/atyfb_base.c
54726+++ b/drivers/video/fbdev/aty/atyfb_base.c
54727@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
54728 par->accel_flags = var->accel_flags; /* hack */
54729
54730 if (var->accel_flags) {
54731- info->fbops->fb_sync = atyfb_sync;
54732+ pax_open_kernel();
54733+ *(void **)&info->fbops->fb_sync = atyfb_sync;
54734+ pax_close_kernel();
54735 info->flags &= ~FBINFO_HWACCEL_DISABLED;
54736 } else {
54737- info->fbops->fb_sync = NULL;
54738+ pax_open_kernel();
54739+ *(void **)&info->fbops->fb_sync = NULL;
54740+ pax_close_kernel();
54741 info->flags |= FBINFO_HWACCEL_DISABLED;
54742 }
54743
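With the constify plugin active, the fbops table is read-only, so atyfb (and the fbdev drivers patched below) wrap each function-pointer assignment in pax_open_kernel()/pax_close_kernel(), and the "*(void **)&" cast strips the const qualifier for the single guarded store. As a userspace analogy only, mprotect() can play the role of the write window:

#include <sys/mman.h>

struct fbops_sketch { void (*fb_sync)(void); };

int main(void)
{
    /* give the ops table its own page so protection flips are safe */
    struct fbops_sketch *fbops = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (fbops == MAP_FAILED)
        return 1;
    mprotect(fbops, 4096, PROT_READ);              /* "constified" state  */

    mprotect(fbops, 4096, PROT_READ | PROT_WRITE); /* pax_open_kernel()   */
    fbops->fb_sync = 0;                            /* the guarded write   */
    mprotect(fbops, 4096, PROT_READ);              /* pax_close_kernel()  */
    return 0;
}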
54744diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
54745index 2fa0317..4983f2a 100644
54746--- a/drivers/video/fbdev/aty/mach64_cursor.c
54747+++ b/drivers/video/fbdev/aty/mach64_cursor.c
54748@@ -8,6 +8,7 @@
54749 #include "../core/fb_draw.h"
54750
54751 #include <asm/io.h>
54752+#include <asm/pgtable.h>
54753
54754 #ifdef __sparc__
54755 #include <asm/fbio.h>
54756@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
54757 info->sprite.buf_align = 16; /* and 64 lines tall. */
54758 info->sprite.flags = FB_PIXMAP_IO;
54759
54760- info->fbops->fb_cursor = atyfb_cursor;
54761+ pax_open_kernel();
54762+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
54763+ pax_close_kernel();
54764
54765 return 0;
54766 }
54767diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
54768index d6cab1f..112f680 100644
54769--- a/drivers/video/fbdev/core/fb_defio.c
54770+++ b/drivers/video/fbdev/core/fb_defio.c
54771@@ -207,7 +207,9 @@ void fb_deferred_io_init(struct fb_info *info)
54772
54773 BUG_ON(!fbdefio);
54774 mutex_init(&fbdefio->lock);
54775- info->fbops->fb_mmap = fb_deferred_io_mmap;
54776+ pax_open_kernel();
54777+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
54778+ pax_close_kernel();
54779 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
54780 INIT_LIST_HEAD(&fbdefio->pagelist);
54781 if (fbdefio->delay == 0) /* set a default of 1 s */
54782@@ -238,7 +240,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
54783 page->mapping = NULL;
54784 }
54785
54786- info->fbops->fb_mmap = NULL;
54787+ *(void **)&info->fbops->fb_mmap = NULL;
54788 mutex_destroy(&fbdefio->lock);
54789 }
54790 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
54791diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
54792index 0705d88..d9429bf 100644
54793--- a/drivers/video/fbdev/core/fbmem.c
54794+++ b/drivers/video/fbdev/core/fbmem.c
54795@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
54796 __u32 data;
54797 int err;
54798
54799- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
54800+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
54801
54802 data = (__u32) (unsigned long) fix->smem_start;
54803 err |= put_user(data, &fix32->smem_start);
54804diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
54805index 4254336..282567e 100644
54806--- a/drivers/video/fbdev/hyperv_fb.c
54807+++ b/drivers/video/fbdev/hyperv_fb.c
54808@@ -240,7 +240,7 @@ static uint screen_fb_size;
54809 static inline int synthvid_send(struct hv_device *hdev,
54810 struct synthvid_msg *msg)
54811 {
54812- static atomic64_t request_id = ATOMIC64_INIT(0);
54813+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
54814 int ret;
54815
54816 msg->pipe_hdr.type = PIPE_MSG_DATA;
54817@@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev,
54818
54819 ret = vmbus_sendpacket(hdev->channel, msg,
54820 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
54821- atomic64_inc_return(&request_id),
54822+ atomic64_inc_return_unchecked(&request_id),
54823 VM_PKT_DATA_INBAND, 0);
54824
54825 if (ret)
54826diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
54827index 7672d2e..b56437f 100644
54828--- a/drivers/video/fbdev/i810/i810_accel.c
54829+++ b/drivers/video/fbdev/i810/i810_accel.c
54830@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
54831 }
54832 }
54833 printk("ringbuffer lockup!!!\n");
54834+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
54835 i810_report_error(mmio);
54836 par->dev_flags |= LOCKUP;
54837 info->pixmap.scan_align = 1;
54838diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54839index a01147f..5d896f8 100644
54840--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54841+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
54842@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
54843
54844 #ifdef CONFIG_FB_MATROX_MYSTIQUE
54845 struct matrox_switch matrox_mystique = {
54846- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
54847+ .preinit = MGA1064_preinit,
54848+ .reset = MGA1064_reset,
54849+ .init = MGA1064_init,
54850+ .restore = MGA1064_restore,
54851 };
54852 EXPORT_SYMBOL(matrox_mystique);
54853 #endif
54854
54855 #ifdef CONFIG_FB_MATROX_G
54856 struct matrox_switch matrox_G100 = {
54857- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
54858+ .preinit = MGAG100_preinit,
54859+ .reset = MGAG100_reset,
54860+ .init = MGAG100_init,
54861+ .restore = MGAG100_restore,
54862 };
54863 EXPORT_SYMBOL(matrox_G100);
54864 #endif
54865diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54866index 195ad7c..09743fc 100644
54867--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54868+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
54869@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
54870 }
54871
54872 struct matrox_switch matrox_millennium = {
54873- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
54874+ .preinit = Ti3026_preinit,
54875+ .reset = Ti3026_reset,
54876+ .init = Ti3026_init,
54877+ .restore = Ti3026_restore
54878 };
54879 EXPORT_SYMBOL(matrox_millennium);
54880 #endif
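The matroxfb hunks convert positional struct initializers to C99 designated initializers; behavior is unchanged, but the initializer no longer depends on field order and each member assignment is explicit for the constify plugin. Reduced example with stand-in types:

struct matrox_switch_sketch {
    int  (*preinit)(void *minfo);
    void (*reset)(void *minfo);
    int  (*init)(void *minfo);
    void (*restore)(void *minfo);
};

static int  ti_preinit(void *m) { (void)m; return 0; }
static void ti_reset(void *m)   { (void)m; }
static int  ti_init(void *m)    { (void)m; return 0; }
static void ti_restore(void *m) { (void)m; }

static struct matrox_switch_sketch millennium_sketch = {
    .preinit = ti_preinit,
    .reset   = ti_reset,
    .init    = ti_init,
    .restore = ti_restore,
};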
54881diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54882index fe92eed..106e085 100644
54883--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54884+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
54885@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
54886 struct mb862xxfb_par *par = info->par;
54887
54888 if (info->var.bits_per_pixel == 32) {
54889- info->fbops->fb_fillrect = cfb_fillrect;
54890- info->fbops->fb_copyarea = cfb_copyarea;
54891- info->fbops->fb_imageblit = cfb_imageblit;
54892+ pax_open_kernel();
54893+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
54894+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
54895+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
54896+ pax_close_kernel();
54897 } else {
54898 outreg(disp, GC_L0EM, 3);
54899- info->fbops->fb_fillrect = mb86290fb_fillrect;
54900- info->fbops->fb_copyarea = mb86290fb_copyarea;
54901- info->fbops->fb_imageblit = mb86290fb_imageblit;
54902+ pax_open_kernel();
54903+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
54904+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
54905+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
54906+ pax_close_kernel();
54907 }
54908 outreg(draw, GDC_REG_DRAW_BASE, 0);
54909 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
54910diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
54911index def0412..fed6529 100644
54912--- a/drivers/video/fbdev/nvidia/nvidia.c
54913+++ b/drivers/video/fbdev/nvidia/nvidia.c
54914@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
54915 info->fix.line_length = (info->var.xres_virtual *
54916 info->var.bits_per_pixel) >> 3;
54917 if (info->var.accel_flags) {
54918- info->fbops->fb_imageblit = nvidiafb_imageblit;
54919- info->fbops->fb_fillrect = nvidiafb_fillrect;
54920- info->fbops->fb_copyarea = nvidiafb_copyarea;
54921- info->fbops->fb_sync = nvidiafb_sync;
54922+ pax_open_kernel();
54923+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
54924+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
54925+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
54926+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
54927+ pax_close_kernel();
54928 info->pixmap.scan_align = 4;
54929 info->flags &= ~FBINFO_HWACCEL_DISABLED;
54930 info->flags |= FBINFO_READS_FAST;
54931 NVResetGraphics(info);
54932 } else {
54933- info->fbops->fb_imageblit = cfb_imageblit;
54934- info->fbops->fb_fillrect = cfb_fillrect;
54935- info->fbops->fb_copyarea = cfb_copyarea;
54936- info->fbops->fb_sync = NULL;
54937+ pax_open_kernel();
54938+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
54939+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
54940+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
54941+ *(void **)&info->fbops->fb_sync = NULL;
54942+ pax_close_kernel();
54943 info->pixmap.scan_align = 1;
54944 info->flags |= FBINFO_HWACCEL_DISABLED;
54945 info->flags &= ~FBINFO_READS_FAST;
54946@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
54947 info->pixmap.size = 8 * 1024;
54948 info->pixmap.flags = FB_PIXMAP_SYSTEM;
54949
54950- if (!hwcur)
54951- info->fbops->fb_cursor = NULL;
54952+ if (!hwcur) {
54953+ pax_open_kernel();
54954+ *(void **)&info->fbops->fb_cursor = NULL;
54955+ pax_close_kernel();
54956+ }
54957
54958 info->var.accel_flags = (!noaccel);
54959
54960diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
54961index 2412a0d..294215b 100644
54962--- a/drivers/video/fbdev/omap2/dss/display.c
54963+++ b/drivers/video/fbdev/omap2/dss/display.c
54964@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
54965 if (dssdev->name == NULL)
54966 dssdev->name = dssdev->alias;
54967
54968+ pax_open_kernel();
54969 if (drv && drv->get_resolution == NULL)
54970- drv->get_resolution = omapdss_default_get_resolution;
54971+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
54972 if (drv && drv->get_recommended_bpp == NULL)
54973- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
54974+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
54975 if (drv && drv->get_timings == NULL)
54976- drv->get_timings = omapdss_default_get_timings;
54977+ *(void **)&drv->get_timings = omapdss_default_get_timings;
54978+ pax_close_kernel();
54979
54980 mutex_lock(&panel_list_mutex);
54981 list_add_tail(&dssdev->panel_list, &panel_list);
54982diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
54983index 83433cb..71e9b98 100644
54984--- a/drivers/video/fbdev/s1d13xxxfb.c
54985+++ b/drivers/video/fbdev/s1d13xxxfb.c
54986@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
54987
54988 switch(prod_id) {
54989 case S1D13506_PROD_ID: /* activate acceleration */
54990- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
54991- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
54992+ pax_open_kernel();
54993+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
54994+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
54995+ pax_close_kernel();
54996 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
54997 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
54998 break;
54999diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
55000index d3013cd..95b8285 100644
55001--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
55002+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
55003@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
55004 }
55005
55006 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
55007- lcdc_sys_write_index,
55008- lcdc_sys_write_data,
55009- lcdc_sys_read_data,
55010+ .write_index = lcdc_sys_write_index,
55011+ .write_data = lcdc_sys_write_data,
55012+ .read_data = lcdc_sys_read_data,
55013 };
55014
55015 static int sh_mobile_lcdc_sginit(struct fb_info *info,
55016diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
55017index 9279e5f..d5f5276 100644
55018--- a/drivers/video/fbdev/smscufx.c
55019+++ b/drivers/video/fbdev/smscufx.c
55020@@ -1174,7 +1174,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
55021 fb_deferred_io_cleanup(info);
55022 kfree(info->fbdefio);
55023 info->fbdefio = NULL;
55024- info->fbops->fb_mmap = ufx_ops_mmap;
55025+ pax_open_kernel();
55026+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
55027+ pax_close_kernel();
55028 }
55029
55030 pr_debug("released /dev/fb%d user=%d count=%d",
55031diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
55032index ff2b873..626a8d5 100644
55033--- a/drivers/video/fbdev/udlfb.c
55034+++ b/drivers/video/fbdev/udlfb.c
55035@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
55036 dlfb_urb_completion(urb);
55037
55038 error:
55039- atomic_add(bytes_sent, &dev->bytes_sent);
55040- atomic_add(bytes_identical, &dev->bytes_identical);
55041- atomic_add(width*height*2, &dev->bytes_rendered);
55042+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
55043+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
55044+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
55045 end_cycles = get_cycles();
55046- atomic_add(((unsigned int) ((end_cycles - start_cycles)
55047+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
55048 >> 10)), /* Kcycles */
55049 &dev->cpu_kcycles_used);
55050
55051@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
55052 dlfb_urb_completion(urb);
55053
55054 error:
55055- atomic_add(bytes_sent, &dev->bytes_sent);
55056- atomic_add(bytes_identical, &dev->bytes_identical);
55057- atomic_add(bytes_rendered, &dev->bytes_rendered);
55058+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
55059+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
55060+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
55061 end_cycles = get_cycles();
55062- atomic_add(((unsigned int) ((end_cycles - start_cycles)
55063+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
55064 >> 10)), /* Kcycles */
55065 &dev->cpu_kcycles_used);
55066 }
55067@@ -991,7 +991,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
55068 fb_deferred_io_cleanup(info);
55069 kfree(info->fbdefio);
55070 info->fbdefio = NULL;
55071- info->fbops->fb_mmap = dlfb_ops_mmap;
55072+ pax_open_kernel();
55073+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
55074+ pax_close_kernel();
55075 }
55076
55077 pr_warn("released /dev/fb%d user=%d count=%d\n",
55078@@ -1373,7 +1375,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
55079 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55080 struct dlfb_data *dev = fb_info->par;
55081 return snprintf(buf, PAGE_SIZE, "%u\n",
55082- atomic_read(&dev->bytes_rendered));
55083+ atomic_read_unchecked(&dev->bytes_rendered));
55084 }
55085
55086 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
55087@@ -1381,7 +1383,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
55088 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55089 struct dlfb_data *dev = fb_info->par;
55090 return snprintf(buf, PAGE_SIZE, "%u\n",
55091- atomic_read(&dev->bytes_identical));
55092+ atomic_read_unchecked(&dev->bytes_identical));
55093 }
55094
55095 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
55096@@ -1389,7 +1391,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
55097 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55098 struct dlfb_data *dev = fb_info->par;
55099 return snprintf(buf, PAGE_SIZE, "%u\n",
55100- atomic_read(&dev->bytes_sent));
55101+ atomic_read_unchecked(&dev->bytes_sent));
55102 }
55103
55104 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
55105@@ -1397,7 +1399,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
55106 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55107 struct dlfb_data *dev = fb_info->par;
55108 return snprintf(buf, PAGE_SIZE, "%u\n",
55109- atomic_read(&dev->cpu_kcycles_used));
55110+ atomic_read_unchecked(&dev->cpu_kcycles_used));
55111 }
55112
55113 static ssize_t edid_show(
55114@@ -1457,10 +1459,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
55115 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55116 struct dlfb_data *dev = fb_info->par;
55117
55118- atomic_set(&dev->bytes_rendered, 0);
55119- atomic_set(&dev->bytes_identical, 0);
55120- atomic_set(&dev->bytes_sent, 0);
55121- atomic_set(&dev->cpu_kcycles_used, 0);
55122+ atomic_set_unchecked(&dev->bytes_rendered, 0);
55123+ atomic_set_unchecked(&dev->bytes_identical, 0);
55124+ atomic_set_unchecked(&dev->bytes_sent, 0);
55125+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
55126
55127 return count;
55128 }
55129diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
55130index d32d1c4..46722e6 100644
55131--- a/drivers/video/fbdev/uvesafb.c
55132+++ b/drivers/video/fbdev/uvesafb.c
55133@@ -19,6 +19,7 @@
55134 #include <linux/io.h>
55135 #include <linux/mutex.h>
55136 #include <linux/slab.h>
55137+#include <linux/moduleloader.h>
55138 #include <video/edid.h>
55139 #include <video/uvesafb.h>
55140 #ifdef CONFIG_X86
55141@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
55142 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
55143 par->pmi_setpal = par->ypan = 0;
55144 } else {
55145+
55146+#ifdef CONFIG_PAX_KERNEXEC
55147+#ifdef CONFIG_MODULES
55148+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
55149+#endif
55150+ if (!par->pmi_code) {
55151+ par->pmi_setpal = par->ypan = 0;
55152+ return 0;
55153+ }
55154+#endif
55155+
55156 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
55157 + task->t.regs.edi);
55158+
55159+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55160+ pax_open_kernel();
55161+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
55162+ pax_close_kernel();
55163+
55164+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
55165+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
55166+#else
55167 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
55168 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
55169+#endif
55170+
55171 printk(KERN_INFO "uvesafb: protected mode interface info at "
55172 "%04x:%04x\n",
55173 (u16)task->t.regs.es, (u16)task->t.regs.edi);
55174@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
55175 par->ypan = ypan;
55176
55177 if (par->pmi_setpal || par->ypan) {
55178+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
55179 if (__supported_pte_mask & _PAGE_NX) {
55180 par->pmi_setpal = par->ypan = 0;
55181 printk(KERN_WARNING "uvesafb: NX protection is active, "
55182 "better not use the PMI.\n");
55183- } else {
55184+ } else
55185+#endif
55186 uvesafb_vbe_getpmi(task, par);
55187- }
55188 }
55189 #else
55190 /* The protected mode interface is not available on non-x86. */
55191@@ -1452,8 +1476,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55192 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
55193
55194 /* Disable blanking if the user requested so. */
55195- if (!blank)
55196- info->fbops->fb_blank = NULL;
55197+ if (!blank) {
55198+ pax_open_kernel();
55199+ *(void **)&info->fbops->fb_blank = NULL;
55200+ pax_close_kernel();
55201+ }
55202
55203 /*
55204 * Find out how much IO memory is required for the mode with
55205@@ -1524,8 +1551,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55206 info->flags = FBINFO_FLAG_DEFAULT |
55207 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
55208
55209- if (!par->ypan)
55210- info->fbops->fb_pan_display = NULL;
55211+ if (!par->ypan) {
55212+ pax_open_kernel();
55213+ *(void **)&info->fbops->fb_pan_display = NULL;
55214+ pax_close_kernel();
55215+ }
55216 }
55217
55218 static void uvesafb_init_mtrr(struct fb_info *info)
55219@@ -1786,6 +1816,11 @@ out_mode:
55220 out:
55221 kfree(par->vbe_modes);
55222
55223+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55224+ if (par->pmi_code)
55225+ module_memfree_exec(par->pmi_code);
55226+#endif
55227+
55228 framebuffer_release(info);
55229 return err;
55230 }
55231@@ -1810,6 +1845,11 @@ static int uvesafb_remove(struct platform_device *dev)
55232 kfree(par->vbe_state_orig);
55233 kfree(par->vbe_state_saved);
55234
55235+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55236+ if (par->pmi_code)
55237+ module_memfree_exec(par->pmi_code);
55238+#endif
55239+
55240 framebuffer_release(info);
55241 }
55242 return 0;
55243diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
55244index d79a0ac..2d0c3d4 100644
55245--- a/drivers/video/fbdev/vesafb.c
55246+++ b/drivers/video/fbdev/vesafb.c
55247@@ -9,6 +9,7 @@
55248 */
55249
55250 #include <linux/module.h>
55251+#include <linux/moduleloader.h>
55252 #include <linux/kernel.h>
55253 #include <linux/errno.h>
55254 #include <linux/string.h>
55255@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
55256 static int vram_total; /* Set total amount of memory */
55257 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
55258 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
55259-static void (*pmi_start)(void) __read_mostly;
55260-static void (*pmi_pal) (void) __read_mostly;
55261+static void (*pmi_start)(void) __read_only;
55262+static void (*pmi_pal) (void) __read_only;
55263 static int depth __read_mostly;
55264 static int vga_compat __read_mostly;
55265 /* --------------------------------------------------------------------- */
55266@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
55267 unsigned int size_remap;
55268 unsigned int size_total;
55269 char *option = NULL;
55270+ void *pmi_code = NULL;
55271
55272 /* ignore error return of fb_get_options */
55273 fb_get_options("vesafb", &option);
55274@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
55275 size_remap = size_total;
55276 vesafb_fix.smem_len = size_remap;
55277
55278-#ifndef __i386__
55279- screen_info.vesapm_seg = 0;
55280-#endif
55281-
55282 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
55283 printk(KERN_WARNING
55284 "vesafb: cannot reserve video memory at 0x%lx\n",
55285@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
55286 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
55287 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
55288
55289+#ifdef __i386__
55290+
55291+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55292+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
55293+ if (!pmi_code)
55294+#elif !defined(CONFIG_PAX_KERNEXEC)
55295+ if (0)
55296+#endif
55297+
55298+#endif
55299+ screen_info.vesapm_seg = 0;
55300+
55301 if (screen_info.vesapm_seg) {
55302- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
55303- screen_info.vesapm_seg,screen_info.vesapm_off);
55304+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
55305+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
55306 }
55307
55308 if (screen_info.vesapm_seg < 0xc000)
55309@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
55310
55311 if (ypan || pmi_setpal) {
55312 unsigned short *pmi_base;
55313+
55314 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
55315- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
55316- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
55317+
55318+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55319+ pax_open_kernel();
55320+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
55321+#else
55322+ pmi_code = pmi_base;
55323+#endif
55324+
55325+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
55326+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
55327+
55328+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55329+ pmi_start = ktva_ktla(pmi_start);
55330+ pmi_pal = ktva_ktla(pmi_pal);
55331+ pax_close_kernel();
55332+#endif
55333+
55334 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
55335 if (pmi_base[3]) {
55336 printk(KERN_INFO "vesafb: pmi: ports = ");
55337@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
55338 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
55339 (ypan ? FBINFO_HWACCEL_YPAN : 0);
55340
55341- if (!ypan)
55342- info->fbops->fb_pan_display = NULL;
55343+ if (!ypan) {
55344+ pax_open_kernel();
55345+ *(void **)&info->fbops->fb_pan_display = NULL;
55346+ pax_close_kernel();
55347+ }
55348
55349 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
55350 err = -ENOMEM;
55351@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
55352 fb_info(info, "%s frame buffer device\n", info->fix.id);
55353 return 0;
55354 err:
55355+
55356+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55357+ module_memfree_exec(pmi_code);
55358+#endif
55359+
55360 if (info->screen_base)
55361 iounmap(info->screen_base);
55362 framebuffer_release(info);
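Under KERNEXEC the uvesafb/vesafb hunks stop calling the BIOS protected-mode interface in place: the PMI blob is copied into memory obtained from module_alloc_exec() and the entry points are recomputed inside that copy. A userspace analogy (mmap/mprotect standing in for the kernel allocator) of copying a code blob into a W^X mapping:

#include <string.h>
#include <sys/mman.h>

static void *alloc_exec_copy(const void *blob, size_t size)
{
    void *code = mmap(NULL, size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (code == MAP_FAILED)
        return NULL;
    memcpy(code, blob, size);                    /* like the pmi_code copy */
    mprotect(code, size, PROT_READ | PROT_EXEC); /* W^X: never W+X at once */
    return code;
}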
55363diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
55364index 88714ae..16c2e11 100644
55365--- a/drivers/video/fbdev/via/via_clock.h
55366+++ b/drivers/video/fbdev/via/via_clock.h
55367@@ -56,7 +56,7 @@ struct via_clock {
55368
55369 void (*set_engine_pll_state)(u8 state);
55370 void (*set_engine_pll)(struct via_pll_config config);
55371-};
55372+} __no_const;
55373
55374
55375 static inline u32 get_pll_internal_frequency(u32 ref_freq,
55376diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
55377index 3c14e43..2630570 100644
55378--- a/drivers/video/logo/logo_linux_clut224.ppm
55379+++ b/drivers/video/logo/logo_linux_clut224.ppm
55380@@ -2,1603 +2,1123 @@ P3
55381 # Standard 224-color Linux logo
55382 80 80
55383 255
[remainder of this hunk elided: per the hunk header, ~1,600 removed and ~1,100 added lines of raw PPM pixel triplets, swapping out the stock 80x80, 224-color Linux boot-logo bitmap]
55732-170 170 170 26 26 26 86 86 86 226 226 226
55733-123 123 123 10 10 10 14 14 14 46 46 46
55734-231 231 231 190 190 190 6 6 6 70 70 70
55735- 90 90 90 238 238 238 158 158 158 2 2 6
55736- 2 2 6 2 2 6 2 2 6 2 2 6
55737- 70 70 70 58 58 58 22 22 22 6 6 6
55738- 0 0 0 0 0 0 0 0 0 0 0 0
55739- 0 0 0 0 0 0 0 0 0 0 0 0
55740- 0 0 0 0 0 0 0 0 0 0 0 0
55741- 0 0 0 0 0 0 0 0 0 0 0 0
55742- 0 0 0 0 0 0 0 0 0 0 0 0
55743- 0 0 0 0 0 0 0 0 0 0 0 0
55744- 0 0 0 0 0 0 0 0 1 0 0 0
55745- 0 0 1 0 0 1 0 0 1 0 0 0
55746- 0 0 0 0 0 0 0 0 0 0 0 0
55747- 0 0 0 0 0 0 0 0 0 0 0 0
55748- 0 0 0 0 0 0 0 0 0 0 0 0
55749- 0 0 0 0 0 0 0 0 0 0 0 0
55750- 0 0 0 0 0 0 0 0 0 14 14 14
55751- 42 42 42 86 86 86 6 6 6 116 116 116
55752-106 106 106 6 6 6 70 70 70 149 149 149
55753-128 128 128 18 18 18 38 38 38 54 54 54
55754-221 221 221 106 106 106 2 2 6 14 14 14
55755- 46 46 46 190 190 190 198 198 198 2 2 6
55756- 2 2 6 2 2 6 2 2 6 2 2 6
55757- 74 74 74 62 62 62 22 22 22 6 6 6
55758- 0 0 0 0 0 0 0 0 0 0 0 0
55759- 0 0 0 0 0 0 0 0 0 0 0 0
55760- 0 0 0 0 0 0 0 0 0 0 0 0
55761- 0 0 0 0 0 0 0 0 0 0 0 0
55762- 0 0 0 0 0 0 0 0 0 0 0 0
55763- 0 0 0 0 0 0 0 0 0 0 0 0
55764- 0 0 0 0 0 0 0 0 1 0 0 0
55765- 0 0 1 0 0 0 0 0 1 0 0 0
55766- 0 0 0 0 0 0 0 0 0 0 0 0
55767- 0 0 0 0 0 0 0 0 0 0 0 0
55768- 0 0 0 0 0 0 0 0 0 0 0 0
55769- 0 0 0 0 0 0 0 0 0 0 0 0
55770- 0 0 0 0 0 0 0 0 0 14 14 14
55771- 42 42 42 94 94 94 14 14 14 101 101 101
55772-128 128 128 2 2 6 18 18 18 116 116 116
55773-118 98 46 121 92 8 121 92 8 98 78 10
55774-162 162 162 106 106 106 2 2 6 2 2 6
55775- 2 2 6 195 195 195 195 195 195 6 6 6
55776- 2 2 6 2 2 6 2 2 6 2 2 6
55777- 74 74 74 62 62 62 22 22 22 6 6 6
55778- 0 0 0 0 0 0 0 0 0 0 0 0
55779- 0 0 0 0 0 0 0 0 0 0 0 0
55780- 0 0 0 0 0 0 0 0 0 0 0 0
55781- 0 0 0 0 0 0 0 0 0 0 0 0
55782- 0 0 0 0 0 0 0 0 0 0 0 0
55783- 0 0 0 0 0 0 0 0 0 0 0 0
55784- 0 0 0 0 0 0 0 0 1 0 0 1
55785- 0 0 1 0 0 0 0 0 1 0 0 0
55786- 0 0 0 0 0 0 0 0 0 0 0 0
55787- 0 0 0 0 0 0 0 0 0 0 0 0
55788- 0 0 0 0 0 0 0 0 0 0 0 0
55789- 0 0 0 0 0 0 0 0 0 0 0 0
55790- 0 0 0 0 0 0 0 0 0 10 10 10
55791- 38 38 38 90 90 90 14 14 14 58 58 58
55792-210 210 210 26 26 26 54 38 6 154 114 10
55793-226 170 11 236 186 11 225 175 15 184 144 12
55794-215 174 15 175 146 61 37 26 9 2 2 6
55795- 70 70 70 246 246 246 138 138 138 2 2 6
55796- 2 2 6 2 2 6 2 2 6 2 2 6
55797- 70 70 70 66 66 66 26 26 26 6 6 6
55798- 0 0 0 0 0 0 0 0 0 0 0 0
55799- 0 0 0 0 0 0 0 0 0 0 0 0
55800- 0 0 0 0 0 0 0 0 0 0 0 0
55801- 0 0 0 0 0 0 0 0 0 0 0 0
55802- 0 0 0 0 0 0 0 0 0 0 0 0
55803- 0 0 0 0 0 0 0 0 0 0 0 0
55804- 0 0 0 0 0 0 0 0 0 0 0 0
55805- 0 0 0 0 0 0 0 0 0 0 0 0
55806- 0 0 0 0 0 0 0 0 0 0 0 0
55807- 0 0 0 0 0 0 0 0 0 0 0 0
55808- 0 0 0 0 0 0 0 0 0 0 0 0
55809- 0 0 0 0 0 0 0 0 0 0 0 0
55810- 0 0 0 0 0 0 0 0 0 10 10 10
55811- 38 38 38 86 86 86 14 14 14 10 10 10
55812-195 195 195 188 164 115 192 133 9 225 175 15
55813-239 182 13 234 190 10 232 195 16 232 200 30
55814-245 207 45 241 208 19 232 195 16 184 144 12
55815-218 194 134 211 206 186 42 42 42 2 2 6
55816- 2 2 6 2 2 6 2 2 6 2 2 6
55817- 50 50 50 74 74 74 30 30 30 6 6 6
55818- 0 0 0 0 0 0 0 0 0 0 0 0
55819- 0 0 0 0 0 0 0 0 0 0 0 0
55820- 0 0 0 0 0 0 0 0 0 0 0 0
55821- 0 0 0 0 0 0 0 0 0 0 0 0
55822- 0 0 0 0 0 0 0 0 0 0 0 0
55823- 0 0 0 0 0 0 0 0 0 0 0 0
55824- 0 0 0 0 0 0 0 0 0 0 0 0
55825- 0 0 0 0 0 0 0 0 0 0 0 0
55826- 0 0 0 0 0 0 0 0 0 0 0 0
55827- 0 0 0 0 0 0 0 0 0 0 0 0
55828- 0 0 0 0 0 0 0 0 0 0 0 0
55829- 0 0 0 0 0 0 0 0 0 0 0 0
55830- 0 0 0 0 0 0 0 0 0 10 10 10
55831- 34 34 34 86 86 86 14 14 14 2 2 6
55832-121 87 25 192 133 9 219 162 10 239 182 13
55833-236 186 11 232 195 16 241 208 19 244 214 54
55834-246 218 60 246 218 38 246 215 20 241 208 19
55835-241 208 19 226 184 13 121 87 25 2 2 6
55836- 2 2 6 2 2 6 2 2 6 2 2 6
55837- 50 50 50 82 82 82 34 34 34 10 10 10
55838- 0 0 0 0 0 0 0 0 0 0 0 0
55839- 0 0 0 0 0 0 0 0 0 0 0 0
55840- 0 0 0 0 0 0 0 0 0 0 0 0
55841- 0 0 0 0 0 0 0 0 0 0 0 0
55842- 0 0 0 0 0 0 0 0 0 0 0 0
55843- 0 0 0 0 0 0 0 0 0 0 0 0
55844- 0 0 0 0 0 0 0 0 0 0 0 0
55845- 0 0 0 0 0 0 0 0 0 0 0 0
55846- 0 0 0 0 0 0 0 0 0 0 0 0
55847- 0 0 0 0 0 0 0 0 0 0 0 0
55848- 0 0 0 0 0 0 0 0 0 0 0 0
55849- 0 0 0 0 0 0 0 0 0 0 0 0
55850- 0 0 0 0 0 0 0 0 0 10 10 10
55851- 34 34 34 82 82 82 30 30 30 61 42 6
55852-180 123 7 206 145 10 230 174 11 239 182 13
55853-234 190 10 238 202 15 241 208 19 246 218 74
55854-246 218 38 246 215 20 246 215 20 246 215 20
55855-226 184 13 215 174 15 184 144 12 6 6 6
55856- 2 2 6 2 2 6 2 2 6 2 2 6
55857- 26 26 26 94 94 94 42 42 42 14 14 14
55858- 0 0 0 0 0 0 0 0 0 0 0 0
55859- 0 0 0 0 0 0 0 0 0 0 0 0
55860- 0 0 0 0 0 0 0 0 0 0 0 0
55861- 0 0 0 0 0 0 0 0 0 0 0 0
55862- 0 0 0 0 0 0 0 0 0 0 0 0
55863- 0 0 0 0 0 0 0 0 0 0 0 0
55864- 0 0 0 0 0 0 0 0 0 0 0 0
55865- 0 0 0 0 0 0 0 0 0 0 0 0
55866- 0 0 0 0 0 0 0 0 0 0 0 0
55867- 0 0 0 0 0 0 0 0 0 0 0 0
55868- 0 0 0 0 0 0 0 0 0 0 0 0
55869- 0 0 0 0 0 0 0 0 0 0 0 0
55870- 0 0 0 0 0 0 0 0 0 10 10 10
55871- 30 30 30 78 78 78 50 50 50 104 69 6
55872-192 133 9 216 158 10 236 178 12 236 186 11
55873-232 195 16 241 208 19 244 214 54 245 215 43
55874-246 215 20 246 215 20 241 208 19 198 155 10
55875-200 144 11 216 158 10 156 118 10 2 2 6
55876- 2 2 6 2 2 6 2 2 6 2 2 6
55877- 6 6 6 90 90 90 54 54 54 18 18 18
55878- 6 6 6 0 0 0 0 0 0 0 0 0
55879- 0 0 0 0 0 0 0 0 0 0 0 0
55880- 0 0 0 0 0 0 0 0 0 0 0 0
55881- 0 0 0 0 0 0 0 0 0 0 0 0
55882- 0 0 0 0 0 0 0 0 0 0 0 0
55883- 0 0 0 0 0 0 0 0 0 0 0 0
55884- 0 0 0 0 0 0 0 0 0 0 0 0
55885- 0 0 0 0 0 0 0 0 0 0 0 0
55886- 0 0 0 0 0 0 0 0 0 0 0 0
55887- 0 0 0 0 0 0 0 0 0 0 0 0
55888- 0 0 0 0 0 0 0 0 0 0 0 0
55889- 0 0 0 0 0 0 0 0 0 0 0 0
55890- 0 0 0 0 0 0 0 0 0 10 10 10
55891- 30 30 30 78 78 78 46 46 46 22 22 22
55892-137 92 6 210 162 10 239 182 13 238 190 10
55893-238 202 15 241 208 19 246 215 20 246 215 20
55894-241 208 19 203 166 17 185 133 11 210 150 10
55895-216 158 10 210 150 10 102 78 10 2 2 6
55896- 6 6 6 54 54 54 14 14 14 2 2 6
55897- 2 2 6 62 62 62 74 74 74 30 30 30
55898- 10 10 10 0 0 0 0 0 0 0 0 0
55899- 0 0 0 0 0 0 0 0 0 0 0 0
55900- 0 0 0 0 0 0 0 0 0 0 0 0
55901- 0 0 0 0 0 0 0 0 0 0 0 0
55902- 0 0 0 0 0 0 0 0 0 0 0 0
55903- 0 0 0 0 0 0 0 0 0 0 0 0
55904- 0 0 0 0 0 0 0 0 0 0 0 0
55905- 0 0 0 0 0 0 0 0 0 0 0 0
55906- 0 0 0 0 0 0 0 0 0 0 0 0
55907- 0 0 0 0 0 0 0 0 0 0 0 0
55908- 0 0 0 0 0 0 0 0 0 0 0 0
55909- 0 0 0 0 0 0 0 0 0 0 0 0
55910- 0 0 0 0 0 0 0 0 0 10 10 10
55911- 34 34 34 78 78 78 50 50 50 6 6 6
55912- 94 70 30 139 102 15 190 146 13 226 184 13
55913-232 200 30 232 195 16 215 174 15 190 146 13
55914-168 122 10 192 133 9 210 150 10 213 154 11
55915-202 150 34 182 157 106 101 98 89 2 2 6
55916- 2 2 6 78 78 78 116 116 116 58 58 58
55917- 2 2 6 22 22 22 90 90 90 46 46 46
55918- 18 18 18 6 6 6 0 0 0 0 0 0
55919- 0 0 0 0 0 0 0 0 0 0 0 0
55920- 0 0 0 0 0 0 0 0 0 0 0 0
55921- 0 0 0 0 0 0 0 0 0 0 0 0
55922- 0 0 0 0 0 0 0 0 0 0 0 0
55923- 0 0 0 0 0 0 0 0 0 0 0 0
55924- 0 0 0 0 0 0 0 0 0 0 0 0
55925- 0 0 0 0 0 0 0 0 0 0 0 0
55926- 0 0 0 0 0 0 0 0 0 0 0 0
55927- 0 0 0 0 0 0 0 0 0 0 0 0
55928- 0 0 0 0 0 0 0 0 0 0 0 0
55929- 0 0 0 0 0 0 0 0 0 0 0 0
55930- 0 0 0 0 0 0 0 0 0 10 10 10
55931- 38 38 38 86 86 86 50 50 50 6 6 6
55932-128 128 128 174 154 114 156 107 11 168 122 10
55933-198 155 10 184 144 12 197 138 11 200 144 11
55934-206 145 10 206 145 10 197 138 11 188 164 115
55935-195 195 195 198 198 198 174 174 174 14 14 14
55936- 2 2 6 22 22 22 116 116 116 116 116 116
55937- 22 22 22 2 2 6 74 74 74 70 70 70
55938- 30 30 30 10 10 10 0 0 0 0 0 0
55939- 0 0 0 0 0 0 0 0 0 0 0 0
55940- 0 0 0 0 0 0 0 0 0 0 0 0
55941- 0 0 0 0 0 0 0 0 0 0 0 0
55942- 0 0 0 0 0 0 0 0 0 0 0 0
55943- 0 0 0 0 0 0 0 0 0 0 0 0
55944- 0 0 0 0 0 0 0 0 0 0 0 0
55945- 0 0 0 0 0 0 0 0 0 0 0 0
55946- 0 0 0 0 0 0 0 0 0 0 0 0
55947- 0 0 0 0 0 0 0 0 0 0 0 0
55948- 0 0 0 0 0 0 0 0 0 0 0 0
55949- 0 0 0 0 0 0 0 0 0 0 0 0
55950- 0 0 0 0 0 0 6 6 6 18 18 18
55951- 50 50 50 101 101 101 26 26 26 10 10 10
55952-138 138 138 190 190 190 174 154 114 156 107 11
55953-197 138 11 200 144 11 197 138 11 192 133 9
55954-180 123 7 190 142 34 190 178 144 187 187 187
55955-202 202 202 221 221 221 214 214 214 66 66 66
55956- 2 2 6 2 2 6 50 50 50 62 62 62
55957- 6 6 6 2 2 6 10 10 10 90 90 90
55958- 50 50 50 18 18 18 6 6 6 0 0 0
55959- 0 0 0 0 0 0 0 0 0 0 0 0
55960- 0 0 0 0 0 0 0 0 0 0 0 0
55961- 0 0 0 0 0 0 0 0 0 0 0 0
55962- 0 0 0 0 0 0 0 0 0 0 0 0
55963- 0 0 0 0 0 0 0 0 0 0 0 0
55964- 0 0 0 0 0 0 0 0 0 0 0 0
55965- 0 0 0 0 0 0 0 0 0 0 0 0
55966- 0 0 0 0 0 0 0 0 0 0 0 0
55967- 0 0 0 0 0 0 0 0 0 0 0 0
55968- 0 0 0 0 0 0 0 0 0 0 0 0
55969- 0 0 0 0 0 0 0 0 0 0 0 0
55970- 0 0 0 0 0 0 10 10 10 34 34 34
55971- 74 74 74 74 74 74 2 2 6 6 6 6
55972-144 144 144 198 198 198 190 190 190 178 166 146
55973-154 121 60 156 107 11 156 107 11 168 124 44
55974-174 154 114 187 187 187 190 190 190 210 210 210
55975-246 246 246 253 253 253 253 253 253 182 182 182
55976- 6 6 6 2 2 6 2 2 6 2 2 6
55977- 2 2 6 2 2 6 2 2 6 62 62 62
55978- 74 74 74 34 34 34 14 14 14 0 0 0
55979- 0 0 0 0 0 0 0 0 0 0 0 0
55980- 0 0 0 0 0 0 0 0 0 0 0 0
55981- 0 0 0 0 0 0 0 0 0 0 0 0
55982- 0 0 0 0 0 0 0 0 0 0 0 0
55983- 0 0 0 0 0 0 0 0 0 0 0 0
55984- 0 0 0 0 0 0 0 0 0 0 0 0
55985- 0 0 0 0 0 0 0 0 0 0 0 0
55986- 0 0 0 0 0 0 0 0 0 0 0 0
55987- 0 0 0 0 0 0 0 0 0 0 0 0
55988- 0 0 0 0 0 0 0 0 0 0 0 0
55989- 0 0 0 0 0 0 0 0 0 0 0 0
55990- 0 0 0 10 10 10 22 22 22 54 54 54
55991- 94 94 94 18 18 18 2 2 6 46 46 46
55992-234 234 234 221 221 221 190 190 190 190 190 190
55993-190 190 190 187 187 187 187 187 187 190 190 190
55994-190 190 190 195 195 195 214 214 214 242 242 242
55995-253 253 253 253 253 253 253 253 253 253 253 253
55996- 82 82 82 2 2 6 2 2 6 2 2 6
55997- 2 2 6 2 2 6 2 2 6 14 14 14
55998- 86 86 86 54 54 54 22 22 22 6 6 6
55999- 0 0 0 0 0 0 0 0 0 0 0 0
56000- 0 0 0 0 0 0 0 0 0 0 0 0
56001- 0 0 0 0 0 0 0 0 0 0 0 0
56002- 0 0 0 0 0 0 0 0 0 0 0 0
56003- 0 0 0 0 0 0 0 0 0 0 0 0
56004- 0 0 0 0 0 0 0 0 0 0 0 0
56005- 0 0 0 0 0 0 0 0 0 0 0 0
56006- 0 0 0 0 0 0 0 0 0 0 0 0
56007- 0 0 0 0 0 0 0 0 0 0 0 0
56008- 0 0 0 0 0 0 0 0 0 0 0 0
56009- 0 0 0 0 0 0 0 0 0 0 0 0
56010- 6 6 6 18 18 18 46 46 46 90 90 90
56011- 46 46 46 18 18 18 6 6 6 182 182 182
56012-253 253 253 246 246 246 206 206 206 190 190 190
56013-190 190 190 190 190 190 190 190 190 190 190 190
56014-206 206 206 231 231 231 250 250 250 253 253 253
56015-253 253 253 253 253 253 253 253 253 253 253 253
56016-202 202 202 14 14 14 2 2 6 2 2 6
56017- 2 2 6 2 2 6 2 2 6 2 2 6
56018- 42 42 42 86 86 86 42 42 42 18 18 18
56019- 6 6 6 0 0 0 0 0 0 0 0 0
56020- 0 0 0 0 0 0 0 0 0 0 0 0
56021- 0 0 0 0 0 0 0 0 0 0 0 0
56022- 0 0 0 0 0 0 0 0 0 0 0 0
56023- 0 0 0 0 0 0 0 0 0 0 0 0
56024- 0 0 0 0 0 0 0 0 0 0 0 0
56025- 0 0 0 0 0 0 0 0 0 0 0 0
56026- 0 0 0 0 0 0 0 0 0 0 0 0
56027- 0 0 0 0 0 0 0 0 0 0 0 0
56028- 0 0 0 0 0 0 0 0 0 0 0 0
56029- 0 0 0 0 0 0 0 0 0 6 6 6
56030- 14 14 14 38 38 38 74 74 74 66 66 66
56031- 2 2 6 6 6 6 90 90 90 250 250 250
56032-253 253 253 253 253 253 238 238 238 198 198 198
56033-190 190 190 190 190 190 195 195 195 221 221 221
56034-246 246 246 253 253 253 253 253 253 253 253 253
56035-253 253 253 253 253 253 253 253 253 253 253 253
56036-253 253 253 82 82 82 2 2 6 2 2 6
56037- 2 2 6 2 2 6 2 2 6 2 2 6
56038- 2 2 6 78 78 78 70 70 70 34 34 34
56039- 14 14 14 6 6 6 0 0 0 0 0 0
56040- 0 0 0 0 0 0 0 0 0 0 0 0
56041- 0 0 0 0 0 0 0 0 0 0 0 0
56042- 0 0 0 0 0 0 0 0 0 0 0 0
56043- 0 0 0 0 0 0 0 0 0 0 0 0
56044- 0 0 0 0 0 0 0 0 0 0 0 0
56045- 0 0 0 0 0 0 0 0 0 0 0 0
56046- 0 0 0 0 0 0 0 0 0 0 0 0
56047- 0 0 0 0 0 0 0 0 0 0 0 0
56048- 0 0 0 0 0 0 0 0 0 0 0 0
56049- 0 0 0 0 0 0 0 0 0 14 14 14
56050- 34 34 34 66 66 66 78 78 78 6 6 6
56051- 2 2 6 18 18 18 218 218 218 253 253 253
56052-253 253 253 253 253 253 253 253 253 246 246 246
56053-226 226 226 231 231 231 246 246 246 253 253 253
56054-253 253 253 253 253 253 253 253 253 253 253 253
56055-253 253 253 253 253 253 253 253 253 253 253 253
56056-253 253 253 178 178 178 2 2 6 2 2 6
56057- 2 2 6 2 2 6 2 2 6 2 2 6
56058- 2 2 6 18 18 18 90 90 90 62 62 62
56059- 30 30 30 10 10 10 0 0 0 0 0 0
56060- 0 0 0 0 0 0 0 0 0 0 0 0
56061- 0 0 0 0 0 0 0 0 0 0 0 0
56062- 0 0 0 0 0 0 0 0 0 0 0 0
56063- 0 0 0 0 0 0 0 0 0 0 0 0
56064- 0 0 0 0 0 0 0 0 0 0 0 0
56065- 0 0 0 0 0 0 0 0 0 0 0 0
56066- 0 0 0 0 0 0 0 0 0 0 0 0
56067- 0 0 0 0 0 0 0 0 0 0 0 0
56068- 0 0 0 0 0 0 0 0 0 0 0 0
56069- 0 0 0 0 0 0 10 10 10 26 26 26
56070- 58 58 58 90 90 90 18 18 18 2 2 6
56071- 2 2 6 110 110 110 253 253 253 253 253 253
56072-253 253 253 253 253 253 253 253 253 253 253 253
56073-250 250 250 253 253 253 253 253 253 253 253 253
56074-253 253 253 253 253 253 253 253 253 253 253 253
56075-253 253 253 253 253 253 253 253 253 253 253 253
56076-253 253 253 231 231 231 18 18 18 2 2 6
56077- 2 2 6 2 2 6 2 2 6 2 2 6
56078- 2 2 6 2 2 6 18 18 18 94 94 94
56079- 54 54 54 26 26 26 10 10 10 0 0 0
56080- 0 0 0 0 0 0 0 0 0 0 0 0
56081- 0 0 0 0 0 0 0 0 0 0 0 0
56082- 0 0 0 0 0 0 0 0 0 0 0 0
56083- 0 0 0 0 0 0 0 0 0 0 0 0
56084- 0 0 0 0 0 0 0 0 0 0 0 0
56085- 0 0 0 0 0 0 0 0 0 0 0 0
56086- 0 0 0 0 0 0 0 0 0 0 0 0
56087- 0 0 0 0 0 0 0 0 0 0 0 0
56088- 0 0 0 0 0 0 0 0 0 0 0 0
56089- 0 0 0 6 6 6 22 22 22 50 50 50
56090- 90 90 90 26 26 26 2 2 6 2 2 6
56091- 14 14 14 195 195 195 250 250 250 253 253 253
56092-253 253 253 253 253 253 253 253 253 253 253 253
56093-253 253 253 253 253 253 253 253 253 253 253 253
56094-253 253 253 253 253 253 253 253 253 253 253 253
56095-253 253 253 253 253 253 253 253 253 253 253 253
56096-250 250 250 242 242 242 54 54 54 2 2 6
56097- 2 2 6 2 2 6 2 2 6 2 2 6
56098- 2 2 6 2 2 6 2 2 6 38 38 38
56099- 86 86 86 50 50 50 22 22 22 6 6 6
56100- 0 0 0 0 0 0 0 0 0 0 0 0
56101- 0 0 0 0 0 0 0 0 0 0 0 0
56102- 0 0 0 0 0 0 0 0 0 0 0 0
56103- 0 0 0 0 0 0 0 0 0 0 0 0
56104- 0 0 0 0 0 0 0 0 0 0 0 0
56105- 0 0 0 0 0 0 0 0 0 0 0 0
56106- 0 0 0 0 0 0 0 0 0 0 0 0
56107- 0 0 0 0 0 0 0 0 0 0 0 0
56108- 0 0 0 0 0 0 0 0 0 0 0 0
56109- 6 6 6 14 14 14 38 38 38 82 82 82
56110- 34 34 34 2 2 6 2 2 6 2 2 6
56111- 42 42 42 195 195 195 246 246 246 253 253 253
56112-253 253 253 253 253 253 253 253 253 250 250 250
56113-242 242 242 242 242 242 250 250 250 253 253 253
56114-253 253 253 253 253 253 253 253 253 253 253 253
56115-253 253 253 250 250 250 246 246 246 238 238 238
56116-226 226 226 231 231 231 101 101 101 6 6 6
56117- 2 2 6 2 2 6 2 2 6 2 2 6
56118- 2 2 6 2 2 6 2 2 6 2 2 6
56119- 38 38 38 82 82 82 42 42 42 14 14 14
56120- 6 6 6 0 0 0 0 0 0 0 0 0
56121- 0 0 0 0 0 0 0 0 0 0 0 0
56122- 0 0 0 0 0 0 0 0 0 0 0 0
56123- 0 0 0 0 0 0 0 0 0 0 0 0
56124- 0 0 0 0 0 0 0 0 0 0 0 0
56125- 0 0 0 0 0 0 0 0 0 0 0 0
56126- 0 0 0 0 0 0 0 0 0 0 0 0
56127- 0 0 0 0 0 0 0 0 0 0 0 0
56128- 0 0 0 0 0 0 0 0 0 0 0 0
56129- 10 10 10 26 26 26 62 62 62 66 66 66
56130- 2 2 6 2 2 6 2 2 6 6 6 6
56131- 70 70 70 170 170 170 206 206 206 234 234 234
56132-246 246 246 250 250 250 250 250 250 238 238 238
56133-226 226 226 231 231 231 238 238 238 250 250 250
56134-250 250 250 250 250 250 246 246 246 231 231 231
56135-214 214 214 206 206 206 202 202 202 202 202 202
56136-198 198 198 202 202 202 182 182 182 18 18 18
56137- 2 2 6 2 2 6 2 2 6 2 2 6
56138- 2 2 6 2 2 6 2 2 6 2 2 6
56139- 2 2 6 62 62 62 66 66 66 30 30 30
56140- 10 10 10 0 0 0 0 0 0 0 0 0
56141- 0 0 0 0 0 0 0 0 0 0 0 0
56142- 0 0 0 0 0 0 0 0 0 0 0 0
56143- 0 0 0 0 0 0 0 0 0 0 0 0
56144- 0 0 0 0 0 0 0 0 0 0 0 0
56145- 0 0 0 0 0 0 0 0 0 0 0 0
56146- 0 0 0 0 0 0 0 0 0 0 0 0
56147- 0 0 0 0 0 0 0 0 0 0 0 0
56148- 0 0 0 0 0 0 0 0 0 0 0 0
56149- 14 14 14 42 42 42 82 82 82 18 18 18
56150- 2 2 6 2 2 6 2 2 6 10 10 10
56151- 94 94 94 182 182 182 218 218 218 242 242 242
56152-250 250 250 253 253 253 253 253 253 250 250 250
56153-234 234 234 253 253 253 253 253 253 253 253 253
56154-253 253 253 253 253 253 253 253 253 246 246 246
56155-238 238 238 226 226 226 210 210 210 202 202 202
56156-195 195 195 195 195 195 210 210 210 158 158 158
56157- 6 6 6 14 14 14 50 50 50 14 14 14
56158- 2 2 6 2 2 6 2 2 6 2 2 6
56159- 2 2 6 6 6 6 86 86 86 46 46 46
56160- 18 18 18 6 6 6 0 0 0 0 0 0
56161- 0 0 0 0 0 0 0 0 0 0 0 0
56162- 0 0 0 0 0 0 0 0 0 0 0 0
56163- 0 0 0 0 0 0 0 0 0 0 0 0
56164- 0 0 0 0 0 0 0 0 0 0 0 0
56165- 0 0 0 0 0 0 0 0 0 0 0 0
56166- 0 0 0 0 0 0 0 0 0 0 0 0
56167- 0 0 0 0 0 0 0 0 0 0 0 0
56168- 0 0 0 0 0 0 0 0 0 6 6 6
56169- 22 22 22 54 54 54 70 70 70 2 2 6
56170- 2 2 6 10 10 10 2 2 6 22 22 22
56171-166 166 166 231 231 231 250 250 250 253 253 253
56172-253 253 253 253 253 253 253 253 253 250 250 250
56173-242 242 242 253 253 253 253 253 253 253 253 253
56174-253 253 253 253 253 253 253 253 253 253 253 253
56175-253 253 253 253 253 253 253 253 253 246 246 246
56176-231 231 231 206 206 206 198 198 198 226 226 226
56177- 94 94 94 2 2 6 6 6 6 38 38 38
56178- 30 30 30 2 2 6 2 2 6 2 2 6
56179- 2 2 6 2 2 6 62 62 62 66 66 66
56180- 26 26 26 10 10 10 0 0 0 0 0 0
56181- 0 0 0 0 0 0 0 0 0 0 0 0
56182- 0 0 0 0 0 0 0 0 0 0 0 0
56183- 0 0 0 0 0 0 0 0 0 0 0 0
56184- 0 0 0 0 0 0 0 0 0 0 0 0
56185- 0 0 0 0 0 0 0 0 0 0 0 0
56186- 0 0 0 0 0 0 0 0 0 0 0 0
56187- 0 0 0 0 0 0 0 0 0 0 0 0
56188- 0 0 0 0 0 0 0 0 0 10 10 10
56189- 30 30 30 74 74 74 50 50 50 2 2 6
56190- 26 26 26 26 26 26 2 2 6 106 106 106
56191-238 238 238 253 253 253 253 253 253 253 253 253
56192-253 253 253 253 253 253 253 253 253 253 253 253
56193-253 253 253 253 253 253 253 253 253 253 253 253
56194-253 253 253 253 253 253 253 253 253 253 253 253
56195-253 253 253 253 253 253 253 253 253 253 253 253
56196-253 253 253 246 246 246 218 218 218 202 202 202
56197-210 210 210 14 14 14 2 2 6 2 2 6
56198- 30 30 30 22 22 22 2 2 6 2 2 6
56199- 2 2 6 2 2 6 18 18 18 86 86 86
56200- 42 42 42 14 14 14 0 0 0 0 0 0
56201- 0 0 0 0 0 0 0 0 0 0 0 0
56202- 0 0 0 0 0 0 0 0 0 0 0 0
56203- 0 0 0 0 0 0 0 0 0 0 0 0
56204- 0 0 0 0 0 0 0 0 0 0 0 0
56205- 0 0 0 0 0 0 0 0 0 0 0 0
56206- 0 0 0 0 0 0 0 0 0 0 0 0
56207- 0 0 0 0 0 0 0 0 0 0 0 0
56208- 0 0 0 0 0 0 0 0 0 14 14 14
56209- 42 42 42 90 90 90 22 22 22 2 2 6
56210- 42 42 42 2 2 6 18 18 18 218 218 218
56211-253 253 253 253 253 253 253 253 253 253 253 253
56212-253 253 253 253 253 253 253 253 253 253 253 253
56213-253 253 253 253 253 253 253 253 253 253 253 253
56214-253 253 253 253 253 253 253 253 253 253 253 253
56215-253 253 253 253 253 253 253 253 253 253 253 253
56216-253 253 253 253 253 253 250 250 250 221 221 221
56217-218 218 218 101 101 101 2 2 6 14 14 14
56218- 18 18 18 38 38 38 10 10 10 2 2 6
56219- 2 2 6 2 2 6 2 2 6 78 78 78
56220- 58 58 58 22 22 22 6 6 6 0 0 0
56221- 0 0 0 0 0 0 0 0 0 0 0 0
56222- 0 0 0 0 0 0 0 0 0 0 0 0
56223- 0 0 0 0 0 0 0 0 0 0 0 0
56224- 0 0 0 0 0 0 0 0 0 0 0 0
56225- 0 0 0 0 0 0 0 0 0 0 0 0
56226- 0 0 0 0 0 0 0 0 0 0 0 0
56227- 0 0 0 0 0 0 0 0 0 0 0 0
56228- 0 0 0 0 0 0 6 6 6 18 18 18
56229- 54 54 54 82 82 82 2 2 6 26 26 26
56230- 22 22 22 2 2 6 123 123 123 253 253 253
56231-253 253 253 253 253 253 253 253 253 253 253 253
56232-253 253 253 253 253 253 253 253 253 253 253 253
56233-253 253 253 253 253 253 253 253 253 253 253 253
56234-253 253 253 253 253 253 253 253 253 253 253 253
56235-253 253 253 253 253 253 253 253 253 253 253 253
56236-253 253 253 253 253 253 253 253 253 250 250 250
56237-238 238 238 198 198 198 6 6 6 38 38 38
56238- 58 58 58 26 26 26 38 38 38 2 2 6
56239- 2 2 6 2 2 6 2 2 6 46 46 46
56240- 78 78 78 30 30 30 10 10 10 0 0 0
56241- 0 0 0 0 0 0 0 0 0 0 0 0
56242- 0 0 0 0 0 0 0 0 0 0 0 0
56243- 0 0 0 0 0 0 0 0 0 0 0 0
56244- 0 0 0 0 0 0 0 0 0 0 0 0
56245- 0 0 0 0 0 0 0 0 0 0 0 0
56246- 0 0 0 0 0 0 0 0 0 0 0 0
56247- 0 0 0 0 0 0 0 0 0 0 0 0
56248- 0 0 0 0 0 0 10 10 10 30 30 30
56249- 74 74 74 58 58 58 2 2 6 42 42 42
56250- 2 2 6 22 22 22 231 231 231 253 253 253
56251-253 253 253 253 253 253 253 253 253 253 253 253
56252-253 253 253 253 253 253 253 253 253 250 250 250
56253-253 253 253 253 253 253 253 253 253 253 253 253
56254-253 253 253 253 253 253 253 253 253 253 253 253
56255-253 253 253 253 253 253 253 253 253 253 253 253
56256-253 253 253 253 253 253 253 253 253 253 253 253
56257-253 253 253 246 246 246 46 46 46 38 38 38
56258- 42 42 42 14 14 14 38 38 38 14 14 14
56259- 2 2 6 2 2 6 2 2 6 6 6 6
56260- 86 86 86 46 46 46 14 14 14 0 0 0
56261- 0 0 0 0 0 0 0 0 0 0 0 0
56262- 0 0 0 0 0 0 0 0 0 0 0 0
56263- 0 0 0 0 0 0 0 0 0 0 0 0
56264- 0 0 0 0 0 0 0 0 0 0 0 0
56265- 0 0 0 0 0 0 0 0 0 0 0 0
56266- 0 0 0 0 0 0 0 0 0 0 0 0
56267- 0 0 0 0 0 0 0 0 0 0 0 0
56268- 0 0 0 6 6 6 14 14 14 42 42 42
56269- 90 90 90 18 18 18 18 18 18 26 26 26
56270- 2 2 6 116 116 116 253 253 253 253 253 253
56271-253 253 253 253 253 253 253 253 253 253 253 253
56272-253 253 253 253 253 253 250 250 250 238 238 238
56273-253 253 253 253 253 253 253 253 253 253 253 253
56274-253 253 253 253 253 253 253 253 253 253 253 253
56275-253 253 253 253 253 253 253 253 253 253 253 253
56276-253 253 253 253 253 253 253 253 253 253 253 253
56277-253 253 253 253 253 253 94 94 94 6 6 6
56278- 2 2 6 2 2 6 10 10 10 34 34 34
56279- 2 2 6 2 2 6 2 2 6 2 2 6
56280- 74 74 74 58 58 58 22 22 22 6 6 6
56281- 0 0 0 0 0 0 0 0 0 0 0 0
56282- 0 0 0 0 0 0 0 0 0 0 0 0
56283- 0 0 0 0 0 0 0 0 0 0 0 0
56284- 0 0 0 0 0 0 0 0 0 0 0 0
56285- 0 0 0 0 0 0 0 0 0 0 0 0
56286- 0 0 0 0 0 0 0 0 0 0 0 0
56287- 0 0 0 0 0 0 0 0 0 0 0 0
56288- 0 0 0 10 10 10 26 26 26 66 66 66
56289- 82 82 82 2 2 6 38 38 38 6 6 6
56290- 14 14 14 210 210 210 253 253 253 253 253 253
56291-253 253 253 253 253 253 253 253 253 253 253 253
56292-253 253 253 253 253 253 246 246 246 242 242 242
56293-253 253 253 253 253 253 253 253 253 253 253 253
56294-253 253 253 253 253 253 253 253 253 253 253 253
56295-253 253 253 253 253 253 253 253 253 253 253 253
56296-253 253 253 253 253 253 253 253 253 253 253 253
56297-253 253 253 253 253 253 144 144 144 2 2 6
56298- 2 2 6 2 2 6 2 2 6 46 46 46
56299- 2 2 6 2 2 6 2 2 6 2 2 6
56300- 42 42 42 74 74 74 30 30 30 10 10 10
56301- 0 0 0 0 0 0 0 0 0 0 0 0
56302- 0 0 0 0 0 0 0 0 0 0 0 0
56303- 0 0 0 0 0 0 0 0 0 0 0 0
56304- 0 0 0 0 0 0 0 0 0 0 0 0
56305- 0 0 0 0 0 0 0 0 0 0 0 0
56306- 0 0 0 0 0 0 0 0 0 0 0 0
56307- 0 0 0 0 0 0 0 0 0 0 0 0
56308- 6 6 6 14 14 14 42 42 42 90 90 90
56309- 26 26 26 6 6 6 42 42 42 2 2 6
56310- 74 74 74 250 250 250 253 253 253 253 253 253
56311-253 253 253 253 253 253 253 253 253 253 253 253
56312-253 253 253 253 253 253 242 242 242 242 242 242
56313-253 253 253 253 253 253 253 253 253 253 253 253
56314-253 253 253 253 253 253 253 253 253 253 253 253
56315-253 253 253 253 253 253 253 253 253 253 253 253
56316-253 253 253 253 253 253 253 253 253 253 253 253
56317-253 253 253 253 253 253 182 182 182 2 2 6
56318- 2 2 6 2 2 6 2 2 6 46 46 46
56319- 2 2 6 2 2 6 2 2 6 2 2 6
56320- 10 10 10 86 86 86 38 38 38 10 10 10
56321- 0 0 0 0 0 0 0 0 0 0 0 0
56322- 0 0 0 0 0 0 0 0 0 0 0 0
56323- 0 0 0 0 0 0 0 0 0 0 0 0
56324- 0 0 0 0 0 0 0 0 0 0 0 0
56325- 0 0 0 0 0 0 0 0 0 0 0 0
56326- 0 0 0 0 0 0 0 0 0 0 0 0
56327- 0 0 0 0 0 0 0 0 0 0 0 0
56328- 10 10 10 26 26 26 66 66 66 82 82 82
56329- 2 2 6 22 22 22 18 18 18 2 2 6
56330-149 149 149 253 253 253 253 253 253 253 253 253
56331-253 253 253 253 253 253 253 253 253 253 253 253
56332-253 253 253 253 253 253 234 234 234 242 242 242
56333-253 253 253 253 253 253 253 253 253 253 253 253
56334-253 253 253 253 253 253 253 253 253 253 253 253
56335-253 253 253 253 253 253 253 253 253 253 253 253
56336-253 253 253 253 253 253 253 253 253 253 253 253
56337-253 253 253 253 253 253 206 206 206 2 2 6
56338- 2 2 6 2 2 6 2 2 6 38 38 38
56339- 2 2 6 2 2 6 2 2 6 2 2 6
56340- 6 6 6 86 86 86 46 46 46 14 14 14
56341- 0 0 0 0 0 0 0 0 0 0 0 0
56342- 0 0 0 0 0 0 0 0 0 0 0 0
56343- 0 0 0 0 0 0 0 0 0 0 0 0
56344- 0 0 0 0 0 0 0 0 0 0 0 0
56345- 0 0 0 0 0 0 0 0 0 0 0 0
56346- 0 0 0 0 0 0 0 0 0 0 0 0
56347- 0 0 0 0 0 0 0 0 0 6 6 6
56348- 18 18 18 46 46 46 86 86 86 18 18 18
56349- 2 2 6 34 34 34 10 10 10 6 6 6
56350-210 210 210 253 253 253 253 253 253 253 253 253
56351-253 253 253 253 253 253 253 253 253 253 253 253
56352-253 253 253 253 253 253 234 234 234 242 242 242
56353-253 253 253 253 253 253 253 253 253 253 253 253
56354-253 253 253 253 253 253 253 253 253 253 253 253
56355-253 253 253 253 253 253 253 253 253 253 253 253
56356-253 253 253 253 253 253 253 253 253 253 253 253
56357-253 253 253 253 253 253 221 221 221 6 6 6
56358- 2 2 6 2 2 6 6 6 6 30 30 30
56359- 2 2 6 2 2 6 2 2 6 2 2 6
56360- 2 2 6 82 82 82 54 54 54 18 18 18
56361- 6 6 6 0 0 0 0 0 0 0 0 0
56362- 0 0 0 0 0 0 0 0 0 0 0 0
56363- 0 0 0 0 0 0 0 0 0 0 0 0
56364- 0 0 0 0 0 0 0 0 0 0 0 0
56365- 0 0 0 0 0 0 0 0 0 0 0 0
56366- 0 0 0 0 0 0 0 0 0 0 0 0
56367- 0 0 0 0 0 0 0 0 0 10 10 10
56368- 26 26 26 66 66 66 62 62 62 2 2 6
56369- 2 2 6 38 38 38 10 10 10 26 26 26
56370-238 238 238 253 253 253 253 253 253 253 253 253
56371-253 253 253 253 253 253 253 253 253 253 253 253
56372-253 253 253 253 253 253 231 231 231 238 238 238
56373-253 253 253 253 253 253 253 253 253 253 253 253
56374-253 253 253 253 253 253 253 253 253 253 253 253
56375-253 253 253 253 253 253 253 253 253 253 253 253
56376-253 253 253 253 253 253 253 253 253 253 253 253
56377-253 253 253 253 253 253 231 231 231 6 6 6
56378- 2 2 6 2 2 6 10 10 10 30 30 30
56379- 2 2 6 2 2 6 2 2 6 2 2 6
56380- 2 2 6 66 66 66 58 58 58 22 22 22
56381- 6 6 6 0 0 0 0 0 0 0 0 0
56382- 0 0 0 0 0 0 0 0 0 0 0 0
56383- 0 0 0 0 0 0 0 0 0 0 0 0
56384- 0 0 0 0 0 0 0 0 0 0 0 0
56385- 0 0 0 0 0 0 0 0 0 0 0 0
56386- 0 0 0 0 0 0 0 0 0 0 0 0
56387- 0 0 0 0 0 0 0 0 0 10 10 10
56388- 38 38 38 78 78 78 6 6 6 2 2 6
56389- 2 2 6 46 46 46 14 14 14 42 42 42
56390-246 246 246 253 253 253 253 253 253 253 253 253
56391-253 253 253 253 253 253 253 253 253 253 253 253
56392-253 253 253 253 253 253 231 231 231 242 242 242
56393-253 253 253 253 253 253 253 253 253 253 253 253
56394-253 253 253 253 253 253 253 253 253 253 253 253
56395-253 253 253 253 253 253 253 253 253 253 253 253
56396-253 253 253 253 253 253 253 253 253 253 253 253
56397-253 253 253 253 253 253 234 234 234 10 10 10
56398- 2 2 6 2 2 6 22 22 22 14 14 14
56399- 2 2 6 2 2 6 2 2 6 2 2 6
56400- 2 2 6 66 66 66 62 62 62 22 22 22
56401- 6 6 6 0 0 0 0 0 0 0 0 0
56402- 0 0 0 0 0 0 0 0 0 0 0 0
56403- 0 0 0 0 0 0 0 0 0 0 0 0
56404- 0 0 0 0 0 0 0 0 0 0 0 0
56405- 0 0 0 0 0 0 0 0 0 0 0 0
56406- 0 0 0 0 0 0 0 0 0 0 0 0
56407- 0 0 0 0 0 0 6 6 6 18 18 18
56408- 50 50 50 74 74 74 2 2 6 2 2 6
56409- 14 14 14 70 70 70 34 34 34 62 62 62
56410-250 250 250 253 253 253 253 253 253 253 253 253
56411-253 253 253 253 253 253 253 253 253 253 253 253
56412-253 253 253 253 253 253 231 231 231 246 246 246
56413-253 253 253 253 253 253 253 253 253 253 253 253
56414-253 253 253 253 253 253 253 253 253 253 253 253
56415-253 253 253 253 253 253 253 253 253 253 253 253
56416-253 253 253 253 253 253 253 253 253 253 253 253
56417-253 253 253 253 253 253 234 234 234 14 14 14
56418- 2 2 6 2 2 6 30 30 30 2 2 6
56419- 2 2 6 2 2 6 2 2 6 2 2 6
56420- 2 2 6 66 66 66 62 62 62 22 22 22
56421- 6 6 6 0 0 0 0 0 0 0 0 0
56422- 0 0 0 0 0 0 0 0 0 0 0 0
56423- 0 0 0 0 0 0 0 0 0 0 0 0
56424- 0 0 0 0 0 0 0 0 0 0 0 0
56425- 0 0 0 0 0 0 0 0 0 0 0 0
56426- 0 0 0 0 0 0 0 0 0 0 0 0
56427- 0 0 0 0 0 0 6 6 6 18 18 18
56428- 54 54 54 62 62 62 2 2 6 2 2 6
56429- 2 2 6 30 30 30 46 46 46 70 70 70
56430-250 250 250 253 253 253 253 253 253 253 253 253
56431-253 253 253 253 253 253 253 253 253 253 253 253
56432-253 253 253 253 253 253 231 231 231 246 246 246
56433-253 253 253 253 253 253 253 253 253 253 253 253
56434-253 253 253 253 253 253 253 253 253 253 253 253
56435-253 253 253 253 253 253 253 253 253 253 253 253
56436-253 253 253 253 253 253 253 253 253 253 253 253
56437-253 253 253 253 253 253 226 226 226 10 10 10
56438- 2 2 6 6 6 6 30 30 30 2 2 6
56439- 2 2 6 2 2 6 2 2 6 2 2 6
56440- 2 2 6 66 66 66 58 58 58 22 22 22
56441- 6 6 6 0 0 0 0 0 0 0 0 0
56442- 0 0 0 0 0 0 0 0 0 0 0 0
56443- 0 0 0 0 0 0 0 0 0 0 0 0
56444- 0 0 0 0 0 0 0 0 0 0 0 0
56445- 0 0 0 0 0 0 0 0 0 0 0 0
56446- 0 0 0 0 0 0 0 0 0 0 0 0
56447- 0 0 0 0 0 0 6 6 6 22 22 22
56448- 58 58 58 62 62 62 2 2 6 2 2 6
56449- 2 2 6 2 2 6 30 30 30 78 78 78
56450-250 250 250 253 253 253 253 253 253 253 253 253
56451-253 253 253 253 253 253 253 253 253 253 253 253
56452-253 253 253 253 253 253 231 231 231 246 246 246
56453-253 253 253 253 253 253 253 253 253 253 253 253
56454-253 253 253 253 253 253 253 253 253 253 253 253
56455-253 253 253 253 253 253 253 253 253 253 253 253
56456-253 253 253 253 253 253 253 253 253 253 253 253
56457-253 253 253 253 253 253 206 206 206 2 2 6
56458- 22 22 22 34 34 34 18 14 6 22 22 22
56459- 26 26 26 18 18 18 6 6 6 2 2 6
56460- 2 2 6 82 82 82 54 54 54 18 18 18
56461- 6 6 6 0 0 0 0 0 0 0 0 0
56462- 0 0 0 0 0 0 0 0 0 0 0 0
56463- 0 0 0 0 0 0 0 0 0 0 0 0
56464- 0 0 0 0 0 0 0 0 0 0 0 0
56465- 0 0 0 0 0 0 0 0 0 0 0 0
56466- 0 0 0 0 0 0 0 0 0 0 0 0
56467- 0 0 0 0 0 0 6 6 6 26 26 26
56468- 62 62 62 106 106 106 74 54 14 185 133 11
56469-210 162 10 121 92 8 6 6 6 62 62 62
56470-238 238 238 253 253 253 253 253 253 253 253 253
56471-253 253 253 253 253 253 253 253 253 253 253 253
56472-253 253 253 253 253 253 231 231 231 246 246 246
56473-253 253 253 253 253 253 253 253 253 253 253 253
56474-253 253 253 253 253 253 253 253 253 253 253 253
56475-253 253 253 253 253 253 253 253 253 253 253 253
56476-253 253 253 253 253 253 253 253 253 253 253 253
56477-253 253 253 253 253 253 158 158 158 18 18 18
56478- 14 14 14 2 2 6 2 2 6 2 2 6
56479- 6 6 6 18 18 18 66 66 66 38 38 38
56480- 6 6 6 94 94 94 50 50 50 18 18 18
56481- 6 6 6 0 0 0 0 0 0 0 0 0
56482- 0 0 0 0 0 0 0 0 0 0 0 0
56483- 0 0 0 0 0 0 0 0 0 0 0 0
56484- 0 0 0 0 0 0 0 0 0 0 0 0
56485- 0 0 0 0 0 0 0 0 0 0 0 0
56486- 0 0 0 0 0 0 0 0 0 6 6 6
56487- 10 10 10 10 10 10 18 18 18 38 38 38
56488- 78 78 78 142 134 106 216 158 10 242 186 14
56489-246 190 14 246 190 14 156 118 10 10 10 10
56490- 90 90 90 238 238 238 253 253 253 253 253 253
56491-253 253 253 253 253 253 253 253 253 253 253 253
56492-253 253 253 253 253 253 231 231 231 250 250 250
56493-253 253 253 253 253 253 253 253 253 253 253 253
56494-253 253 253 253 253 253 253 253 253 253 253 253
56495-253 253 253 253 253 253 253 253 253 253 253 253
56496-253 253 253 253 253 253 253 253 253 246 230 190
56497-238 204 91 238 204 91 181 142 44 37 26 9
56498- 2 2 6 2 2 6 2 2 6 2 2 6
56499- 2 2 6 2 2 6 38 38 38 46 46 46
56500- 26 26 26 106 106 106 54 54 54 18 18 18
56501- 6 6 6 0 0 0 0 0 0 0 0 0
56502- 0 0 0 0 0 0 0 0 0 0 0 0
56503- 0 0 0 0 0 0 0 0 0 0 0 0
56504- 0 0 0 0 0 0 0 0 0 0 0 0
56505- 0 0 0 0 0 0 0 0 0 0 0 0
56506- 0 0 0 6 6 6 14 14 14 22 22 22
56507- 30 30 30 38 38 38 50 50 50 70 70 70
56508-106 106 106 190 142 34 226 170 11 242 186 14
56509-246 190 14 246 190 14 246 190 14 154 114 10
56510- 6 6 6 74 74 74 226 226 226 253 253 253
56511-253 253 253 253 253 253 253 253 253 253 253 253
56512-253 253 253 253 253 253 231 231 231 250 250 250
56513-253 253 253 253 253 253 253 253 253 253 253 253
56514-253 253 253 253 253 253 253 253 253 253 253 253
56515-253 253 253 253 253 253 253 253 253 253 253 253
56516-253 253 253 253 253 253 253 253 253 228 184 62
56517-241 196 14 241 208 19 232 195 16 38 30 10
56518- 2 2 6 2 2 6 2 2 6 2 2 6
56519- 2 2 6 6 6 6 30 30 30 26 26 26
56520-203 166 17 154 142 90 66 66 66 26 26 26
56521- 6 6 6 0 0 0 0 0 0 0 0 0
56522- 0 0 0 0 0 0 0 0 0 0 0 0
56523- 0 0 0 0 0 0 0 0 0 0 0 0
56524- 0 0 0 0 0 0 0 0 0 0 0 0
56525- 0 0 0 0 0 0 0 0 0 0 0 0
56526- 6 6 6 18 18 18 38 38 38 58 58 58
56527- 78 78 78 86 86 86 101 101 101 123 123 123
56528-175 146 61 210 150 10 234 174 13 246 186 14
56529-246 190 14 246 190 14 246 190 14 238 190 10
56530-102 78 10 2 2 6 46 46 46 198 198 198
56531-253 253 253 253 253 253 253 253 253 253 253 253
56532-253 253 253 253 253 253 234 234 234 242 242 242
56533-253 253 253 253 253 253 253 253 253 253 253 253
56534-253 253 253 253 253 253 253 253 253 253 253 253
56535-253 253 253 253 253 253 253 253 253 253 253 253
56536-253 253 253 253 253 253 253 253 253 224 178 62
56537-242 186 14 241 196 14 210 166 10 22 18 6
56538- 2 2 6 2 2 6 2 2 6 2 2 6
56539- 2 2 6 2 2 6 6 6 6 121 92 8
56540-238 202 15 232 195 16 82 82 82 34 34 34
56541- 10 10 10 0 0 0 0 0 0 0 0 0
56542- 0 0 0 0 0 0 0 0 0 0 0 0
56543- 0 0 0 0 0 0 0 0 0 0 0 0
56544- 0 0 0 0 0 0 0 0 0 0 0 0
56545- 0 0 0 0 0 0 0 0 0 0 0 0
56546- 14 14 14 38 38 38 70 70 70 154 122 46
56547-190 142 34 200 144 11 197 138 11 197 138 11
56548-213 154 11 226 170 11 242 186 14 246 190 14
56549-246 190 14 246 190 14 246 190 14 246 190 14
56550-225 175 15 46 32 6 2 2 6 22 22 22
56551-158 158 158 250 250 250 253 253 253 253 253 253
56552-253 253 253 253 253 253 253 253 253 253 253 253
56553-253 253 253 253 253 253 253 253 253 253 253 253
56554-253 253 253 253 253 253 253 253 253 253 253 253
56555-253 253 253 253 253 253 253 253 253 253 253 253
56556-253 253 253 250 250 250 242 242 242 224 178 62
56557-239 182 13 236 186 11 213 154 11 46 32 6
56558- 2 2 6 2 2 6 2 2 6 2 2 6
56559- 2 2 6 2 2 6 61 42 6 225 175 15
56560-238 190 10 236 186 11 112 100 78 42 42 42
56561- 14 14 14 0 0 0 0 0 0 0 0 0
56562- 0 0 0 0 0 0 0 0 0 0 0 0
56563- 0 0 0 0 0 0 0 0 0 0 0 0
56564- 0 0 0 0 0 0 0 0 0 0 0 0
56565- 0 0 0 0 0 0 0 0 0 6 6 6
56566- 22 22 22 54 54 54 154 122 46 213 154 11
56567-226 170 11 230 174 11 226 170 11 226 170 11
56568-236 178 12 242 186 14 246 190 14 246 190 14
56569-246 190 14 246 190 14 246 190 14 246 190 14
56570-241 196 14 184 144 12 10 10 10 2 2 6
56571- 6 6 6 116 116 116 242 242 242 253 253 253
56572-253 253 253 253 253 253 253 253 253 253 253 253
56573-253 253 253 253 253 253 253 253 253 253 253 253
56574-253 253 253 253 253 253 253 253 253 253 253 253
56575-253 253 253 253 253 253 253 253 253 253 253 253
56576-253 253 253 231 231 231 198 198 198 214 170 54
56577-236 178 12 236 178 12 210 150 10 137 92 6
56578- 18 14 6 2 2 6 2 2 6 2 2 6
56579- 6 6 6 70 47 6 200 144 11 236 178 12
56580-239 182 13 239 182 13 124 112 88 58 58 58
56581- 22 22 22 6 6 6 0 0 0 0 0 0
56582- 0 0 0 0 0 0 0 0 0 0 0 0
56583- 0 0 0 0 0 0 0 0 0 0 0 0
56584- 0 0 0 0 0 0 0 0 0 0 0 0
56585- 0 0 0 0 0 0 0 0 0 10 10 10
56586- 30 30 30 70 70 70 180 133 36 226 170 11
56587-239 182 13 242 186 14 242 186 14 246 186 14
56588-246 190 14 246 190 14 246 190 14 246 190 14
56589-246 190 14 246 190 14 246 190 14 246 190 14
56590-246 190 14 232 195 16 98 70 6 2 2 6
56591- 2 2 6 2 2 6 66 66 66 221 221 221
56592-253 253 253 253 253 253 253 253 253 253 253 253
56593-253 253 253 253 253 253 253 253 253 253 253 253
56594-253 253 253 253 253 253 253 253 253 253 253 253
56595-253 253 253 253 253 253 253 253 253 253 253 253
56596-253 253 253 206 206 206 198 198 198 214 166 58
56597-230 174 11 230 174 11 216 158 10 192 133 9
56598-163 110 8 116 81 8 102 78 10 116 81 8
56599-167 114 7 197 138 11 226 170 11 239 182 13
56600-242 186 14 242 186 14 162 146 94 78 78 78
56601- 34 34 34 14 14 14 6 6 6 0 0 0
56602- 0 0 0 0 0 0 0 0 0 0 0 0
56603- 0 0 0 0 0 0 0 0 0 0 0 0
56604- 0 0 0 0 0 0 0 0 0 0 0 0
56605- 0 0 0 0 0 0 0 0 0 6 6 6
56606- 30 30 30 78 78 78 190 142 34 226 170 11
56607-239 182 13 246 190 14 246 190 14 246 190 14
56608-246 190 14 246 190 14 246 190 14 246 190 14
56609-246 190 14 246 190 14 246 190 14 246 190 14
56610-246 190 14 241 196 14 203 166 17 22 18 6
56611- 2 2 6 2 2 6 2 2 6 38 38 38
56612-218 218 218 253 253 253 253 253 253 253 253 253
56613-253 253 253 253 253 253 253 253 253 253 253 253
56614-253 253 253 253 253 253 253 253 253 253 253 253
56615-253 253 253 253 253 253 253 253 253 253 253 253
56616-250 250 250 206 206 206 198 198 198 202 162 69
56617-226 170 11 236 178 12 224 166 10 210 150 10
56618-200 144 11 197 138 11 192 133 9 197 138 11
56619-210 150 10 226 170 11 242 186 14 246 190 14
56620-246 190 14 246 186 14 225 175 15 124 112 88
56621- 62 62 62 30 30 30 14 14 14 6 6 6
56622- 0 0 0 0 0 0 0 0 0 0 0 0
56623- 0 0 0 0 0 0 0 0 0 0 0 0
56624- 0 0 0 0 0 0 0 0 0 0 0 0
56625- 0 0 0 0 0 0 0 0 0 10 10 10
56626- 30 30 30 78 78 78 174 135 50 224 166 10
56627-239 182 13 246 190 14 246 190 14 246 190 14
56628-246 190 14 246 190 14 246 190 14 246 190 14
56629-246 190 14 246 190 14 246 190 14 246 190 14
56630-246 190 14 246 190 14 241 196 14 139 102 15
56631- 2 2 6 2 2 6 2 2 6 2 2 6
56632- 78 78 78 250 250 250 253 253 253 253 253 253
56633-253 253 253 253 253 253 253 253 253 253 253 253
56634-253 253 253 253 253 253 253 253 253 253 253 253
56635-253 253 253 253 253 253 253 253 253 253 253 253
56636-250 250 250 214 214 214 198 198 198 190 150 46
56637-219 162 10 236 178 12 234 174 13 224 166 10
56638-216 158 10 213 154 11 213 154 11 216 158 10
56639-226 170 11 239 182 13 246 190 14 246 190 14
56640-246 190 14 246 190 14 242 186 14 206 162 42
56641-101 101 101 58 58 58 30 30 30 14 14 14
56642- 6 6 6 0 0 0 0 0 0 0 0 0
56643- 0 0 0 0 0 0 0 0 0 0 0 0
56644- 0 0 0 0 0 0 0 0 0 0 0 0
56645- 0 0 0 0 0 0 0 0 0 10 10 10
56646- 30 30 30 74 74 74 174 135 50 216 158 10
56647-236 178 12 246 190 14 246 190 14 246 190 14
56648-246 190 14 246 190 14 246 190 14 246 190 14
56649-246 190 14 246 190 14 246 190 14 246 190 14
56650-246 190 14 246 190 14 241 196 14 226 184 13
56651- 61 42 6 2 2 6 2 2 6 2 2 6
56652- 22 22 22 238 238 238 253 253 253 253 253 253
56653-253 253 253 253 253 253 253 253 253 253 253 253
56654-253 253 253 253 253 253 253 253 253 253 253 253
56655-253 253 253 253 253 253 253 253 253 253 253 253
56656-253 253 253 226 226 226 187 187 187 180 133 36
56657-216 158 10 236 178 12 239 182 13 236 178 12
56658-230 174 11 226 170 11 226 170 11 230 174 11
56659-236 178 12 242 186 14 246 190 14 246 190 14
56660-246 190 14 246 190 14 246 186 14 239 182 13
56661-206 162 42 106 106 106 66 66 66 34 34 34
56662- 14 14 14 6 6 6 0 0 0 0 0 0
56663- 0 0 0 0 0 0 0 0 0 0 0 0
56664- 0 0 0 0 0 0 0 0 0 0 0 0
56665- 0 0 0 0 0 0 0 0 0 6 6 6
56666- 26 26 26 70 70 70 163 133 67 213 154 11
56667-236 178 12 246 190 14 246 190 14 246 190 14
56668-246 190 14 246 190 14 246 190 14 246 190 14
56669-246 190 14 246 190 14 246 190 14 246 190 14
56670-246 190 14 246 190 14 246 190 14 241 196 14
56671-190 146 13 18 14 6 2 2 6 2 2 6
56672- 46 46 46 246 246 246 253 253 253 253 253 253
56673-253 253 253 253 253 253 253 253 253 253 253 253
56674-253 253 253 253 253 253 253 253 253 253 253 253
56675-253 253 253 253 253 253 253 253 253 253 253 253
56676-253 253 253 221 221 221 86 86 86 156 107 11
56677-216 158 10 236 178 12 242 186 14 246 186 14
56678-242 186 14 239 182 13 239 182 13 242 186 14
56679-242 186 14 246 186 14 246 190 14 246 190 14
56680-246 190 14 246 190 14 246 190 14 246 190 14
56681-242 186 14 225 175 15 142 122 72 66 66 66
56682- 30 30 30 10 10 10 0 0 0 0 0 0
56683- 0 0 0 0 0 0 0 0 0 0 0 0
56684- 0 0 0 0 0 0 0 0 0 0 0 0
56685- 0 0 0 0 0 0 0 0 0 6 6 6
56686- 26 26 26 70 70 70 163 133 67 210 150 10
56687-236 178 12 246 190 14 246 190 14 246 190 14
56688-246 190 14 246 190 14 246 190 14 246 190 14
56689-246 190 14 246 190 14 246 190 14 246 190 14
56690-246 190 14 246 190 14 246 190 14 246 190 14
56691-232 195 16 121 92 8 34 34 34 106 106 106
56692-221 221 221 253 253 253 253 253 253 253 253 253
56693-253 253 253 253 253 253 253 253 253 253 253 253
56694-253 253 253 253 253 253 253 253 253 253 253 253
56695-253 253 253 253 253 253 253 253 253 253 253 253
56696-242 242 242 82 82 82 18 14 6 163 110 8
56697-216 158 10 236 178 12 242 186 14 246 190 14
56698-246 190 14 246 190 14 246 190 14 246 190 14
56699-246 190 14 246 190 14 246 190 14 246 190 14
56700-246 190 14 246 190 14 246 190 14 246 190 14
56701-246 190 14 246 190 14 242 186 14 163 133 67
56702- 46 46 46 18 18 18 6 6 6 0 0 0
56703- 0 0 0 0 0 0 0 0 0 0 0 0
56704- 0 0 0 0 0 0 0 0 0 0 0 0
56705- 0 0 0 0 0 0 0 0 0 10 10 10
56706- 30 30 30 78 78 78 163 133 67 210 150 10
56707-236 178 12 246 186 14 246 190 14 246 190 14
56708-246 190 14 246 190 14 246 190 14 246 190 14
56709-246 190 14 246 190 14 246 190 14 246 190 14
56710-246 190 14 246 190 14 246 190 14 246 190 14
56711-241 196 14 215 174 15 190 178 144 253 253 253
56712-253 253 253 253 253 253 253 253 253 253 253 253
56713-253 253 253 253 253 253 253 253 253 253 253 253
56714-253 253 253 253 253 253 253 253 253 253 253 253
56715-253 253 253 253 253 253 253 253 253 218 218 218
56716- 58 58 58 2 2 6 22 18 6 167 114 7
56717-216 158 10 236 178 12 246 186 14 246 190 14
56718-246 190 14 246 190 14 246 190 14 246 190 14
56719-246 190 14 246 190 14 246 190 14 246 190 14
56720-246 190 14 246 190 14 246 190 14 246 190 14
56721-246 190 14 246 186 14 242 186 14 190 150 46
56722- 54 54 54 22 22 22 6 6 6 0 0 0
56723- 0 0 0 0 0 0 0 0 0 0 0 0
56724- 0 0 0 0 0 0 0 0 0 0 0 0
56725- 0 0 0 0 0 0 0 0 0 14 14 14
56726- 38 38 38 86 86 86 180 133 36 213 154 11
56727-236 178 12 246 186 14 246 190 14 246 190 14
56728-246 190 14 246 190 14 246 190 14 246 190 14
56729-246 190 14 246 190 14 246 190 14 246 190 14
56730-246 190 14 246 190 14 246 190 14 246 190 14
56731-246 190 14 232 195 16 190 146 13 214 214 214
56732-253 253 253 253 253 253 253 253 253 253 253 253
56733-253 253 253 253 253 253 253 253 253 253 253 253
56734-253 253 253 253 253 253 253 253 253 253 253 253
56735-253 253 253 250 250 250 170 170 170 26 26 26
56736- 2 2 6 2 2 6 37 26 9 163 110 8
56737-219 162 10 239 182 13 246 186 14 246 190 14
56738-246 190 14 246 190 14 246 190 14 246 190 14
56739-246 190 14 246 190 14 246 190 14 246 190 14
56740-246 190 14 246 190 14 246 190 14 246 190 14
56741-246 186 14 236 178 12 224 166 10 142 122 72
56742- 46 46 46 18 18 18 6 6 6 0 0 0
56743- 0 0 0 0 0 0 0 0 0 0 0 0
56744- 0 0 0 0 0 0 0 0 0 0 0 0
56745- 0 0 0 0 0 0 6 6 6 18 18 18
56746- 50 50 50 109 106 95 192 133 9 224 166 10
56747-242 186 14 246 190 14 246 190 14 246 190 14
56748-246 190 14 246 190 14 246 190 14 246 190 14
56749-246 190 14 246 190 14 246 190 14 246 190 14
56750-246 190 14 246 190 14 246 190 14 246 190 14
56751-242 186 14 226 184 13 210 162 10 142 110 46
56752-226 226 226 253 253 253 253 253 253 253 253 253
56753-253 253 253 253 253 253 253 253 253 253 253 253
56754-253 253 253 253 253 253 253 253 253 253 253 253
56755-198 198 198 66 66 66 2 2 6 2 2 6
56756- 2 2 6 2 2 6 50 34 6 156 107 11
56757-219 162 10 239 182 13 246 186 14 246 190 14
56758-246 190 14 246 190 14 246 190 14 246 190 14
56759-246 190 14 246 190 14 246 190 14 246 190 14
56760-246 190 14 246 190 14 246 190 14 242 186 14
56761-234 174 13 213 154 11 154 122 46 66 66 66
56762- 30 30 30 10 10 10 0 0 0 0 0 0
56763- 0 0 0 0 0 0 0 0 0 0 0 0
56764- 0 0 0 0 0 0 0 0 0 0 0 0
56765- 0 0 0 0 0 0 6 6 6 22 22 22
56766- 58 58 58 154 121 60 206 145 10 234 174 13
56767-242 186 14 246 186 14 246 190 14 246 190 14
56768-246 190 14 246 190 14 246 190 14 246 190 14
56769-246 190 14 246 190 14 246 190 14 246 190 14
56770-246 190 14 246 190 14 246 190 14 246 190 14
56771-246 186 14 236 178 12 210 162 10 163 110 8
56772- 61 42 6 138 138 138 218 218 218 250 250 250
56773-253 253 253 253 253 253 253 253 253 250 250 250
56774-242 242 242 210 210 210 144 144 144 66 66 66
56775- 6 6 6 2 2 6 2 2 6 2 2 6
56776- 2 2 6 2 2 6 61 42 6 163 110 8
56777-216 158 10 236 178 12 246 190 14 246 190 14
56778-246 190 14 246 190 14 246 190 14 246 190 14
56779-246 190 14 246 190 14 246 190 14 246 190 14
56780-246 190 14 239 182 13 230 174 11 216 158 10
56781-190 142 34 124 112 88 70 70 70 38 38 38
56782- 18 18 18 6 6 6 0 0 0 0 0 0
56783- 0 0 0 0 0 0 0 0 0 0 0 0
56784- 0 0 0 0 0 0 0 0 0 0 0 0
56785- 0 0 0 0 0 0 6 6 6 22 22 22
56786- 62 62 62 168 124 44 206 145 10 224 166 10
56787-236 178 12 239 182 13 242 186 14 242 186 14
56788-246 186 14 246 190 14 246 190 14 246 190 14
56789-246 190 14 246 190 14 246 190 14 246 190 14
56790-246 190 14 246 190 14 246 190 14 246 190 14
56791-246 190 14 236 178 12 216 158 10 175 118 6
56792- 80 54 7 2 2 6 6 6 6 30 30 30
56793- 54 54 54 62 62 62 50 50 50 38 38 38
56794- 14 14 14 2 2 6 2 2 6 2 2 6
56795- 2 2 6 2 2 6 2 2 6 2 2 6
56796- 2 2 6 6 6 6 80 54 7 167 114 7
56797-213 154 11 236 178 12 246 190 14 246 190 14
56798-246 190 14 246 190 14 246 190 14 246 190 14
56799-246 190 14 242 186 14 239 182 13 239 182 13
56800-230 174 11 210 150 10 174 135 50 124 112 88
56801- 82 82 82 54 54 54 34 34 34 18 18 18
56802- 6 6 6 0 0 0 0 0 0 0 0 0
56803- 0 0 0 0 0 0 0 0 0 0 0 0
56804- 0 0 0 0 0 0 0 0 0 0 0 0
56805- 0 0 0 0 0 0 6 6 6 18 18 18
56806- 50 50 50 158 118 36 192 133 9 200 144 11
56807-216 158 10 219 162 10 224 166 10 226 170 11
56808-230 174 11 236 178 12 239 182 13 239 182 13
56809-242 186 14 246 186 14 246 190 14 246 190 14
56810-246 190 14 246 190 14 246 190 14 246 190 14
56811-246 186 14 230 174 11 210 150 10 163 110 8
56812-104 69 6 10 10 10 2 2 6 2 2 6
56813- 2 2 6 2 2 6 2 2 6 2 2 6
56814- 2 2 6 2 2 6 2 2 6 2 2 6
56815- 2 2 6 2 2 6 2 2 6 2 2 6
56816- 2 2 6 6 6 6 91 60 6 167 114 7
56817-206 145 10 230 174 11 242 186 14 246 190 14
56818-246 190 14 246 190 14 246 186 14 242 186 14
56819-239 182 13 230 174 11 224 166 10 213 154 11
56820-180 133 36 124 112 88 86 86 86 58 58 58
56821- 38 38 38 22 22 22 10 10 10 6 6 6
56822- 0 0 0 0 0 0 0 0 0 0 0 0
56823- 0 0 0 0 0 0 0 0 0 0 0 0
56824- 0 0 0 0 0 0 0 0 0 0 0 0
56825- 0 0 0 0 0 0 0 0 0 14 14 14
56826- 34 34 34 70 70 70 138 110 50 158 118 36
56827-167 114 7 180 123 7 192 133 9 197 138 11
56828-200 144 11 206 145 10 213 154 11 219 162 10
56829-224 166 10 230 174 11 239 182 13 242 186 14
56830-246 186 14 246 186 14 246 186 14 246 186 14
56831-239 182 13 216 158 10 185 133 11 152 99 6
56832-104 69 6 18 14 6 2 2 6 2 2 6
56833- 2 2 6 2 2 6 2 2 6 2 2 6
56834- 2 2 6 2 2 6 2 2 6 2 2 6
56835- 2 2 6 2 2 6 2 2 6 2 2 6
56836- 2 2 6 6 6 6 80 54 7 152 99 6
56837-192 133 9 219 162 10 236 178 12 239 182 13
56838-246 186 14 242 186 14 239 182 13 236 178 12
56839-224 166 10 206 145 10 192 133 9 154 121 60
56840- 94 94 94 62 62 62 42 42 42 22 22 22
56841- 14 14 14 6 6 6 0 0 0 0 0 0
56842- 0 0 0 0 0 0 0 0 0 0 0 0
56843- 0 0 0 0 0 0 0 0 0 0 0 0
56844- 0 0 0 0 0 0 0 0 0 0 0 0
56845- 0 0 0 0 0 0 0 0 0 6 6 6
56846- 18 18 18 34 34 34 58 58 58 78 78 78
56847-101 98 89 124 112 88 142 110 46 156 107 11
56848-163 110 8 167 114 7 175 118 6 180 123 7
56849-185 133 11 197 138 11 210 150 10 219 162 10
56850-226 170 11 236 178 12 236 178 12 234 174 13
56851-219 162 10 197 138 11 163 110 8 130 83 6
56852- 91 60 6 10 10 10 2 2 6 2 2 6
56853- 18 18 18 38 38 38 38 38 38 38 38 38
56854- 38 38 38 38 38 38 38 38 38 38 38 38
56855- 38 38 38 38 38 38 26 26 26 2 2 6
56856- 2 2 6 6 6 6 70 47 6 137 92 6
56857-175 118 6 200 144 11 219 162 10 230 174 11
56858-234 174 13 230 174 11 219 162 10 210 150 10
56859-192 133 9 163 110 8 124 112 88 82 82 82
56860- 50 50 50 30 30 30 14 14 14 6 6 6
56861- 0 0 0 0 0 0 0 0 0 0 0 0
56862- 0 0 0 0 0 0 0 0 0 0 0 0
56863- 0 0 0 0 0 0 0 0 0 0 0 0
56864- 0 0 0 0 0 0 0 0 0 0 0 0
56865- 0 0 0 0 0 0 0 0 0 0 0 0
56866- 6 6 6 14 14 14 22 22 22 34 34 34
56867- 42 42 42 58 58 58 74 74 74 86 86 86
56868-101 98 89 122 102 70 130 98 46 121 87 25
56869-137 92 6 152 99 6 163 110 8 180 123 7
56870-185 133 11 197 138 11 206 145 10 200 144 11
56871-180 123 7 156 107 11 130 83 6 104 69 6
56872- 50 34 6 54 54 54 110 110 110 101 98 89
56873- 86 86 86 82 82 82 78 78 78 78 78 78
56874- 78 78 78 78 78 78 78 78 78 78 78 78
56875- 78 78 78 82 82 82 86 86 86 94 94 94
56876-106 106 106 101 101 101 86 66 34 124 80 6
56877-156 107 11 180 123 7 192 133 9 200 144 11
56878-206 145 10 200 144 11 192 133 9 175 118 6
56879-139 102 15 109 106 95 70 70 70 42 42 42
56880- 22 22 22 10 10 10 0 0 0 0 0 0
56881- 0 0 0 0 0 0 0 0 0 0 0 0
56882- 0 0 0 0 0 0 0 0 0 0 0 0
56883- 0 0 0 0 0 0 0 0 0 0 0 0
56884- 0 0 0 0 0 0 0 0 0 0 0 0
56885- 0 0 0 0 0 0 0 0 0 0 0 0
56886- 0 0 0 0 0 0 6 6 6 10 10 10
56887- 14 14 14 22 22 22 30 30 30 38 38 38
56888- 50 50 50 62 62 62 74 74 74 90 90 90
56889-101 98 89 112 100 78 121 87 25 124 80 6
56890-137 92 6 152 99 6 152 99 6 152 99 6
56891-138 86 6 124 80 6 98 70 6 86 66 30
56892-101 98 89 82 82 82 58 58 58 46 46 46
56893- 38 38 38 34 34 34 34 34 34 34 34 34
56894- 34 34 34 34 34 34 34 34 34 34 34 34
56895- 34 34 34 34 34 34 38 38 38 42 42 42
56896- 54 54 54 82 82 82 94 86 76 91 60 6
56897-134 86 6 156 107 11 167 114 7 175 118 6
56898-175 118 6 167 114 7 152 99 6 121 87 25
56899-101 98 89 62 62 62 34 34 34 18 18 18
56900- 6 6 6 0 0 0 0 0 0 0 0 0
56901- 0 0 0 0 0 0 0 0 0 0 0 0
56902- 0 0 0 0 0 0 0 0 0 0 0 0
56903- 0 0 0 0 0 0 0 0 0 0 0 0
56904- 0 0 0 0 0 0 0 0 0 0 0 0
56905- 0 0 0 0 0 0 0 0 0 0 0 0
56906- 0 0 0 0 0 0 0 0 0 0 0 0
56907- 0 0 0 6 6 6 6 6 6 10 10 10
56908- 18 18 18 22 22 22 30 30 30 42 42 42
56909- 50 50 50 66 66 66 86 86 86 101 98 89
56910-106 86 58 98 70 6 104 69 6 104 69 6
56911-104 69 6 91 60 6 82 62 34 90 90 90
56912- 62 62 62 38 38 38 22 22 22 14 14 14
56913- 10 10 10 10 10 10 10 10 10 10 10 10
56914- 10 10 10 10 10 10 6 6 6 10 10 10
56915- 10 10 10 10 10 10 10 10 10 14 14 14
56916- 22 22 22 42 42 42 70 70 70 89 81 66
56917- 80 54 7 104 69 6 124 80 6 137 92 6
56918-134 86 6 116 81 8 100 82 52 86 86 86
56919- 58 58 58 30 30 30 14 14 14 6 6 6
56920- 0 0 0 0 0 0 0 0 0 0 0 0
56921- 0 0 0 0 0 0 0 0 0 0 0 0
56922- 0 0 0 0 0 0 0 0 0 0 0 0
56923- 0 0 0 0 0 0 0 0 0 0 0 0
56924- 0 0 0 0 0 0 0 0 0 0 0 0
56925- 0 0 0 0 0 0 0 0 0 0 0 0
56926- 0 0 0 0 0 0 0 0 0 0 0 0
56927- 0 0 0 0 0 0 0 0 0 0 0 0
56928- 0 0 0 6 6 6 10 10 10 14 14 14
56929- 18 18 18 26 26 26 38 38 38 54 54 54
56930- 70 70 70 86 86 86 94 86 76 89 81 66
56931- 89 81 66 86 86 86 74 74 74 50 50 50
56932- 30 30 30 14 14 14 6 6 6 0 0 0
56933- 0 0 0 0 0 0 0 0 0 0 0 0
56934- 0 0 0 0 0 0 0 0 0 0 0 0
56935- 0 0 0 0 0 0 0 0 0 0 0 0
56936- 6 6 6 18 18 18 34 34 34 58 58 58
56937- 82 82 82 89 81 66 89 81 66 89 81 66
56938- 94 86 66 94 86 76 74 74 74 50 50 50
56939- 26 26 26 14 14 14 6 6 6 0 0 0
56940- 0 0 0 0 0 0 0 0 0 0 0 0
56941- 0 0 0 0 0 0 0 0 0 0 0 0
56942- 0 0 0 0 0 0 0 0 0 0 0 0
56943- 0 0 0 0 0 0 0 0 0 0 0 0
56944- 0 0 0 0 0 0 0 0 0 0 0 0
56945- 0 0 0 0 0 0 0 0 0 0 0 0
56946- 0 0 0 0 0 0 0 0 0 0 0 0
56947- 0 0 0 0 0 0 0 0 0 0 0 0
56948- 0 0 0 0 0 0 0 0 0 0 0 0
56949- 6 6 6 6 6 6 14 14 14 18 18 18
56950- 30 30 30 38 38 38 46 46 46 54 54 54
56951- 50 50 50 42 42 42 30 30 30 18 18 18
56952- 10 10 10 0 0 0 0 0 0 0 0 0
56953- 0 0 0 0 0 0 0 0 0 0 0 0
56954- 0 0 0 0 0 0 0 0 0 0 0 0
56955- 0 0 0 0 0 0 0 0 0 0 0 0
56956- 0 0 0 6 6 6 14 14 14 26 26 26
56957- 38 38 38 50 50 50 58 58 58 58 58 58
56958- 54 54 54 42 42 42 30 30 30 18 18 18
56959- 10 10 10 0 0 0 0 0 0 0 0 0
56960- 0 0 0 0 0 0 0 0 0 0 0 0
56961- 0 0 0 0 0 0 0 0 0 0 0 0
[image pixel data omitted: the rest of this hunk deletes the original logo's ASCII RGB rows (the "- 0 0 0 ..." lines) and adds the replacement image's rows (the "+4 4 4 ..." lines) — roughly 1,100 lines of raw colour triplets whose only reviewable content is the image swap itself]
58104diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
58105index fef20db..d28b1ab 100644
58106--- a/drivers/xen/xenfs/xenstored.c
58107+++ b/drivers/xen/xenfs/xenstored.c
58108@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
58109 static int xsd_kva_open(struct inode *inode, struct file *file)
58110 {
58111 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
58112+#ifdef CONFIG_GRKERNSEC_HIDESYM
58113+ NULL);
58114+#else
58115 xen_store_interface);
58116+#endif
58117+
58118 if (!file->private_data)
58119 return -ENOMEM;
58120 return 0;
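
The xenstored hunk above is a pointer-leak fix: xsd_kva_open() hands userspace a "0x%p" string, so under CONFIG_GRKERNSEC_HIDESYM the patch formats NULL instead of the real xen_store_interface address. A minimal userspace sketch of the same hide-the-pointer pattern (the names and the HIDESYM toggle here are illustrative, not from the patch):

#include <stdio.h>
#include <stdlib.h>

/* Toggle to mimic CONFIG_GRKERNSEC_HIDESYM. */
#define HIDESYM 1

static char *format_handle(const void *obj)
{
	char *buf = malloc(32);
	if (!buf)
		return NULL;
#if HIDESYM
	/* Leak nothing: print a constant instead of the real address. */
	snprintf(buf, 32, "0x%p", (void *)NULL);
#else
	snprintf(buf, 32, "0x%p", obj);
#endif
	return buf;
}

int main(void)
{
	int secret;
	char *s = format_handle(&secret);
	if (!s)
		return 1;
	puts(s);	/* prints "0x(nil)" or similar when hidden */
	free(s);
	return 0;
}

The caller-visible contract is unchanged — a heap-allocated string or NULL — only the information content of the string drops.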
58121diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
58122index eb14e05..5156de7 100644
58123--- a/fs/9p/vfs_addr.c
58124+++ b/fs/9p/vfs_addr.c
58125@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
58126
58127 retval = v9fs_file_write_internal(inode,
58128 v9inode->writeback_fid,
58129- (__force const char __user *)buffer,
58130+ (const char __force_user *)buffer,
58131 len, &offset, 0);
58132 if (retval > 0)
58133 retval = 0;
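
The 9p change is annotation-only: mainline spells the cast (__force const char __user *), while grsecurity writes (const char __force_user *) using a combined attribute defined in its compiler headers. Either way the cast tells sparse "yes, I really mean to pass a kernel buffer where a __user pointer is expected"; the generated code is identical. A sketch of how such annotations are conventionally declared — the __force_user composition shown is an assumption about the patch's definition, and fake_write() is invented for illustration:

/* Sparse address-space annotations (mainline spellings; only sparse,
 * i.e. a build with __CHECKER__ defined, sees them at all). */
#ifdef __CHECKER__
# define __user   __attribute__((noderef, address_space(1)))
# define __force  __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user __force __user	/* assumed: how the patch composes it */

#include <stddef.h>

/* To plain gcc both cast styles are identical. */
static size_t fake_write(const char __user *buf, size_t len)
{
	(void)buf;
	return len;
}

int main(void)
{
	char kbuf[8] = "data";
	return (int)fake_write((const char __force_user *)kbuf, 4) == 4 ? 0 : 1;
}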
58134diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
58135index 9ee5343..5165e3c 100644
58136--- a/fs/9p/vfs_inode.c
58137+++ b/fs/9p/vfs_inode.c
58138@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
58139 void
58140 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
58141 {
58142- char *s = nd_get_link(nd);
58143+ const char *s = nd_get_link(nd);
58144
58145 p9_debug(P9_DEBUG_VFS, " %pd %s\n",
58146 dentry, IS_ERR(s) ? "<error>" : s);
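
The vfs_inode.c hunk just const-qualifies the link body returned by nd_get_link(), recording that v9fs_vfs_put_link() only reads it. A toy illustration of the habit (all names here are demo stand-ins):

#include <stdio.h>

static char link_body[] = "/srv/export";

static char *nd_get_link_demo(void) { return link_body; }

static void put_link_demo(void)
{
	const char *s = nd_get_link_demo();	/* read-only view from here on */
	printf("following %s\n", s);
	/* s[0] = 'X'; would now be rejected at compile time */
}

int main(void) { put_link_demo(); return 0; }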
58147diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
58148index c055d56e..a46f4f5 100644
58149--- a/fs/Kconfig.binfmt
58150+++ b/fs/Kconfig.binfmt
58151@@ -106,7 +106,7 @@ config HAVE_AOUT
58152
58153 config BINFMT_AOUT
58154 tristate "Kernel support for a.out and ECOFF binaries"
58155- depends on HAVE_AOUT
58156+ depends on HAVE_AOUT && BROKEN
58157 ---help---
58158 A.out (Assembler.OUTput) is a set of formats for libraries and
58159 executables used in the earliest versions of UNIX. Linux used
58160diff --git a/fs/afs/inode.c b/fs/afs/inode.c
58161index 8a1d38e..300a14e 100644
58162--- a/fs/afs/inode.c
58163+++ b/fs/afs/inode.c
58164@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
58165 struct afs_vnode *vnode;
58166 struct super_block *sb;
58167 struct inode *inode;
58168- static atomic_t afs_autocell_ino;
58169+ static atomic_unchecked_t afs_autocell_ino;
58170
58171 _enter("{%x:%u},%*.*s,",
58172 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
58173@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
58174 data.fid.unique = 0;
58175 data.fid.vnode = 0;
58176
58177- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
58178+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
58179 afs_iget5_autocell_test, afs_iget5_set,
58180 &data);
58181 if (!inode) {
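
afs_autocell_ino is just an inode-number generator, so wraparound is harmless; switching it to atomic_unchecked_t opts it out of PAX_REFCOUNT's overflow trap, which would otherwise treat a wrap as a refcount bug. A simplified sketch of the unchecked variant (not the kernel's real definitions):

typedef struct { volatile int counter; } atomic_unchecked_t;

/* no overflow check: callers assert that wraparound is benign */
static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
    return __atomic_add_fetch(&v->counter, 1, __ATOMIC_SEQ_CST);
}
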
58182diff --git a/fs/aio.c b/fs/aio.c
58183index ebd0e9b..c577c91 100644
58184--- a/fs/aio.c
58185+++ b/fs/aio.c
58186@@ -413,7 +413,7 @@ static int aio_setup_ring(struct kioctx *ctx)
58187 size += sizeof(struct io_event) * nr_events;
58188
58189 nr_pages = PFN_UP(size);
58190- if (nr_pages < 0)
58191+ if (nr_pages <= 0)
58192 return -EINVAL;
58193
58194 file = aio_private_file(ctx, nr_pages);
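
In the aio_setup_ring() hunk, tightening < 0 to <= 0 additionally rejects a request whose rounded size comes to zero pages, which the old check let through. A self-contained illustration (PFN_UP reimplemented here for the example):

#include <assert.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

static int check_nr_pages(long nr_pages)
{
    if (nr_pages <= 0)
        return -22;  /* -EINVAL */
    return 0;
}

int main(void)
{
    assert(check_nr_pages((long)PFN_UP(0)) == -22); /* zero pages: now rejected */
    assert(check_nr_pages((long)PFN_UP(1)) == 0);   /* rounds up to one page */
    return 0;
}
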
58195diff --git a/fs/attr.c b/fs/attr.c
58196index 6530ced..4a827e2 100644
58197--- a/fs/attr.c
58198+++ b/fs/attr.c
58199@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
58200 unsigned long limit;
58201
58202 limit = rlimit(RLIMIT_FSIZE);
58203+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
58204 if (limit != RLIM_INFINITY && offset > limit)
58205 goto out_sig;
58206 if (offset > inode->i_sb->s_maxbytes)
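
gr_learn_resource() is grsecurity's learning-mode hook: it records the attempted RLIMIT_FSIZE value before the unchanged check enforces it, so an administrator can derive workable limits from observed behaviour. The enforcement itself is the ordinary rlimit comparison, shown here from userspace for orientation (the offset is a made-up value):

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
    struct rlimit rl;
    unsigned long long offset = 1ULL << 20; /* hypothetical truncate target */

    if (getrlimit(RLIMIT_FSIZE, &rl) == 0 &&
        rl.rlim_cur != RLIM_INFINITY && offset > rl.rlim_cur)
        printf("would fail like inode_newsize_ok(): SIGXFSZ/EFBIG\n");
    else
        printf("size within RLIMIT_FSIZE\n");
    return 0;
}
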
58207diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
58208index 116fd38..c04182da 100644
58209--- a/fs/autofs4/waitq.c
58210+++ b/fs/autofs4/waitq.c
58211@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
58212 {
58213 unsigned long sigpipe, flags;
58214 mm_segment_t fs;
58215- const char *data = (const char *)addr;
58216+ const char __user *data = (const char __force_user *)addr;
58217 ssize_t wr = 0;
58218
58219 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
58220@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
58221 return 1;
58222 }
58223
58224+#ifdef CONFIG_GRKERNSEC_HIDESYM
58225+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
58226+#endif
58227+
58228 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
58229 enum autofs_notify notify)
58230 {
58231@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
58232
58233 /* If this is a direct mount request create a dummy name */
58234 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
58235+#ifdef CONFIG_GRKERNSEC_HIDESYM
58236+ /* this name does get written to userland via autofs4_write() */
58237+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
58238+#else
58239 qstr.len = sprintf(name, "%p", dentry);
58240+#endif
58241 else {
58242 qstr.len = autofs4_getpath(sbi, dentry, &name);
58243 if (!qstr.len) {
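
Because the dummy name built here is later written to userland via autofs4_write(), formatting the dentry with %p would leak a kernel address, the same HIDESYM concern as in the xenstored hunk; a global counter yields an equally unique, information-free name. Sketch (the counter is a stand-in for autofs_dummy_name_id):

#include <stdio.h>

static unsigned int dummy_name_id;  /* stands in for autofs_dummy_name_id */

static int make_dummy_name(char name[16])
{
    /* unique, fixed-width, and reveals nothing about kernel memory */
    return sprintf(name, "%08x", ++dummy_name_id);
}

int main(void)
{
    char name[16];

    make_dummy_name(name);  /* "00000001" */
    make_dummy_name(name);  /* "00000002" */
    printf("%s\n", name);
    return 0;
}
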
58244diff --git a/fs/befs/endian.h b/fs/befs/endian.h
58245index 2722387..56059b5 100644
58246--- a/fs/befs/endian.h
58247+++ b/fs/befs/endian.h
58248@@ -11,7 +11,7 @@
58249
58250 #include <asm/byteorder.h>
58251
58252-static inline u64
58253+static inline u64 __intentional_overflow(-1)
58254 fs64_to_cpu(const struct super_block *sb, fs64 n)
58255 {
58256 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58257@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
58258 return (__force fs64)cpu_to_be64(n);
58259 }
58260
58261-static inline u32
58262+static inline u32 __intentional_overflow(-1)
58263 fs32_to_cpu(const struct super_block *sb, fs32 n)
58264 {
58265 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58266@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
58267 return (__force fs32)cpu_to_be32(n);
58268 }
58269
58270-static inline u16
58271+static inline u16 __intentional_overflow(-1)
58272 fs16_to_cpu(const struct super_block *sb, fs16 n)
58273 {
58274 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
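
fs64_to_cpu() and its siblings legitimately reinterpret every possible bit pattern, so grsecurity's size_overflow GCC plugin must not instrument them; __intentional_overflow(-1) is that opt-out marker. Roughly how such a marker degrades to a no-op outside plugin builds (simplified, not the exact grsecurity definition):

#ifdef SIZE_OVERFLOW_PLUGIN /* only in plugin-enabled builds */
# define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
#else
# define __intentional_overflow(...)
#endif

/* byte-swapping wraps by design; the marker silences instrumentation */
static inline unsigned long long __intentional_overflow(-1)
swap64(unsigned long long n)
{
    return __builtin_bswap64(n);
}
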
58275diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
58276index 4c55668..eeae150 100644
58277--- a/fs/binfmt_aout.c
58278+++ b/fs/binfmt_aout.c
58279@@ -16,6 +16,7 @@
58280 #include <linux/string.h>
58281 #include <linux/fs.h>
58282 #include <linux/file.h>
58283+#include <linux/security.h>
58284 #include <linux/stat.h>
58285 #include <linux/fcntl.h>
58286 #include <linux/ptrace.h>
58287@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
58288 #endif
58289 # define START_STACK(u) ((void __user *)u.start_stack)
58290
58291+ memset(&dump, 0, sizeof(dump));
58292+
58293 fs = get_fs();
58294 set_fs(KERNEL_DS);
58295 has_dumped = 1;
58296@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
58297
58298 /* If the size of the dump file exceeds the rlimit, then see what would happen
58299 if we wrote the stack, but not the data area. */
58300+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
58301 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
58302 dump.u_dsize = 0;
58303
58304 /* Make sure we have enough room to write the stack and data areas. */
58305+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
58306 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
58307 dump.u_ssize = 0;
58308
58309@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
58310 rlim = rlimit(RLIMIT_DATA);
58311 if (rlim >= RLIM_INFINITY)
58312 rlim = ~0;
58313+
58314+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
58315 if (ex.a_data + ex.a_bss > rlim)
58316 return -ENOMEM;
58317
58318@@ -261,6 +268,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
58319
58320 install_exec_creds(bprm);
58321
58322+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58323+ current->mm->pax_flags = 0UL;
58324+#endif
58325+
58326+#ifdef CONFIG_PAX_PAGEEXEC
58327+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
58328+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
58329+
58330+#ifdef CONFIG_PAX_EMUTRAMP
58331+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
58332+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
58333+#endif
58334+
58335+#ifdef CONFIG_PAX_MPROTECT
58336+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
58337+ current->mm->pax_flags |= MF_PAX_MPROTECT;
58338+#endif
58339+
58340+ }
58341+#endif
58342+
58343 if (N_MAGIC(ex) == OMAGIC) {
58344 unsigned long text_addr, map_size;
58345 loff_t pos;
58346@@ -312,7 +340,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
58347 return error;
58348
58349 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
58350- PROT_READ | PROT_WRITE | PROT_EXEC,
58351+ PROT_READ | PROT_WRITE,
58352 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
58353 fd_offset + ex.a_text);
58354 if (error != N_DATADDR(ex))
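
The a.out loader hunks mirror what the ELF path does later in this patch: derive per-mm PaX flags from header bits, defaulting to protection on unless the binary opts out, and stop mapping the data segment PROT_EXEC. A compact sketch of the flag derivation with stand-in constants (the real code also nests EMUTRAMP handling under PAGEEXEC):

#define F_PAX_PAGEEXEC  0x01u   /* header bit: binary opts out of PAGEEXEC */
#define F_PAX_MPROTECT  0x02u   /* header bit: binary opts out of MPROTECT */
#define MF_PAX_PAGEEXEC 0x01UL
#define MF_PAX_MPROTECT 0x02UL

static unsigned long aout_pax_flags(unsigned int n_flags)
{
    unsigned long flags = 0UL;

    if (!(n_flags & F_PAX_PAGEEXEC))
        flags |= MF_PAX_PAGEEXEC;   /* default: non-executable pages */
    if (!(n_flags & F_PAX_MPROTECT))
        flags |= MF_PAX_MPROTECT;   /* default: no W|X transitions */
    return flags;
}
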
58355diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
58356index 995986b..dcc4ef2 100644
58357--- a/fs/binfmt_elf.c
58358+++ b/fs/binfmt_elf.c
58359@@ -34,6 +34,7 @@
58360 #include <linux/utsname.h>
58361 #include <linux/coredump.h>
58362 #include <linux/sched.h>
58363+#include <linux/xattr.h>
58364 #include <asm/uaccess.h>
58365 #include <asm/param.h>
58366 #include <asm/page.h>
58367@@ -47,7 +48,7 @@
58368
58369 static int load_elf_binary(struct linux_binprm *bprm);
58370 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
58371- int, int, unsigned long);
58372+ int, int, unsigned long) __intentional_overflow(-1);
58373
58374 #ifdef CONFIG_USELIB
58375 static int load_elf_library(struct file *);
58376@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
58377 #define elf_core_dump NULL
58378 #endif
58379
58380+#ifdef CONFIG_PAX_MPROTECT
58381+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
58382+#endif
58383+
58384+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58385+static void elf_handle_mmap(struct file *file);
58386+#endif
58387+
58388 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
58389 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
58390 #else
58391@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
58392 .load_binary = load_elf_binary,
58393 .load_shlib = load_elf_library,
58394 .core_dump = elf_core_dump,
58395+
58396+#ifdef CONFIG_PAX_MPROTECT
58397+ .handle_mprotect= elf_handle_mprotect,
58398+#endif
58399+
58400+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58401+ .handle_mmap = elf_handle_mmap,
58402+#endif
58403+
58404 .min_coredump = ELF_EXEC_PAGESIZE,
58405 };
58406
58407@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
58408
58409 static int set_brk(unsigned long start, unsigned long end)
58410 {
58411+ unsigned long e = end;
58412+
58413 start = ELF_PAGEALIGN(start);
58414 end = ELF_PAGEALIGN(end);
58415 if (end > start) {
58416@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
58417 if (BAD_ADDR(addr))
58418 return addr;
58419 }
58420- current->mm->start_brk = current->mm->brk = end;
58421+ current->mm->start_brk = current->mm->brk = e;
58422 return 0;
58423 }
58424
58425@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58426 elf_addr_t __user *u_rand_bytes;
58427 const char *k_platform = ELF_PLATFORM;
58428 const char *k_base_platform = ELF_BASE_PLATFORM;
58429- unsigned char k_rand_bytes[16];
58430+ u32 k_rand_bytes[4];
58431 int items;
58432 elf_addr_t *elf_info;
58433 int ei_index = 0;
58434 const struct cred *cred = current_cred();
58435 struct vm_area_struct *vma;
58436+ unsigned long saved_auxv[AT_VECTOR_SIZE];
58437
58438 /*
58439 * In some cases (e.g. Hyper-Threading), we want to avoid L1
58440@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58441 * Generate 16 random bytes for userspace PRNG seeding.
58442 */
58443 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
58444- u_rand_bytes = (elf_addr_t __user *)
58445- STACK_ALLOC(p, sizeof(k_rand_bytes));
58446+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
58447+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
58448+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
58449+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
58450+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
58451+ u_rand_bytes = (elf_addr_t __user *) p;
58452 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
58453 return -EFAULT;
58454
58455@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58456 return -EFAULT;
58457 current->mm->env_end = p;
58458
58459+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
58460+
58461 /* Put the elf_info on the stack in the right place. */
58462 sp = (elf_addr_t __user *)envp + 1;
58463- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
58464+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
58465 return -EFAULT;
58466 return 0;
58467 }
58468@@ -514,14 +541,14 @@ static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
58469 an ELF header */
58470
58471 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58472- struct file *interpreter, unsigned long *interp_map_addr,
58473+ struct file *interpreter,
58474 unsigned long no_base, struct elf_phdr *interp_elf_phdata)
58475 {
58476 struct elf_phdr *eppnt;
58477- unsigned long load_addr = 0;
58478+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
58479 int load_addr_set = 0;
58480 unsigned long last_bss = 0, elf_bss = 0;
58481- unsigned long error = ~0UL;
58482+ unsigned long error = -EINVAL;
58483 unsigned long total_size;
58484 int i;
58485
58486@@ -541,6 +568,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58487 goto out;
58488 }
58489
58490+#ifdef CONFIG_PAX_SEGMEXEC
58491+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
58492+ pax_task_size = SEGMEXEC_TASK_SIZE;
58493+#endif
58494+
58495 eppnt = interp_elf_phdata;
58496 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
58497 if (eppnt->p_type == PT_LOAD) {
58498@@ -564,8 +596,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58499 map_addr = elf_map(interpreter, load_addr + vaddr,
58500 eppnt, elf_prot, elf_type, total_size);
58501 total_size = 0;
58502- if (!*interp_map_addr)
58503- *interp_map_addr = map_addr;
58504 error = map_addr;
58505 if (BAD_ADDR(map_addr))
58506 goto out;
58507@@ -584,8 +614,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58508 k = load_addr + eppnt->p_vaddr;
58509 if (BAD_ADDR(k) ||
58510 eppnt->p_filesz > eppnt->p_memsz ||
58511- eppnt->p_memsz > TASK_SIZE ||
58512- TASK_SIZE - eppnt->p_memsz < k) {
58513+ eppnt->p_memsz > pax_task_size ||
58514+ pax_task_size - eppnt->p_memsz < k) {
58515 error = -ENOMEM;
58516 goto out;
58517 }
58518@@ -624,9 +654,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58519 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
58520
58521 /* Map the last of the bss segment */
58522- error = vm_brk(elf_bss, last_bss - elf_bss);
58523- if (BAD_ADDR(error))
58524- goto out;
58525+ if (last_bss > elf_bss) {
58526+ error = vm_brk(elf_bss, last_bss - elf_bss);
58527+ if (BAD_ADDR(error))
58528+ goto out;
58529+ }
58530 }
58531
58532 error = load_addr;
58533@@ -634,6 +666,336 @@ out:
58534 return error;
58535 }
58536
58537+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58538+#ifdef CONFIG_PAX_SOFTMODE
58539+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
58540+{
58541+ unsigned long pax_flags = 0UL;
58542+
58543+#ifdef CONFIG_PAX_PAGEEXEC
58544+ if (elf_phdata->p_flags & PF_PAGEEXEC)
58545+ pax_flags |= MF_PAX_PAGEEXEC;
58546+#endif
58547+
58548+#ifdef CONFIG_PAX_SEGMEXEC
58549+ if (elf_phdata->p_flags & PF_SEGMEXEC)
58550+ pax_flags |= MF_PAX_SEGMEXEC;
58551+#endif
58552+
58553+#ifdef CONFIG_PAX_EMUTRAMP
58554+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58555+ pax_flags |= MF_PAX_EMUTRAMP;
58556+#endif
58557+
58558+#ifdef CONFIG_PAX_MPROTECT
58559+ if (elf_phdata->p_flags & PF_MPROTECT)
58560+ pax_flags |= MF_PAX_MPROTECT;
58561+#endif
58562+
58563+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58564+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
58565+ pax_flags |= MF_PAX_RANDMMAP;
58566+#endif
58567+
58568+ return pax_flags;
58569+}
58570+#endif
58571+
58572+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
58573+{
58574+ unsigned long pax_flags = 0UL;
58575+
58576+#ifdef CONFIG_PAX_PAGEEXEC
58577+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
58578+ pax_flags |= MF_PAX_PAGEEXEC;
58579+#endif
58580+
58581+#ifdef CONFIG_PAX_SEGMEXEC
58582+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
58583+ pax_flags |= MF_PAX_SEGMEXEC;
58584+#endif
58585+
58586+#ifdef CONFIG_PAX_EMUTRAMP
58587+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
58588+ pax_flags |= MF_PAX_EMUTRAMP;
58589+#endif
58590+
58591+#ifdef CONFIG_PAX_MPROTECT
58592+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
58593+ pax_flags |= MF_PAX_MPROTECT;
58594+#endif
58595+
58596+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58597+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
58598+ pax_flags |= MF_PAX_RANDMMAP;
58599+#endif
58600+
58601+ return pax_flags;
58602+}
58603+#endif
58604+
58605+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
58606+#ifdef CONFIG_PAX_SOFTMODE
58607+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
58608+{
58609+ unsigned long pax_flags = 0UL;
58610+
58611+#ifdef CONFIG_PAX_PAGEEXEC
58612+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
58613+ pax_flags |= MF_PAX_PAGEEXEC;
58614+#endif
58615+
58616+#ifdef CONFIG_PAX_SEGMEXEC
58617+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
58618+ pax_flags |= MF_PAX_SEGMEXEC;
58619+#endif
58620+
58621+#ifdef CONFIG_PAX_EMUTRAMP
58622+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58623+ pax_flags |= MF_PAX_EMUTRAMP;
58624+#endif
58625+
58626+#ifdef CONFIG_PAX_MPROTECT
58627+ if (pax_flags_softmode & MF_PAX_MPROTECT)
58628+ pax_flags |= MF_PAX_MPROTECT;
58629+#endif
58630+
58631+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58632+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
58633+ pax_flags |= MF_PAX_RANDMMAP;
58634+#endif
58635+
58636+ return pax_flags;
58637+}
58638+#endif
58639+
58640+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
58641+{
58642+ unsigned long pax_flags = 0UL;
58643+
58644+#ifdef CONFIG_PAX_PAGEEXEC
58645+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
58646+ pax_flags |= MF_PAX_PAGEEXEC;
58647+#endif
58648+
58649+#ifdef CONFIG_PAX_SEGMEXEC
58650+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
58651+ pax_flags |= MF_PAX_SEGMEXEC;
58652+#endif
58653+
58654+#ifdef CONFIG_PAX_EMUTRAMP
58655+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
58656+ pax_flags |= MF_PAX_EMUTRAMP;
58657+#endif
58658+
58659+#ifdef CONFIG_PAX_MPROTECT
58660+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
58661+ pax_flags |= MF_PAX_MPROTECT;
58662+#endif
58663+
58664+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58665+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
58666+ pax_flags |= MF_PAX_RANDMMAP;
58667+#endif
58668+
58669+ return pax_flags;
58670+}
58671+#endif
58672+
58673+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58674+static unsigned long pax_parse_defaults(void)
58675+{
58676+ unsigned long pax_flags = 0UL;
58677+
58678+#ifdef CONFIG_PAX_SOFTMODE
58679+ if (pax_softmode)
58680+ return pax_flags;
58681+#endif
58682+
58683+#ifdef CONFIG_PAX_PAGEEXEC
58684+ pax_flags |= MF_PAX_PAGEEXEC;
58685+#endif
58686+
58687+#ifdef CONFIG_PAX_SEGMEXEC
58688+ pax_flags |= MF_PAX_SEGMEXEC;
58689+#endif
58690+
58691+#ifdef CONFIG_PAX_MPROTECT
58692+ pax_flags |= MF_PAX_MPROTECT;
58693+#endif
58694+
58695+#ifdef CONFIG_PAX_RANDMMAP
58696+ if (randomize_va_space)
58697+ pax_flags |= MF_PAX_RANDMMAP;
58698+#endif
58699+
58700+ return pax_flags;
58701+}
58702+
58703+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
58704+{
58705+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
58706+
58707+#ifdef CONFIG_PAX_EI_PAX
58708+
58709+#ifdef CONFIG_PAX_SOFTMODE
58710+ if (pax_softmode)
58711+ return pax_flags;
58712+#endif
58713+
58714+ pax_flags = 0UL;
58715+
58716+#ifdef CONFIG_PAX_PAGEEXEC
58717+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
58718+ pax_flags |= MF_PAX_PAGEEXEC;
58719+#endif
58720+
58721+#ifdef CONFIG_PAX_SEGMEXEC
58722+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
58723+ pax_flags |= MF_PAX_SEGMEXEC;
58724+#endif
58725+
58726+#ifdef CONFIG_PAX_EMUTRAMP
58727+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
58728+ pax_flags |= MF_PAX_EMUTRAMP;
58729+#endif
58730+
58731+#ifdef CONFIG_PAX_MPROTECT
58732+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
58733+ pax_flags |= MF_PAX_MPROTECT;
58734+#endif
58735+
58736+#ifdef CONFIG_PAX_ASLR
58737+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
58738+ pax_flags |= MF_PAX_RANDMMAP;
58739+#endif
58740+
58741+#endif
58742+
58743+ return pax_flags;
58744+
58745+}
58746+
58747+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
58748+{
58749+
58750+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58751+ unsigned long i;
58752+
58753+ for (i = 0UL; i < elf_ex->e_phnum; i++)
58754+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
58755+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
58756+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
58757+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
58758+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
58759+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
58760+ return PAX_PARSE_FLAGS_FALLBACK;
58761+
58762+#ifdef CONFIG_PAX_SOFTMODE
58763+ if (pax_softmode)
58764+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
58765+ else
58766+#endif
58767+
58768+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
58769+ break;
58770+ }
58771+#endif
58772+
58773+ return PAX_PARSE_FLAGS_FALLBACK;
58774+}
58775+
58776+static unsigned long pax_parse_xattr_pax(struct file * const file)
58777+{
58778+
58779+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
58780+ ssize_t xattr_size, i;
58781+ unsigned char xattr_value[sizeof("pemrs") - 1];
58782+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
58783+
58784+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
58785+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
58786+ return PAX_PARSE_FLAGS_FALLBACK;
58787+
58788+ for (i = 0; i < xattr_size; i++)
58789+ switch (xattr_value[i]) {
58790+ default:
58791+ return PAX_PARSE_FLAGS_FALLBACK;
58792+
58793+#define parse_flag(option1, option2, flag) \
58794+ case option1: \
58795+ if (pax_flags_hardmode & MF_PAX_##flag) \
58796+ return PAX_PARSE_FLAGS_FALLBACK;\
58797+ pax_flags_hardmode |= MF_PAX_##flag; \
58798+ break; \
58799+ case option2: \
58800+ if (pax_flags_softmode & MF_PAX_##flag) \
58801+ return PAX_PARSE_FLAGS_FALLBACK;\
58802+ pax_flags_softmode |= MF_PAX_##flag; \
58803+ break;
58804+
58805+ parse_flag('p', 'P', PAGEEXEC);
58806+ parse_flag('e', 'E', EMUTRAMP);
58807+ parse_flag('m', 'M', MPROTECT);
58808+ parse_flag('r', 'R', RANDMMAP);
58809+ parse_flag('s', 'S', SEGMEXEC);
58810+
58811+#undef parse_flag
58812+ }
58813+
58814+ if (pax_flags_hardmode & pax_flags_softmode)
58815+ return PAX_PARSE_FLAGS_FALLBACK;
58816+
58817+#ifdef CONFIG_PAX_SOFTMODE
58818+ if (pax_softmode)
58819+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
58820+ else
58821+#endif
58822+
58823+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
58824+#else
58825+ return PAX_PARSE_FLAGS_FALLBACK;
58826+#endif
58827+
58828+}
58829+
58830+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
58831+{
58832+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
58833+
58834+ pax_flags = pax_parse_defaults();
58835+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
58836+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
58837+ xattr_pax_flags = pax_parse_xattr_pax(file);
58838+
58839+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
58840+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
58841+ pt_pax_flags != xattr_pax_flags)
58842+ return -EINVAL;
58843+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58844+ pax_flags = xattr_pax_flags;
58845+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58846+ pax_flags = pt_pax_flags;
58847+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
58848+ pax_flags = ei_pax_flags;
58849+
58850+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
58851+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58852+ if ((__supported_pte_mask & _PAGE_NX))
58853+ pax_flags &= ~MF_PAX_SEGMEXEC;
58854+ else
58855+ pax_flags &= ~MF_PAX_PAGEEXEC;
58856+ }
58857+#endif
58858+
58859+ if (0 > pax_check_flags(&pax_flags))
58860+ return -EINVAL;
58861+
58862+ current->mm->pax_flags = pax_flags;
58863+ return 0;
58864+}
58865+#endif
58866+
58867 /*
58868 * These are the functions used to load ELF style executables and shared
58869 * libraries. There is no binary dependent code anywhere else.
58870@@ -647,6 +1009,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
58871 {
58872 unsigned long random_variable = 0;
58873
58874+#ifdef CONFIG_PAX_RANDUSTACK
58875+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
58876+ return stack_top - current->mm->delta_stack;
58877+#endif
58878+
58879 if ((current->flags & PF_RANDOMIZE) &&
58880 !(current->personality & ADDR_NO_RANDOMIZE)) {
58881 random_variable = (unsigned long) get_random_int();
58882@@ -666,7 +1033,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
58883 unsigned long load_addr = 0, load_bias = 0;
58884 int load_addr_set = 0;
58885 char * elf_interpreter = NULL;
58886- unsigned long error;
58887+ unsigned long error = 0;
58888 struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
58889 unsigned long elf_bss, elf_brk;
58890 int retval, i;
58891@@ -681,6 +1048,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
58892 struct elfhdr interp_elf_ex;
58893 } *loc;
58894 struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
58895+ unsigned long pax_task_size;
58896
58897 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
58898 if (!loc) {
58899@@ -839,6 +1207,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
58900 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
58901 may depend on the personality. */
58902 SET_PERSONALITY2(loc->elf_ex, &arch_state);
58903+
58904+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58905+ current->mm->pax_flags = 0UL;
58906+#endif
58907+
58908+#ifdef CONFIG_PAX_DLRESOLVE
58909+ current->mm->call_dl_resolve = 0UL;
58910+#endif
58911+
58912+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
58913+ current->mm->call_syscall = 0UL;
58914+#endif
58915+
58916+#ifdef CONFIG_PAX_ASLR
58917+ current->mm->delta_mmap = 0UL;
58918+ current->mm->delta_stack = 0UL;
58919+#endif
58920+
58921+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58922+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
58923+ send_sig(SIGKILL, current, 0);
58924+ goto out_free_dentry;
58925+ }
58926+#endif
58927+
58928+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
58929+ pax_set_initial_flags(bprm);
58930+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
58931+ if (pax_set_initial_flags_func)
58932+ (pax_set_initial_flags_func)(bprm);
58933+#endif
58934+
58935+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
58936+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
58937+ current->mm->context.user_cs_limit = PAGE_SIZE;
58938+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
58939+ }
58940+#endif
58941+
58942+#ifdef CONFIG_PAX_SEGMEXEC
58943+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
58944+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
58945+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
58946+ pax_task_size = SEGMEXEC_TASK_SIZE;
58947+ current->mm->def_flags |= VM_NOHUGEPAGE;
58948+ } else
58949+#endif
58950+
58951+ pax_task_size = TASK_SIZE;
58952+
58953+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
58954+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58955+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
58956+ put_cpu();
58957+ }
58958+#endif
58959+
58960+#ifdef CONFIG_PAX_ASLR
58961+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
58962+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
58963+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
58964+ }
58965+#endif
58966+
58967+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
58968+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
58969+ executable_stack = EXSTACK_DISABLE_X;
58970+ current->personality &= ~READ_IMPLIES_EXEC;
58971+ } else
58972+#endif
58973+
58974 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
58975 current->personality |= READ_IMPLIES_EXEC;
58976
58977@@ -924,6 +1363,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
58978 #else
58979 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
58980 #endif
58981+
58982+#ifdef CONFIG_PAX_RANDMMAP
58983+ /* PaX: randomize base address at the default exe base if requested */
58984+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
58985+#ifdef CONFIG_SPARC64
58986+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
58987+#else
58988+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
58989+#endif
58990+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
58991+ elf_flags |= MAP_FIXED;
58992+ }
58993+#endif
58994+
58995 }
58996
58997 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
58998@@ -955,9 +1408,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
58999 * allowed task size. Note that p_filesz must always be
59000 * <= p_memsz so it is only necessary to check p_memsz.
59001 */
59002- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
59003- elf_ppnt->p_memsz > TASK_SIZE ||
59004- TASK_SIZE - elf_ppnt->p_memsz < k) {
59005+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
59006+ elf_ppnt->p_memsz > pax_task_size ||
59007+ pax_task_size - elf_ppnt->p_memsz < k) {
59008 /* set_brk can never work. Avoid overflows. */
59009 retval = -EINVAL;
59010 goto out_free_dentry;
59011@@ -993,16 +1446,43 @@ static int load_elf_binary(struct linux_binprm *bprm)
59012 if (retval)
59013 goto out_free_dentry;
59014 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
59015- retval = -EFAULT; /* Nobody gets to see this, but.. */
59016- goto out_free_dentry;
59017+ /*
59018+ * This bss-zeroing can fail if the ELF
59019+ * file specifies odd protections. So
59020+ * we don't check the return value
59021+ */
59022 }
59023
59024+#ifdef CONFIG_PAX_RANDMMAP
59025+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
59026+ unsigned long start, size, flags;
59027+ vm_flags_t vm_flags;
59028+
59029+ start = ELF_PAGEALIGN(elf_brk);
59030+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
59031+ flags = MAP_FIXED | MAP_PRIVATE;
59032+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
59033+
59034+ down_write(&current->mm->mmap_sem);
59035+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
59036+ retval = -ENOMEM;
59037+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
59038+// if (current->personality & ADDR_NO_RANDOMIZE)
59039+// vm_flags |= VM_READ | VM_MAYREAD;
59040+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
59041+ retval = IS_ERR_VALUE(start) ? start : 0;
59042+ }
59043+ up_write(&current->mm->mmap_sem);
59044+ if (retval == 0)
59045+ retval = set_brk(start + size, start + size + PAGE_SIZE);
59046+ if (retval < 0)
59047+ goto out_free_dentry;
59048+ }
59049+#endif
59050+
59051 if (elf_interpreter) {
59052- unsigned long interp_map_addr = 0;
59053-
59054 elf_entry = load_elf_interp(&loc->interp_elf_ex,
59055 interpreter,
59056- &interp_map_addr,
59057 load_bias, interp_elf_phdata);
59058 if (!IS_ERR((void *)elf_entry)) {
59059 /*
59060@@ -1230,7 +1710,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
59061 * Decide what to dump of a segment, part, all or none.
59062 */
59063 static unsigned long vma_dump_size(struct vm_area_struct *vma,
59064- unsigned long mm_flags)
59065+ unsigned long mm_flags, long signr)
59066 {
59067 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
59068
59069@@ -1268,7 +1748,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
59070 if (vma->vm_file == NULL)
59071 return 0;
59072
59073- if (FILTER(MAPPED_PRIVATE))
59074+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
59075 goto whole;
59076
59077 /*
59078@@ -1475,9 +1955,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
59079 {
59080 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
59081 int i = 0;
59082- do
59083+ do {
59084 i += 2;
59085- while (auxv[i - 2] != AT_NULL);
59086+ } while (auxv[i - 2] != AT_NULL);
59087 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
59088 }
59089
59090@@ -1486,7 +1966,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
59091 {
59092 mm_segment_t old_fs = get_fs();
59093 set_fs(KERNEL_DS);
59094- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
59095+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
59096 set_fs(old_fs);
59097 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
59098 }
59099@@ -2206,7 +2686,7 @@ static int elf_core_dump(struct coredump_params *cprm)
59100 vma = next_vma(vma, gate_vma)) {
59101 unsigned long dump_size;
59102
59103- dump_size = vma_dump_size(vma, cprm->mm_flags);
59104+ dump_size = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
59105 vma_filesz[i++] = dump_size;
59106 vma_data_size += dump_size;
59107 }
59108@@ -2314,6 +2794,167 @@ out:
59109
59110 #endif /* CONFIG_ELF_CORE */
59111
59112+#ifdef CONFIG_PAX_MPROTECT
59113+/* PaX: non-PIC ELF libraries need relocations on their executable segments,
59114+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
59115+ * we'll remove VM_MAYWRITE for good on RELRO segments.
59116+ *
59117+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
59118+ * basis because we want to allow the common case and not the special ones.
59119+ */
59120+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
59121+{
59122+ struct elfhdr elf_h;
59123+ struct elf_phdr elf_p;
59124+ unsigned long i;
59125+ unsigned long oldflags;
59126+ bool is_textrel_rw, is_textrel_rx, is_relro;
59127+
59128+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
59129+ return;
59130+
59131+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
59132+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
59133+
59134+#ifdef CONFIG_PAX_ELFRELOCS
59135+ /* possible TEXTREL */
59136+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
59137+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
59138+#else
59139+ is_textrel_rw = false;
59140+ is_textrel_rx = false;
59141+#endif
59142+
59143+ /* possible RELRO */
59144+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
59145+
59146+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
59147+ return;
59148+
59149+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
59150+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
59151+
59152+#ifdef CONFIG_PAX_ETEXECRELOCS
59153+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
59154+#else
59155+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
59156+#endif
59157+
59158+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
59159+ !elf_check_arch(&elf_h) ||
59160+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
59161+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
59162+ return;
59163+
59164+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59165+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59166+ return;
59167+ switch (elf_p.p_type) {
59168+ case PT_DYNAMIC:
59169+ if (!is_textrel_rw && !is_textrel_rx)
59170+ continue;
59171+ i = 0UL;
59172+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
59173+ elf_dyn dyn;
59174+
59175+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
59176+ break;
59177+ if (dyn.d_tag == DT_NULL)
59178+ break;
59179+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
59180+ gr_log_textrel(vma);
59181+ if (is_textrel_rw)
59182+ vma->vm_flags |= VM_MAYWRITE;
59183+ else
59184+ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
59185+ vma->vm_flags &= ~VM_MAYWRITE;
59186+ break;
59187+ }
59188+ i++;
59189+ }
59190+ is_textrel_rw = false;
59191+ is_textrel_rx = false;
59192+ continue;
59193+
59194+ case PT_GNU_RELRO:
59195+ if (!is_relro)
59196+ continue;
59197+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
59198+ vma->vm_flags &= ~VM_MAYWRITE;
59199+ is_relro = false;
59200+ continue;
59201+
59202+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59203+ case PT_PAX_FLAGS: {
59204+ const char *msg_mprotect = "", *msg_emutramp = "";
59205+ char *buffer_lib, *buffer_exe;
59206+
59207+ if (elf_p.p_flags & PF_NOMPROTECT)
59208+ msg_mprotect = "MPROTECT disabled";
59209+
59210+#ifdef CONFIG_PAX_EMUTRAMP
59211+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
59212+ msg_emutramp = "EMUTRAMP enabled";
59213+#endif
59214+
59215+ if (!msg_mprotect[0] && !msg_emutramp[0])
59216+ continue;
59217+
59218+ if (!printk_ratelimit())
59219+ continue;
59220+
59221+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
59222+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
59223+ if (buffer_lib && buffer_exe) {
59224+ char *path_lib, *path_exe;
59225+
59226+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
59227+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
59228+
59229+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
59230+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
59231+
59232+ }
59233+ free_page((unsigned long)buffer_exe);
59234+ free_page((unsigned long)buffer_lib);
59235+ continue;
59236+ }
59237+#endif
59238+
59239+ }
59240+ }
59241+}
59242+#endif
59243+
59244+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59245+
59246+extern int grsec_enable_log_rwxmaps;
59247+
59248+static void elf_handle_mmap(struct file *file)
59249+{
59250+ struct elfhdr elf_h;
59251+ struct elf_phdr elf_p;
59252+ unsigned long i;
59253+
59254+ if (!grsec_enable_log_rwxmaps)
59255+ return;
59256+
59257+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
59258+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
59259+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
59260+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
59261+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
59262+ return;
59263+
59264+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59265+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59266+ return;
59267+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
59268+ gr_log_ptgnustack(file);
59269+ }
59270+}
59271+#endif
59272+
59273 static int __init init_elf_binfmt(void)
59274 {
59275 register_binfmt(&elf_format);
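
The bulk of the binfmt_elf.c additions implement a single policy decision: where a process's PaX flags come from. pax_parse_pax_flags() starts from compiled-in defaults, then lets EI_PAX header bits, a PT_PAX_FLAGS program header, and an extended-attribute marking override one another in increasing order of precedence, and refuses to execute a binary whose PT_PAX and xattr markings disagree. Condensed sketch (FALLBACK stands in for PAX_PARSE_FLAGS_FALLBACK):

#define FALLBACK (~0UL) /* "this source provided no flags" */

static long resolve_pax_flags(unsigned long defaults, unsigned long ei_pax,
                              unsigned long pt_pax, unsigned long xattr,
                              unsigned long *out)
{
    unsigned long flags = defaults;

    /* two explicit markings that contradict each other are fatal */
    if (pt_pax != FALLBACK && xattr != FALLBACK && pt_pax != xattr)
        return -22;     /* -EINVAL */

    if (xattr != FALLBACK)
        flags = xattr;
    else if (pt_pax != FALLBACK)
        flags = pt_pax;
    else if (ei_pax != FALLBACK)
        flags = ei_pax;

    *out = flags;
    return 0;
}
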
59276diff --git a/fs/block_dev.c b/fs/block_dev.c
59277index b48c41b..e070416 100644
59278--- a/fs/block_dev.c
59279+++ b/fs/block_dev.c
59280@@ -703,7 +703,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
59281 else if (bdev->bd_contains == bdev)
59282 return true; /* is a whole device which isn't held */
59283
59284- else if (whole->bd_holder == bd_may_claim)
59285+ else if (whole->bd_holder == (void *)bd_may_claim)
59286 return true; /* is a partition of a device that is being partitioned */
59287 else if (whole->bd_holder != NULL)
59288 return false; /* is a partition of a held device */
59289diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
59290index f54511d..58acdec 100644
59291--- a/fs/btrfs/ctree.c
59292+++ b/fs/btrfs/ctree.c
59293@@ -1173,9 +1173,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
59294 free_extent_buffer(buf);
59295 add_root_to_dirty_list(root);
59296 } else {
59297- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
59298- parent_start = parent->start;
59299- else
59300+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
59301+ if (parent)
59302+ parent_start = parent->start;
59303+ else
59304+ parent_start = 0;
59305+ } else
59306 parent_start = 0;
59307
59308 WARN_ON(trans->transid != btrfs_header_generation(parent));
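
The __btrfs_cow_block() change is a plain robustness fix: in the relocation-tree branch, parent can be NULL just as the else branch already assumes, so it is now checked before the ->start dereference. The pattern in isolation:

struct extent_buf { unsigned long long start; };

static unsigned long long pick_parent_start(const struct extent_buf *parent,
                                            int is_reloc_root)
{
    /* never dereference a pointer a sibling path proves can be NULL */
    if (is_reloc_root && parent)
        return parent->start;
    return 0;
}
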
59309diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
59310index de4e70f..b41dc45 100644
59311--- a/fs/btrfs/delayed-inode.c
59312+++ b/fs/btrfs/delayed-inode.c
59313@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
59314
59315 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
59316 {
59317- int seq = atomic_inc_return(&delayed_root->items_seq);
59318+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
59319 if ((atomic_dec_return(&delayed_root->items) <
59320 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
59321 waitqueue_active(&delayed_root->wait))
59322@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
59323
59324 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
59325 {
59326- int val = atomic_read(&delayed_root->items_seq);
59327+ int val = atomic_read_unchecked(&delayed_root->items_seq);
59328
59329 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
59330 return 1;
59331@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
59332 int seq;
59333 int ret;
59334
59335- seq = atomic_read(&delayed_root->items_seq);
59336+ seq = atomic_read_unchecked(&delayed_root->items_seq);
59337
59338 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
59339 if (ret)
59340diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
59341index f70119f..ab5894d 100644
59342--- a/fs/btrfs/delayed-inode.h
59343+++ b/fs/btrfs/delayed-inode.h
59344@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
59345 */
59346 struct list_head prepare_list;
59347 atomic_t items; /* for delayed items */
59348- atomic_t items_seq; /* for delayed items */
59349+ atomic_unchecked_t items_seq; /* for delayed items */
59350 int nodes; /* for delayed nodes */
59351 wait_queue_head_t wait;
59352 };
59353@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
59354 struct btrfs_delayed_root *delayed_root)
59355 {
59356 atomic_set(&delayed_root->items, 0);
59357- atomic_set(&delayed_root->items_seq, 0);
59358+ atomic_set_unchecked(&delayed_root->items_seq, 0);
59359 delayed_root->nodes = 0;
59360 spin_lock_init(&delayed_root->lock);
59361 init_waitqueue_head(&delayed_root->wait);
59362diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
59363index 6f49b28..483410f 100644
59364--- a/fs/btrfs/super.c
59365+++ b/fs/btrfs/super.c
59366@@ -271,7 +271,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
59367 function, line, errstr);
59368 return;
59369 }
59370- ACCESS_ONCE(trans->transaction->aborted) = errno;
59371+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
59372 /* Wake up anybody who may be waiting on this transaction */
59373 wake_up(&root->fs_info->transaction_wait);
59374 wake_up(&root->fs_info->transaction_blocked_wait);
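
ACCESS_ONCE_RW appears wherever stock code wrote through ACCESS_ONCE(): grsecurity's constification makes the plain macro yield a const-qualified lvalue, so accidental stores fail to compile and writes must opt in explicitly. A simplified sketch of the pair:

#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

static int aborted;

static void mark_aborted(int err)
{
    ACCESS_ONCE_RW(aborted) = err;  /* ACCESS_ONCE(aborted) = err would not compile */
}

static int read_aborted(void)
{
    return ACCESS_ONCE(aborted);    /* reads stay on the const-qualified macro */
}
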
59375diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
59376index 92db3f6..898a561 100644
59377--- a/fs/btrfs/sysfs.c
59378+++ b/fs/btrfs/sysfs.c
59379@@ -472,7 +472,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
59380 for (set = 0; set < FEAT_MAX; set++) {
59381 int i;
59382 struct attribute *attrs[2];
59383- struct attribute_group agroup = {
59384+ attribute_group_no_const agroup = {
59385 .name = "features",
59386 .attrs = attrs,
59387 };
59388diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
59389index 2299bfd..4098e72 100644
59390--- a/fs/btrfs/tests/free-space-tests.c
59391+++ b/fs/btrfs/tests/free-space-tests.c
59392@@ -463,7 +463,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
59393 * extent entry.
59394 */
59395 use_bitmap_op = cache->free_space_ctl->op->use_bitmap;
59396- cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
59397+ pax_open_kernel();
59398+ *(void **)&cache->free_space_ctl->op->use_bitmap = test_use_bitmap;
59399+ pax_close_kernel();
59400
59401 /*
59402 * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
59403@@ -870,7 +872,9 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
59404 if (ret)
59405 return ret;
59406
59407- cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
59408+ pax_open_kernel();
59409+ *(void **)&cache->free_space_ctl->op->use_bitmap = use_bitmap_op;
59410+ pax_close_kernel();
59411 __btrfs_remove_free_space_cache(cache->free_space_ctl);
59412
59413 return 0;
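
This test-suite hunk shows the other half of constification: ops tables such as free_space_ctl->op live in read-only memory, so a legitimate one-off reassignment must lift write protection around the store. The shape of the call pattern, with the pax_open_kernel()/pax_close_kernel() pair reduced to comments (on x86 they toggle CR0.WP in the real patch):

struct fs_ops { int (*use_bitmap)(void *ctl); };

static int test_use_bitmap(void *ctl) { (void)ctl; return 1; }

static void install_test_hook(struct fs_ops *ops)
{
    /* pax_open_kernel();   -- make the read-only ops table writable */
    *(void **)&ops->use_bitmap = (void *)test_use_bitmap;
    /* pax_close_kernel();  -- re-protect it immediately */
}
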
59414diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
59415index 154990c..d0cf699 100644
59416--- a/fs/btrfs/tree-log.h
59417+++ b/fs/btrfs/tree-log.h
59418@@ -43,7 +43,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
59419 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
59420 struct btrfs_trans_handle *trans)
59421 {
59422- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
59423+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
59424 }
59425
59426 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
59427diff --git a/fs/buffer.c b/fs/buffer.c
59428index 20805db..2e8fc69 100644
59429--- a/fs/buffer.c
59430+++ b/fs/buffer.c
59431@@ -3417,7 +3417,7 @@ void __init buffer_init(void)
59432 bh_cachep = kmem_cache_create("buffer_head",
59433 sizeof(struct buffer_head), 0,
59434 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
59435- SLAB_MEM_SPREAD),
59436+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
59437 NULL);
59438
59439 /*
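
SLAB_NO_SANITIZE ties into PAX_MEMORY_SANITIZE, which wipes slab objects on free so stale data cannot leak to the next user; buffer_head is a hot, non-sensitive cache and opts out of the wipe for performance. Sanitize-on-free reduced to its essentials:

#include <stdlib.h>
#include <string.h>

#define SLAB_NO_SANITIZE 0x1UL

static void cache_free(void *obj, size_t size, unsigned long cache_flags)
{
    if (!(cache_flags & SLAB_NO_SANITIZE))
        memset(obj, 0, size);   /* poison before the object is reused */
    free(obj);
}
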
59440diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
59441index fbb08e9..0fda764 100644
59442--- a/fs/cachefiles/bind.c
59443+++ b/fs/cachefiles/bind.c
59444@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
59445 args);
59446
59447 /* start by checking things over */
59448- ASSERT(cache->fstop_percent >= 0 &&
59449- cache->fstop_percent < cache->fcull_percent &&
59450+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
59451 cache->fcull_percent < cache->frun_percent &&
59452 cache->frun_percent < 100);
59453
59454- ASSERT(cache->bstop_percent >= 0 &&
59455- cache->bstop_percent < cache->bcull_percent &&
59456+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
59457 cache->bcull_percent < cache->brun_percent &&
59458 cache->brun_percent < 100);
59459
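
The bind.c assertions shrink because the *_percent fields are unsigned; x >= 0 on an unsigned value is always true, so dropping it leaves only the comparisons that can actually fail. The surviving invariant:

/* stop < cull < run < 100 is the whole sanity condition once the
 * fields are unsigned; a "stop >= 0" clause would be dead code */
static int percents_sane(unsigned int stop, unsigned int cull, unsigned int run)
{
    return stop < cull && cull < run && run < 100;
}
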
59460diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
59461index ce1b115..4a6852c 100644
59462--- a/fs/cachefiles/daemon.c
59463+++ b/fs/cachefiles/daemon.c
59464@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
59465 if (n > buflen)
59466 return -EMSGSIZE;
59467
59468- if (copy_to_user(_buffer, buffer, n) != 0)
59469+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
59470 return -EFAULT;
59471
59472 return n;
59473@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
59474 if (test_bit(CACHEFILES_DEAD, &cache->flags))
59475 return -EIO;
59476
59477- if (datalen < 0 || datalen > PAGE_SIZE - 1)
59478+ if (datalen > PAGE_SIZE - 1)
59479 return -EOPNOTSUPP;
59480
59481 /* drag the command string into the kernel so we can parse it */
59482@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
59483 if (args[0] != '%' || args[1] != '\0')
59484 return -EINVAL;
59485
59486- if (fstop < 0 || fstop >= cache->fcull_percent)
59487+ if (fstop >= cache->fcull_percent)
59488 return cachefiles_daemon_range_error(cache, args);
59489
59490 cache->fstop_percent = fstop;
59491@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
59492 if (args[0] != '%' || args[1] != '\0')
59493 return -EINVAL;
59494
59495- if (bstop < 0 || bstop >= cache->bcull_percent)
59496+ if (bstop >= cache->bcull_percent)
59497 return cachefiles_daemon_range_error(cache, args);
59498
59499 cache->bstop_percent = bstop;
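
daemon.c gets the same dead-comparison cleanup plus one genuine hardening change: cachefiles_daemon_read() now bounds n against the kernel buffer's size before copy_to_user(), so a miscomputed length can no longer read past the stack buffer. The shape of the check:

#include <string.h>

static long bounded_read(char *dst, size_t dst_space,
                         const char *kbuf, size_t kbuf_size, size_t n)
{
    if (n > dst_space)
        return -90;     /* -EMSGSIZE, as in the original check */
    if (n > kbuf_size)
        return -14;     /* -EFAULT: never overread the kernel buffer */
    memcpy(dst, kbuf, n);
    return (long)n;
}
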
59500diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
59501index 8c52472..c4e3a69 100644
59502--- a/fs/cachefiles/internal.h
59503+++ b/fs/cachefiles/internal.h
59504@@ -66,7 +66,7 @@ struct cachefiles_cache {
59505 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
59506 struct rb_root active_nodes; /* active nodes (can't be culled) */
59507 rwlock_t active_lock; /* lock for active_nodes */
59508- atomic_t gravecounter; /* graveyard uniquifier */
59509+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
59510 unsigned frun_percent; /* when to stop culling (% files) */
59511 unsigned fcull_percent; /* when to start culling (% files) */
59512 unsigned fstop_percent; /* when to stop allocating (% files) */
59513@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
59514 * proc.c
59515 */
59516 #ifdef CONFIG_CACHEFILES_HISTOGRAM
59517-extern atomic_t cachefiles_lookup_histogram[HZ];
59518-extern atomic_t cachefiles_mkdir_histogram[HZ];
59519-extern atomic_t cachefiles_create_histogram[HZ];
59520+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59521+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59522+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
59523
59524 extern int __init cachefiles_proc_init(void);
59525 extern void cachefiles_proc_cleanup(void);
59526 static inline
59527-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
59528+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
59529 {
59530 unsigned long jif = jiffies - start_jif;
59531 if (jif >= HZ)
59532 jif = HZ - 1;
59533- atomic_inc(&histogram[jif]);
59534+ atomic_inc_unchecked(&histogram[jif]);
59535 }
59536
59537 #else
59538diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
59539index 7f8e83f..8951aa4 100644
59540--- a/fs/cachefiles/namei.c
59541+++ b/fs/cachefiles/namei.c
59542@@ -309,7 +309,7 @@ try_again:
59543 /* first step is to make up a grave dentry in the graveyard */
59544 sprintf(nbuffer, "%08x%08x",
59545 (uint32_t) get_seconds(),
59546- (uint32_t) atomic_inc_return(&cache->gravecounter));
59547+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
59548
59549 /* do the multiway lock magic */
59550 trap = lock_rename(cache->graveyard, dir);
59551diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
59552index eccd339..4c1d995 100644
59553--- a/fs/cachefiles/proc.c
59554+++ b/fs/cachefiles/proc.c
59555@@ -14,9 +14,9 @@
59556 #include <linux/seq_file.h>
59557 #include "internal.h"
59558
59559-atomic_t cachefiles_lookup_histogram[HZ];
59560-atomic_t cachefiles_mkdir_histogram[HZ];
59561-atomic_t cachefiles_create_histogram[HZ];
59562+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59563+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59564+atomic_unchecked_t cachefiles_create_histogram[HZ];
59565
59566 /*
59567 * display the latency histogram
59568@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
59569 return 0;
59570 default:
59571 index = (unsigned long) v - 3;
59572- x = atomic_read(&cachefiles_lookup_histogram[index]);
59573- y = atomic_read(&cachefiles_mkdir_histogram[index]);
59574- z = atomic_read(&cachefiles_create_histogram[index]);
59575+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
59576+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
59577+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
59578 if (x == 0 && y == 0 && z == 0)
59579 return 0;
59580
59581diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
59582index c241603..56bae60 100644
59583--- a/fs/ceph/dir.c
59584+++ b/fs/ceph/dir.c
59585@@ -129,6 +129,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
59586 struct dentry *dentry, *last;
59587 struct ceph_dentry_info *di;
59588 int err = 0;
59589+ char d_name[DNAME_INLINE_LEN];
59590+ const unsigned char *name;
59591
59592 /* claim ref on last dentry we returned */
59593 last = fi->dentry;
59594@@ -192,7 +194,12 @@ more:
59595
59596 dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
59597 dentry, dentry, dentry->d_inode);
59598- if (!dir_emit(ctx, dentry->d_name.name,
59599+ name = dentry->d_name.name;
59600+ if (name == dentry->d_iname) {
59601+ memcpy(d_name, name, dentry->d_name.len);
59602+ name = d_name;
59603+ }
59604+ if (!dir_emit(ctx, name,
59605 dentry->d_name.len,
59606 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
59607 dentry->d_inode->i_mode >> 12)) {
59608@@ -250,7 +257,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
59609 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
59610 struct ceph_mds_client *mdsc = fsc->mdsc;
59611 unsigned frag = fpos_frag(ctx->pos);
59612- int off = fpos_off(ctx->pos);
59613+ unsigned int off = fpos_off(ctx->pos);
59614 int err;
59615 u32 ftype;
59616 struct ceph_mds_reply_info_parsed *rinfo;
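
The __dcache_readdir() change snapshots short (inline) dentry names into a stack buffer before handing them to dir_emit(): an inline name can be rewritten in place by a concurrent rename, whereas the copy stays stable for the duration of the call; the second hunk makes the directory offset unsigned. A sketch of the snapshot idiom (the DNAME_INLINE_LEN value is illustrative, and the caller passes len alongside just as dir_emit() does):

#include <string.h>

#define DNAME_INLINE_LEN 32

static const char *stable_name(const char *live, const char *inline_buf,
                               size_t len, char snap[DNAME_INLINE_LEN])
{
    if (live == inline_buf) {   /* short name stored inside the dentry */
        memcpy(snap, live, len);
        return snap;            /* immune to an in-place rename */
    }
    return live;                /* long names get a fresh allocation anyway */
}
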
59617diff --git a/fs/ceph/super.c b/fs/ceph/super.c
59618index 50f06cd..c7eba3e 100644
59619--- a/fs/ceph/super.c
59620+++ b/fs/ceph/super.c
59621@@ -896,7 +896,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
59622 /*
59623 * construct our own bdi so we can control readahead, etc.
59624 */
59625-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
59626+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
59627
59628 static int ceph_register_bdi(struct super_block *sb,
59629 struct ceph_fs_client *fsc)
59630@@ -913,7 +913,7 @@ static int ceph_register_bdi(struct super_block *sb,
59631 default_backing_dev_info.ra_pages;
59632
59633 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
59634- atomic_long_inc_return(&bdi_seq));
59635+ atomic_long_inc_return_unchecked(&bdi_seq));
59636 if (!err)
59637 sb->s_bdi = &fsc->backing_dev_info;
59638 return err;
59639diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
59640index 7febcf2..62a5721 100644
59641--- a/fs/cifs/cifs_debug.c
59642+++ b/fs/cifs/cifs_debug.c
59643@@ -269,8 +269,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59644
59645 if (strtobool(&c, &bv) == 0) {
59646 #ifdef CONFIG_CIFS_STATS2
59647- atomic_set(&totBufAllocCount, 0);
59648- atomic_set(&totSmBufAllocCount, 0);
59649+ atomic_set_unchecked(&totBufAllocCount, 0);
59650+ atomic_set_unchecked(&totSmBufAllocCount, 0);
59651 #endif /* CONFIG_CIFS_STATS2 */
59652 spin_lock(&cifs_tcp_ses_lock);
59653 list_for_each(tmp1, &cifs_tcp_ses_list) {
59654@@ -283,7 +283,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59655 tcon = list_entry(tmp3,
59656 struct cifs_tcon,
59657 tcon_list);
59658- atomic_set(&tcon->num_smbs_sent, 0);
59659+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
59660 if (server->ops->clear_stats)
59661 server->ops->clear_stats(tcon);
59662 }
59663@@ -315,8 +315,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59664 smBufAllocCount.counter, cifs_min_small);
59665 #ifdef CONFIG_CIFS_STATS2
59666 seq_printf(m, "Total Large %d Small %d Allocations\n",
59667- atomic_read(&totBufAllocCount),
59668- atomic_read(&totSmBufAllocCount));
59669+ atomic_read_unchecked(&totBufAllocCount),
59670+ atomic_read_unchecked(&totSmBufAllocCount));
59671 #endif /* CONFIG_CIFS_STATS2 */
59672
59673 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
59674@@ -345,7 +345,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59675 if (tcon->need_reconnect)
59676 seq_puts(m, "\tDISCONNECTED ");
59677 seq_printf(m, "\nSMBs: %d",
59678- atomic_read(&tcon->num_smbs_sent));
59679+ atomic_read_unchecked(&tcon->num_smbs_sent));
59680 if (server->ops->print_stats)
59681 server->ops->print_stats(m, tcon);
59682 }
59683diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
59684index d72fe37..ded5511 100644
59685--- a/fs/cifs/cifsfs.c
59686+++ b/fs/cifs/cifsfs.c
59687@@ -1092,7 +1092,7 @@ cifs_init_request_bufs(void)
59688 */
59689 cifs_req_cachep = kmem_cache_create("cifs_request",
59690 CIFSMaxBufSize + max_hdr_size, 0,
59691- SLAB_HWCACHE_ALIGN, NULL);
59692+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
59693 if (cifs_req_cachep == NULL)
59694 return -ENOMEM;
59695
59696@@ -1119,7 +1119,7 @@ cifs_init_request_bufs(void)
59697 efficient to alloc 1 per page off the slab compared to 17K (5page)
59698 alloc of large cifs buffers even when page debugging is on */
59699 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
59700- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
59701+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
59702 NULL);
59703 if (cifs_sm_req_cachep == NULL) {
59704 mempool_destroy(cifs_req_poolp);
59705@@ -1204,8 +1204,8 @@ init_cifs(void)
59706 atomic_set(&bufAllocCount, 0);
59707 atomic_set(&smBufAllocCount, 0);
59708 #ifdef CONFIG_CIFS_STATS2
59709- atomic_set(&totBufAllocCount, 0);
59710- atomic_set(&totSmBufAllocCount, 0);
59711+ atomic_set_unchecked(&totBufAllocCount, 0);
59712+ atomic_set_unchecked(&totSmBufAllocCount, 0);
59713 #endif /* CONFIG_CIFS_STATS2 */
59714
59715 atomic_set(&midCount, 0);
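
[editor's note] The SLAB_USERCOPY additions above whitelist the two CIFS request caches for PAX_USERCOPY: with that feature enabled, copy_to_user()/copy_from_user() may only touch slab objects from caches that carry the flag, and CIFS request buffers are filled from and drained to user memory directly. A minimal sketch of whitelisting a cache, assuming a grsecurity tree that defines SLAB_USERCOPY; the cache name and init function here are hypothetical:

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/slab.h>

    static struct kmem_cache *example_cachep;

    static int __init example_init(void)
    {
            /* objects from this cache may be the source/target of user copies */
            example_cachep = kmem_cache_create("example_cache", 256, 0,
                                               SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
                                               NULL);
            return example_cachep ? 0 : -ENOMEM;
    }
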
59716diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
59717index 22b289a..bbbba08 100644
59718--- a/fs/cifs/cifsglob.h
59719+++ b/fs/cifs/cifsglob.h
59720@@ -823,35 +823,35 @@ struct cifs_tcon {
59721 __u16 Flags; /* optional support bits */
59722 enum statusEnum tidStatus;
59723 #ifdef CONFIG_CIFS_STATS
59724- atomic_t num_smbs_sent;
59725+ atomic_unchecked_t num_smbs_sent;
59726 union {
59727 struct {
59728- atomic_t num_writes;
59729- atomic_t num_reads;
59730- atomic_t num_flushes;
59731- atomic_t num_oplock_brks;
59732- atomic_t num_opens;
59733- atomic_t num_closes;
59734- atomic_t num_deletes;
59735- atomic_t num_mkdirs;
59736- atomic_t num_posixopens;
59737- atomic_t num_posixmkdirs;
59738- atomic_t num_rmdirs;
59739- atomic_t num_renames;
59740- atomic_t num_t2renames;
59741- atomic_t num_ffirst;
59742- atomic_t num_fnext;
59743- atomic_t num_fclose;
59744- atomic_t num_hardlinks;
59745- atomic_t num_symlinks;
59746- atomic_t num_locks;
59747- atomic_t num_acl_get;
59748- atomic_t num_acl_set;
59749+ atomic_unchecked_t num_writes;
59750+ atomic_unchecked_t num_reads;
59751+ atomic_unchecked_t num_flushes;
59752+ atomic_unchecked_t num_oplock_brks;
59753+ atomic_unchecked_t num_opens;
59754+ atomic_unchecked_t num_closes;
59755+ atomic_unchecked_t num_deletes;
59756+ atomic_unchecked_t num_mkdirs;
59757+ atomic_unchecked_t num_posixopens;
59758+ atomic_unchecked_t num_posixmkdirs;
59759+ atomic_unchecked_t num_rmdirs;
59760+ atomic_unchecked_t num_renames;
59761+ atomic_unchecked_t num_t2renames;
59762+ atomic_unchecked_t num_ffirst;
59763+ atomic_unchecked_t num_fnext;
59764+ atomic_unchecked_t num_fclose;
59765+ atomic_unchecked_t num_hardlinks;
59766+ atomic_unchecked_t num_symlinks;
59767+ atomic_unchecked_t num_locks;
59768+ atomic_unchecked_t num_acl_get;
59769+ atomic_unchecked_t num_acl_set;
59770 } cifs_stats;
59771 #ifdef CONFIG_CIFS_SMB2
59772 struct {
59773- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
59774- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
59775+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
59776+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
59777 } smb2_stats;
59778 #endif /* CONFIG_CIFS_SMB2 */
59779 } stats;
59780@@ -1198,7 +1198,7 @@ convert_delimiter(char *path, char delim)
59781 }
59782
59783 #ifdef CONFIG_CIFS_STATS
59784-#define cifs_stats_inc atomic_inc
59785+#define cifs_stats_inc atomic_inc_unchecked
59786
59787 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
59788 unsigned int bytes)
59789@@ -1565,8 +1565,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
59790 /* Various Debug counters */
59791 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
59792 #ifdef CONFIG_CIFS_STATS2
59793-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
59794-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
59795+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
59796+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
59797 #endif
59798 GLOBAL_EXTERN atomic_t smBufAllocCount;
59799 GLOBAL_EXTERN atomic_t midCount;
59800diff --git a/fs/cifs/file.c b/fs/cifs/file.c
59801index 3e30d92..2c9f066 100644
59802--- a/fs/cifs/file.c
59803+++ b/fs/cifs/file.c
59804@@ -2061,10 +2061,14 @@ static int cifs_writepages(struct address_space *mapping,
59805 index = mapping->writeback_index; /* Start from prev offset */
59806 end = -1;
59807 } else {
59808- index = wbc->range_start >> PAGE_CACHE_SHIFT;
59809- end = wbc->range_end >> PAGE_CACHE_SHIFT;
59810- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
59811+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
59812 range_whole = true;
59813+ index = 0;
59814+ end = ULONG_MAX;
59815+ } else {
59816+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
59817+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
59818+ }
59819 scanned = true;
59820 }
59821 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
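
[editor's note] The cifs_writepages() hunk above is a behavioral fix rather than a hardening annotation: for the whole-file case (range_start == 0, range_end == LLONG_MAX) the patched code pins index = 0 and end = ULONG_MAX explicitly instead of deriving them from the shifted range bounds, where the result only comes out right because LLONG_MAX >> PAGE_CACHE_SHIFT happens to truncate to ULONG_MAX in a 32-bit pgoff_t. A small demonstration of that shift:

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
            long long range_end = LLONG_MAX; /* wbc->range_end, "whole file" */
            unsigned int shift = 12;         /* PAGE_CACHE_SHIFT, typically */
            unsigned long end = range_end >> shift; /* pgoff_t is unsigned long */

            /* 64-bit: 0x7ffffffffffff; 32-bit: truncated to 0xffffffff,
             * which only happens to equal ULONG_MAX there */
            printf("end = %#lx, ULONG_MAX = %#lx\n", end, ULONG_MAX);
            return 0;
    }
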
59822diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
59823index 3379463..3af418a 100644
59824--- a/fs/cifs/misc.c
59825+++ b/fs/cifs/misc.c
59826@@ -170,7 +170,7 @@ cifs_buf_get(void)
59827 memset(ret_buf, 0, buf_size + 3);
59828 atomic_inc(&bufAllocCount);
59829 #ifdef CONFIG_CIFS_STATS2
59830- atomic_inc(&totBufAllocCount);
59831+ atomic_inc_unchecked(&totBufAllocCount);
59832 #endif /* CONFIG_CIFS_STATS2 */
59833 }
59834
59835@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
59836 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
59837 atomic_inc(&smBufAllocCount);
59838 #ifdef CONFIG_CIFS_STATS2
59839- atomic_inc(&totSmBufAllocCount);
59840+ atomic_inc_unchecked(&totSmBufAllocCount);
59841 #endif /* CONFIG_CIFS_STATS2 */
59842
59843 }
59844diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
59845index d297903..1cb7516 100644
59846--- a/fs/cifs/smb1ops.c
59847+++ b/fs/cifs/smb1ops.c
59848@@ -622,27 +622,27 @@ static void
59849 cifs_clear_stats(struct cifs_tcon *tcon)
59850 {
59851 #ifdef CONFIG_CIFS_STATS
59852- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
59853- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
59854- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
59855- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
59856- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
59857- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
59858- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
59859- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
59860- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
59861- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
59862- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
59863- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
59864- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
59865- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
59866- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
59867- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
59868- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
59869- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
59870- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
59871- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
59872- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
59873+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
59874+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
59875+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
59876+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
59877+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
59878+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
59879+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
59880+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
59881+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
59882+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
59883+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
59884+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
59885+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
59886+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
59887+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
59888+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
59889+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
59890+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
59891+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
59892+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
59893+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
59894 #endif
59895 }
59896
59897@@ -651,36 +651,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
59898 {
59899 #ifdef CONFIG_CIFS_STATS
59900 seq_printf(m, " Oplocks breaks: %d",
59901- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
59902+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
59903 seq_printf(m, "\nReads: %d Bytes: %llu",
59904- atomic_read(&tcon->stats.cifs_stats.num_reads),
59905+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
59906 (long long)(tcon->bytes_read));
59907 seq_printf(m, "\nWrites: %d Bytes: %llu",
59908- atomic_read(&tcon->stats.cifs_stats.num_writes),
59909+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
59910 (long long)(tcon->bytes_written));
59911 seq_printf(m, "\nFlushes: %d",
59912- atomic_read(&tcon->stats.cifs_stats.num_flushes));
59913+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
59914 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
59915- atomic_read(&tcon->stats.cifs_stats.num_locks),
59916- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
59917- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
59918+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
59919+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
59920+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
59921 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
59922- atomic_read(&tcon->stats.cifs_stats.num_opens),
59923- atomic_read(&tcon->stats.cifs_stats.num_closes),
59924- atomic_read(&tcon->stats.cifs_stats.num_deletes));
59925+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
59926+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
59927+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
59928 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
59929- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
59930- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
59931+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
59932+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
59933 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
59934- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
59935- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
59936+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
59937+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
59938 seq_printf(m, "\nRenames: %d T2 Renames %d",
59939- atomic_read(&tcon->stats.cifs_stats.num_renames),
59940- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
59941+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
59942+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
59943 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
59944- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
59945- atomic_read(&tcon->stats.cifs_stats.num_fnext),
59946- atomic_read(&tcon->stats.cifs_stats.num_fclose));
59947+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
59948+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
59949+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
59950 #endif
59951 }
59952
59953diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
59954index eab05e1..ffe5ea4 100644
59955--- a/fs/cifs/smb2ops.c
59956+++ b/fs/cifs/smb2ops.c
59957@@ -418,8 +418,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
59958 #ifdef CONFIG_CIFS_STATS
59959 int i;
59960 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
59961- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
59962- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
59963+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
59964+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
59965 }
59966 #endif
59967 }
59968@@ -459,65 +459,65 @@ static void
59969 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
59970 {
59971 #ifdef CONFIG_CIFS_STATS
59972- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
59973- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
59974+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
59975+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
59976 seq_printf(m, "\nNegotiates: %d sent %d failed",
59977- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
59978- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
59979+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
59980+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
59981 seq_printf(m, "\nSessionSetups: %d sent %d failed",
59982- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
59983- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
59984+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
59985+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
59986 seq_printf(m, "\nLogoffs: %d sent %d failed",
59987- atomic_read(&sent[SMB2_LOGOFF_HE]),
59988- atomic_read(&failed[SMB2_LOGOFF_HE]));
59989+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
59990+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
59991 seq_printf(m, "\nTreeConnects: %d sent %d failed",
59992- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
59993- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
59994+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
59995+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
59996 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
59997- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
59998- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
59999+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
60000+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
60001 seq_printf(m, "\nCreates: %d sent %d failed",
60002- atomic_read(&sent[SMB2_CREATE_HE]),
60003- atomic_read(&failed[SMB2_CREATE_HE]));
60004+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
60005+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
60006 seq_printf(m, "\nCloses: %d sent %d failed",
60007- atomic_read(&sent[SMB2_CLOSE_HE]),
60008- atomic_read(&failed[SMB2_CLOSE_HE]));
60009+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
60010+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
60011 seq_printf(m, "\nFlushes: %d sent %d failed",
60012- atomic_read(&sent[SMB2_FLUSH_HE]),
60013- atomic_read(&failed[SMB2_FLUSH_HE]));
60014+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
60015+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
60016 seq_printf(m, "\nReads: %d sent %d failed",
60017- atomic_read(&sent[SMB2_READ_HE]),
60018- atomic_read(&failed[SMB2_READ_HE]));
60019+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
60020+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
60021 seq_printf(m, "\nWrites: %d sent %d failed",
60022- atomic_read(&sent[SMB2_WRITE_HE]),
60023- atomic_read(&failed[SMB2_WRITE_HE]));
60024+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
60025+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
60026 seq_printf(m, "\nLocks: %d sent %d failed",
60027- atomic_read(&sent[SMB2_LOCK_HE]),
60028- atomic_read(&failed[SMB2_LOCK_HE]));
60029+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
60030+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
60031 seq_printf(m, "\nIOCTLs: %d sent %d failed",
60032- atomic_read(&sent[SMB2_IOCTL_HE]),
60033- atomic_read(&failed[SMB2_IOCTL_HE]));
60034+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
60035+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
60036 seq_printf(m, "\nCancels: %d sent %d failed",
60037- atomic_read(&sent[SMB2_CANCEL_HE]),
60038- atomic_read(&failed[SMB2_CANCEL_HE]));
60039+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
60040+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
60041 seq_printf(m, "\nEchos: %d sent %d failed",
60042- atomic_read(&sent[SMB2_ECHO_HE]),
60043- atomic_read(&failed[SMB2_ECHO_HE]));
60044+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
60045+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
60046 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
60047- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
60048- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
60049+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
60050+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
60051 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
60052- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
60053- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
60054+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
60055+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
60056 seq_printf(m, "\nQueryInfos: %d sent %d failed",
60057- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
60058- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
60059+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
60060+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
60061 seq_printf(m, "\nSetInfos: %d sent %d failed",
60062- atomic_read(&sent[SMB2_SET_INFO_HE]),
60063- atomic_read(&failed[SMB2_SET_INFO_HE]));
60064+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
60065+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
60066 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
60067- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
60068- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
60069+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
60070+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
60071 #endif
60072 }
60073
60074diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
60075index 3417340..b942390 100644
60076--- a/fs/cifs/smb2pdu.c
60077+++ b/fs/cifs/smb2pdu.c
60078@@ -2144,8 +2144,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
60079 default:
60080 cifs_dbg(VFS, "info level %u isn't supported\n",
60081 srch_inf->info_level);
60082- rc = -EINVAL;
60083- goto qdir_exit;
60084+ return -EINVAL;
60085 }
60086
60087 req->FileIndex = cpu_to_le32(index);
60088diff --git a/fs/coda/cache.c b/fs/coda/cache.c
60089index 46ee6f2..89a9e7f 100644
60090--- a/fs/coda/cache.c
60091+++ b/fs/coda/cache.c
60092@@ -24,7 +24,7 @@
60093 #include "coda_linux.h"
60094 #include "coda_cache.h"
60095
60096-static atomic_t permission_epoch = ATOMIC_INIT(0);
60097+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
60098
60099 /* replace or extend an acl cache hit */
60100 void coda_cache_enter(struct inode *inode, int mask)
60101@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
60102 struct coda_inode_info *cii = ITOC(inode);
60103
60104 spin_lock(&cii->c_lock);
60105- cii->c_cached_epoch = atomic_read(&permission_epoch);
60106+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
60107 if (!uid_eq(cii->c_uid, current_fsuid())) {
60108 cii->c_uid = current_fsuid();
60109 cii->c_cached_perm = mask;
60110@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
60111 {
60112 struct coda_inode_info *cii = ITOC(inode);
60113 spin_lock(&cii->c_lock);
60114- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
60115+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
60116 spin_unlock(&cii->c_lock);
60117 }
60118
60119 /* remove all acl caches */
60120 void coda_cache_clear_all(struct super_block *sb)
60121 {
60122- atomic_inc(&permission_epoch);
60123+ atomic_inc_unchecked(&permission_epoch);
60124 }
60125
60126
60127@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
60128 spin_lock(&cii->c_lock);
60129 hit = (mask & cii->c_cached_perm) == mask &&
60130 uid_eq(cii->c_uid, current_fsuid()) &&
60131- cii->c_cached_epoch == atomic_read(&permission_epoch);
60132+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
60133 spin_unlock(&cii->c_lock);
60134
60135 return hit;
60136diff --git a/fs/compat.c b/fs/compat.c
60137index 6fd272d..dd34ba2 100644
60138--- a/fs/compat.c
60139+++ b/fs/compat.c
60140@@ -54,7 +54,7 @@
60141 #include <asm/ioctls.h>
60142 #include "internal.h"
60143
60144-int compat_log = 1;
60145+int compat_log = 0;
60146
60147 int compat_printk(const char *fmt, ...)
60148 {
60149@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
60150
60151 set_fs(KERNEL_DS);
60152 /* The __user pointer cast is valid because of the set_fs() */
60153- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
60154+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
60155 set_fs(oldfs);
60156 /* truncating is ok because it's a user address */
60157 if (!ret)
60158@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
60159 goto out;
60160
60161 ret = -EINVAL;
60162- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
60163+ if (nr_segs > UIO_MAXIOV)
60164 goto out;
60165 if (nr_segs > fast_segs) {
60166 ret = -ENOMEM;
60167@@ -844,6 +844,7 @@ struct compat_old_linux_dirent {
60168 struct compat_readdir_callback {
60169 struct dir_context ctx;
60170 struct compat_old_linux_dirent __user *dirent;
60171+ struct file * file;
60172 int result;
60173 };
60174
60175@@ -863,6 +864,10 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
60176 buf->result = -EOVERFLOW;
60177 return -EOVERFLOW;
60178 }
60179+
60180+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60181+ return 0;
60182+
60183 buf->result++;
60184 dirent = buf->dirent;
60185 if (!access_ok(VERIFY_WRITE, dirent,
60186@@ -894,6 +899,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
60187 if (!f.file)
60188 return -EBADF;
60189
60190+ buf.file = f.file;
60191 error = iterate_dir(f.file, &buf.ctx);
60192 if (buf.result)
60193 error = buf.result;
60194@@ -913,6 +919,7 @@ struct compat_getdents_callback {
60195 struct dir_context ctx;
60196 struct compat_linux_dirent __user *current_dir;
60197 struct compat_linux_dirent __user *previous;
60198+ struct file * file;
60199 int count;
60200 int error;
60201 };
60202@@ -935,6 +942,10 @@ static int compat_filldir(struct dir_context *ctx, const char *name, int namlen,
60203 buf->error = -EOVERFLOW;
60204 return -EOVERFLOW;
60205 }
60206+
60207+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60208+ return 0;
60209+
60210 dirent = buf->previous;
60211 if (dirent) {
60212 if (__put_user(offset, &dirent->d_off))
60213@@ -980,6 +991,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
60214 if (!f.file)
60215 return -EBADF;
60216
60217+ buf.file = f.file;
60218 error = iterate_dir(f.file, &buf.ctx);
60219 if (error >= 0)
60220 error = buf.error;
60221@@ -1000,6 +1012,7 @@ struct compat_getdents_callback64 {
60222 struct dir_context ctx;
60223 struct linux_dirent64 __user *current_dir;
60224 struct linux_dirent64 __user *previous;
60225+ struct file * file;
60226 int count;
60227 int error;
60228 };
60229@@ -1018,6 +1031,10 @@ static int compat_filldir64(struct dir_context *ctx, const char *name,
60230 buf->error = -EINVAL; /* only used if we fail.. */
60231 if (reclen > buf->count)
60232 return -EINVAL;
60233+
60234+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60235+ return 0;
60236+
60237 dirent = buf->previous;
60238
60239 if (dirent) {
60240@@ -1067,6 +1084,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
60241 if (!f.file)
60242 return -EBADF;
60243
60244+ buf.file = f.file;
60245 error = iterate_dir(f.file, &buf.ctx);
60246 if (error >= 0)
60247 error = buf.error;
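
[editor's note] All three compat readdir paths above receive the same treatment: the struct file is stashed in the callback buffer so that gr_acl_handle_filldir() can consult the RBAC policy for every directory entry, and entries the policy hides are skipped by returning 0, which keeps iterate_dir() walking without copying the name out. A user-space model of the filtering callback; the allow predicate is a stand-in for the real policy lookup:

    #include <stdio.h>
    #include <string.h>

    /* stand-in for gr_acl_handle_filldir(): nonzero means "may be shown" */
    static int acl_allows(const char *name)
    {
            return strcmp(name, "hidden-by-policy") != 0;
    }

    /* model of compat_fillonedir(): returning 0 skips the entry while
     * letting the directory iteration continue with the next one */
    static int fillonedir(const char *name)
    {
            if (!acl_allows(name))
                    return 0;          /* suppressed, no error reported */
            printf("%s\n", name);      /* would be copied to the user buffer */
            return 0;
    }

    int main(void)
    {
            const char *entries[] = { ".", "..", "hidden-by-policy", "notes" };
            size_t i;

            for (i = 0; i < sizeof(entries) / sizeof(*entries); i++)
                    fillonedir(entries[i]);
            return 0;
    }
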
60248diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
60249index 4d24d17..4f8c09e 100644
60250--- a/fs/compat_binfmt_elf.c
60251+++ b/fs/compat_binfmt_elf.c
60252@@ -30,11 +30,13 @@
60253 #undef elf_phdr
60254 #undef elf_shdr
60255 #undef elf_note
60256+#undef elf_dyn
60257 #undef elf_addr_t
60258 #define elfhdr elf32_hdr
60259 #define elf_phdr elf32_phdr
60260 #define elf_shdr elf32_shdr
60261 #define elf_note elf32_note
60262+#define elf_dyn Elf32_Dyn
60263 #define elf_addr_t Elf32_Addr
60264
60265 /*
60266diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
60267index afec645..9c65620 100644
60268--- a/fs/compat_ioctl.c
60269+++ b/fs/compat_ioctl.c
60270@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
60271 return -EFAULT;
60272 if (__get_user(udata, &ss32->iomem_base))
60273 return -EFAULT;
60274- ss.iomem_base = compat_ptr(udata);
60275+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
60276 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
60277 __get_user(ss.port_high, &ss32->port_high))
60278 return -EFAULT;
60279@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
60280 for (i = 0; i < nmsgs; i++) {
60281 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
60282 return -EFAULT;
60283- if (get_user(datap, &umsgs[i].buf) ||
60284- put_user(compat_ptr(datap), &tmsgs[i].buf))
60285+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
60286+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
60287 return -EFAULT;
60288 }
60289 return sys_ioctl(fd, cmd, (unsigned long)tdata);
60290@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
60291 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
60292 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
60293 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
60294- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
60295+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
60296 return -EFAULT;
60297
60298 return ioctl_preallocate(file, p);
60299@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
60300 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
60301 {
60302 unsigned int a, b;
60303- a = *(unsigned int *)p;
60304- b = *(unsigned int *)q;
60305+ a = *(const unsigned int *)p;
60306+ b = *(const unsigned int *)q;
60307 if (a > b)
60308 return 1;
60309 if (a < b)
60310diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
60311index c9c298b..544d100 100644
60312--- a/fs/configfs/dir.c
60313+++ b/fs/configfs/dir.c
60314@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60315 }
60316 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
60317 struct configfs_dirent *next;
60318- const char *name;
60319+ const unsigned char * name;
60320+ char d_name[sizeof(next->s_dentry->d_iname)];
60321 int len;
60322 struct inode *inode = NULL;
60323
60324@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60325 continue;
60326
60327 name = configfs_get_name(next);
60328- len = strlen(name);
60329+ if (next->s_dentry && name == next->s_dentry->d_iname) {
60330+ len = next->s_dentry->d_name.len;
60331+ memcpy(d_name, name, len);
60332+ name = d_name;
60333+ } else
60334+ len = strlen(name);
60335
60336 /*
60337 * We'll have a dentry and an inode for
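
[editor's note] The configfs_readdir() change above closes a race with concurrent rename: when configfs_get_name() returns a pointer into the dentry's inline d_iname, the name is snapshotted into a local buffer using a length read once from d_name.len, so a rename cannot swap the string out from under strlen() or the later copy. The core of the idea, modeled in user space:

    #include <stdio.h>
    #include <string.h>

    #define DNAME_INLINE_LEN 32   /* stand-in for the real dentry constant */

    struct dentry_like {
            unsigned int len;                  /* d_name.len */
            char d_iname[DNAME_INLINE_LEN];    /* inline short-name storage */
    };

    int main(void)
    {
            struct dentry_like d = { 5, "hello" };
            char d_name[sizeof(d.d_iname)];
            unsigned int len;

            /* read the length once, then copy exactly that many bytes;
             * the local copy is immune to a concurrent rename */
            len = d.len;
            memcpy(d_name, d.d_iname, len);
            printf("%.*s (%u)\n", (int)len, d_name, len);
            return 0;
    }
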
60338diff --git a/fs/coredump.c b/fs/coredump.c
60339index b5c86ff..0dac262 100644
60340--- a/fs/coredump.c
60341+++ b/fs/coredump.c
60342@@ -450,8 +450,8 @@ static void wait_for_dump_helpers(struct file *file)
60343 struct pipe_inode_info *pipe = file->private_data;
60344
60345 pipe_lock(pipe);
60346- pipe->readers++;
60347- pipe->writers--;
60348+ atomic_inc(&pipe->readers);
60349+ atomic_dec(&pipe->writers);
60350 wake_up_interruptible_sync(&pipe->wait);
60351 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
60352 pipe_unlock(pipe);
60353@@ -460,11 +460,11 @@ static void wait_for_dump_helpers(struct file *file)
60354 * We actually want wait_event_freezable() but then we need
60355 * to clear TIF_SIGPENDING and improve dump_interrupted().
60356 */
60357- wait_event_interruptible(pipe->wait, pipe->readers == 1);
60358+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
60359
60360 pipe_lock(pipe);
60361- pipe->readers--;
60362- pipe->writers++;
60363+ atomic_dec(&pipe->readers);
60364+ atomic_inc(&pipe->writers);
60365 pipe_unlock(pipe);
60366 }
60367
60368@@ -511,7 +511,9 @@ void do_coredump(const siginfo_t *siginfo)
60369 struct files_struct *displaced;
60370 bool need_nonrelative = false;
60371 bool core_dumped = false;
60372- static atomic_t core_dump_count = ATOMIC_INIT(0);
60373+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
60374+ long signr = siginfo->si_signo;
60375+ int dumpable;
60376 struct coredump_params cprm = {
60377 .siginfo = siginfo,
60378 .regs = signal_pt_regs(),
60379@@ -524,12 +526,17 @@ void do_coredump(const siginfo_t *siginfo)
60380 .mm_flags = mm->flags,
60381 };
60382
60383- audit_core_dumps(siginfo->si_signo);
60384+ audit_core_dumps(signr);
60385+
60386+ dumpable = __get_dumpable(cprm.mm_flags);
60387+
60388+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
60389+ gr_handle_brute_attach(dumpable);
60390
60391 binfmt = mm->binfmt;
60392 if (!binfmt || !binfmt->core_dump)
60393 goto fail;
60394- if (!__get_dumpable(cprm.mm_flags))
60395+ if (!dumpable)
60396 goto fail;
60397
60398 cred = prepare_creds();
60399@@ -548,7 +555,7 @@ void do_coredump(const siginfo_t *siginfo)
60400 need_nonrelative = true;
60401 }
60402
60403- retval = coredump_wait(siginfo->si_signo, &core_state);
60404+ retval = coredump_wait(signr, &core_state);
60405 if (retval < 0)
60406 goto fail_creds;
60407
60408@@ -591,7 +598,7 @@ void do_coredump(const siginfo_t *siginfo)
60409 }
60410 cprm.limit = RLIM_INFINITY;
60411
60412- dump_count = atomic_inc_return(&core_dump_count);
60413+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
60414 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
60415 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
60416 task_tgid_vnr(current), current->comm);
60417@@ -623,6 +630,8 @@ void do_coredump(const siginfo_t *siginfo)
60418 } else {
60419 struct inode *inode;
60420
60421+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
60422+
60423 if (cprm.limit < binfmt->min_coredump)
60424 goto fail_unlock;
60425
60426@@ -681,7 +690,7 @@ close_fail:
60427 filp_close(cprm.file, NULL);
60428 fail_dropcount:
60429 if (ispipe)
60430- atomic_dec(&core_dump_count);
60431+ atomic_dec_unchecked(&core_dump_count);
60432 fail_unlock:
60433 kfree(cn.corename);
60434 coredump_finish(mm, core_dumped);
60435@@ -702,6 +711,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
60436 struct file *file = cprm->file;
60437 loff_t pos = file->f_pos;
60438 ssize_t n;
60439+
60440+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
60441 if (cprm->written + nr > cprm->limit)
60442 return 0;
60443 while (nr) {
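
[editor's note] Three things meet in the coredump hunks above: pipe->readers/writers become atomic_t tree-wide in this patch, so the dump-helper bookkeeping switches to atomic_inc/atomic_dec with atomic_read in the wait condition; the per-boot core_dump_count moves to the unchecked flavour; and crash signals (SIGSEGV, SIGBUS, SIGKILL, SIGILL) are reported to gr_handle_brute_attach() so repeated crashes can be throttled as a brute-force indicator. A user-space model of the atomic reader/writer handoff in wait_for_dump_helpers():

    #include <stdio.h>

    /* user-space stand-ins for the kernel atomics used above */
    typedef struct { volatile int counter; } atomic_t;
    #define atomic_read(v) ((v)->counter)
    #define atomic_inc(v)  ((void)__sync_add_and_fetch(&(v)->counter, 1))
    #define atomic_dec(v)  ((void)__sync_sub_and_fetch(&(v)->counter, 1))

    struct pipe_like { atomic_t readers, writers; };

    int main(void)
    {
            struct pipe_like p = { { 1 }, { 1 } };

            /* pose as a reader, give up the writer slot, then (in the
             * kernel) sleep until readers drops back to 1 */
            atomic_inc(&p.readers);
            atomic_dec(&p.writers);
            printf("readers=%d writers=%d\n",
                   atomic_read(&p.readers), atomic_read(&p.writers));

            /* helper has exited: restore the original state */
            atomic_dec(&p.readers);
            atomic_inc(&p.writers);
            return 0;
    }
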
60444diff --git a/fs/dcache.c b/fs/dcache.c
60445index e368d4f..b40ba59 100644
60446--- a/fs/dcache.c
60447+++ b/fs/dcache.c
60448@@ -508,7 +508,7 @@ static void __dentry_kill(struct dentry *dentry)
60449 * dentry_iput drops the locks, at which point nobody (except
60450 * transient RCU lookups) can reach this dentry.
60451 */
60452- BUG_ON((int)dentry->d_lockref.count > 0);
60453+ BUG_ON((int)__lockref_read(&dentry->d_lockref) > 0);
60454 this_cpu_dec(nr_dentry);
60455 if (dentry->d_op && dentry->d_op->d_release)
60456 dentry->d_op->d_release(dentry);
60457@@ -561,7 +561,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
60458 struct dentry *parent = dentry->d_parent;
60459 if (IS_ROOT(dentry))
60460 return NULL;
60461- if (unlikely((int)dentry->d_lockref.count < 0))
60462+ if (unlikely((int)__lockref_read(&dentry->d_lockref) < 0))
60463 return NULL;
60464 if (likely(spin_trylock(&parent->d_lock)))
60465 return parent;
60466@@ -638,7 +638,7 @@ repeat:
60467 dentry->d_flags |= DCACHE_REFERENCED;
60468 dentry_lru_add(dentry);
60469
60470- dentry->d_lockref.count--;
60471+ __lockref_dec(&dentry->d_lockref);
60472 spin_unlock(&dentry->d_lock);
60473 return;
60474
60475@@ -653,7 +653,7 @@ EXPORT_SYMBOL(dput);
60476 /* This must be called with d_lock held */
60477 static inline void __dget_dlock(struct dentry *dentry)
60478 {
60479- dentry->d_lockref.count++;
60480+ __lockref_inc(&dentry->d_lockref);
60481 }
60482
60483 static inline void __dget(struct dentry *dentry)
60484@@ -694,8 +694,8 @@ repeat:
60485 goto repeat;
60486 }
60487 rcu_read_unlock();
60488- BUG_ON(!ret->d_lockref.count);
60489- ret->d_lockref.count++;
60490+ BUG_ON(!__lockref_read(&ret->d_lockref));
60491+ __lockref_inc(&ret->d_lockref);
60492 spin_unlock(&ret->d_lock);
60493 return ret;
60494 }
60495@@ -773,9 +773,9 @@ restart:
60496 spin_lock(&inode->i_lock);
60497 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
60498 spin_lock(&dentry->d_lock);
60499- if (!dentry->d_lockref.count) {
60500+ if (!__lockref_read(&dentry->d_lockref)) {
60501 struct dentry *parent = lock_parent(dentry);
60502- if (likely(!dentry->d_lockref.count)) {
60503+ if (likely(!__lockref_read(&dentry->d_lockref))) {
60504 __dentry_kill(dentry);
60505 dput(parent);
60506 goto restart;
60507@@ -810,7 +810,7 @@ static void shrink_dentry_list(struct list_head *list)
60508 * We found an inuse dentry which was not removed from
60509 * the LRU because of laziness during lookup. Do not free it.
60510 */
60511- if ((int)dentry->d_lockref.count > 0) {
60512+ if ((int)__lockref_read(&dentry->d_lockref) > 0) {
60513 spin_unlock(&dentry->d_lock);
60514 if (parent)
60515 spin_unlock(&parent->d_lock);
60516@@ -848,8 +848,8 @@ static void shrink_dentry_list(struct list_head *list)
60517 dentry = parent;
60518 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
60519 parent = lock_parent(dentry);
60520- if (dentry->d_lockref.count != 1) {
60521- dentry->d_lockref.count--;
60522+ if (__lockref_read(&dentry->d_lockref) != 1) {
60523+ __lockref_dec(&dentry->d_lockref);
60524 spin_unlock(&dentry->d_lock);
60525 if (parent)
60526 spin_unlock(&parent->d_lock);
60527@@ -889,7 +889,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
60528 * counts, just remove them from the LRU. Otherwise give them
60529 * another pass through the LRU.
60530 */
60531- if (dentry->d_lockref.count) {
60532+ if (__lockref_read(&dentry->d_lockref) > 0) {
60533 d_lru_isolate(dentry);
60534 spin_unlock(&dentry->d_lock);
60535 return LRU_REMOVED;
60536@@ -1225,7 +1225,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
60537 } else {
60538 if (dentry->d_flags & DCACHE_LRU_LIST)
60539 d_lru_del(dentry);
60540- if (!dentry->d_lockref.count) {
60541+ if (!__lockref_read(&dentry->d_lockref)) {
60542 d_shrink_add(dentry, &data->dispose);
60543 data->found++;
60544 }
60545@@ -1273,7 +1273,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60546 return D_WALK_CONTINUE;
60547
60548 /* root with refcount 1 is fine */
60549- if (dentry == _data && dentry->d_lockref.count == 1)
60550+ if (dentry == _data && __lockref_read(&dentry->d_lockref) == 1)
60551 return D_WALK_CONTINUE;
60552
60553 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
60554@@ -1282,7 +1282,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60555 dentry->d_inode ?
60556 dentry->d_inode->i_ino : 0UL,
60557 dentry,
60558- dentry->d_lockref.count,
60559+ __lockref_read(&dentry->d_lockref),
60560 dentry->d_sb->s_type->name,
60561 dentry->d_sb->s_id);
60562 WARN_ON(1);
60563@@ -1423,7 +1423,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60564 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
60565 if (name->len > DNAME_INLINE_LEN-1) {
60566 size_t size = offsetof(struct external_name, name[1]);
60567- struct external_name *p = kmalloc(size + name->len, GFP_KERNEL);
60568+ struct external_name *p = kmalloc(round_up(size + name->len, sizeof(unsigned long)), GFP_KERNEL);
60569 if (!p) {
60570 kmem_cache_free(dentry_cache, dentry);
60571 return NULL;
60572@@ -1443,7 +1443,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60573 smp_wmb();
60574 dentry->d_name.name = dname;
60575
60576- dentry->d_lockref.count = 1;
60577+ __lockref_set(&dentry->d_lockref, 1);
60578 dentry->d_flags = 0;
60579 spin_lock_init(&dentry->d_lock);
60580 seqcount_init(&dentry->d_seq);
60581@@ -1452,6 +1452,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60582 dentry->d_sb = sb;
60583 dentry->d_op = NULL;
60584 dentry->d_fsdata = NULL;
60585+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
60586+ atomic_set(&dentry->chroot_refcnt, 0);
60587+#endif
60588 INIT_HLIST_BL_NODE(&dentry->d_hash);
60589 INIT_LIST_HEAD(&dentry->d_lru);
60590 INIT_LIST_HEAD(&dentry->d_subdirs);
60591@@ -2151,7 +2154,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
60592 goto next;
60593 }
60594
60595- dentry->d_lockref.count++;
60596+ __lockref_inc(&dentry->d_lockref);
60597 found = dentry;
60598 spin_unlock(&dentry->d_lock);
60599 break;
60600@@ -2250,7 +2253,7 @@ again:
60601 spin_lock(&dentry->d_lock);
60602 inode = dentry->d_inode;
60603 isdir = S_ISDIR(inode->i_mode);
60604- if (dentry->d_lockref.count == 1) {
60605+ if (__lockref_read(&dentry->d_lockref) == 1) {
60606 if (!spin_trylock(&inode->i_lock)) {
60607 spin_unlock(&dentry->d_lock);
60608 cpu_relax();
60609@@ -3203,7 +3206,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
60610
60611 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
60612 dentry->d_flags |= DCACHE_GENOCIDE;
60613- dentry->d_lockref.count--;
60614+ __lockref_dec(&dentry->d_lockref);
60615 }
60616 }
60617 return D_WALK_CONTINUE;
60618@@ -3319,7 +3322,8 @@ void __init vfs_caches_init(unsigned long mempages)
60619 mempages -= reserve;
60620
60621 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
60622- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
60623+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
60624+ SLAB_NO_SANITIZE, NULL);
60625
60626 dcache_init();
60627 inode_init();
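
[editor's note] Every direct access to dentry->d_lockref.count in the dcache hunks above is funneled through __lockref_read/__lockref_inc/__lockref_dec/__lockref_set. The indirection lets the embedded count be implemented as a checked atomic under PAX_REFCOUNT without rewriting each call site. The accessor names are the patch's; this plain fallback, for when no hardening is active, is an assumption sketched for illustration:

    /* assumed no-hardening fallback for the __lockref_* accessors */
    struct lockref_like { int count; };

    static inline int __lockref_read(struct lockref_like *l)
    {
            return l->count;
    }

    static inline void __lockref_inc(struct lockref_like *l)
    {
            l->count++;
    }

    static inline void __lockref_dec(struct lockref_like *l)
    {
            l->count--;
    }

    static inline void __lockref_set(struct lockref_like *l, int v)
    {
            l->count = v;
    }
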
60628diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
60629index 6f0ce53..92bba36 100644
60630--- a/fs/debugfs/inode.c
60631+++ b/fs/debugfs/inode.c
60632@@ -423,10 +423,20 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
60633 * If debugfs is not enabled in the kernel, the value -%ENODEV will be
60634 * returned.
60635 */
60636+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60637+extern int grsec_enable_sysfs_restrict;
60638+#endif
60639+
60640 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
60641 {
60642- return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
60643- parent, NULL, NULL);
60644+ umode_t mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
60645+
60646+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60647+ if (grsec_enable_sysfs_restrict)
60648+ mode = S_IFDIR | S_IRWXU;
60649+#endif
60650+
60651+ return __create_file(name, mode, parent, NULL, NULL);
60652 }
60653 EXPORT_SYMBOL_GPL(debugfs_create_dir);
60654
60655diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
60656index 1686dc2..9611c50 100644
60657--- a/fs/ecryptfs/inode.c
60658+++ b/fs/ecryptfs/inode.c
60659@@ -664,7 +664,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
60660 old_fs = get_fs();
60661 set_fs(get_ds());
60662 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
60663- (char __user *)lower_buf,
60664+ (char __force_user *)lower_buf,
60665 PATH_MAX);
60666 set_fs(old_fs);
60667 if (rc < 0)
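
[editor's note] The cast above is part of a recurring annotation: grsecurity builds with stricter address-space checking, so the long-standing set_fs(KERNEL_DS) trick of handing a kernel buffer to an interface that expects a __user pointer needs an explicit __force_user cast (with __force_kernel for the reverse direction) to mark the reinterpretation as deliberate. The shape of the pattern, as a kernel-style fragment rather than a standalone program; the dentry/inode variables are assumed from context:

    mm_segment_t old_fs = get_fs();
    char lower_buf[PATH_MAX];
    int rc;

    set_fs(get_ds());   /* user-access checks now accept kernel addresses */
    rc = inode->i_op->readlink(dentry, (char __force_user *)lower_buf,
                               PATH_MAX);
    set_fs(old_fs);     /* always restore the previous segment */
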
60668diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
60669index e4141f2..d8263e8 100644
60670--- a/fs/ecryptfs/miscdev.c
60671+++ b/fs/ecryptfs/miscdev.c
60672@@ -304,7 +304,7 @@ check_list:
60673 goto out_unlock_msg_ctx;
60674 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
60675 if (msg_ctx->msg) {
60676- if (copy_to_user(&buf[i], packet_length, packet_length_size))
60677+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
60678 goto out_unlock_msg_ctx;
60679 i += packet_length_size;
60680 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
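
[editor's note] The added check above refuses to copy more bytes to user space than the local packet_length buffer actually holds, turning a potential stack over-read into an early exit. The same guard, restated as a runnable model with stand-in sizes:

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
            char packet_length[7];              /* fixed-size local buffer */
            size_t packet_length_size = 12;     /* size derived from message */

            if (packet_length_size > sizeof(packet_length)) {
                    fprintf(stderr, "rejected: copy larger than source\n");
                    return 1;
            }
            /* ...a copy of packet_length_size bytes would now be safe... */
            return 0;
    }
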
60681diff --git a/fs/exec.c b/fs/exec.c
60682index ad8798e..5f872c9 100644
60683--- a/fs/exec.c
60684+++ b/fs/exec.c
60685@@ -56,8 +56,20 @@
60686 #include <linux/pipe_fs_i.h>
60687 #include <linux/oom.h>
60688 #include <linux/compat.h>
60689+#include <linux/random.h>
60690+#include <linux/seq_file.h>
60691+#include <linux/coredump.h>
60692+#include <linux/mman.h>
60693+
60694+#ifdef CONFIG_PAX_REFCOUNT
60695+#include <linux/kallsyms.h>
60696+#include <linux/kdebug.h>
60697+#endif
60698+
60699+#include <trace/events/fs.h>
60700
60701 #include <asm/uaccess.h>
60702+#include <asm/sections.h>
60703 #include <asm/mmu_context.h>
60704 #include <asm/tlb.h>
60705
60706@@ -66,19 +78,34 @@
60707
60708 #include <trace/events/sched.h>
60709
60710+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60711+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
60712+{
60713+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
60714+}
60715+#endif
60716+
60717+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
60718+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60719+EXPORT_SYMBOL(pax_set_initial_flags_func);
60720+#endif
60721+
60722 int suid_dumpable = 0;
60723
60724 static LIST_HEAD(formats);
60725 static DEFINE_RWLOCK(binfmt_lock);
60726
60727+extern int gr_process_kernel_exec_ban(void);
60728+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
60729+
60730 void __register_binfmt(struct linux_binfmt * fmt, int insert)
60731 {
60732 BUG_ON(!fmt);
60733 if (WARN_ON(!fmt->load_binary))
60734 return;
60735 write_lock(&binfmt_lock);
60736- insert ? list_add(&fmt->lh, &formats) :
60737- list_add_tail(&fmt->lh, &formats);
60738+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
60739+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
60740 write_unlock(&binfmt_lock);
60741 }
60742
60743@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
60744 void unregister_binfmt(struct linux_binfmt * fmt)
60745 {
60746 write_lock(&binfmt_lock);
60747- list_del(&fmt->lh);
60748+ pax_list_del((struct list_head *)&fmt->lh);
60749 write_unlock(&binfmt_lock);
60750 }
60751
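
[editor's note] binfmt registration above switches to pax_list_add/pax_list_del because, with grsecurity's constification, struct linux_binfmt instances live in read-only memory and their list linkage cannot be written through the plain helpers; the casts strip the const that constification adds. A sketch of the assumed shape of these helpers, a plain list operation bracketed by reopening the read-only mapping (pax_open_kernel()/pax_close_kernel() are PaX primitives; the exact definition may differ):

    #include <linux/list.h>

    static inline void pax_list_add_sketch(struct list_head *new,
                                           struct list_head *head)
    {
            pax_open_kernel();      /* temporarily allow writes to RO data */
            list_add(new, head);
            pax_close_kernel();
    }
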
60752@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
60753 int write)
60754 {
60755 struct page *page;
60756- int ret;
60757
60758-#ifdef CONFIG_STACK_GROWSUP
60759- if (write) {
60760- ret = expand_downwards(bprm->vma, pos);
60761- if (ret < 0)
60762- return NULL;
60763- }
60764-#endif
60765- ret = get_user_pages(current, bprm->mm, pos,
60766- 1, write, 1, &page, NULL);
60767- if (ret <= 0)
60768+ if (0 > expand_downwards(bprm->vma, pos))
60769+ return NULL;
60770+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
60771 return NULL;
60772
60773 if (write) {
60774@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
60775 if (size <= ARG_MAX)
60776 return page;
60777
60778+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60779+ // only allow 512KB for argv+env on suid/sgid binaries
60780+ // to prevent easy ASLR exhaustion
60781+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
60782+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
60783+ (size > (512 * 1024))) {
60784+ put_page(page);
60785+ return NULL;
60786+ }
60787+#endif
60788+
60789 /*
60790 * Limit to 1/4-th the stack size for the argv+env strings.
60791 * This ensures that:
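
[editor's note] The GRKERNSEC_PROC_MEMMAP block above caps argv+env at 512 KiB whenever the exec changes euid or egid, ahead of the usual quarter-of-RLIMIT_STACK check that follows, so a setuid target cannot be made to spend most of its randomized address space on attacker-supplied strings. The check, restated compactly:

    #include <stdbool.h>
    #include <stdio.h>

    /* model of the extra suid/sgid argv+env cap applied above */
    static bool args_over_suid_cap(bool euid_changes, bool egid_changes,
                                   unsigned long size)
    {
            return (euid_changes || egid_changes) && size > 512 * 1024;
    }

    int main(void)
    {
            printf("%d\n", args_over_suid_cap(true, false, 600 * 1024));  /* 1 */
            printf("%d\n", args_over_suid_cap(false, false, 600 * 1024)); /* 0 */
            return 0;
    }
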
60792@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
60793 vma->vm_end = STACK_TOP_MAX;
60794 vma->vm_start = vma->vm_end - PAGE_SIZE;
60795 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
60796+
60797+#ifdef CONFIG_PAX_SEGMEXEC
60798+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
60799+#endif
60800+
60801 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
60802 INIT_LIST_HEAD(&vma->anon_vma_chain);
60803
60804@@ -280,6 +315,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
60805 arch_bprm_mm_init(mm, vma);
60806 up_write(&mm->mmap_sem);
60807 bprm->p = vma->vm_end - sizeof(void *);
60808+
60809+#ifdef CONFIG_PAX_RANDUSTACK
60810+ if (randomize_va_space)
60811+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
60812+#endif
60813+
60814 return 0;
60815 err:
60816 up_write(&mm->mmap_sem);
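
[editor's note] PAX_RANDUSTACK above perturbs the starting stack pointer within its page: prandom_u32() & ~PAGE_MASK keeps only the sub-page bits (0..PAGE_SIZE-1), and XORing them into bprm->p moves where argument copying begins without relocating the stack VMA. A quick demonstration of the bit manipulation, with 4 KiB pages assumed and rand() standing in for prandom_u32():

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long p = 0x7ffffffff000UL - sizeof(void *); /* bprm->p */
            unsigned long r = (unsigned long)rand() & ~PAGE_MASK; /* 0..4095 */

            printf("before: %#lx\n", p);
            printf("after : %#lx (same page, xor offset %#lx)\n", p ^ r, r);
            return 0;
    }
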
60817@@ -396,7 +437,7 @@ struct user_arg_ptr {
60818 } ptr;
60819 };
60820
60821-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60822+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60823 {
60824 const char __user *native;
60825
60826@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
60827 compat_uptr_t compat;
60828
60829 if (get_user(compat, argv.ptr.compat + nr))
60830- return ERR_PTR(-EFAULT);
60831+ return (const char __force_user *)ERR_PTR(-EFAULT);
60832
60833 return compat_ptr(compat);
60834 }
60835 #endif
60836
60837 if (get_user(native, argv.ptr.native + nr))
60838- return ERR_PTR(-EFAULT);
60839+ return (const char __force_user *)ERR_PTR(-EFAULT);
60840
60841 return native;
60842 }
60843@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
60844 if (!p)
60845 break;
60846
60847- if (IS_ERR(p))
60848+ if (IS_ERR((const char __force_kernel *)p))
60849 return -EFAULT;
60850
60851 if (i >= max)
60852@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
60853
60854 ret = -EFAULT;
60855 str = get_user_arg_ptr(argv, argc);
60856- if (IS_ERR(str))
60857+ if (IS_ERR((const char __force_kernel *)str))
60858 goto out;
60859
60860 len = strnlen_user(str, MAX_ARG_STRLEN);
60861@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
60862 int r;
60863 mm_segment_t oldfs = get_fs();
60864 struct user_arg_ptr argv = {
60865- .ptr.native = (const char __user *const __user *)__argv,
60866+ .ptr.native = (const char __user * const __force_user *)__argv,
60867 };
60868
60869 set_fs(KERNEL_DS);
60870@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
60871 unsigned long new_end = old_end - shift;
60872 struct mmu_gather tlb;
60873
60874- BUG_ON(new_start > new_end);
60875+ if (new_start >= new_end || new_start < mmap_min_addr)
60876+ return -ENOMEM;
60877
60878 /*
60879 * ensure there are no vmas between where we want to go
60880@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
60881 if (vma != find_vma(mm, new_start))
60882 return -EFAULT;
60883
60884+#ifdef CONFIG_PAX_SEGMEXEC
60885+ BUG_ON(pax_find_mirror_vma(vma));
60886+#endif
60887+
60888 /*
60889 * cover the whole range: [new_start, old_end)
60890 */
60891@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
60892 stack_top = arch_align_stack(stack_top);
60893 stack_top = PAGE_ALIGN(stack_top);
60894
60895- if (unlikely(stack_top < mmap_min_addr) ||
60896- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
60897- return -ENOMEM;
60898-
60899 stack_shift = vma->vm_end - stack_top;
60900
60901 bprm->p -= stack_shift;
60902@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
60903 bprm->exec -= stack_shift;
60904
60905 down_write(&mm->mmap_sem);
60906+
60907+ /* Move stack pages down in memory. */
60908+ if (stack_shift) {
60909+ ret = shift_arg_pages(vma, stack_shift);
60910+ if (ret)
60911+ goto out_unlock;
60912+ }
60913+
60914 vm_flags = VM_STACK_FLAGS;
60915
60916+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
60917+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
60918+ vm_flags &= ~VM_EXEC;
60919+
60920+#ifdef CONFIG_PAX_MPROTECT
60921+ if (mm->pax_flags & MF_PAX_MPROTECT)
60922+ vm_flags &= ~VM_MAYEXEC;
60923+#endif
60924+
60925+ }
60926+#endif
60927+
60928 /*
60929 * Adjust stack execute permissions; explicitly enable for
60930 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
60931@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
60932 goto out_unlock;
60933 BUG_ON(prev != vma);
60934
60935- /* Move stack pages down in memory. */
60936- if (stack_shift) {
60937- ret = shift_arg_pages(vma, stack_shift);
60938- if (ret)
60939- goto out_unlock;
60940- }
60941-
60942 /* mprotect_fixup is overkill to remove the temporary stack flags */
60943 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
60944
60945@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
60946 #endif
60947 current->mm->start_stack = bprm->p;
60948 ret = expand_stack(vma, stack_base);
60949+
60950+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
60951+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
60952+ unsigned long size;
60953+ vm_flags_t vm_flags;
60954+
60955+ size = STACK_TOP - vma->vm_end;
60956+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
60957+
60958+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
60959+
60960+#ifdef CONFIG_X86
60961+ if (!ret) {
60962+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
60963+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
60964+ }
60965+#endif
60966+
60967+ }
60968+#endif
60969+
60970 if (ret)
60971 ret = -EFAULT;
60972
60973@@ -781,8 +857,10 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
60974 if (err)
60975 goto exit;
60976
60977- if (name->name[0] != '\0')
60978+ if (name->name[0] != '\0') {
60979 fsnotify_open(file);
60980+ trace_open_exec(name->name);
60981+ }
60982
60983 out:
60984 return file;
60985@@ -809,7 +887,7 @@ int kernel_read(struct file *file, loff_t offset,
60986 old_fs = get_fs();
60987 set_fs(get_ds());
60988 /* The cast to a user pointer is valid due to the set_fs() */
60989- result = vfs_read(file, (void __user *)addr, count, &pos);
60990+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
60991 set_fs(old_fs);
60992 return result;
60993 }
60994@@ -854,6 +932,7 @@ static int exec_mmap(struct mm_struct *mm)
60995 tsk->mm = mm;
60996 tsk->active_mm = mm;
60997 activate_mm(active_mm, mm);
60998+ populate_stack();
60999 tsk->mm->vmacache_seqnum = 0;
61000 vmacache_flush(tsk);
61001 task_unlock(tsk);
61002@@ -1252,7 +1331,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
61003 }
61004 rcu_read_unlock();
61005
61006- if (p->fs->users > n_fs)
61007+ if (atomic_read(&p->fs->users) > n_fs)
61008 bprm->unsafe |= LSM_UNSAFE_SHARE;
61009 else
61010 p->fs->in_exec = 1;
61011@@ -1433,6 +1512,31 @@ static int exec_binprm(struct linux_binprm *bprm)
61012 return ret;
61013 }
61014
61015+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61016+static DEFINE_PER_CPU(u64, exec_counter);
61017+static int __init init_exec_counters(void)
61018+{
61019+ unsigned int cpu;
61020+
61021+ for_each_possible_cpu(cpu) {
61022+ per_cpu(exec_counter, cpu) = (u64)cpu;
61023+ }
61024+
61025+ return 0;
61026+}
61027+early_initcall(init_exec_counters);
61028+static inline void increment_exec_counter(void)
61029+{
61030+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
61031+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
61032+}
61033+#else
61034+static inline void increment_exec_counter(void) {}
61035+#endif
61036+
61037+extern void gr_handle_exec_args(struct linux_binprm *bprm,
61038+ struct user_arg_ptr argv);
61039+
61040 /*
61041 * sys_execve() executes a new program.
61042 */
61043@@ -1441,6 +1545,11 @@ static int do_execveat_common(int fd, struct filename *filename,
61044 struct user_arg_ptr envp,
61045 int flags)
61046 {
61047+#ifdef CONFIG_GRKERNSEC
61048+ struct file *old_exec_file;
61049+ struct acl_subject_label *old_acl;
61050+ struct rlimit old_rlim[RLIM_NLIMITS];
61051+#endif
61052 char *pathbuf = NULL;
61053 struct linux_binprm *bprm;
61054 struct file *file;
61055@@ -1450,6 +1559,8 @@ static int do_execveat_common(int fd, struct filename *filename,
61056 if (IS_ERR(filename))
61057 return PTR_ERR(filename);
61058
61059+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
61060+
61061 /*
61062 * We move the actual failure in case of RLIMIT_NPROC excess from
61063 * set*uid() to execve() because too many poorly written programs
61064@@ -1487,6 +1598,11 @@ static int do_execveat_common(int fd, struct filename *filename,
61065 if (IS_ERR(file))
61066 goto out_unmark;
61067
61068+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
61069+ retval = -EPERM;
61070+ goto out_unmark;
61071+ }
61072+
61073 sched_exec();
61074
61075 bprm->file = file;
61076@@ -1513,6 +1629,11 @@ static int do_execveat_common(int fd, struct filename *filename,
61077 }
61078 bprm->interp = bprm->filename;
61079
61080+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
61081+ retval = -EACCES;
61082+ goto out_unmark;
61083+ }
61084+
61085 retval = bprm_mm_init(bprm);
61086 if (retval)
61087 goto out_unmark;
61088@@ -1529,24 +1650,70 @@ static int do_execveat_common(int fd, struct filename *filename,
61089 if (retval < 0)
61090 goto out;
61091
61092+#ifdef CONFIG_GRKERNSEC
61093+ old_acl = current->acl;
61094+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
61095+ old_exec_file = current->exec_file;
61096+ get_file(file);
61097+ current->exec_file = file;
61098+#endif
61099+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61100+ /* limit suid stack to 8MB
61101+ * we saved the old limits above and will restore them if this exec fails
61102+ */
61103+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
61104+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
61105+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
61106+#endif
61107+
61108+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
61109+ retval = -EPERM;
61110+ goto out_fail;
61111+ }
61112+
61113+ if (!gr_tpe_allow(file)) {
61114+ retval = -EACCES;
61115+ goto out_fail;
61116+ }
61117+
61118+ if (gr_check_crash_exec(file)) {
61119+ retval = -EACCES;
61120+ goto out_fail;
61121+ }
61122+
61123+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
61124+ bprm->unsafe);
61125+ if (retval < 0)
61126+ goto out_fail;
61127+
61128 retval = copy_strings_kernel(1, &bprm->filename, bprm);
61129 if (retval < 0)
61130- goto out;
61131+ goto out_fail;
61132
61133 bprm->exec = bprm->p;
61134 retval = copy_strings(bprm->envc, envp, bprm);
61135 if (retval < 0)
61136- goto out;
61137+ goto out_fail;
61138
61139 retval = copy_strings(bprm->argc, argv, bprm);
61140 if (retval < 0)
61141- goto out;
61142+ goto out_fail;
61143+
61144+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
61145+
61146+ gr_handle_exec_args(bprm, argv);
61147
61148 retval = exec_binprm(bprm);
61149 if (retval < 0)
61150- goto out;
61151+ goto out_fail;
61152+#ifdef CONFIG_GRKERNSEC
61153+ if (old_exec_file)
61154+ fput(old_exec_file);
61155+#endif
61156
61157 /* execve succeeded */
61158+
61159+ increment_exec_counter();
61160 current->fs->in_exec = 0;
61161 current->in_execve = 0;
61162 acct_update_integrals(current);
61163@@ -1558,6 +1725,14 @@ static int do_execveat_common(int fd, struct filename *filename,
61164 put_files_struct(displaced);
61165 return retval;
61166
61167+out_fail:
61168+#ifdef CONFIG_GRKERNSEC
61169+ current->acl = old_acl;
61170+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
61171+ fput(current->exec_file);
61172+ current->exec_file = old_exec_file;
61173+#endif
61174+
61175 out:
61176 if (bprm->mm) {
61177 acct_arg_size(bprm, 0);
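
[editor's note] The out_fail label added above is the undo path for the grsecurity state set up earlier in do_execveat_common(): the task's ACL subject, the rlimits (possibly clamped to the 8 MB suid stack), and exec_file are all restored, so a failed execve leaves the process exactly as it found it. The save/restore pattern, modeled in user space with stand-in fields:

    #include <stdio.h>

    struct exec_state {
            int acl;                 /* stands in for current->acl */
            long rlim_stack;         /* stands in for RLIMIT_STACK */
            const char *exec_file;   /* stands in for current->exec_file */
    };

    static int do_exec_model(struct exec_state *cur, int fail)
    {
            struct exec_state saved = *cur;     /* snapshot before any change */

            cur->rlim_stack = 8 * 1024 * 1024;  /* clamp suid stack to 8 MB */
            cur->exec_file = "new-binary";

            if (fail) {
                    *cur = saved;               /* out_fail: restore snapshot */
                    return -1;
            }
            return 0;                           /* success: keep new state */
    }

    int main(void)
    {
            struct exec_state s = { 1, 1L << 30, "old-binary" };

            do_exec_model(&s, 1);
            printf("%s %ld\n", s.exec_file, s.rlim_stack); /* old-binary kept */
            return 0;
    }
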
61178@@ -1704,3 +1879,312 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
61179 argv, envp, flags);
61180 }
61181 #endif
61182+
61183+int pax_check_flags(unsigned long *flags)
61184+{
61185+ int retval = 0;
61186+
61187+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
61188+ if (*flags & MF_PAX_SEGMEXEC)
61189+ {
61190+ *flags &= ~MF_PAX_SEGMEXEC;
61191+ retval = -EINVAL;
61192+ }
61193+#endif
61194+
61195+ if ((*flags & MF_PAX_PAGEEXEC)
61196+
61197+#ifdef CONFIG_PAX_PAGEEXEC
61198+ && (*flags & MF_PAX_SEGMEXEC)
61199+#endif
61200+
61201+ )
61202+ {
61203+ *flags &= ~MF_PAX_PAGEEXEC;
61204+ retval = -EINVAL;
61205+ }
61206+
61207+ if ((*flags & MF_PAX_MPROTECT)
61208+
61209+#ifdef CONFIG_PAX_MPROTECT
61210+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61211+#endif
61212+
61213+ )
61214+ {
61215+ *flags &= ~MF_PAX_MPROTECT;
61216+ retval = -EINVAL;
61217+ }
61218+
61219+ if ((*flags & MF_PAX_EMUTRAMP)
61220+
61221+#ifdef CONFIG_PAX_EMUTRAMP
61222+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61223+#endif
61224+
61225+ )
61226+ {
61227+ *flags &= ~MF_PAX_EMUTRAMP;
61228+ retval = -EINVAL;
61229+ }
61230+
61231+ return retval;
61232+}
61233+
61234+EXPORT_SYMBOL(pax_check_flags);
61235+
61236+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
61237+char *pax_get_path(const struct path *path, char *buf, int buflen)
61238+{
61239+ char *pathname = d_path(path, buf, buflen);
61240+
61241+ if (IS_ERR(pathname))
61242+ goto toolong;
61243+
61244+ pathname = mangle_path(buf, pathname, "\t\n\\");
61245+ if (!pathname)
61246+ goto toolong;
61247+
61248+ *pathname = 0;
61249+ return buf;
61250+
61251+toolong:
61252+ return "<path too long>";
61253+}
61254+EXPORT_SYMBOL(pax_get_path);
61255+
61256+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
61257+{
61258+ struct task_struct *tsk = current;
61259+ struct mm_struct *mm = current->mm;
61260+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
61261+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
61262+ char *path_exec = NULL;
61263+ char *path_fault = NULL;
61264+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
61265+ siginfo_t info = { };
61266+
61267+ if (buffer_exec && buffer_fault) {
61268+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
61269+
61270+ down_read(&mm->mmap_sem);
61271+ vma = mm->mmap;
61272+ while (vma && (!vma_exec || !vma_fault)) {
61273+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
61274+ vma_exec = vma;
61275+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
61276+ vma_fault = vma;
61277+ vma = vma->vm_next;
61278+ }
61279+ if (vma_exec)
61280+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
61281+ if (vma_fault) {
61282+ start = vma_fault->vm_start;
61283+ end = vma_fault->vm_end;
61284+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
61285+ if (vma_fault->vm_file)
61286+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
61287+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
61288+ path_fault = "<heap>";
61289+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
61290+ path_fault = "<stack>";
61291+ else
61292+ path_fault = "<anonymous mapping>";
61293+ }
61294+ up_read(&mm->mmap_sem);
61295+ }
61296+ if (tsk->signal->curr_ip)
61297+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
61298+ else
61299+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
61300+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
61301+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
61302+ free_page((unsigned long)buffer_exec);
61303+ free_page((unsigned long)buffer_fault);
61304+ pax_report_insns(regs, pc, sp);
61305+ info.si_signo = SIGKILL;
61306+ info.si_errno = 0;
61307+ info.si_code = SI_KERNEL;
61308+ info.si_pid = 0;
61309+ info.si_uid = 0;
61310+ do_coredump(&info);
61311+}
61312+#endif
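
pax_report_fault() above names the mapping a bad PC landed in by walking the task's VMA list under mmap_sem: file-backed mappings get their path, anonymous ones are labelled <heap>, <stack>, or <anonymous mapping>. The same classification can be approximated from userspace via /proc/self/maps; a sketch under that assumption (the kernel, of course, reads the VMAs directly):

#include <stdio.h>
#include <string.h>

static const char *classify(unsigned long addr)
{
	static char path[256];
	FILE *f = fopen("/proc/self/maps", "r");
	char line[512];

	if (!f)
		return "<unknown>";
	while (fgets(line, sizeof(line), f)) {
		unsigned long start, end;
		char perms[8];

		path[0] = '\0';
		if (sscanf(line, "%lx-%lx %7s %*s %*s %*s %255s",
			   &start, &end, perms, path) < 3)
			continue;
		if (addr < start || addr >= end)
			continue;
		fclose(f);
		if (path[0] == '/')
			return path;			/* file-backed */
		if (strcmp(path, "[heap]") == 0)
			return "<heap>";
		if (strcmp(path, "[stack]") == 0)
			return "<stack>";
		return "<anonymous mapping>";
	}
	fclose(f);
	return "<unknown>";
}

int main(void)
{
	int local;
	printf("%s\n", classify((unsigned long)&local));    /* <stack> */
	printf("%s\n", classify((unsigned long)&classify)); /* typically the binary's path */
	return 0;
}
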
61313+
61314+#ifdef CONFIG_PAX_REFCOUNT
61315+void pax_report_refcount_overflow(struct pt_regs *regs)
61316+{
61317+ if (current->signal->curr_ip)
61318+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
61319+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
61320+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61321+ else
61322+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
61323+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61324+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
61325+ preempt_disable();
61326+ show_regs(regs);
61327+ preempt_enable();
61328+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
61329+}
61330+#endif
61331+
61332+#ifdef CONFIG_PAX_USERCOPY
61333+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
61334+static noinline int check_stack_object(const void *obj, unsigned long len)
61335+{
61336+ const void * const stack = task_stack_page(current);
61337+ const void * const stackend = stack + THREAD_SIZE;
61338+
61339+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61340+ const void *frame = NULL;
61341+ const void *oldframe;
61342+#endif
61343+
61344+ if (obj + len < obj)
61345+ return -1;
61346+
61347+ if (obj + len <= stack || stackend <= obj)
61348+ return 0;
61349+
61350+ if (obj < stack || stackend < obj + len)
61351+ return -1;
61352+
61353+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61354+ oldframe = __builtin_frame_address(1);
61355+ if (oldframe)
61356+ frame = __builtin_frame_address(2);
61357+ /*
61358+ low ----------------------------------------------> high
61359+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
61360+ ^----------------^
61361+ allow copies only within here
61362+ */
61363+ while (stack <= frame && frame < stackend) {
61364+ /* if obj + len extends past the last frame, this
61365+ check won't pass and the next frame will be 0,
61366+ causing us to bail out and correctly report
61367+ the copy as invalid
61368+ */
61369+ if (obj + len <= frame)
61370+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
61371+ oldframe = frame;
61372+ frame = *(const void * const *)frame;
61373+ }
61374+ return -1;
61375+#else
61376+ return 1;
61377+#endif
61378+}
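
Stripped of the x86 frame walk, check_stack_object() above is three interval tests against the thread stack: reject arithmetic wrap-around, report 0 when the object lies entirely outside the stack, and reject any partial overlap. A self-contained rendering of just that interval logic (the addresses below are arbitrary):

#include <stdio.h>
#include <stdint.h>

/* 0: not in [stack, stackend), 1: fully inside, -1: partial overlap or wrap */
static int stack_overlap(uintptr_t stack, uintptr_t stackend,
			 uintptr_t obj, unsigned long len)
{
	if (obj + len < obj)
		return -1;		/* arithmetic wrap */
	if (obj + len <= stack || stackend <= obj)
		return 0;		/* entirely outside */
	if (obj < stack || stackend < obj + len)
		return -1;		/* straddles a boundary */
	return 1;			/* fully contained */
}

int main(void)
{
	uintptr_t lo = 0x1000, hi = 0x3000;

	printf("%d\n", stack_overlap(lo, hi, 0x1100, 0x100)); /* 1  */
	printf("%d\n", stack_overlap(lo, hi, 0x0f00, 0x200)); /* -1 */
	printf("%d\n", stack_overlap(lo, hi, 0x4000, 0x10));  /* 0  */
	return 0;
}
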
61379+
61380+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
61381+{
61382+ if (current->signal->curr_ip)
61383+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61384+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61385+ else
61386+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61387+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61388+ dump_stack();
61389+ gr_handle_kernel_exploit();
61390+ do_group_exit(SIGKILL);
61391+}
61392+#endif
61393+
61394+#ifdef CONFIG_PAX_USERCOPY
61395+
61396+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
61397+{
61398+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
61399+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
61400+#ifdef CONFIG_MODULES
61401+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
61402+#else
61403+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
61404+#endif
61405+
61406+#else
61407+ unsigned long textlow = (unsigned long)_stext;
61408+ unsigned long texthigh = (unsigned long)_etext;
61409+
61410+#ifdef CONFIG_X86_64
61411+ /* check against linear mapping as well */
61412+ if (high > (unsigned long)__va(__pa(textlow)) &&
61413+ low < (unsigned long)__va(__pa(texthigh)))
61414+ return true;
61415+#endif
61416+
61417+#endif
61418+
61419+ if (high <= textlow || low >= texthigh)
61420+ return false;
61421+ else
61422+ return true;
61423+}
61424+#endif
61425+
61426+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
61427+{
61428+#ifdef CONFIG_PAX_USERCOPY
61429+ const char *type;
61430+#endif
61431+
61432+#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_X86_64)
61433+ unsigned long stackstart = (unsigned long)task_stack_page(current);
61434+ unsigned long currentsp = (unsigned long)&stackstart;
61435+ if (unlikely((currentsp < stackstart + 512 ||
61436+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
61437+ BUG();
61438+#endif
61439+
61440+#ifndef CONFIG_PAX_USERCOPY_DEBUG
61441+ if (const_size)
61442+ return;
61443+#endif
61444+
61445+#ifdef CONFIG_PAX_USERCOPY
61446+ if (!n)
61447+ return;
61448+
61449+ type = check_heap_object(ptr, n);
61450+ if (!type) {
61451+ int ret = check_stack_object(ptr, n);
61452+ if (ret == 1 || ret == 2)
61453+ return;
61454+ if (ret == 0) {
61455+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
61456+ type = "<kernel text>";
61457+ else
61458+ return;
61459+ } else
61460+ type = "<process stack>";
61461+ }
61462+
61463+ pax_report_usercopy(ptr, n, to_user, type);
61464+#endif
61465+
61466+}
61467+EXPORT_SYMBOL(__check_object_size);
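
In this tree the arch uaccess paths call check_object_size() -- and through it __check_object_size() above -- before non-constant-size copy_to_user()/copy_from_user() calls, so an undersized heap object, a buffer straddling stack frames, or kernel text is never copied across the user boundary. A hedged mock of the decision tree (the two helpers are stubs standing in for the real check_heap_object()/check_stack_object(); the kernel-text test is elided):

#include <stdbool.h>
#include <stdio.h>

/* stubs standing in for the real grsecurity helpers */
static const char *check_heap_object(const void *ptr, unsigned long n)
{ (void)ptr; (void)n; return NULL; }	/* NULL: no heap violation */
static int check_stack_object(const void *obj, unsigned long len)
{ (void)obj; (void)len; return 1; }	/* 1: fully inside the stack */

/* the shape of __check_object_size(), minus BUG() and the killing report */
static bool usercopy_ok(const void *ptr, unsigned long n)
{
	const char *type;

	if (!n)
		return true;
	type = check_heap_object(ptr, n);
	if (!type) {
		int ret = check_stack_object(ptr, n);
		if (ret == 1 || ret == 2)
			return true;		/* wholly on-stack: fine */
		if (ret == 0)
			return true;		/* off-stack; text check elided */
		type = "<process stack>";	/* partial overlap: reject */
	}
	fprintf(stderr, "usercopy rejected: %s (%lu bytes at %p)\n", type, n, ptr);
	return false;
}

int main(void)
{
	char buf[64];
	printf("%d\n", usercopy_ok(buf, sizeof(buf)));	/* 1 */
	return 0;
}
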
61468+
61469+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
61470+void pax_track_stack(void)
61471+{
61472+ unsigned long sp = (unsigned long)&sp;
61473+ if (sp < current_thread_info()->lowest_stack &&
61474+ sp >= (unsigned long)task_stack_page(current) + 2 * sizeof(unsigned long))
61475+ current_thread_info()->lowest_stack = sp;
61476+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
61477+ BUG();
61478+}
61479+EXPORT_SYMBOL(pax_track_stack);
61480+#endif
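
pax_track_stack() records the lowest stack pointer a thread has reached, so STACKLEAK's erase pass on kernel exit knows how much stack to scrub, and it BUGs when the stack is nearly exhausted. A userspace analogue of the low-water-mark part (keyed off a global instead of thread_info, purely for illustration):

#include <stdio.h>
#include <stdint.h>

static uintptr_t lowest_stack = UINTPTR_MAX;

static void track_stack(void)
{
	uintptr_t sp = (uintptr_t)&sp;	/* address of a local ~ current SP */

	if (sp < lowest_stack)
		lowest_stack = sp;
}

static void deep(int n)
{
	volatile char pad[256];		/* force real stack growth */

	pad[0] = 0;
	track_stack();
	if (n)
		deep(n - 1);
}

int main(void)
{
	track_stack();
	uintptr_t top = lowest_stack;

	deep(8);
	printf("stack grew by ~%lu bytes\n",
	       (unsigned long)(top - lowest_stack));
	return 0;
}
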
61481+
61482+#ifdef CONFIG_PAX_SIZE_OVERFLOW
61483+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
61484+{
61485+	printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s\n", func, file, line, ssa_name);
61486+ dump_stack();
61487+ do_group_exit(SIGKILL);
61488+}
61489+EXPORT_SYMBOL(report_size_overflow);
61490+#endif
61491diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
61492index 9f9992b..8b59411 100644
61493--- a/fs/ext2/balloc.c
61494+++ b/fs/ext2/balloc.c
61495@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
61496
61497 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61498 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61499- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61500+ if (free_blocks < root_blocks + 1 &&
61501 !uid_eq(sbi->s_resuid, current_fsuid()) &&
61502 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61503- !in_group_p (sbi->s_resgid))) {
61504+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61505 return 0;
61506 }
61507 return 1;
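
The balloc hunks in ext2/ext3/ext4 all make the same two-part change: capable() becomes capable_nolog(), and the capability test moves to the end of the && chain. Both serve the grsecurity audit log -- capable() can emit a "capability used" event, so it should neither fire for a mere reserved-blocks probe nor be evaluated at all when a cheaper condition already decides the outcome. A toy demonstration of the short-circuit reordering (capable_logged() is a made-up stand-in):

#include <stdbool.h>
#include <stdio.h>

/* stand-in for capable(): in-kernel this can log an audit event */
static bool capable_logged(void)
{
	fprintf(stderr, "capability check logged\n");	/* side effect */
	return false;
}

static bool uid_matches(void) { return true; }

int main(void)
{
	/* old order: the side-effecting check runs even though
	 * uid_matches() alone decides the outcome */
	if (capable_logged() || uid_matches())
		puts("allowed (old order, one spurious log line)");

	/* new order: cheap, side-effect-free tests first; the logging
	 * check is short-circuited away entirely */
	if (uid_matches() || capable_logged())
		puts("allowed (new order, no log line)");
	return 0;
}
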
61508diff --git a/fs/ext2/super.c b/fs/ext2/super.c
61509index ae55fdd..5e64c27 100644
61510--- a/fs/ext2/super.c
61511+++ b/fs/ext2/super.c
61512@@ -268,10 +268,8 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root)
61513 #ifdef CONFIG_EXT2_FS_XATTR
61514 if (test_opt(sb, XATTR_USER))
61515 seq_puts(seq, ",user_xattr");
61516- if (!test_opt(sb, XATTR_USER) &&
61517- (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
61518+ if (!test_opt(sb, XATTR_USER))
61519 seq_puts(seq, ",nouser_xattr");
61520- }
61521 #endif
61522
61523 #ifdef CONFIG_EXT2_FS_POSIX_ACL
61524@@ -850,8 +848,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
61525 if (def_mount_opts & EXT2_DEFM_UID16)
61526 set_opt(sbi->s_mount_opt, NO_UID32);
61527 #ifdef CONFIG_EXT2_FS_XATTR
61528- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
61529- set_opt(sbi->s_mount_opt, XATTR_USER);
61530+ /* always enable user xattrs */
61531+ set_opt(sbi->s_mount_opt, XATTR_USER);
61532 #endif
61533 #ifdef CONFIG_EXT2_FS_POSIX_ACL
61534 if (def_mount_opts & EXT2_DEFM_ACL)
61535diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
61536index 9142614..97484fa 100644
61537--- a/fs/ext2/xattr.c
61538+++ b/fs/ext2/xattr.c
61539@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
61540 struct buffer_head *bh = NULL;
61541 struct ext2_xattr_entry *entry;
61542 char *end;
61543- size_t rest = buffer_size;
61544+ size_t rest = buffer_size, total_size = 0;
61545 int error;
61546
61547 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
61548@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
61549 buffer += size;
61550 }
61551 rest -= size;
61552+ total_size += size;
61553 }
61554 }
61555- error = buffer_size - rest; /* total size */
61556+ error = total_size;
61557
61558 cleanup:
61559 brelse(bh);
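
The xattr hunk (repeated for ext3 and ext4 below) replaces the derived return value buffer_size - rest with an explicitly accumulated total_size. In the size-query case (buffer == NULL, buffer_size == 0) the running rest -= size underflows through the whole loop and only wraps back to the right answer at the final subtraction -- exactly the kind of intermediate unsigned wrap the size_overflow plugin instruments. A sketch of the two computations (the entry sizes are invented):

#include <stdio.h>

/* listxattr-style size query: buffer == NULL, buffer_size == 0 */
static long list_sizes_old(const unsigned long *sizes, int n,
			   unsigned long buffer_size)
{
	unsigned long rest = buffer_size;

	for (int i = 0; i < n; i++)
		rest -= sizes[i];	/* wraps when buffer_size == 0 */
	return buffer_size - rest;	/* wraps back, but only by luck */
}

static long list_sizes_new(const unsigned long *sizes, int n)
{
	unsigned long total = 0;

	for (int i = 0; i < n; i++)
		total += sizes[i];	/* monotone, never wraps for sane input */
	return total;
}

int main(void)
{
	unsigned long sizes[] = { 7, 12, 5 };

	printf("%ld %ld\n", list_sizes_old(sizes, 3, 0),
	       list_sizes_new(sizes, 3));	/* 24 24 */
	return 0;
}
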
61560diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
61561index 158b5d4..2432610 100644
61562--- a/fs/ext3/balloc.c
61563+++ b/fs/ext3/balloc.c
61564@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
61565
61566 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61567 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61568- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61569+ if (free_blocks < root_blocks + 1 &&
61570 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
61571 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61572- !in_group_p (sbi->s_resgid))) {
61573+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61574 return 0;
61575 }
61576 return 1;
61577diff --git a/fs/ext3/super.c b/fs/ext3/super.c
61578index 9b4e7d7..048d025 100644
61579--- a/fs/ext3/super.c
61580+++ b/fs/ext3/super.c
61581@@ -653,10 +653,8 @@ static int ext3_show_options(struct seq_file *seq, struct dentry *root)
61582 #ifdef CONFIG_EXT3_FS_XATTR
61583 if (test_opt(sb, XATTR_USER))
61584 seq_puts(seq, ",user_xattr");
61585- if (!test_opt(sb, XATTR_USER) &&
61586- (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
61587+ if (!test_opt(sb, XATTR_USER))
61588 seq_puts(seq, ",nouser_xattr");
61589- }
61590 #endif
61591 #ifdef CONFIG_EXT3_FS_POSIX_ACL
61592 if (test_opt(sb, POSIX_ACL))
61593@@ -1758,8 +1756,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
61594 if (def_mount_opts & EXT3_DEFM_UID16)
61595 set_opt(sbi->s_mount_opt, NO_UID32);
61596 #ifdef CONFIG_EXT3_FS_XATTR
61597- if (def_mount_opts & EXT3_DEFM_XATTR_USER)
61598- set_opt(sbi->s_mount_opt, XATTR_USER);
61599+ /* always enable user xattrs */
61600+ set_opt(sbi->s_mount_opt, XATTR_USER);
61601 #endif
61602 #ifdef CONFIG_EXT3_FS_POSIX_ACL
61603 if (def_mount_opts & EXT3_DEFM_ACL)
61604diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
61605index c6874be..f8a6ae8 100644
61606--- a/fs/ext3/xattr.c
61607+++ b/fs/ext3/xattr.c
61608@@ -330,7 +330,7 @@ static int
61609 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61610 char *buffer, size_t buffer_size)
61611 {
61612- size_t rest = buffer_size;
61613+ size_t rest = buffer_size, total_size = 0;
61614
61615 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
61616 const struct xattr_handler *handler =
61617@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61618 buffer += size;
61619 }
61620 rest -= size;
61621+ total_size += size;
61622 }
61623 }
61624- return buffer_size - rest;
61625+ return total_size;
61626 }
61627
61628 static int
61629diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
61630index 83a6f49..d4e4d03 100644
61631--- a/fs/ext4/balloc.c
61632+++ b/fs/ext4/balloc.c
61633@@ -557,8 +557,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
61634 /* Hm, nope. Are (enough) root reserved clusters available? */
61635 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
61636 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
61637- capable(CAP_SYS_RESOURCE) ||
61638- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
61639+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
61640+ capable_nolog(CAP_SYS_RESOURCE)) {
61641
61642 if (free_clusters >= (nclusters + dirty_clusters +
61643 resv_clusters))
61644diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
61645index a75fba6..8235fca 100644
61646--- a/fs/ext4/ext4.h
61647+++ b/fs/ext4/ext4.h
61648@@ -1274,19 +1274,19 @@ struct ext4_sb_info {
61649 unsigned long s_mb_last_start;
61650
61651 /* stats for buddy allocator */
61652- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
61653- atomic_t s_bal_success; /* we found long enough chunks */
61654- atomic_t s_bal_allocated; /* in blocks */
61655- atomic_t s_bal_ex_scanned; /* total extents scanned */
61656- atomic_t s_bal_goals; /* goal hits */
61657- atomic_t s_bal_breaks; /* too long searches */
61658- atomic_t s_bal_2orders; /* 2^order hits */
61659+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
61660+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
61661+ atomic_unchecked_t s_bal_allocated; /* in blocks */
61662+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
61663+ atomic_unchecked_t s_bal_goals; /* goal hits */
61664+ atomic_unchecked_t s_bal_breaks; /* too long searches */
61665+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
61666 spinlock_t s_bal_lock;
61667 unsigned long s_mb_buddies_generated;
61668 unsigned long long s_mb_generation_time;
61669- atomic_t s_mb_lost_chunks;
61670- atomic_t s_mb_preallocated;
61671- atomic_t s_mb_discarded;
61672+ atomic_unchecked_t s_mb_lost_chunks;
61673+ atomic_unchecked_t s_mb_preallocated;
61674+ atomic_unchecked_t s_mb_discarded;
61675 atomic_t s_lock_busy;
61676
61677 /* locality groups */
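
atomic_unchecked_t, used heavily from here on, is grsecurity's opt-out from PAX_REFCOUNT: atomic_t gains overflow detection (an overflowing increment traps and is reported), so counters that are pure statistics -- like the allocator stats re-typed above -- keep plain, wrap-allowed semantics via the *_unchecked helpers. A toy model of the two behaviours (saturation stands in for the kernel's trap-and-report path):

#include <limits.h>
#include <stdio.h>

typedef struct { int counter; } atomic_t;
typedef struct { int counter; } atomic_unchecked_t;

/* toy PAX_REFCOUNT: a checked increment refuses to overflow... */
static void atomic_inc(atomic_t *v)
{
	if (v->counter == INT_MAX) {
		fprintf(stderr, "refcount overflow: reporting and saturating\n");
		return;		/* the kernel traps, reports, pins the value */
	}
	v->counter++;
}

/* ...while the unchecked one wraps silently -- fine for statistics */
static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	v->counter = (int)((unsigned int)v->counter + 1u);	/* defined wrap */
}

int main(void)
{
	atomic_t ref = { INT_MAX };
	atomic_unchecked_t stat = { INT_MAX };

	atomic_inc(&ref);		/* caught */
	atomic_inc_unchecked(&stat);	/* wraps to INT_MIN, by design */
	printf("ref=%d stat=%d\n", ref.counter, stat.counter);
	return 0;
}
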
61678diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
61679index 8d1e602..abf497b 100644
61680--- a/fs/ext4/mballoc.c
61681+++ b/fs/ext4/mballoc.c
61682@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
61683 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
61684
61685 if (EXT4_SB(sb)->s_mb_stats)
61686- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
61687+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
61688
61689 break;
61690 }
61691@@ -2211,7 +2211,7 @@ repeat:
61692 ac->ac_status = AC_STATUS_CONTINUE;
61693 ac->ac_flags |= EXT4_MB_HINT_FIRST;
61694 cr = 3;
61695- atomic_inc(&sbi->s_mb_lost_chunks);
61696+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
61697 goto repeat;
61698 }
61699 }
61700@@ -2716,25 +2716,25 @@ int ext4_mb_release(struct super_block *sb)
61701 if (sbi->s_mb_stats) {
61702 ext4_msg(sb, KERN_INFO,
61703 "mballoc: %u blocks %u reqs (%u success)",
61704- atomic_read(&sbi->s_bal_allocated),
61705- atomic_read(&sbi->s_bal_reqs),
61706- atomic_read(&sbi->s_bal_success));
61707+ atomic_read_unchecked(&sbi->s_bal_allocated),
61708+ atomic_read_unchecked(&sbi->s_bal_reqs),
61709+ atomic_read_unchecked(&sbi->s_bal_success));
61710 ext4_msg(sb, KERN_INFO,
61711 "mballoc: %u extents scanned, %u goal hits, "
61712 "%u 2^N hits, %u breaks, %u lost",
61713- atomic_read(&sbi->s_bal_ex_scanned),
61714- atomic_read(&sbi->s_bal_goals),
61715- atomic_read(&sbi->s_bal_2orders),
61716- atomic_read(&sbi->s_bal_breaks),
61717- atomic_read(&sbi->s_mb_lost_chunks));
61718+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
61719+ atomic_read_unchecked(&sbi->s_bal_goals),
61720+ atomic_read_unchecked(&sbi->s_bal_2orders),
61721+ atomic_read_unchecked(&sbi->s_bal_breaks),
61722+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
61723 ext4_msg(sb, KERN_INFO,
61724 "mballoc: %lu generated and it took %Lu",
61725 sbi->s_mb_buddies_generated,
61726 sbi->s_mb_generation_time);
61727 ext4_msg(sb, KERN_INFO,
61728 "mballoc: %u preallocated, %u discarded",
61729- atomic_read(&sbi->s_mb_preallocated),
61730- atomic_read(&sbi->s_mb_discarded));
61731+ atomic_read_unchecked(&sbi->s_mb_preallocated),
61732+ atomic_read_unchecked(&sbi->s_mb_discarded));
61733 }
61734
61735 free_percpu(sbi->s_locality_groups);
61736@@ -3190,16 +3190,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
61737 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
61738
61739 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
61740- atomic_inc(&sbi->s_bal_reqs);
61741- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
61742+ atomic_inc_unchecked(&sbi->s_bal_reqs);
61743+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
61744 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
61745- atomic_inc(&sbi->s_bal_success);
61746- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
61747+ atomic_inc_unchecked(&sbi->s_bal_success);
61748+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
61749 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
61750 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
61751- atomic_inc(&sbi->s_bal_goals);
61752+ atomic_inc_unchecked(&sbi->s_bal_goals);
61753 if (ac->ac_found > sbi->s_mb_max_to_scan)
61754- atomic_inc(&sbi->s_bal_breaks);
61755+ atomic_inc_unchecked(&sbi->s_bal_breaks);
61756 }
61757
61758 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
61759@@ -3626,7 +3626,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
61760 trace_ext4_mb_new_inode_pa(ac, pa);
61761
61762 ext4_mb_use_inode_pa(ac, pa);
61763- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
61764+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
61765
61766 ei = EXT4_I(ac->ac_inode);
61767 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
61768@@ -3686,7 +3686,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
61769 trace_ext4_mb_new_group_pa(ac, pa);
61770
61771 ext4_mb_use_group_pa(ac, pa);
61772- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
61773+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
61774
61775 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
61776 lg = ac->ac_lg;
61777@@ -3775,7 +3775,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
61778 * from the bitmap and continue.
61779 */
61780 }
61781- atomic_add(free, &sbi->s_mb_discarded);
61782+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
61783
61784 return err;
61785 }
61786@@ -3793,7 +3793,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
61787 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
61788 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
61789 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
61790- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
61791+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
61792 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
61793
61794 return 0;
61795diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
61796index 8313ca3..8a37d08 100644
61797--- a/fs/ext4/mmp.c
61798+++ b/fs/ext4/mmp.c
61799@@ -111,7 +111,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
61800 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
61801 const char *function, unsigned int line, const char *msg)
61802 {
61803- __ext4_warning(sb, function, line, msg);
61804+ __ext4_warning(sb, function, line, "%s", msg);
61805 __ext4_warning(sb, function, line,
61806 "MMP failure info: last update time: %llu, last update "
61807 "node: %s, last update device: %s\n",
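
The mmp.c one-liner is a format-string fix: msg can contain '%' sequences, and passing it as the format would make the varargs printer chase nonexistent arguments, so it is demoted to data behind a constant "%s". The same pattern in plain printf:

#include <stdio.h>

int main(void)
{
	const char *msg = "device 100%s mounted";  /* hostile/accidental '%' */

	/* BAD:  printf(msg);  -- "%s" would read a bogus varargs pointer */
	printf("%s\n", msg);	/* GOOD: constant format, msg as data */
	return 0;
}
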
61808diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
61809index 8a8ec62..1b02de5 100644
61810--- a/fs/ext4/resize.c
61811+++ b/fs/ext4/resize.c
61812@@ -413,7 +413,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
61813
61814 ext4_debug("mark blocks [%llu/%u] used\n", block, count);
61815 for (count2 = count; count > 0; count -= count2, block += count2) {
61816- ext4_fsblk_t start;
61817+ ext4_fsblk_t start, diff;
61818 struct buffer_head *bh;
61819 ext4_group_t group;
61820 int err;
61821@@ -422,10 +422,6 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
61822 start = ext4_group_first_block_no(sb, group);
61823 group -= flex_gd->groups[0].group;
61824
61825- count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
61826- if (count2 > count)
61827- count2 = count;
61828-
61829 if (flex_gd->bg_flags[group] & EXT4_BG_BLOCK_UNINIT) {
61830 BUG_ON(flex_gd->count > 1);
61831 continue;
61832@@ -443,9 +439,15 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
61833 err = ext4_journal_get_write_access(handle, bh);
61834 if (err)
61835 return err;
61836+
61837+ diff = block - start;
61838+ count2 = EXT4_BLOCKS_PER_GROUP(sb) - diff;
61839+ if (count2 > count)
61840+ count2 = count;
61841+
61842 ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n", block,
61843- block - start, count2);
61844- ext4_set_bits(bh->b_data, block - start, count2);
61845+ diff, count2);
61846+ ext4_set_bits(bh->b_data, diff, count2);
61847
61848 err = ext4_handle_dirty_metadata(handle, NULL, bh);
61849 if (unlikely(err))
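
Besides hoisting block - start into the local diff, the resize.c hunk defers the count2 clamp until after ext4_journal_get_write_access(), so the per-group arithmetic is done once, next to its uses. The clamp itself is just "blocks left in this group, capped by blocks left to mark"; a standalone illustration with invented geometry:

#include <stdio.h>

int main(void)
{
	unsigned long long blocks_per_group = 32768;
	unsigned long long start = 65536;	/* first block of this group */
	unsigned long long block = 65600;	/* where marking begins */
	unsigned long long count = 100000;	/* blocks still to mark */

	unsigned long long diff = block - start;
	unsigned long long count2 = blocks_per_group - diff; /* room left */

	if (count2 > count)
		count2 = count;
	printf("mark %llu blocks at offset %llu within the group\n",
	       count2, diff);
	return 0;
}
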
61850diff --git a/fs/ext4/super.c b/fs/ext4/super.c
61851index fc29b2c..6c8b255 100644
61852--- a/fs/ext4/super.c
61853+++ b/fs/ext4/super.c
61854@@ -1252,7 +1252,7 @@ static ext4_fsblk_t get_sb_block(void **data)
61855 }
61856
61857 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
61858-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
61859+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
61860 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
61861
61862 #ifdef CONFIG_QUOTA
61863@@ -2440,7 +2440,7 @@ struct ext4_attr {
61864 int offset;
61865 int deprecated_val;
61866 } u;
61867-};
61868+} __do_const;
61869
61870 static int parse_strtoull(const char *buf,
61871 unsigned long long max, unsigned long long *value)
61872diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
61873index 1e09fc7..0400dd4 100644
61874--- a/fs/ext4/xattr.c
61875+++ b/fs/ext4/xattr.c
61876@@ -399,7 +399,7 @@ static int
61877 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
61878 char *buffer, size_t buffer_size)
61879 {
61880- size_t rest = buffer_size;
61881+ size_t rest = buffer_size, total_size = 0;
61882
61883 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
61884 const struct xattr_handler *handler =
61885@@ -416,9 +416,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
61886 buffer += size;
61887 }
61888 rest -= size;
61889+ total_size += size;
61890 }
61891 }
61892- return buffer_size - rest;
61893+ return total_size;
61894 }
61895
61896 static int
61897diff --git a/fs/fcntl.c b/fs/fcntl.c
61898index ee85cd4..9dd0d20 100644
61899--- a/fs/fcntl.c
61900+++ b/fs/fcntl.c
61901@@ -102,6 +102,10 @@ void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
61902 int force)
61903 {
61904 security_file_set_fowner(filp);
61905+ if (gr_handle_chroot_fowner(pid, type))
61906+ return;
61907+ if (gr_check_protected_task_fowner(pid, type))
61908+ return;
61909 f_modown(filp, pid, type, force);
61910 }
61911 EXPORT_SYMBOL(__f_setown);
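
The fcntl.c hook is a guard-clause veto: each grsecurity predicate can silently refuse the ownership change before f_modown() commits it, mirroring how LSM hooks gate the operation just above. A sketch of the pattern (both predicates below are made-up stand-ins for gr_handle_chroot_fowner() and gr_check_protected_task_fowner()):

#include <stdbool.h>
#include <stdio.h>

static bool crosses_chroot(int pid)    { return pid < 0; }
static bool target_protected(int pid)  { return pid == 1; }

static int owner = 0;

/* guard-clause veto: any policy hook can silently abort the change */
static void set_owner(int pid)
{
	if (crosses_chroot(pid))
		return;
	if (target_protected(pid))
		return;
	owner = pid;			/* only reached when policy agrees */
}

int main(void)
{
	set_owner(1);			/* vetoed: protected target */
	printf("owner=%d\n", owner);	/* still 0 */
	set_owner(1234);
	printf("owner=%d\n", owner);	/* 1234 */
	return 0;
}
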
61912diff --git a/fs/fhandle.c b/fs/fhandle.c
61913index 999ff5c..2281df9 100644
61914--- a/fs/fhandle.c
61915+++ b/fs/fhandle.c
61916@@ -8,6 +8,7 @@
61917 #include <linux/fs_struct.h>
61918 #include <linux/fsnotify.h>
61919 #include <linux/personality.h>
61920+#include <linux/grsecurity.h>
61921 #include <asm/uaccess.h>
61922 #include "internal.h"
61923 #include "mount.h"
61924@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
61925 } else
61926 retval = 0;
61927 /* copy the mount id */
61928- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
61929- sizeof(*mnt_id)) ||
61930+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
61931 copy_to_user(ufh, handle,
61932 sizeof(struct file_handle) + handle_bytes))
61933 retval = -EFAULT;
61934@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
61935 * the directory. Ideally we would like CAP_DAC_SEARCH.
61936 * But we don't have that
61937 */
61938- if (!capable(CAP_DAC_READ_SEARCH)) {
61939+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
61940 retval = -EPERM;
61941 goto out_err;
61942 }
61943@@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
61944 goto out_err;
61945 }
61946 /* copy the full handle */
61947- if (copy_from_user(handle, ufh,
61948- sizeof(struct file_handle) +
61949+ *handle = f_handle;
61950+ if (copy_from_user(&handle->f_handle,
61951+ &ufh->f_handle,
61952 f_handle.handle_bytes)) {
61953 retval = -EFAULT;
61954 goto out_handle;
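
The fhandle.c change closes a double-fetch window: handle_bytes was validated from a first copy_from_user() into f_handle, but the old code then re-read the entire struct -- length field included -- from userspace, which a racing thread could have rewritten after the check. The fix seeds the kernel copy from the already-validated header and fetches only the payload. A single-threaded sketch of the fixed pattern (copy_from_user_sim() stands in for copy_from_user(); little-endian layout is assumed in the toy buffer):

#include <stdio.h>
#include <string.h>

struct file_handle {
	unsigned int handle_bytes;
	unsigned char f_handle[];
};

/* toy "userspace" buffer a racing thread could rewrite between fetches */
static unsigned char user_buf[4 + 8] = { 8, 0, 0, 0 };

static int copy_from_user_sim(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);		/* stands in for copy_from_user() */
	return 0;
}

int main(void)
{
	struct file_handle f_handle;
	union {
		struct file_handle h;
		unsigned char raw[sizeof(struct file_handle) + 8];
	} k;
	struct file_handle *handle = &k.h;

	/* first fetch: read and validate the header */
	copy_from_user_sim(&f_handle, user_buf, sizeof(f_handle));
	if (f_handle.handle_bytes > 8)
		return 1;

	/* the fix: reuse the validated header instead of re-reading it... */
	*handle = f_handle;
	/* ...and fetch only the payload, sized by the *validated* length */
	copy_from_user_sim(handle->f_handle, user_buf + sizeof(f_handle),
			   f_handle.handle_bytes);
	printf("handle_bytes=%u\n", handle->handle_bytes);	/* 8 */
	return 0;
}
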
61955diff --git a/fs/file.c b/fs/file.c
61956index ee738ea..f6c15629 100644
61957--- a/fs/file.c
61958+++ b/fs/file.c
61959@@ -16,6 +16,7 @@
61960 #include <linux/slab.h>
61961 #include <linux/vmalloc.h>
61962 #include <linux/file.h>
61963+#include <linux/security.h>
61964 #include <linux/fdtable.h>
61965 #include <linux/bitops.h>
61966 #include <linux/interrupt.h>
61967@@ -139,7 +140,7 @@ out:
61968 * Return <0 error code on error; 1 on successful completion.
61969 * The files->file_lock should be held on entry, and will be held on exit.
61970 */
61971-static int expand_fdtable(struct files_struct *files, int nr)
61972+static int expand_fdtable(struct files_struct *files, unsigned int nr)
61973 __releases(files->file_lock)
61974 __acquires(files->file_lock)
61975 {
61976@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
61977 * expanded and execution may have blocked.
61978 * The files->file_lock should be held on entry, and will be held on exit.
61979 */
61980-static int expand_files(struct files_struct *files, int nr)
61981+static int expand_files(struct files_struct *files, unsigned int nr)
61982 {
61983 struct fdtable *fdt;
61984
61985@@ -800,6 +801,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
61986 if (!file)
61987 return __close_fd(files, fd);
61988
61989+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
61990 if (fd >= rlimit(RLIMIT_NOFILE))
61991 return -EBADF;
61992
61993@@ -826,6 +828,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
61994 if (unlikely(oldfd == newfd))
61995 return -EINVAL;
61996
61997+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
61998 if (newfd >= rlimit(RLIMIT_NOFILE))
61999 return -EBADF;
62000
62001@@ -881,6 +884,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
62002 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
62003 {
62004 int err;
62005+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
62006 if (from >= rlimit(RLIMIT_NOFILE))
62007 return -EINVAL;
62008 err = alloc_fd(from, flags);
62009diff --git a/fs/filesystems.c b/fs/filesystems.c
62010index 5797d45..7d7d79a 100644
62011--- a/fs/filesystems.c
62012+++ b/fs/filesystems.c
62013@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
62014 int len = dot ? dot - name : strlen(name);
62015
62016 fs = __get_fs_type(name, len);
62017+#ifdef CONFIG_GRKERNSEC_MODHARDEN
62018+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
62019+#else
62020 if (!fs && (request_module("fs-%.*s", len, name) == 0))
62021+#endif
62022 fs = __get_fs_type(name, len);
62023
62024 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
62025diff --git a/fs/fs_struct.c b/fs/fs_struct.c
62026index 7dca743..2f2786d 100644
62027--- a/fs/fs_struct.c
62028+++ b/fs/fs_struct.c
62029@@ -4,6 +4,7 @@
62030 #include <linux/path.h>
62031 #include <linux/slab.h>
62032 #include <linux/fs_struct.h>
62033+#include <linux/grsecurity.h>
62034 #include "internal.h"
62035
62036 /*
62037@@ -15,14 +16,18 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
62038 struct path old_root;
62039
62040 path_get(path);
62041+ gr_inc_chroot_refcnts(path->dentry, path->mnt);
62042 spin_lock(&fs->lock);
62043 write_seqcount_begin(&fs->seq);
62044 old_root = fs->root;
62045 fs->root = *path;
62046+ gr_set_chroot_entries(current, path);
62047 write_seqcount_end(&fs->seq);
62048 spin_unlock(&fs->lock);
62049- if (old_root.dentry)
62050+ if (old_root.dentry) {
62051+ gr_dec_chroot_refcnts(old_root.dentry, old_root.mnt);
62052 path_put(&old_root);
62053+ }
62054 }
62055
62056 /*
62057@@ -67,6 +72,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
62058 int hits = 0;
62059 spin_lock(&fs->lock);
62060 write_seqcount_begin(&fs->seq);
62061+ /* this root replacement is only done by pivot_root,
62062+ leave grsec's chroot tagging alone for this task
62063+ so that a pivoted root isn't treated as a chroot
62064+ */
62065 hits += replace_path(&fs->root, old_root, new_root);
62066 hits += replace_path(&fs->pwd, old_root, new_root);
62067 write_seqcount_end(&fs->seq);
62068@@ -85,6 +94,7 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
62069
62070 void free_fs_struct(struct fs_struct *fs)
62071 {
62072+ gr_dec_chroot_refcnts(fs->root.dentry, fs->root.mnt);
62073 path_put(&fs->root);
62074 path_put(&fs->pwd);
62075 kmem_cache_free(fs_cachep, fs);
62076@@ -99,7 +109,8 @@ void exit_fs(struct task_struct *tsk)
62077 task_lock(tsk);
62078 spin_lock(&fs->lock);
62079 tsk->fs = NULL;
62080- kill = !--fs->users;
62081+ gr_clear_chroot_entries(tsk);
62082+ kill = !atomic_dec_return(&fs->users);
62083 spin_unlock(&fs->lock);
62084 task_unlock(tsk);
62085 if (kill)
62086@@ -112,7 +123,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
62087 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
62088 /* We don't need to lock fs - think why ;-) */
62089 if (fs) {
62090- fs->users = 1;
62091+ atomic_set(&fs->users, 1);
62092 fs->in_exec = 0;
62093 spin_lock_init(&fs->lock);
62094 seqcount_init(&fs->seq);
62095@@ -121,6 +132,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
62096 spin_lock(&old->lock);
62097 fs->root = old->root;
62098 path_get(&fs->root);
62099+ /* instead of calling gr_set_chroot_entries here,
62100+ we call it from every caller of this function
62101+ */
62102 fs->pwd = old->pwd;
62103 path_get(&fs->pwd);
62104 spin_unlock(&old->lock);
62105@@ -139,8 +153,9 @@ int unshare_fs_struct(void)
62106
62107 task_lock(current);
62108 spin_lock(&fs->lock);
62109- kill = !--fs->users;
62110+ kill = !atomic_dec_return(&fs->users);
62111 current->fs = new_fs;
62112+ gr_set_chroot_entries(current, &new_fs->root);
62113 spin_unlock(&fs->lock);
62114 task_unlock(current);
62115
62116@@ -153,13 +168,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
62117
62118 int current_umask(void)
62119 {
62120- return current->fs->umask;
62121+ return current->fs->umask | gr_acl_umask();
62122 }
62123 EXPORT_SYMBOL(current_umask);
62124
62125 /* to be mentioned only in INIT_TASK */
62126 struct fs_struct init_fs = {
62127- .users = 1,
62128+ .users = ATOMIC_INIT(1),
62129 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
62130 .seq = SEQCNT_ZERO(init_fs.seq),
62131 .umask = 0022,
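
fs_struct.users turning from a bare int into an atomic counter (with kill = !atomic_dec_return(...)) lets the added grsecurity hooks touch the refcount without always holding fs->lock. The drop-to-last-reference pattern this enables, in portable C11 atomics:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fs_struct_like {
	atomic_int users;
	/* ... root/pwd paths, lock, umask ... */
};

static void put_fs(struct fs_struct_like *fs)
{
	/* atomic_dec_return(&fs->users) == 0  <=>  fetch_sub returns 1 */
	if (atomic_fetch_sub(&fs->users, 1) == 1) {
		puts("last user gone, freeing");
		free(fs);
	}
}

int main(void)
{
	struct fs_struct_like *fs = malloc(sizeof(*fs));

	atomic_init(&fs->users, 2);
	put_fs(fs);	/* still referenced */
	put_fs(fs);	/* frees */
	return 0;
}
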
62132diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
62133index 89acec7..a575262 100644
62134--- a/fs/fscache/cookie.c
62135+++ b/fs/fscache/cookie.c
62136@@ -19,7 +19,7 @@
62137
62138 struct kmem_cache *fscache_cookie_jar;
62139
62140-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
62141+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
62142
62143 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
62144 static int fscache_alloc_object(struct fscache_cache *cache,
62145@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
62146 parent ? (char *) parent->def->name : "<no-parent>",
62147 def->name, netfs_data, enable);
62148
62149- fscache_stat(&fscache_n_acquires);
62150+ fscache_stat_unchecked(&fscache_n_acquires);
62151
62152 /* if there's no parent cookie, then we don't create one here either */
62153 if (!parent) {
62154- fscache_stat(&fscache_n_acquires_null);
62155+ fscache_stat_unchecked(&fscache_n_acquires_null);
62156 _leave(" [no parent]");
62157 return NULL;
62158 }
62159@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62160 /* allocate and initialise a cookie */
62161 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
62162 if (!cookie) {
62163- fscache_stat(&fscache_n_acquires_oom);
62164+ fscache_stat_unchecked(&fscache_n_acquires_oom);
62165 _leave(" [ENOMEM]");
62166 return NULL;
62167 }
62168@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
62169
62170 switch (cookie->def->type) {
62171 case FSCACHE_COOKIE_TYPE_INDEX:
62172- fscache_stat(&fscache_n_cookie_index);
62173+ fscache_stat_unchecked(&fscache_n_cookie_index);
62174 break;
62175 case FSCACHE_COOKIE_TYPE_DATAFILE:
62176- fscache_stat(&fscache_n_cookie_data);
62177+ fscache_stat_unchecked(&fscache_n_cookie_data);
62178 break;
62179 default:
62180- fscache_stat(&fscache_n_cookie_special);
62181+ fscache_stat_unchecked(&fscache_n_cookie_special);
62182 break;
62183 }
62184
62185@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62186 } else {
62187 atomic_dec(&parent->n_children);
62188 __fscache_cookie_put(cookie);
62189- fscache_stat(&fscache_n_acquires_nobufs);
62190+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
62191 _leave(" = NULL");
62192 return NULL;
62193 }
62194@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62195 }
62196 }
62197
62198- fscache_stat(&fscache_n_acquires_ok);
62199+ fscache_stat_unchecked(&fscache_n_acquires_ok);
62200 _leave(" = %p", cookie);
62201 return cookie;
62202 }
62203@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
62204 cache = fscache_select_cache_for_object(cookie->parent);
62205 if (!cache) {
62206 up_read(&fscache_addremove_sem);
62207- fscache_stat(&fscache_n_acquires_no_cache);
62208+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
62209 _leave(" = -ENOMEDIUM [no cache]");
62210 return -ENOMEDIUM;
62211 }
62212@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
62213 object = cache->ops->alloc_object(cache, cookie);
62214 fscache_stat_d(&fscache_n_cop_alloc_object);
62215 if (IS_ERR(object)) {
62216- fscache_stat(&fscache_n_object_no_alloc);
62217+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
62218 ret = PTR_ERR(object);
62219 goto error;
62220 }
62221
62222- fscache_stat(&fscache_n_object_alloc);
62223+ fscache_stat_unchecked(&fscache_n_object_alloc);
62224
62225- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
62226+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
62227
62228 _debug("ALLOC OBJ%x: %s {%lx}",
62229 object->debug_id, cookie->def->name, object->events);
62230@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
62231
62232 _enter("{%s}", cookie->def->name);
62233
62234- fscache_stat(&fscache_n_invalidates);
62235+ fscache_stat_unchecked(&fscache_n_invalidates);
62236
62237 /* Only permit invalidation of data files. Invalidating an index will
62238 * require the caller to release all its attachments to the tree rooted
62239@@ -476,10 +476,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
62240 {
62241 struct fscache_object *object;
62242
62243- fscache_stat(&fscache_n_updates);
62244+ fscache_stat_unchecked(&fscache_n_updates);
62245
62246 if (!cookie) {
62247- fscache_stat(&fscache_n_updates_null);
62248+ fscache_stat_unchecked(&fscache_n_updates_null);
62249 _leave(" [no cookie]");
62250 return;
62251 }
62252@@ -580,12 +580,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
62253 */
62254 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
62255 {
62256- fscache_stat(&fscache_n_relinquishes);
62257+ fscache_stat_unchecked(&fscache_n_relinquishes);
62258 if (retire)
62259- fscache_stat(&fscache_n_relinquishes_retire);
62260+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
62261
62262 if (!cookie) {
62263- fscache_stat(&fscache_n_relinquishes_null);
62264+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
62265 _leave(" [no cookie]");
62266 return;
62267 }
62268@@ -686,7 +686,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
62269 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
62270 goto inconsistent;
62271
62272- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
62273+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
62274
62275 __fscache_use_cookie(cookie);
62276 if (fscache_submit_op(object, op) < 0)
62277diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
62278index 7872a62..d91b19f 100644
62279--- a/fs/fscache/internal.h
62280+++ b/fs/fscache/internal.h
62281@@ -137,8 +137,8 @@ extern void fscache_operation_gc(struct work_struct *);
62282 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
62283 extern int fscache_wait_for_operation_activation(struct fscache_object *,
62284 struct fscache_operation *,
62285- atomic_t *,
62286- atomic_t *,
62287+ atomic_unchecked_t *,
62288+ atomic_unchecked_t *,
62289 void (*)(struct fscache_operation *));
62290 extern void fscache_invalidate_writes(struct fscache_cookie *);
62291
62292@@ -157,101 +157,101 @@ extern void fscache_proc_cleanup(void);
62293 * stats.c
62294 */
62295 #ifdef CONFIG_FSCACHE_STATS
62296-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62297-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62298+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62299+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62300
62301-extern atomic_t fscache_n_op_pend;
62302-extern atomic_t fscache_n_op_run;
62303-extern atomic_t fscache_n_op_enqueue;
62304-extern atomic_t fscache_n_op_deferred_release;
62305-extern atomic_t fscache_n_op_release;
62306-extern atomic_t fscache_n_op_gc;
62307-extern atomic_t fscache_n_op_cancelled;
62308-extern atomic_t fscache_n_op_rejected;
62309+extern atomic_unchecked_t fscache_n_op_pend;
62310+extern atomic_unchecked_t fscache_n_op_run;
62311+extern atomic_unchecked_t fscache_n_op_enqueue;
62312+extern atomic_unchecked_t fscache_n_op_deferred_release;
62313+extern atomic_unchecked_t fscache_n_op_release;
62314+extern atomic_unchecked_t fscache_n_op_gc;
62315+extern atomic_unchecked_t fscache_n_op_cancelled;
62316+extern atomic_unchecked_t fscache_n_op_rejected;
62317
62318-extern atomic_t fscache_n_attr_changed;
62319-extern atomic_t fscache_n_attr_changed_ok;
62320-extern atomic_t fscache_n_attr_changed_nobufs;
62321-extern atomic_t fscache_n_attr_changed_nomem;
62322-extern atomic_t fscache_n_attr_changed_calls;
62323+extern atomic_unchecked_t fscache_n_attr_changed;
62324+extern atomic_unchecked_t fscache_n_attr_changed_ok;
62325+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
62326+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
62327+extern atomic_unchecked_t fscache_n_attr_changed_calls;
62328
62329-extern atomic_t fscache_n_allocs;
62330-extern atomic_t fscache_n_allocs_ok;
62331-extern atomic_t fscache_n_allocs_wait;
62332-extern atomic_t fscache_n_allocs_nobufs;
62333-extern atomic_t fscache_n_allocs_intr;
62334-extern atomic_t fscache_n_allocs_object_dead;
62335-extern atomic_t fscache_n_alloc_ops;
62336-extern atomic_t fscache_n_alloc_op_waits;
62337+extern atomic_unchecked_t fscache_n_allocs;
62338+extern atomic_unchecked_t fscache_n_allocs_ok;
62339+extern atomic_unchecked_t fscache_n_allocs_wait;
62340+extern atomic_unchecked_t fscache_n_allocs_nobufs;
62341+extern atomic_unchecked_t fscache_n_allocs_intr;
62342+extern atomic_unchecked_t fscache_n_allocs_object_dead;
62343+extern atomic_unchecked_t fscache_n_alloc_ops;
62344+extern atomic_unchecked_t fscache_n_alloc_op_waits;
62345
62346-extern atomic_t fscache_n_retrievals;
62347-extern atomic_t fscache_n_retrievals_ok;
62348-extern atomic_t fscache_n_retrievals_wait;
62349-extern atomic_t fscache_n_retrievals_nodata;
62350-extern atomic_t fscache_n_retrievals_nobufs;
62351-extern atomic_t fscache_n_retrievals_intr;
62352-extern atomic_t fscache_n_retrievals_nomem;
62353-extern atomic_t fscache_n_retrievals_object_dead;
62354-extern atomic_t fscache_n_retrieval_ops;
62355-extern atomic_t fscache_n_retrieval_op_waits;
62356+extern atomic_unchecked_t fscache_n_retrievals;
62357+extern atomic_unchecked_t fscache_n_retrievals_ok;
62358+extern atomic_unchecked_t fscache_n_retrievals_wait;
62359+extern atomic_unchecked_t fscache_n_retrievals_nodata;
62360+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
62361+extern atomic_unchecked_t fscache_n_retrievals_intr;
62362+extern atomic_unchecked_t fscache_n_retrievals_nomem;
62363+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
62364+extern atomic_unchecked_t fscache_n_retrieval_ops;
62365+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
62366
62367-extern atomic_t fscache_n_stores;
62368-extern atomic_t fscache_n_stores_ok;
62369-extern atomic_t fscache_n_stores_again;
62370-extern atomic_t fscache_n_stores_nobufs;
62371-extern atomic_t fscache_n_stores_oom;
62372-extern atomic_t fscache_n_store_ops;
62373-extern atomic_t fscache_n_store_calls;
62374-extern atomic_t fscache_n_store_pages;
62375-extern atomic_t fscache_n_store_radix_deletes;
62376-extern atomic_t fscache_n_store_pages_over_limit;
62377+extern atomic_unchecked_t fscache_n_stores;
62378+extern atomic_unchecked_t fscache_n_stores_ok;
62379+extern atomic_unchecked_t fscache_n_stores_again;
62380+extern atomic_unchecked_t fscache_n_stores_nobufs;
62381+extern atomic_unchecked_t fscache_n_stores_oom;
62382+extern atomic_unchecked_t fscache_n_store_ops;
62383+extern atomic_unchecked_t fscache_n_store_calls;
62384+extern atomic_unchecked_t fscache_n_store_pages;
62385+extern atomic_unchecked_t fscache_n_store_radix_deletes;
62386+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
62387
62388-extern atomic_t fscache_n_store_vmscan_not_storing;
62389-extern atomic_t fscache_n_store_vmscan_gone;
62390-extern atomic_t fscache_n_store_vmscan_busy;
62391-extern atomic_t fscache_n_store_vmscan_cancelled;
62392-extern atomic_t fscache_n_store_vmscan_wait;
62393+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
62394+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
62395+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
62396+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
62397+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
62398
62399-extern atomic_t fscache_n_marks;
62400-extern atomic_t fscache_n_uncaches;
62401+extern atomic_unchecked_t fscache_n_marks;
62402+extern atomic_unchecked_t fscache_n_uncaches;
62403
62404-extern atomic_t fscache_n_acquires;
62405-extern atomic_t fscache_n_acquires_null;
62406-extern atomic_t fscache_n_acquires_no_cache;
62407-extern atomic_t fscache_n_acquires_ok;
62408-extern atomic_t fscache_n_acquires_nobufs;
62409-extern atomic_t fscache_n_acquires_oom;
62410+extern atomic_unchecked_t fscache_n_acquires;
62411+extern atomic_unchecked_t fscache_n_acquires_null;
62412+extern atomic_unchecked_t fscache_n_acquires_no_cache;
62413+extern atomic_unchecked_t fscache_n_acquires_ok;
62414+extern atomic_unchecked_t fscache_n_acquires_nobufs;
62415+extern atomic_unchecked_t fscache_n_acquires_oom;
62416
62417-extern atomic_t fscache_n_invalidates;
62418-extern atomic_t fscache_n_invalidates_run;
62419+extern atomic_unchecked_t fscache_n_invalidates;
62420+extern atomic_unchecked_t fscache_n_invalidates_run;
62421
62422-extern atomic_t fscache_n_updates;
62423-extern atomic_t fscache_n_updates_null;
62424-extern atomic_t fscache_n_updates_run;
62425+extern atomic_unchecked_t fscache_n_updates;
62426+extern atomic_unchecked_t fscache_n_updates_null;
62427+extern atomic_unchecked_t fscache_n_updates_run;
62428
62429-extern atomic_t fscache_n_relinquishes;
62430-extern atomic_t fscache_n_relinquishes_null;
62431-extern atomic_t fscache_n_relinquishes_waitcrt;
62432-extern atomic_t fscache_n_relinquishes_retire;
62433+extern atomic_unchecked_t fscache_n_relinquishes;
62434+extern atomic_unchecked_t fscache_n_relinquishes_null;
62435+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
62436+extern atomic_unchecked_t fscache_n_relinquishes_retire;
62437
62438-extern atomic_t fscache_n_cookie_index;
62439-extern atomic_t fscache_n_cookie_data;
62440-extern atomic_t fscache_n_cookie_special;
62441+extern atomic_unchecked_t fscache_n_cookie_index;
62442+extern atomic_unchecked_t fscache_n_cookie_data;
62443+extern atomic_unchecked_t fscache_n_cookie_special;
62444
62445-extern atomic_t fscache_n_object_alloc;
62446-extern atomic_t fscache_n_object_no_alloc;
62447-extern atomic_t fscache_n_object_lookups;
62448-extern atomic_t fscache_n_object_lookups_negative;
62449-extern atomic_t fscache_n_object_lookups_positive;
62450-extern atomic_t fscache_n_object_lookups_timed_out;
62451-extern atomic_t fscache_n_object_created;
62452-extern atomic_t fscache_n_object_avail;
62453-extern atomic_t fscache_n_object_dead;
62454+extern atomic_unchecked_t fscache_n_object_alloc;
62455+extern atomic_unchecked_t fscache_n_object_no_alloc;
62456+extern atomic_unchecked_t fscache_n_object_lookups;
62457+extern atomic_unchecked_t fscache_n_object_lookups_negative;
62458+extern atomic_unchecked_t fscache_n_object_lookups_positive;
62459+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
62460+extern atomic_unchecked_t fscache_n_object_created;
62461+extern atomic_unchecked_t fscache_n_object_avail;
62462+extern atomic_unchecked_t fscache_n_object_dead;
62463
62464-extern atomic_t fscache_n_checkaux_none;
62465-extern atomic_t fscache_n_checkaux_okay;
62466-extern atomic_t fscache_n_checkaux_update;
62467-extern atomic_t fscache_n_checkaux_obsolete;
62468+extern atomic_unchecked_t fscache_n_checkaux_none;
62469+extern atomic_unchecked_t fscache_n_checkaux_okay;
62470+extern atomic_unchecked_t fscache_n_checkaux_update;
62471+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
62472
62473 extern atomic_t fscache_n_cop_alloc_object;
62474 extern atomic_t fscache_n_cop_lookup_object;
62475@@ -276,6 +276,11 @@ static inline void fscache_stat(atomic_t *stat)
62476 atomic_inc(stat);
62477 }
62478
62479+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
62480+{
62481+ atomic_inc_unchecked(stat);
62482+}
62483+
62484 static inline void fscache_stat_d(atomic_t *stat)
62485 {
62486 atomic_dec(stat);
62487@@ -288,6 +293,7 @@ extern const struct file_operations fscache_stats_fops;
62488
62489 #define __fscache_stat(stat) (NULL)
62490 #define fscache_stat(stat) do {} while (0)
62491+#define fscache_stat_unchecked(stat) do {} while (0)
62492 #define fscache_stat_d(stat) do {} while (0)
62493 #endif
62494
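
The internal.h hunk keeps the stats machinery's two-build discipline: with CONFIG_FSCACHE_STATS unset, every fscache_stat* macro must compile to nothing, so the new fscache_stat_unchecked() gets the same empty do {} while (0) fallback and call sites stay ifdef-free. The pattern in miniature (flip FSCACHE_STATS to 0 and the call site still compiles):

#include <stdio.h>

#define FSCACHE_STATS 1

#if FSCACHE_STATS
typedef struct { unsigned int counter; } atomic_unchecked_t;
static atomic_unchecked_t n_acquires;
# define fscache_stat_unchecked(stat) ((stat)->counter++)
#else
# define fscache_stat_unchecked(stat) do {} while (0)
#endif

int main(void)
{
	fscache_stat_unchecked(&n_acquires);	/* compiles either way */
#if FSCACHE_STATS
	printf("acquires=%u\n", n_acquires.counter);
#endif
	return 0;
}
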
62495diff --git a/fs/fscache/object.c b/fs/fscache/object.c
62496index da032da..0076ce7 100644
62497--- a/fs/fscache/object.c
62498+++ b/fs/fscache/object.c
62499@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62500 _debug("LOOKUP \"%s\" in \"%s\"",
62501 cookie->def->name, object->cache->tag->name);
62502
62503- fscache_stat(&fscache_n_object_lookups);
62504+ fscache_stat_unchecked(&fscache_n_object_lookups);
62505 fscache_stat(&fscache_n_cop_lookup_object);
62506 ret = object->cache->ops->lookup_object(object);
62507 fscache_stat_d(&fscache_n_cop_lookup_object);
62508@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62509 if (ret == -ETIMEDOUT) {
62510 /* probably stuck behind another object, so move this one to
62511 * the back of the queue */
62512- fscache_stat(&fscache_n_object_lookups_timed_out);
62513+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
62514 _leave(" [timeout]");
62515 return NO_TRANSIT;
62516 }
62517@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
62518 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
62519
62520 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62521- fscache_stat(&fscache_n_object_lookups_negative);
62522+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
62523
62524 /* Allow write requests to begin stacking up and read requests to begin
62525 * returning ENODATA.
62526@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
62527 /* if we were still looking up, then we must have a positive lookup
62528 * result, in which case there may be data available */
62529 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62530- fscache_stat(&fscache_n_object_lookups_positive);
62531+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
62532
62533 /* We do (presumably) have data */
62534 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
62535@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
62536 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
62537 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
62538 } else {
62539- fscache_stat(&fscache_n_object_created);
62540+ fscache_stat_unchecked(&fscache_n_object_created);
62541 }
62542
62543 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
62544@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
62545 fscache_stat_d(&fscache_n_cop_lookup_complete);
62546
62547 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
62548- fscache_stat(&fscache_n_object_avail);
62549+ fscache_stat_unchecked(&fscache_n_object_avail);
62550
62551 _leave("");
62552 return transit_to(JUMPSTART_DEPS);
62553@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
62554
62555 /* this just shifts the object release to the work processor */
62556 fscache_put_object(object);
62557- fscache_stat(&fscache_n_object_dead);
62558+ fscache_stat_unchecked(&fscache_n_object_dead);
62559
62560 _leave("");
62561 return transit_to(OBJECT_DEAD);
62562@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62563 enum fscache_checkaux result;
62564
62565 if (!object->cookie->def->check_aux) {
62566- fscache_stat(&fscache_n_checkaux_none);
62567+ fscache_stat_unchecked(&fscache_n_checkaux_none);
62568 return FSCACHE_CHECKAUX_OKAY;
62569 }
62570
62571@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62572 switch (result) {
62573 /* entry okay as is */
62574 case FSCACHE_CHECKAUX_OKAY:
62575- fscache_stat(&fscache_n_checkaux_okay);
62576+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
62577 break;
62578
62579 /* entry requires update */
62580 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
62581- fscache_stat(&fscache_n_checkaux_update);
62582+ fscache_stat_unchecked(&fscache_n_checkaux_update);
62583 break;
62584
62585 /* entry requires deletion */
62586 case FSCACHE_CHECKAUX_OBSOLETE:
62587- fscache_stat(&fscache_n_checkaux_obsolete);
62588+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
62589 break;
62590
62591 default:
62592@@ -993,7 +993,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
62593 {
62594 const struct fscache_state *s;
62595
62596- fscache_stat(&fscache_n_invalidates_run);
62597+ fscache_stat_unchecked(&fscache_n_invalidates_run);
62598 fscache_stat(&fscache_n_cop_invalidate_object);
62599 s = _fscache_invalidate_object(object, event);
62600 fscache_stat_d(&fscache_n_cop_invalidate_object);
62601@@ -1008,7 +1008,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
62602 {
62603 _enter("{OBJ%x},%d", object->debug_id, event);
62604
62605- fscache_stat(&fscache_n_updates_run);
62606+ fscache_stat_unchecked(&fscache_n_updates_run);
62607 fscache_stat(&fscache_n_cop_update_object);
62608 object->cache->ops->update_object(object);
62609 fscache_stat_d(&fscache_n_cop_update_object);
62610diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
62611index e7b87a0..a85d47a 100644
62612--- a/fs/fscache/operation.c
62613+++ b/fs/fscache/operation.c
62614@@ -17,7 +17,7 @@
62615 #include <linux/slab.h>
62616 #include "internal.h"
62617
62618-atomic_t fscache_op_debug_id;
62619+atomic_unchecked_t fscache_op_debug_id;
62620 EXPORT_SYMBOL(fscache_op_debug_id);
62621
62622 /**
62623@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
62624 ASSERTCMP(atomic_read(&op->usage), >, 0);
62625 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
62626
62627- fscache_stat(&fscache_n_op_enqueue);
62628+ fscache_stat_unchecked(&fscache_n_op_enqueue);
62629 switch (op->flags & FSCACHE_OP_TYPE) {
62630 case FSCACHE_OP_ASYNC:
62631 _debug("queue async");
62632@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
62633 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
62634 if (op->processor)
62635 fscache_enqueue_operation(op);
62636- fscache_stat(&fscache_n_op_run);
62637+ fscache_stat_unchecked(&fscache_n_op_run);
62638 }
62639
62640 /*
62641@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62642 if (object->n_in_progress > 0) {
62643 atomic_inc(&op->usage);
62644 list_add_tail(&op->pend_link, &object->pending_ops);
62645- fscache_stat(&fscache_n_op_pend);
62646+ fscache_stat_unchecked(&fscache_n_op_pend);
62647 } else if (!list_empty(&object->pending_ops)) {
62648 atomic_inc(&op->usage);
62649 list_add_tail(&op->pend_link, &object->pending_ops);
62650- fscache_stat(&fscache_n_op_pend);
62651+ fscache_stat_unchecked(&fscache_n_op_pend);
62652 fscache_start_operations(object);
62653 } else {
62654 ASSERTCMP(object->n_in_progress, ==, 0);
62655@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62656 object->n_exclusive++; /* reads and writes must wait */
62657 atomic_inc(&op->usage);
62658 list_add_tail(&op->pend_link, &object->pending_ops);
62659- fscache_stat(&fscache_n_op_pend);
62660+ fscache_stat_unchecked(&fscache_n_op_pend);
62661 ret = 0;
62662 } else {
62663 /* If we're in any other state, there must have been an I/O
62664@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
62665 if (object->n_exclusive > 0) {
62666 atomic_inc(&op->usage);
62667 list_add_tail(&op->pend_link, &object->pending_ops);
62668- fscache_stat(&fscache_n_op_pend);
62669+ fscache_stat_unchecked(&fscache_n_op_pend);
62670 } else if (!list_empty(&object->pending_ops)) {
62671 atomic_inc(&op->usage);
62672 list_add_tail(&op->pend_link, &object->pending_ops);
62673- fscache_stat(&fscache_n_op_pend);
62674+ fscache_stat_unchecked(&fscache_n_op_pend);
62675 fscache_start_operations(object);
62676 } else {
62677 ASSERTCMP(object->n_exclusive, ==, 0);
62678@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
62679 object->n_ops++;
62680 atomic_inc(&op->usage);
62681 list_add_tail(&op->pend_link, &object->pending_ops);
62682- fscache_stat(&fscache_n_op_pend);
62683+ fscache_stat_unchecked(&fscache_n_op_pend);
62684 ret = 0;
62685 } else if (fscache_object_is_dying(object)) {
62686- fscache_stat(&fscache_n_op_rejected);
62687+ fscache_stat_unchecked(&fscache_n_op_rejected);
62688 op->state = FSCACHE_OP_ST_CANCELLED;
62689 ret = -ENOBUFS;
62690 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
62691@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
62692 ret = -EBUSY;
62693 if (op->state == FSCACHE_OP_ST_PENDING) {
62694 ASSERT(!list_empty(&op->pend_link));
62695- fscache_stat(&fscache_n_op_cancelled);
62696+ fscache_stat_unchecked(&fscache_n_op_cancelled);
62697 list_del_init(&op->pend_link);
62698 if (do_cancel)
62699 do_cancel(op);
62700@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
62701 while (!list_empty(&object->pending_ops)) {
62702 op = list_entry(object->pending_ops.next,
62703 struct fscache_operation, pend_link);
62704- fscache_stat(&fscache_n_op_cancelled);
62705+ fscache_stat_unchecked(&fscache_n_op_cancelled);
62706 list_del_init(&op->pend_link);
62707
62708 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
62709@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
62710 op->state, ==, FSCACHE_OP_ST_CANCELLED);
62711 op->state = FSCACHE_OP_ST_DEAD;
62712
62713- fscache_stat(&fscache_n_op_release);
62714+ fscache_stat_unchecked(&fscache_n_op_release);
62715
62716 if (op->release) {
62717 op->release(op);
62718@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
62719 * lock, and defer it otherwise */
62720 if (!spin_trylock(&object->lock)) {
62721 _debug("defer put");
62722- fscache_stat(&fscache_n_op_deferred_release);
62723+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
62724
62725 cache = object->cache;
62726 spin_lock(&cache->op_gc_list_lock);
62727@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
62728
62729 _debug("GC DEFERRED REL OBJ%x OP%x",
62730 object->debug_id, op->debug_id);
62731- fscache_stat(&fscache_n_op_gc);
62732+ fscache_stat_unchecked(&fscache_n_op_gc);
62733
62734 ASSERTCMP(atomic_read(&op->usage), ==, 0);
62735 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
62736diff --git a/fs/fscache/page.c b/fs/fscache/page.c
62737index de33b3f..8be4d29 100644
62738--- a/fs/fscache/page.c
62739+++ b/fs/fscache/page.c
62740@@ -74,7 +74,7 @@ try_again:
62741 val = radix_tree_lookup(&cookie->stores, page->index);
62742 if (!val) {
62743 rcu_read_unlock();
62744- fscache_stat(&fscache_n_store_vmscan_not_storing);
62745+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
62746 __fscache_uncache_page(cookie, page);
62747 return true;
62748 }
62749@@ -104,11 +104,11 @@ try_again:
62750 spin_unlock(&cookie->stores_lock);
62751
62752 if (xpage) {
62753- fscache_stat(&fscache_n_store_vmscan_cancelled);
62754- fscache_stat(&fscache_n_store_radix_deletes);
62755+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
62756+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
62757 ASSERTCMP(xpage, ==, page);
62758 } else {
62759- fscache_stat(&fscache_n_store_vmscan_gone);
62760+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
62761 }
62762
62763 wake_up_bit(&cookie->flags, 0);
62764@@ -123,11 +123,11 @@ page_busy:
62765 * sleeping on memory allocation, so we may need to impose a timeout
62766 * too. */
62767 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
62768- fscache_stat(&fscache_n_store_vmscan_busy);
62769+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
62770 return false;
62771 }
62772
62773- fscache_stat(&fscache_n_store_vmscan_wait);
62774+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
62775 if (!release_page_wait_timeout(cookie, page))
62776 _debug("fscache writeout timeout page: %p{%lx}",
62777 page, page->index);
62778@@ -156,7 +156,7 @@ static void fscache_end_page_write(struct fscache_object *object,
62779 FSCACHE_COOKIE_STORING_TAG);
62780 if (!radix_tree_tag_get(&cookie->stores, page->index,
62781 FSCACHE_COOKIE_PENDING_TAG)) {
62782- fscache_stat(&fscache_n_store_radix_deletes);
62783+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
62784 xpage = radix_tree_delete(&cookie->stores, page->index);
62785 }
62786 spin_unlock(&cookie->stores_lock);
62787@@ -177,7 +177,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
62788
62789 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
62790
62791- fscache_stat(&fscache_n_attr_changed_calls);
62792+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
62793
62794 if (fscache_object_is_active(object)) {
62795 fscache_stat(&fscache_n_cop_attr_changed);
62796@@ -204,11 +204,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
62797
62798 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
62799
62800- fscache_stat(&fscache_n_attr_changed);
62801+ fscache_stat_unchecked(&fscache_n_attr_changed);
62802
62803 op = kzalloc(sizeof(*op), GFP_KERNEL);
62804 if (!op) {
62805- fscache_stat(&fscache_n_attr_changed_nomem);
62806+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
62807 _leave(" = -ENOMEM");
62808 return -ENOMEM;
62809 }
62810@@ -230,7 +230,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
62811 if (fscache_submit_exclusive_op(object, op) < 0)
62812 goto nobufs_dec;
62813 spin_unlock(&cookie->lock);
62814- fscache_stat(&fscache_n_attr_changed_ok);
62815+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
62816 fscache_put_operation(op);
62817 _leave(" = 0");
62818 return 0;
62819@@ -242,7 +242,7 @@ nobufs:
62820 kfree(op);
62821 if (wake_cookie)
62822 __fscache_wake_unused_cookie(cookie);
62823- fscache_stat(&fscache_n_attr_changed_nobufs);
62824+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
62825 _leave(" = %d", -ENOBUFS);
62826 return -ENOBUFS;
62827 }
62828@@ -281,7 +281,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
62829 /* allocate a retrieval operation and attempt to submit it */
62830 op = kzalloc(sizeof(*op), GFP_NOIO);
62831 if (!op) {
62832- fscache_stat(&fscache_n_retrievals_nomem);
62833+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
62834 return NULL;
62835 }
62836
62837@@ -311,12 +311,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
62838 return 0;
62839 }
62840
62841- fscache_stat(&fscache_n_retrievals_wait);
62842+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
62843
62844 jif = jiffies;
62845 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
62846 TASK_INTERRUPTIBLE) != 0) {
62847- fscache_stat(&fscache_n_retrievals_intr);
62848+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
62849 _leave(" = -ERESTARTSYS");
62850 return -ERESTARTSYS;
62851 }
62852@@ -345,8 +345,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
62853 */
62854 int fscache_wait_for_operation_activation(struct fscache_object *object,
62855 struct fscache_operation *op,
62856- atomic_t *stat_op_waits,
62857- atomic_t *stat_object_dead,
62858+ atomic_unchecked_t *stat_op_waits,
62859+ atomic_unchecked_t *stat_object_dead,
62860 void (*do_cancel)(struct fscache_operation *))
62861 {
62862 int ret;
62863@@ -356,7 +356,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
62864
62865 _debug(">>> WT");
62866 if (stat_op_waits)
62867- fscache_stat(stat_op_waits);
62868+ fscache_stat_unchecked(stat_op_waits);
62869 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
62870 TASK_INTERRUPTIBLE) != 0) {
62871 ret = fscache_cancel_op(op, do_cancel);
62872@@ -373,7 +373,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
62873 check_if_dead:
62874 if (op->state == FSCACHE_OP_ST_CANCELLED) {
62875 if (stat_object_dead)
62876- fscache_stat(stat_object_dead);
62877+ fscache_stat_unchecked(stat_object_dead);
62878 _leave(" = -ENOBUFS [cancelled]");
62879 return -ENOBUFS;
62880 }
62881@@ -381,7 +381,7 @@ check_if_dead:
62882 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
62883 fscache_cancel_op(op, do_cancel);
62884 if (stat_object_dead)
62885- fscache_stat(stat_object_dead);
62886+ fscache_stat_unchecked(stat_object_dead);
62887 return -ENOBUFS;
62888 }
62889 return 0;
62890@@ -409,7 +409,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
62891
62892 _enter("%p,%p,,,", cookie, page);
62893
62894- fscache_stat(&fscache_n_retrievals);
62895+ fscache_stat_unchecked(&fscache_n_retrievals);
62896
62897 if (hlist_empty(&cookie->backing_objects))
62898 goto nobufs;
62899@@ -451,7 +451,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
62900 goto nobufs_unlock_dec;
62901 spin_unlock(&cookie->lock);
62902
62903- fscache_stat(&fscache_n_retrieval_ops);
62904+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
62905
62906 /* pin the netfs read context in case we need to do the actual netfs
62907 * read because we've encountered a cache read failure */
62908@@ -482,15 +482,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
62909
62910 error:
62911 if (ret == -ENOMEM)
62912- fscache_stat(&fscache_n_retrievals_nomem);
62913+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
62914 else if (ret == -ERESTARTSYS)
62915- fscache_stat(&fscache_n_retrievals_intr);
62916+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
62917 else if (ret == -ENODATA)
62918- fscache_stat(&fscache_n_retrievals_nodata);
62919+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
62920 else if (ret < 0)
62921- fscache_stat(&fscache_n_retrievals_nobufs);
62922+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62923 else
62924- fscache_stat(&fscache_n_retrievals_ok);
62925+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
62926
62927 fscache_put_retrieval(op);
62928 _leave(" = %d", ret);
62929@@ -505,7 +505,7 @@ nobufs_unlock:
62930 __fscache_wake_unused_cookie(cookie);
62931 kfree(op);
62932 nobufs:
62933- fscache_stat(&fscache_n_retrievals_nobufs);
62934+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62935 _leave(" = -ENOBUFS");
62936 return -ENOBUFS;
62937 }
62938@@ -544,7 +544,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
62939
62940 _enter("%p,,%d,,,", cookie, *nr_pages);
62941
62942- fscache_stat(&fscache_n_retrievals);
62943+ fscache_stat_unchecked(&fscache_n_retrievals);
62944
62945 if (hlist_empty(&cookie->backing_objects))
62946 goto nobufs;
62947@@ -582,7 +582,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
62948 goto nobufs_unlock_dec;
62949 spin_unlock(&cookie->lock);
62950
62951- fscache_stat(&fscache_n_retrieval_ops);
62952+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
62953
62954 /* pin the netfs read context in case we need to do the actual netfs
62955 * read because we've encountered a cache read failure */
62956@@ -613,15 +613,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
62957
62958 error:
62959 if (ret == -ENOMEM)
62960- fscache_stat(&fscache_n_retrievals_nomem);
62961+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
62962 else if (ret == -ERESTARTSYS)
62963- fscache_stat(&fscache_n_retrievals_intr);
62964+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
62965 else if (ret == -ENODATA)
62966- fscache_stat(&fscache_n_retrievals_nodata);
62967+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
62968 else if (ret < 0)
62969- fscache_stat(&fscache_n_retrievals_nobufs);
62970+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62971 else
62972- fscache_stat(&fscache_n_retrievals_ok);
62973+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
62974
62975 fscache_put_retrieval(op);
62976 _leave(" = %d", ret);
62977@@ -636,7 +636,7 @@ nobufs_unlock:
62978 if (wake_cookie)
62979 __fscache_wake_unused_cookie(cookie);
62980 nobufs:
62981- fscache_stat(&fscache_n_retrievals_nobufs);
62982+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
62983 _leave(" = -ENOBUFS");
62984 return -ENOBUFS;
62985 }
62986@@ -661,7 +661,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
62987
62988 _enter("%p,%p,,,", cookie, page);
62989
62990- fscache_stat(&fscache_n_allocs);
62991+ fscache_stat_unchecked(&fscache_n_allocs);
62992
62993 if (hlist_empty(&cookie->backing_objects))
62994 goto nobufs;
62995@@ -695,7 +695,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
62996 goto nobufs_unlock_dec;
62997 spin_unlock(&cookie->lock);
62998
62999- fscache_stat(&fscache_n_alloc_ops);
63000+ fscache_stat_unchecked(&fscache_n_alloc_ops);
63001
63002 ret = fscache_wait_for_operation_activation(
63003 object, &op->op,
63004@@ -712,11 +712,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63005
63006 error:
63007 if (ret == -ERESTARTSYS)
63008- fscache_stat(&fscache_n_allocs_intr);
63009+ fscache_stat_unchecked(&fscache_n_allocs_intr);
63010 else if (ret < 0)
63011- fscache_stat(&fscache_n_allocs_nobufs);
63012+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
63013 else
63014- fscache_stat(&fscache_n_allocs_ok);
63015+ fscache_stat_unchecked(&fscache_n_allocs_ok);
63016
63017 fscache_put_retrieval(op);
63018 _leave(" = %d", ret);
63019@@ -730,7 +730,7 @@ nobufs_unlock:
63020 if (wake_cookie)
63021 __fscache_wake_unused_cookie(cookie);
63022 nobufs:
63023- fscache_stat(&fscache_n_allocs_nobufs);
63024+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
63025 _leave(" = -ENOBUFS");
63026 return -ENOBUFS;
63027 }
63028@@ -806,7 +806,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63029
63030 spin_lock(&cookie->stores_lock);
63031
63032- fscache_stat(&fscache_n_store_calls);
63033+ fscache_stat_unchecked(&fscache_n_store_calls);
63034
63035 /* find a page to store */
63036 page = NULL;
63037@@ -817,7 +817,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63038 page = results[0];
63039 _debug("gang %d [%lx]", n, page->index);
63040 if (page->index > op->store_limit) {
63041- fscache_stat(&fscache_n_store_pages_over_limit);
63042+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
63043 goto superseded;
63044 }
63045
63046@@ -829,7 +829,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63047 spin_unlock(&cookie->stores_lock);
63048 spin_unlock(&object->lock);
63049
63050- fscache_stat(&fscache_n_store_pages);
63051+ fscache_stat_unchecked(&fscache_n_store_pages);
63052 fscache_stat(&fscache_n_cop_write_page);
63053 ret = object->cache->ops->write_page(op, page);
63054 fscache_stat_d(&fscache_n_cop_write_page);
63055@@ -933,7 +933,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63056 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63057 ASSERT(PageFsCache(page));
63058
63059- fscache_stat(&fscache_n_stores);
63060+ fscache_stat_unchecked(&fscache_n_stores);
63061
63062 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
63063 _leave(" = -ENOBUFS [invalidating]");
63064@@ -992,7 +992,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63065 spin_unlock(&cookie->stores_lock);
63066 spin_unlock(&object->lock);
63067
63068- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
63069+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
63070 op->store_limit = object->store_limit;
63071
63072 __fscache_use_cookie(cookie);
63073@@ -1001,8 +1001,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63074
63075 spin_unlock(&cookie->lock);
63076 radix_tree_preload_end();
63077- fscache_stat(&fscache_n_store_ops);
63078- fscache_stat(&fscache_n_stores_ok);
63079+ fscache_stat_unchecked(&fscache_n_store_ops);
63080+ fscache_stat_unchecked(&fscache_n_stores_ok);
63081
63082 /* the work queue now carries its own ref on the object */
63083 fscache_put_operation(&op->op);
63084@@ -1010,14 +1010,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63085 return 0;
63086
63087 already_queued:
63088- fscache_stat(&fscache_n_stores_again);
63089+ fscache_stat_unchecked(&fscache_n_stores_again);
63090 already_pending:
63091 spin_unlock(&cookie->stores_lock);
63092 spin_unlock(&object->lock);
63093 spin_unlock(&cookie->lock);
63094 radix_tree_preload_end();
63095 kfree(op);
63096- fscache_stat(&fscache_n_stores_ok);
63097+ fscache_stat_unchecked(&fscache_n_stores_ok);
63098 _leave(" = 0");
63099 return 0;
63100
63101@@ -1039,14 +1039,14 @@ nobufs:
63102 kfree(op);
63103 if (wake_cookie)
63104 __fscache_wake_unused_cookie(cookie);
63105- fscache_stat(&fscache_n_stores_nobufs);
63106+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
63107 _leave(" = -ENOBUFS");
63108 return -ENOBUFS;
63109
63110 nomem_free:
63111 kfree(op);
63112 nomem:
63113- fscache_stat(&fscache_n_stores_oom);
63114+ fscache_stat_unchecked(&fscache_n_stores_oom);
63115 _leave(" = -ENOMEM");
63116 return -ENOMEM;
63117 }
63118@@ -1064,7 +1064,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
63119 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63120 ASSERTCMP(page, !=, NULL);
63121
63122- fscache_stat(&fscache_n_uncaches);
63123+ fscache_stat_unchecked(&fscache_n_uncaches);
63124
63125 /* cache withdrawal may beat us to it */
63126 if (!PageFsCache(page))
63127@@ -1115,7 +1115,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
63128 struct fscache_cookie *cookie = op->op.object->cookie;
63129
63130 #ifdef CONFIG_FSCACHE_STATS
63131- atomic_inc(&fscache_n_marks);
63132+ atomic_inc_unchecked(&fscache_n_marks);
63133 #endif
63134
63135 _debug("- mark %p{%lx}", page, page->index);
63136diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
63137index 40d13c7..ddf52b9 100644
63138--- a/fs/fscache/stats.c
63139+++ b/fs/fscache/stats.c
63140@@ -18,99 +18,99 @@
63141 /*
63142 * operation counters
63143 */
63144-atomic_t fscache_n_op_pend;
63145-atomic_t fscache_n_op_run;
63146-atomic_t fscache_n_op_enqueue;
63147-atomic_t fscache_n_op_requeue;
63148-atomic_t fscache_n_op_deferred_release;
63149-atomic_t fscache_n_op_release;
63150-atomic_t fscache_n_op_gc;
63151-atomic_t fscache_n_op_cancelled;
63152-atomic_t fscache_n_op_rejected;
63153+atomic_unchecked_t fscache_n_op_pend;
63154+atomic_unchecked_t fscache_n_op_run;
63155+atomic_unchecked_t fscache_n_op_enqueue;
63156+atomic_unchecked_t fscache_n_op_requeue;
63157+atomic_unchecked_t fscache_n_op_deferred_release;
63158+atomic_unchecked_t fscache_n_op_release;
63159+atomic_unchecked_t fscache_n_op_gc;
63160+atomic_unchecked_t fscache_n_op_cancelled;
63161+atomic_unchecked_t fscache_n_op_rejected;
63162
63163-atomic_t fscache_n_attr_changed;
63164-atomic_t fscache_n_attr_changed_ok;
63165-atomic_t fscache_n_attr_changed_nobufs;
63166-atomic_t fscache_n_attr_changed_nomem;
63167-atomic_t fscache_n_attr_changed_calls;
63168+atomic_unchecked_t fscache_n_attr_changed;
63169+atomic_unchecked_t fscache_n_attr_changed_ok;
63170+atomic_unchecked_t fscache_n_attr_changed_nobufs;
63171+atomic_unchecked_t fscache_n_attr_changed_nomem;
63172+atomic_unchecked_t fscache_n_attr_changed_calls;
63173
63174-atomic_t fscache_n_allocs;
63175-atomic_t fscache_n_allocs_ok;
63176-atomic_t fscache_n_allocs_wait;
63177-atomic_t fscache_n_allocs_nobufs;
63178-atomic_t fscache_n_allocs_intr;
63179-atomic_t fscache_n_allocs_object_dead;
63180-atomic_t fscache_n_alloc_ops;
63181-atomic_t fscache_n_alloc_op_waits;
63182+atomic_unchecked_t fscache_n_allocs;
63183+atomic_unchecked_t fscache_n_allocs_ok;
63184+atomic_unchecked_t fscache_n_allocs_wait;
63185+atomic_unchecked_t fscache_n_allocs_nobufs;
63186+atomic_unchecked_t fscache_n_allocs_intr;
63187+atomic_unchecked_t fscache_n_allocs_object_dead;
63188+atomic_unchecked_t fscache_n_alloc_ops;
63189+atomic_unchecked_t fscache_n_alloc_op_waits;
63190
63191-atomic_t fscache_n_retrievals;
63192-atomic_t fscache_n_retrievals_ok;
63193-atomic_t fscache_n_retrievals_wait;
63194-atomic_t fscache_n_retrievals_nodata;
63195-atomic_t fscache_n_retrievals_nobufs;
63196-atomic_t fscache_n_retrievals_intr;
63197-atomic_t fscache_n_retrievals_nomem;
63198-atomic_t fscache_n_retrievals_object_dead;
63199-atomic_t fscache_n_retrieval_ops;
63200-atomic_t fscache_n_retrieval_op_waits;
63201+atomic_unchecked_t fscache_n_retrievals;
63202+atomic_unchecked_t fscache_n_retrievals_ok;
63203+atomic_unchecked_t fscache_n_retrievals_wait;
63204+atomic_unchecked_t fscache_n_retrievals_nodata;
63205+atomic_unchecked_t fscache_n_retrievals_nobufs;
63206+atomic_unchecked_t fscache_n_retrievals_intr;
63207+atomic_unchecked_t fscache_n_retrievals_nomem;
63208+atomic_unchecked_t fscache_n_retrievals_object_dead;
63209+atomic_unchecked_t fscache_n_retrieval_ops;
63210+atomic_unchecked_t fscache_n_retrieval_op_waits;
63211
63212-atomic_t fscache_n_stores;
63213-atomic_t fscache_n_stores_ok;
63214-atomic_t fscache_n_stores_again;
63215-atomic_t fscache_n_stores_nobufs;
63216-atomic_t fscache_n_stores_oom;
63217-atomic_t fscache_n_store_ops;
63218-atomic_t fscache_n_store_calls;
63219-atomic_t fscache_n_store_pages;
63220-atomic_t fscache_n_store_radix_deletes;
63221-atomic_t fscache_n_store_pages_over_limit;
63222+atomic_unchecked_t fscache_n_stores;
63223+atomic_unchecked_t fscache_n_stores_ok;
63224+atomic_unchecked_t fscache_n_stores_again;
63225+atomic_unchecked_t fscache_n_stores_nobufs;
63226+atomic_unchecked_t fscache_n_stores_oom;
63227+atomic_unchecked_t fscache_n_store_ops;
63228+atomic_unchecked_t fscache_n_store_calls;
63229+atomic_unchecked_t fscache_n_store_pages;
63230+atomic_unchecked_t fscache_n_store_radix_deletes;
63231+atomic_unchecked_t fscache_n_store_pages_over_limit;
63232
63233-atomic_t fscache_n_store_vmscan_not_storing;
63234-atomic_t fscache_n_store_vmscan_gone;
63235-atomic_t fscache_n_store_vmscan_busy;
63236-atomic_t fscache_n_store_vmscan_cancelled;
63237-atomic_t fscache_n_store_vmscan_wait;
63238+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
63239+atomic_unchecked_t fscache_n_store_vmscan_gone;
63240+atomic_unchecked_t fscache_n_store_vmscan_busy;
63241+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
63242+atomic_unchecked_t fscache_n_store_vmscan_wait;
63243
63244-atomic_t fscache_n_marks;
63245-atomic_t fscache_n_uncaches;
63246+atomic_unchecked_t fscache_n_marks;
63247+atomic_unchecked_t fscache_n_uncaches;
63248
63249-atomic_t fscache_n_acquires;
63250-atomic_t fscache_n_acquires_null;
63251-atomic_t fscache_n_acquires_no_cache;
63252-atomic_t fscache_n_acquires_ok;
63253-atomic_t fscache_n_acquires_nobufs;
63254-atomic_t fscache_n_acquires_oom;
63255+atomic_unchecked_t fscache_n_acquires;
63256+atomic_unchecked_t fscache_n_acquires_null;
63257+atomic_unchecked_t fscache_n_acquires_no_cache;
63258+atomic_unchecked_t fscache_n_acquires_ok;
63259+atomic_unchecked_t fscache_n_acquires_nobufs;
63260+atomic_unchecked_t fscache_n_acquires_oom;
63261
63262-atomic_t fscache_n_invalidates;
63263-atomic_t fscache_n_invalidates_run;
63264+atomic_unchecked_t fscache_n_invalidates;
63265+atomic_unchecked_t fscache_n_invalidates_run;
63266
63267-atomic_t fscache_n_updates;
63268-atomic_t fscache_n_updates_null;
63269-atomic_t fscache_n_updates_run;
63270+atomic_unchecked_t fscache_n_updates;
63271+atomic_unchecked_t fscache_n_updates_null;
63272+atomic_unchecked_t fscache_n_updates_run;
63273
63274-atomic_t fscache_n_relinquishes;
63275-atomic_t fscache_n_relinquishes_null;
63276-atomic_t fscache_n_relinquishes_waitcrt;
63277-atomic_t fscache_n_relinquishes_retire;
63278+atomic_unchecked_t fscache_n_relinquishes;
63279+atomic_unchecked_t fscache_n_relinquishes_null;
63280+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
63281+atomic_unchecked_t fscache_n_relinquishes_retire;
63282
63283-atomic_t fscache_n_cookie_index;
63284-atomic_t fscache_n_cookie_data;
63285-atomic_t fscache_n_cookie_special;
63286+atomic_unchecked_t fscache_n_cookie_index;
63287+atomic_unchecked_t fscache_n_cookie_data;
63288+atomic_unchecked_t fscache_n_cookie_special;
63289
63290-atomic_t fscache_n_object_alloc;
63291-atomic_t fscache_n_object_no_alloc;
63292-atomic_t fscache_n_object_lookups;
63293-atomic_t fscache_n_object_lookups_negative;
63294-atomic_t fscache_n_object_lookups_positive;
63295-atomic_t fscache_n_object_lookups_timed_out;
63296-atomic_t fscache_n_object_created;
63297-atomic_t fscache_n_object_avail;
63298-atomic_t fscache_n_object_dead;
63299+atomic_unchecked_t fscache_n_object_alloc;
63300+atomic_unchecked_t fscache_n_object_no_alloc;
63301+atomic_unchecked_t fscache_n_object_lookups;
63302+atomic_unchecked_t fscache_n_object_lookups_negative;
63303+atomic_unchecked_t fscache_n_object_lookups_positive;
63304+atomic_unchecked_t fscache_n_object_lookups_timed_out;
63305+atomic_unchecked_t fscache_n_object_created;
63306+atomic_unchecked_t fscache_n_object_avail;
63307+atomic_unchecked_t fscache_n_object_dead;
63308
63309-atomic_t fscache_n_checkaux_none;
63310-atomic_t fscache_n_checkaux_okay;
63311-atomic_t fscache_n_checkaux_update;
63312-atomic_t fscache_n_checkaux_obsolete;
63313+atomic_unchecked_t fscache_n_checkaux_none;
63314+atomic_unchecked_t fscache_n_checkaux_okay;
63315+atomic_unchecked_t fscache_n_checkaux_update;
63316+atomic_unchecked_t fscache_n_checkaux_obsolete;
63317
63318 atomic_t fscache_n_cop_alloc_object;
63319 atomic_t fscache_n_cop_lookup_object;
63320@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
63321 seq_puts(m, "FS-Cache statistics\n");
63322
63323 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
63324- atomic_read(&fscache_n_cookie_index),
63325- atomic_read(&fscache_n_cookie_data),
63326- atomic_read(&fscache_n_cookie_special));
63327+ atomic_read_unchecked(&fscache_n_cookie_index),
63328+ atomic_read_unchecked(&fscache_n_cookie_data),
63329+ atomic_read_unchecked(&fscache_n_cookie_special));
63330
63331 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
63332- atomic_read(&fscache_n_object_alloc),
63333- atomic_read(&fscache_n_object_no_alloc),
63334- atomic_read(&fscache_n_object_avail),
63335- atomic_read(&fscache_n_object_dead));
63336+ atomic_read_unchecked(&fscache_n_object_alloc),
63337+ atomic_read_unchecked(&fscache_n_object_no_alloc),
63338+ atomic_read_unchecked(&fscache_n_object_avail),
63339+ atomic_read_unchecked(&fscache_n_object_dead));
63340 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
63341- atomic_read(&fscache_n_checkaux_none),
63342- atomic_read(&fscache_n_checkaux_okay),
63343- atomic_read(&fscache_n_checkaux_update),
63344- atomic_read(&fscache_n_checkaux_obsolete));
63345+ atomic_read_unchecked(&fscache_n_checkaux_none),
63346+ atomic_read_unchecked(&fscache_n_checkaux_okay),
63347+ atomic_read_unchecked(&fscache_n_checkaux_update),
63348+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
63349
63350 seq_printf(m, "Pages : mrk=%u unc=%u\n",
63351- atomic_read(&fscache_n_marks),
63352- atomic_read(&fscache_n_uncaches));
63353+ atomic_read_unchecked(&fscache_n_marks),
63354+ atomic_read_unchecked(&fscache_n_uncaches));
63355
63356 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
63357 " oom=%u\n",
63358- atomic_read(&fscache_n_acquires),
63359- atomic_read(&fscache_n_acquires_null),
63360- atomic_read(&fscache_n_acquires_no_cache),
63361- atomic_read(&fscache_n_acquires_ok),
63362- atomic_read(&fscache_n_acquires_nobufs),
63363- atomic_read(&fscache_n_acquires_oom));
63364+ atomic_read_unchecked(&fscache_n_acquires),
63365+ atomic_read_unchecked(&fscache_n_acquires_null),
63366+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
63367+ atomic_read_unchecked(&fscache_n_acquires_ok),
63368+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
63369+ atomic_read_unchecked(&fscache_n_acquires_oom));
63370
63371 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
63372- atomic_read(&fscache_n_object_lookups),
63373- atomic_read(&fscache_n_object_lookups_negative),
63374- atomic_read(&fscache_n_object_lookups_positive),
63375- atomic_read(&fscache_n_object_created),
63376- atomic_read(&fscache_n_object_lookups_timed_out));
63377+ atomic_read_unchecked(&fscache_n_object_lookups),
63378+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
63379+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
63380+ atomic_read_unchecked(&fscache_n_object_created),
63381+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
63382
63383 seq_printf(m, "Invals : n=%u run=%u\n",
63384- atomic_read(&fscache_n_invalidates),
63385- atomic_read(&fscache_n_invalidates_run));
63386+ atomic_read_unchecked(&fscache_n_invalidates),
63387+ atomic_read_unchecked(&fscache_n_invalidates_run));
63388
63389 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
63390- atomic_read(&fscache_n_updates),
63391- atomic_read(&fscache_n_updates_null),
63392- atomic_read(&fscache_n_updates_run));
63393+ atomic_read_unchecked(&fscache_n_updates),
63394+ atomic_read_unchecked(&fscache_n_updates_null),
63395+ atomic_read_unchecked(&fscache_n_updates_run));
63396
63397 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
63398- atomic_read(&fscache_n_relinquishes),
63399- atomic_read(&fscache_n_relinquishes_null),
63400- atomic_read(&fscache_n_relinquishes_waitcrt),
63401- atomic_read(&fscache_n_relinquishes_retire));
63402+ atomic_read_unchecked(&fscache_n_relinquishes),
63403+ atomic_read_unchecked(&fscache_n_relinquishes_null),
63404+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
63405+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
63406
63407 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
63408- atomic_read(&fscache_n_attr_changed),
63409- atomic_read(&fscache_n_attr_changed_ok),
63410- atomic_read(&fscache_n_attr_changed_nobufs),
63411- atomic_read(&fscache_n_attr_changed_nomem),
63412- atomic_read(&fscache_n_attr_changed_calls));
63413+ atomic_read_unchecked(&fscache_n_attr_changed),
63414+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
63415+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
63416+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
63417+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
63418
63419 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
63420- atomic_read(&fscache_n_allocs),
63421- atomic_read(&fscache_n_allocs_ok),
63422- atomic_read(&fscache_n_allocs_wait),
63423- atomic_read(&fscache_n_allocs_nobufs),
63424- atomic_read(&fscache_n_allocs_intr));
63425+ atomic_read_unchecked(&fscache_n_allocs),
63426+ atomic_read_unchecked(&fscache_n_allocs_ok),
63427+ atomic_read_unchecked(&fscache_n_allocs_wait),
63428+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
63429+ atomic_read_unchecked(&fscache_n_allocs_intr));
63430 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
63431- atomic_read(&fscache_n_alloc_ops),
63432- atomic_read(&fscache_n_alloc_op_waits),
63433- atomic_read(&fscache_n_allocs_object_dead));
63434+ atomic_read_unchecked(&fscache_n_alloc_ops),
63435+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
63436+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
63437
63438 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
63439 " int=%u oom=%u\n",
63440- atomic_read(&fscache_n_retrievals),
63441- atomic_read(&fscache_n_retrievals_ok),
63442- atomic_read(&fscache_n_retrievals_wait),
63443- atomic_read(&fscache_n_retrievals_nodata),
63444- atomic_read(&fscache_n_retrievals_nobufs),
63445- atomic_read(&fscache_n_retrievals_intr),
63446- atomic_read(&fscache_n_retrievals_nomem));
63447+ atomic_read_unchecked(&fscache_n_retrievals),
63448+ atomic_read_unchecked(&fscache_n_retrievals_ok),
63449+ atomic_read_unchecked(&fscache_n_retrievals_wait),
63450+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
63451+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
63452+ atomic_read_unchecked(&fscache_n_retrievals_intr),
63453+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
63454 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
63455- atomic_read(&fscache_n_retrieval_ops),
63456- atomic_read(&fscache_n_retrieval_op_waits),
63457- atomic_read(&fscache_n_retrievals_object_dead));
63458+ atomic_read_unchecked(&fscache_n_retrieval_ops),
63459+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
63460+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
63461
63462 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
63463- atomic_read(&fscache_n_stores),
63464- atomic_read(&fscache_n_stores_ok),
63465- atomic_read(&fscache_n_stores_again),
63466- atomic_read(&fscache_n_stores_nobufs),
63467- atomic_read(&fscache_n_stores_oom));
63468+ atomic_read_unchecked(&fscache_n_stores),
63469+ atomic_read_unchecked(&fscache_n_stores_ok),
63470+ atomic_read_unchecked(&fscache_n_stores_again),
63471+ atomic_read_unchecked(&fscache_n_stores_nobufs),
63472+ atomic_read_unchecked(&fscache_n_stores_oom));
63473 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
63474- atomic_read(&fscache_n_store_ops),
63475- atomic_read(&fscache_n_store_calls),
63476- atomic_read(&fscache_n_store_pages),
63477- atomic_read(&fscache_n_store_radix_deletes),
63478- atomic_read(&fscache_n_store_pages_over_limit));
63479+ atomic_read_unchecked(&fscache_n_store_ops),
63480+ atomic_read_unchecked(&fscache_n_store_calls),
63481+ atomic_read_unchecked(&fscache_n_store_pages),
63482+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
63483+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
63484
63485 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
63486- atomic_read(&fscache_n_store_vmscan_not_storing),
63487- atomic_read(&fscache_n_store_vmscan_gone),
63488- atomic_read(&fscache_n_store_vmscan_busy),
63489- atomic_read(&fscache_n_store_vmscan_cancelled),
63490- atomic_read(&fscache_n_store_vmscan_wait));
63491+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
63492+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
63493+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
63494+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
63495+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
63496
63497 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
63498- atomic_read(&fscache_n_op_pend),
63499- atomic_read(&fscache_n_op_run),
63500- atomic_read(&fscache_n_op_enqueue),
63501- atomic_read(&fscache_n_op_cancelled),
63502- atomic_read(&fscache_n_op_rejected));
63503+ atomic_read_unchecked(&fscache_n_op_pend),
63504+ atomic_read_unchecked(&fscache_n_op_run),
63505+ atomic_read_unchecked(&fscache_n_op_enqueue),
63506+ atomic_read_unchecked(&fscache_n_op_cancelled),
63507+ atomic_read_unchecked(&fscache_n_op_rejected));
63508 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
63509- atomic_read(&fscache_n_op_deferred_release),
63510- atomic_read(&fscache_n_op_release),
63511- atomic_read(&fscache_n_op_gc));
63512+ atomic_read_unchecked(&fscache_n_op_deferred_release),
63513+ atomic_read_unchecked(&fscache_n_op_release),
63514+ atomic_read_unchecked(&fscache_n_op_gc));
63515
63516 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
63517 atomic_read(&fscache_n_cop_alloc_object),
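
Note what fs/fscache/stats.c leaves untouched: the fscache_n_cop_* counters keep their checked atomic_t type, presumably because they are incremented and decremented in balanced pairs around cache-operation calls, so overflow checking stays meaningful for them. The pattern, taken from the fs/fscache/page.c hunk above:

        fscache_stat(&fscache_n_cop_write_page);        /* checked inc */
        ret = object->cache->ops->write_page(op, page);
        fscache_stat_d(&fscache_n_cop_write_page);      /* checked dec */
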
63518diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
63519index 28d0c7a..04816b7 100644
63520--- a/fs/fuse/cuse.c
63521+++ b/fs/fuse/cuse.c
63522@@ -611,10 +611,12 @@ static int __init cuse_init(void)
63523 INIT_LIST_HEAD(&cuse_conntbl[i]);
63524
63525 /* inherit and extend fuse_dev_operations */
63526- cuse_channel_fops = fuse_dev_operations;
63527- cuse_channel_fops.owner = THIS_MODULE;
63528- cuse_channel_fops.open = cuse_channel_open;
63529- cuse_channel_fops.release = cuse_channel_release;
63530+ pax_open_kernel();
63531+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
63532+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
63533+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
63534+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
63535+ pax_close_kernel();
63536
63537 cuse_class = class_create(THIS_MODULE, "cuse");
63538 if (IS_ERR(cuse_class))
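
The cuse.c hunk shows the constification idiom used throughout this patch: once file_operations instances are made read-only, cuse can no longer assign to cuse_channel_fops members directly, so the writes are cast through plain pointers and bracketed by pax_open_kernel()/pax_close_kernel(). A rough sketch of what that pair is understood to do on x86; the actual PaX helpers, added in the arch headers elsewhere in this diff, are more involved:

static inline void pax_open_kernel(void)        /* sketch only */
{
        preempt_disable();
        barrier();
        write_cr0(read_cr0() & ~X86_CR0_WP);    /* permit writes to RO data */
}

static inline void pax_close_kernel(void)
{
        write_cr0(read_cr0() | X86_CR0_WP);     /* re-protect */
        barrier();
        preempt_enable();
}
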
63539diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
63540index 71c4619..6a9f6d4 100644
63541--- a/fs/fuse/dev.c
63542+++ b/fs/fuse/dev.c
63543@@ -1394,7 +1394,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63544 ret = 0;
63545 pipe_lock(pipe);
63546
63547- if (!pipe->readers) {
63548+ if (!atomic_read(&pipe->readers)) {
63549 send_sig(SIGPIPE, current, 0);
63550 if (!ret)
63551 ret = -EPIPE;
63552@@ -1423,7 +1423,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63553 page_nr++;
63554 ret += buf->len;
63555
63556- if (pipe->files)
63557+ if (atomic_read(&pipe->files))
63558 do_wakeup = 1;
63559 }
63560
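
Both fuse_dev_splice_read() hunks depend on a change made elsewhere in this patch: the reference counts in struct pipe_inode_info are assumed to have been converted from plain integers to atomics, roughly as below, which is why every open-coded read such as pipe->readers must become atomic_read(&pipe->readers).

struct pipe_inode_info {
        /* ... */
        atomic_t readers;       /* was: unsigned int readers */
        atomic_t writers;
        atomic_t files;
        /* ... */
};
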
63561diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
63562index 08e7b1a..d91c6ee 100644
63563--- a/fs/fuse/dir.c
63564+++ b/fs/fuse/dir.c
63565@@ -1394,7 +1394,7 @@ static char *read_link(struct dentry *dentry)
63566 return link;
63567 }
63568
63569-static void free_link(char *link)
63570+static void free_link(const char *link)
63571 {
63572 if (!IS_ERR(link))
63573 free_page((unsigned long) link);
63574diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
63575index fd62cae..3494dfa 100644
63576--- a/fs/hostfs/hostfs_kern.c
63577+++ b/fs/hostfs/hostfs_kern.c
63578@@ -908,7 +908,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
63579
63580 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
63581 {
63582- char *s = nd_get_link(nd);
63583+ const char *s = nd_get_link(nd);
63584 if (!IS_ERR(s))
63585 __putname(s);
63586 }
63587diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
63588index 5eba47f..d353c22 100644
63589--- a/fs/hugetlbfs/inode.c
63590+++ b/fs/hugetlbfs/inode.c
63591@@ -154,6 +154,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
63592 struct mm_struct *mm = current->mm;
63593 struct vm_area_struct *vma;
63594 struct hstate *h = hstate_file(file);
63595+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
63596 struct vm_unmapped_area_info info;
63597
63598 if (len & ~huge_page_mask(h))
63599@@ -167,17 +168,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
63600 return addr;
63601 }
63602
63603+#ifdef CONFIG_PAX_RANDMMAP
63604+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
63605+#endif
63606+
63607 if (addr) {
63608 addr = ALIGN(addr, huge_page_size(h));
63609 vma = find_vma(mm, addr);
63610- if (TASK_SIZE - len >= addr &&
63611- (!vma || addr + len <= vma->vm_start))
63612+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
63613 return addr;
63614 }
63615
63616 info.flags = 0;
63617 info.length = len;
63618 info.low_limit = TASK_UNMAPPED_BASE;
63619+
63620+#ifdef CONFIG_PAX_RANDMMAP
63621+ if (mm->pax_flags & MF_PAX_RANDMMAP)
63622+ info.low_limit += mm->delta_mmap;
63623+#endif
63624+
63625 info.high_limit = TASK_SIZE;
63626 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
63627 info.align_offset = 0;
63628@@ -919,7 +929,7 @@ static struct file_system_type hugetlbfs_fs_type = {
63629 };
63630 MODULE_ALIAS_FS("hugetlbfs");
63631
63632-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
63633+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
63634
63635 static int can_do_hugetlb_shm(void)
63636 {
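
hugetlb_get_unmapped_area() gains two grsecurity behaviours: under PAX_RANDMMAP the caller's address hint is ignored and the search base is shifted by mm->delta_mmap, and the open-coded bounds test against the next VMA is replaced by check_heap_stack_gap(). A hedged sketch of what that helper is understood to enforce; its real definition is added in the mm/ portion of this patch, and the exact gap policy below is an assumption:

static bool check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
                                        unsigned long addr, unsigned long len,
                                        unsigned long offset)
{
        if (!vma)
                return true;
        if (addr + len > vma->vm_start)         /* overlaps next mapping */
                return false;
        if ((vma->vm_flags & VM_GROWSDOWN) &&   /* keep a gap below a stack */
            addr + len + offset > vma->vm_start)
                return false;
        return true;
}
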
63637diff --git a/fs/inode.c b/fs/inode.c
63638index aa149e7..46f1f65 100644
63639--- a/fs/inode.c
63640+++ b/fs/inode.c
63641@@ -842,16 +842,20 @@ unsigned int get_next_ino(void)
63642 unsigned int *p = &get_cpu_var(last_ino);
63643 unsigned int res = *p;
63644
63645+start:
63646+
63647 #ifdef CONFIG_SMP
63648 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
63649- static atomic_t shared_last_ino;
63650- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
63651+ static atomic_unchecked_t shared_last_ino;
63652+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
63653
63654 res = next - LAST_INO_BATCH;
63655 }
63656 #endif
63657
63658- *p = ++res;
63659+ if (unlikely(!++res))
63660+ goto start; /* never zero */
63661+ *p = res;
63662 put_cpu_var(last_ino);
63663 return res;
63664 }
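
The fs/inode.c change makes get_next_ino() skip the value 0 when the per-CPU counter wraps, since, as the new comment notes, an inode number of zero is never handed out. A user-space analogue of the patched logic, with the CONFIG_SMP batching elided for brevity:

static unsigned int last_ino;

unsigned int next_ino(void)
{
        unsigned int res = last_ino;
start:
        if (!++res)
                goto start;     /* wrapped to 0: retry, yielding 1 */
        last_ino = res;
        return res;
}
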
63665diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
63666index 4a6cf28..d3a29d3 100644
63667--- a/fs/jffs2/erase.c
63668+++ b/fs/jffs2/erase.c
63669@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
63670 struct jffs2_unknown_node marker = {
63671 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
63672 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
63673- .totlen = cpu_to_je32(c->cleanmarker_size)
63674+ .totlen = cpu_to_je32(c->cleanmarker_size),
63675+ .hdr_crc = cpu_to_je32(0)
63676 };
63677
63678 jffs2_prealloc_raw_node_refs(c, jeb, 1);
63679diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
63680index 09ed551..45684f8 100644
63681--- a/fs/jffs2/wbuf.c
63682+++ b/fs/jffs2/wbuf.c
63683@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
63684 {
63685 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
63686 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
63687- .totlen = constant_cpu_to_je32(8)
63688+ .totlen = constant_cpu_to_je32(8),
63689+ .hdr_crc = constant_cpu_to_je32(0)
63690 };
63691
63692 /*
63693diff --git a/fs/jfs/super.c b/fs/jfs/super.c
63694index 16c3a95..e9cb75d 100644
63695--- a/fs/jfs/super.c
63696+++ b/fs/jfs/super.c
63697@@ -902,7 +902,7 @@ static int __init init_jfs_fs(void)
63698
63699 jfs_inode_cachep =
63700 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
63701- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
63702+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
63703 init_once);
63704 if (jfs_inode_cachep == NULL)
63705 return -ENOMEM;
63706diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
63707index 2d881b3..fe1ac77 100644
63708--- a/fs/kernfs/dir.c
63709+++ b/fs/kernfs/dir.c
63710@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
63711 *
63712 * Returns 31 bit hash of ns + name (so it fits in an off_t )
63713 */
63714-static unsigned int kernfs_name_hash(const char *name, const void *ns)
63715+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
63716 {
63717 unsigned long hash = init_name_hash();
63718 unsigned int len = strlen(name);
63719@@ -833,6 +833,12 @@ static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
63720 ret = scops->mkdir(parent, dentry->d_name.name, mode);
63721
63722 kernfs_put_active(parent);
63723+
63724+ if (!ret) {
63725+ struct dentry *dentry_ret = kernfs_iop_lookup(dir, dentry, 0);
63726+ ret = PTR_ERR_OR_ZERO(dentry_ret);
63727+ }
63728+
63729 return ret;
63730 }
63731
63732diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
63733index ddc9f96..4e450ad 100644
63734--- a/fs/kernfs/file.c
63735+++ b/fs/kernfs/file.c
63736@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
63737
63738 struct kernfs_open_node {
63739 atomic_t refcnt;
63740- atomic_t event;
63741+ atomic_unchecked_t event;
63742 wait_queue_head_t poll;
63743 struct list_head files; /* goes through kernfs_open_file.list */
63744 };
63745@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
63746 {
63747 struct kernfs_open_file *of = sf->private;
63748
63749- of->event = atomic_read(&of->kn->attr.open->event);
63750+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
63751
63752 return of->kn->attr.ops->seq_show(sf, v);
63753 }
63754@@ -271,7 +271,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
63755 {
63756 struct kernfs_open_file *of = kernfs_of(file);
63757 const struct kernfs_ops *ops;
63758- size_t len;
63759+ ssize_t len;
63760 char *buf;
63761
63762 if (of->atomic_write_len) {
63763@@ -384,12 +384,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
63764 return ret;
63765 }
63766
63767-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
63768- void *buf, int len, int write)
63769+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
63770+ void *buf, size_t len, int write)
63771 {
63772 struct file *file = vma->vm_file;
63773 struct kernfs_open_file *of = kernfs_of(file);
63774- int ret;
63775+ ssize_t ret;
63776
63777 if (!of->vm_ops)
63778 return -EINVAL;
63779@@ -568,7 +568,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
63780 return -ENOMEM;
63781
63782 atomic_set(&new_on->refcnt, 0);
63783- atomic_set(&new_on->event, 1);
63784+ atomic_set_unchecked(&new_on->event, 1);
63785 init_waitqueue_head(&new_on->poll);
63786 INIT_LIST_HEAD(&new_on->files);
63787 goto retry;
63788@@ -792,7 +792,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
63789
63790 kernfs_put_active(kn);
63791
63792- if (of->event != atomic_read(&on->event))
63793+ if (of->event != atomic_read_unchecked(&on->event))
63794 goto trigger;
63795
63796 return DEFAULT_POLLMASK;
63797@@ -823,7 +823,7 @@ repeat:
63798
63799 on = kn->attr.open;
63800 if (on) {
63801- atomic_inc(&on->event);
63802+ atomic_inc_unchecked(&on->event);
63803 wake_up_interruptible(&on->poll);
63804 }
63805
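
The kernfs_fop_write() and kernfs_vma_access() hunks are signedness fixes rather than hardening: kernfs handlers return ssize_t values that may be negative errnos, so holding the result in a size_t (or int) misreads errors. A hedged reconstruction of the failure mode in kernfs_fop_write(), assuming the surrounding code follows the usual pattern:

        ssize_t len;                    /* was: size_t len */
        /* ... copy user data into buf ... */
        len = ops->write(of, buf, len, *ppos);
        if (len > 0)                    /* with size_t, -EIO becomes a huge
                                         * positive value and this success
                                         * path would wrongly run */
                *ppos += len;
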
63806diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
63807index 8a19889..4c3069a 100644
63808--- a/fs/kernfs/symlink.c
63809+++ b/fs/kernfs/symlink.c
63810@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
63811 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
63812 void *cookie)
63813 {
63814- char *page = nd_get_link(nd);
63815+ const char *page = nd_get_link(nd);
63816 if (!IS_ERR(page))
63817 free_page((unsigned long)page);
63818 }
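
The same const-correctness fix recurs in fs/fuse/dir.c, fs/hostfs/hostfs_kern.c, fs/kernfs/symlink.c above and in fs/libfs.c below: once the fs/namei.c hunks later in this patch make nd_set_link()/nd_get_link() traffic in const char *, every put_link-style helper has to accept the link as const, e.g.:

static void example_put_link(struct dentry *dentry, struct nameidata *nd,
                             void *cookie)
{
        const char *page = nd_get_link(nd);     /* was: char *page */
        if (!IS_ERR(page))
                free_page((unsigned long)page);
}
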
63819diff --git a/fs/libfs.c b/fs/libfs.c
63820index 005843c..06c4191 100644
63821--- a/fs/libfs.c
63822+++ b/fs/libfs.c
63823@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
63824
63825 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
63826 struct dentry *next = list_entry(p, struct dentry, d_child);
63827+ char d_name[sizeof(next->d_iname)];
63828+ const unsigned char *name;
63829+
63830 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
63831 if (!simple_positive(next)) {
63832 spin_unlock(&next->d_lock);
63833@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
63834
63835 spin_unlock(&next->d_lock);
63836 spin_unlock(&dentry->d_lock);
63837- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
63838+ name = next->d_name.name;
63839+ if (name == next->d_iname) {
63840+ memcpy(d_name, name, next->d_name.len);
63841+ name = d_name;
63842+ }
63843+ if (!dir_emit(ctx, name, next->d_name.len,
63844 next->d_inode->i_ino, dt_type(next->d_inode)))
63845 return 0;
63846 spin_lock(&dentry->d_lock);
63847@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
63848 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
63849 void *cookie)
63850 {
63851- char *s = nd_get_link(nd);
63852+ const char *s = nd_get_link(nd);
63853 if (!IS_ERR(s))
63854 kfree(s);
63855 }
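
The dcache_readdir() change stabilises short dentry names before emitting them. dir_emit() can block while copying to user space, and by that point the child's d_lock has been dropped, so a name stored inline in next->d_iname may be rewritten by a concurrent rename; externally allocated names keep their storage and need no copy. The guard added above, in isolation:

        char d_name[sizeof(next->d_iname)];
        const unsigned char *name = next->d_name.name;

        if (name == next->d_iname) {            /* short name stored inline */
                memcpy(d_name, name, next->d_name.len);
                name = d_name;                  /* emit the stable copy */
        }
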
63856diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
63857index acd3947..1f896e2 100644
63858--- a/fs/lockd/clntproc.c
63859+++ b/fs/lockd/clntproc.c
63860@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
63861 /*
63862 * Cookie counter for NLM requests
63863 */
63864-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
63865+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
63866
63867 void nlmclnt_next_cookie(struct nlm_cookie *c)
63868 {
63869- u32 cookie = atomic_inc_return(&nlm_cookie);
63870+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
63871
63872 memcpy(c->data, &cookie, 4);
63873 c->len=4;
63874diff --git a/fs/locks.c b/fs/locks.c
63875index 59e2f90..bd69071 100644
63876--- a/fs/locks.c
63877+++ b/fs/locks.c
63878@@ -2374,7 +2374,7 @@ void locks_remove_file(struct file *filp)
63879 locks_remove_posix(filp, filp);
63880
63881 if (filp->f_op->flock) {
63882- struct file_lock fl = {
63883+ struct file_lock flock = {
63884 .fl_owner = filp,
63885 .fl_pid = current->tgid,
63886 .fl_file = filp,
63887@@ -2382,9 +2382,9 @@ void locks_remove_file(struct file *filp)
63888 .fl_type = F_UNLCK,
63889 .fl_end = OFFSET_MAX,
63890 };
63891- filp->f_op->flock(filp, F_SETLKW, &fl);
63892- if (fl.fl_ops && fl.fl_ops->fl_release_private)
63893- fl.fl_ops->fl_release_private(&fl);
63894+ filp->f_op->flock(filp, F_SETLKW, &flock);
63895+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
63896+ flock.fl_ops->fl_release_private(&flock);
63897 }
63898
63899 spin_lock(&inode->i_lock);
63900diff --git a/fs/mount.h b/fs/mount.h
63901index 0ad6f76..a04c146 100644
63902--- a/fs/mount.h
63903+++ b/fs/mount.h
63904@@ -12,7 +12,7 @@ struct mnt_namespace {
63905 u64 seq; /* Sequence number to prevent loops */
63906 wait_queue_head_t poll;
63907 u64 event;
63908-};
63909+} __randomize_layout;
63910
63911 struct mnt_pcp {
63912 int mnt_count;
63913@@ -63,7 +63,7 @@ struct mount {
63914 int mnt_expiry_mark; /* true if marked for expiry */
63915 struct hlist_head mnt_pins;
63916 struct path mnt_ex_mountpoint;
63917-};
63918+} __randomize_layout;
63919
63920 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
63921
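
Tagging struct mnt_namespace and struct mount with __randomize_layout opts them into structure-layout randomisation (GRKERNSEC_RANDSTRUCT), under which a gcc plugin shuffles member order at build time so exploits cannot rely on fixed field offsets. When the feature is off, the annotation is assumed to compile away, along these lines:

#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
#define __randomize_layout      __attribute__((randomize_layout))
#else
#define __randomize_layout
#endif
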
63922diff --git a/fs/namei.c b/fs/namei.c
63923index bc35b02..7ed1f1d 100644
63924--- a/fs/namei.c
63925+++ b/fs/namei.c
63926@@ -331,17 +331,32 @@ int generic_permission(struct inode *inode, int mask)
63927 if (ret != -EACCES)
63928 return ret;
63929
63930+#ifdef CONFIG_GRKERNSEC
63931+ /* we'll block if we have to log due to a denied capability use */
63932+ if (mask & MAY_NOT_BLOCK)
63933+ return -ECHILD;
63934+#endif
63935+
63936 if (S_ISDIR(inode->i_mode)) {
63937 /* DACs are overridable for directories */
63938- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
63939- return 0;
63940 if (!(mask & MAY_WRITE))
63941- if (capable_wrt_inode_uidgid(inode,
63942- CAP_DAC_READ_SEARCH))
63943+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
63944+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
63945 return 0;
63946+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
63947+ return 0;
63948 return -EACCES;
63949 }
63950 /*
63951+ * Searching includes executable on directories, else just read.
63952+ */
63953+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
63954+ if (mask == MAY_READ)
63955+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
63956+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
63957+ return 0;
63958+
63959+ /*
63960 * Read/write DACs are always overridable.
63961 * Executable DACs are overridable when there is
63962 * at least one exec bit set.
63963@@ -350,14 +365,6 @@ int generic_permission(struct inode *inode, int mask)
63964 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
63965 return 0;
63966
63967- /*
63968- * Searching includes executable on directories, else just read.
63969- */
63970- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
63971- if (mask == MAY_READ)
63972- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
63973- return 0;
63974-
63975 return -EACCES;
63976 }
63977 EXPORT_SYMBOL(generic_permission);
63978@@ -497,7 +504,7 @@ struct nameidata {
63979 int last_type;
63980 unsigned depth;
63981 struct file *base;
63982- char *saved_names[MAX_NESTED_LINKS + 1];
63983+ const char *saved_names[MAX_NESTED_LINKS + 1];
63984 };
63985
63986 /*
63987@@ -708,13 +715,13 @@ void nd_jump_link(struct nameidata *nd, struct path *path)
63988 nd->flags |= LOOKUP_JUMPED;
63989 }
63990
63991-void nd_set_link(struct nameidata *nd, char *path)
63992+void nd_set_link(struct nameidata *nd, const char *path)
63993 {
63994 nd->saved_names[nd->depth] = path;
63995 }
63996 EXPORT_SYMBOL(nd_set_link);
63997
63998-char *nd_get_link(struct nameidata *nd)
63999+const char *nd_get_link(const struct nameidata *nd)
64000 {
64001 return nd->saved_names[nd->depth];
64002 }
64003@@ -849,7 +856,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
64004 {
64005 struct dentry *dentry = link->dentry;
64006 int error;
64007- char *s;
64008+ const char *s;
64009
64010 BUG_ON(nd->flags & LOOKUP_RCU);
64011
64012@@ -870,6 +877,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
64013 if (error)
64014 goto out_put_nd_path;
64015
64016+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
64017+ dentry->d_inode, dentry, nd->path.mnt)) {
64018+ error = -EACCES;
64019+ goto out_put_nd_path;
64020+ }
64021+
64022 nd->last_type = LAST_BIND;
64023 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
64024 error = PTR_ERR(*p);
64025@@ -1633,6 +1646,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
64026 if (res)
64027 break;
64028 res = walk_component(nd, path, LOOKUP_FOLLOW);
64029+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
64030+ res = -EACCES;
64031 put_link(nd, &link, cookie);
64032 } while (res > 0);
64033
64034@@ -1705,7 +1720,7 @@ EXPORT_SYMBOL(full_name_hash);
64035 static inline u64 hash_name(const char *name)
64036 {
64037 unsigned long a, b, adata, bdata, mask, hash, len;
64038- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
64039+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
64040
64041 hash = a = 0;
64042 len = -sizeof(unsigned long);
64043@@ -2000,6 +2015,8 @@ static int path_lookupat(int dfd, const char *name,
64044 if (err)
64045 break;
64046 err = lookup_last(nd, &path);
64047+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
64048+ err = -EACCES;
64049 put_link(nd, &link, cookie);
64050 }
64051 }
64052@@ -2007,6 +2024,13 @@ static int path_lookupat(int dfd, const char *name,
64053 if (!err)
64054 err = complete_walk(nd);
64055
64056+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
64057+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
64058+ path_put(&nd->path);
64059+ err = -ENOENT;
64060+ }
64061+ }
64062+
64063 if (!err && nd->flags & LOOKUP_DIRECTORY) {
64064 if (!d_can_lookup(nd->path.dentry)) {
64065 path_put(&nd->path);
64066@@ -2028,8 +2052,15 @@ static int filename_lookup(int dfd, struct filename *name,
64067 retval = path_lookupat(dfd, name->name,
64068 flags | LOOKUP_REVAL, nd);
64069
64070- if (likely(!retval))
64071+ if (likely(!retval)) {
64072 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
64073+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
64074+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
64075+ path_put(&nd->path);
64076+ return -ENOENT;
64077+ }
64078+ }
64079+ }
64080 return retval;
64081 }
64082
64083@@ -2595,6 +2626,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
64084 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
64085 return -EPERM;
64086
64087+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
64088+ return -EPERM;
64089+ if (gr_handle_rawio(inode))
64090+ return -EPERM;
64091+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
64092+ return -EACCES;
64093+
64094 return 0;
64095 }
64096
64097@@ -2826,7 +2864,7 @@ looked_up:
64098 * cleared otherwise prior to returning.
64099 */
64100 static int lookup_open(struct nameidata *nd, struct path *path,
64101- struct file *file,
64102+ struct path *link, struct file *file,
64103 const struct open_flags *op,
64104 bool got_write, int *opened)
64105 {
64106@@ -2861,6 +2899,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
64107 /* Negative dentry, just create the file */
64108 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
64109 umode_t mode = op->mode;
64110+
64111+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
64112+ error = -EACCES;
64113+ goto out_dput;
64114+ }
64115+
64116+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
64117+ error = -EACCES;
64118+ goto out_dput;
64119+ }
64120+
64121 if (!IS_POSIXACL(dir->d_inode))
64122 mode &= ~current_umask();
64123 /*
64124@@ -2882,6 +2931,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
64125 nd->flags & LOOKUP_EXCL);
64126 if (error)
64127 goto out_dput;
64128+ else
64129+ gr_handle_create(dentry, nd->path.mnt);
64130 }
64131 out_no_open:
64132 path->dentry = dentry;
64133@@ -2896,7 +2947,7 @@ out_dput:
64134 /*
64135 * Handle the last step of open()
64136 */
64137-static int do_last(struct nameidata *nd, struct path *path,
64138+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
64139 struct file *file, const struct open_flags *op,
64140 int *opened, struct filename *name)
64141 {
64142@@ -2946,6 +2997,15 @@ static int do_last(struct nameidata *nd, struct path *path,
64143 if (error)
64144 return error;
64145
64146+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
64147+ error = -ENOENT;
64148+ goto out;
64149+ }
64150+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
64151+ error = -EACCES;
64152+ goto out;
64153+ }
64154+
64155 audit_inode(name, dir, LOOKUP_PARENT);
64156 error = -EISDIR;
64157 /* trailing slashes? */
64158@@ -2965,7 +3025,7 @@ retry_lookup:
64159 */
64160 }
64161 mutex_lock(&dir->d_inode->i_mutex);
64162- error = lookup_open(nd, path, file, op, got_write, opened);
64163+ error = lookup_open(nd, path, link, file, op, got_write, opened);
64164 mutex_unlock(&dir->d_inode->i_mutex);
64165
64166 if (error <= 0) {
64167@@ -2989,11 +3049,28 @@ retry_lookup:
64168 goto finish_open_created;
64169 }
64170
64171+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
64172+ error = -ENOENT;
64173+ goto exit_dput;
64174+ }
64175+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
64176+ error = -EACCES;
64177+ goto exit_dput;
64178+ }
64179+
64180 /*
64181 * create/update audit record if it already exists.
64182 */
64183- if (d_is_positive(path->dentry))
64184+ if (d_is_positive(path->dentry)) {
64185+		/* only check if O_CREAT is specified; all other checks need to go
64186+		   into may_open */
64187+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
64188+ error = -EACCES;
64189+ goto exit_dput;
64190+ }
64191+
64192 audit_inode(name, path->dentry, 0);
64193+ }
64194
64195 /*
64196 * If atomic_open() acquired write access it is dropped now due to
64197@@ -3034,6 +3111,11 @@ finish_lookup:
64198 }
64199 }
64200 BUG_ON(inode != path->dentry->d_inode);
64201+ /* if we're resolving a symlink to another symlink */
64202+ if (link && gr_handle_symlink_owner(link, inode)) {
64203+ error = -EACCES;
64204+ goto out;
64205+ }
64206 return 1;
64207 }
64208
64209@@ -3053,7 +3135,18 @@ finish_open:
64210 path_put(&save_parent);
64211 return error;
64212 }
64213+
64214+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
64215+ error = -ENOENT;
64216+ goto out;
64217+ }
64218+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
64219+ error = -EACCES;
64220+ goto out;
64221+ }
64222+
64223 audit_inode(name, nd->path.dentry, 0);
64224+
64225 error = -EISDIR;
64226 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
64227 goto out;
64228@@ -3214,7 +3307,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64229 if (unlikely(error))
64230 goto out;
64231
64232- error = do_last(nd, &path, file, op, &opened, pathname);
64233+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
64234 while (unlikely(error > 0)) { /* trailing symlink */
64235 struct path link = path;
64236 void *cookie;
64237@@ -3232,7 +3325,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64238 error = follow_link(&link, nd, &cookie);
64239 if (unlikely(error))
64240 break;
64241- error = do_last(nd, &path, file, op, &opened, pathname);
64242+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
64243 put_link(nd, &link, cookie);
64244 }
64245 out:
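
The gr_handle_symlink_owner() hooks threaded through path_lookupat(),
lookup_open() and do_last() above deny following a symlink with -EACCES
when the link's owner fails the configured ownership policy against the
inode it resolves to, similar in spirit to the upstream
fs.protected_symlinks restriction. A minimal userspace sketch of such an
ownership test, under simplified assumptions (symlink_ok() is an
illustrative name, and the real check also weighs the containing
directory and kernel configuration):

    /* sketch: refuse to follow a symlink unless the follower owns it
     * or link and target share an owner; a simplified userspace
     * analogue of the in-kernel policy, not a kernel API. */
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    static int symlink_ok(const char *path)
    {
        struct stat lst, tst;

        if (lstat(path, &lst) < 0 || !S_ISLNK(lst.st_mode))
            return 1;           /* not a symlink: nothing to check */
        if (stat(path, &tst) < 0)
            return 0;           /* dangling target: refuse */
        return lst.st_uid == getuid() || lst.st_uid == tst.st_uid;
    }

    int main(int argc, char **argv)
    {
        if (argc != 2)
            return 2;
        printf("%s: %s\n", argv[1], symlink_ok(argv[1]) ? "follow" : "deny");
        return 0;
    }
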
64246@@ -3329,9 +3422,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
64247 goto unlock;
64248
64249 error = -EEXIST;
64250- if (d_is_positive(dentry))
64251+ if (d_is_positive(dentry)) {
64252+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
64253+ error = -ENOENT;
64254 goto fail;
64255-
64256+ }
64257 /*
64258 * Special case - lookup gave negative, but... we had foo/bar/
64259 * From the vfs_mknod() POV we just have a negative dentry -
64260@@ -3383,6 +3478,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
64261 }
64262 EXPORT_SYMBOL(user_path_create);
64263
64264+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
64265+{
64266+ struct filename *tmp = getname(pathname);
64267+ struct dentry *res;
64268+ if (IS_ERR(tmp))
64269+ return ERR_CAST(tmp);
64270+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
64271+ if (IS_ERR(res))
64272+ putname(tmp);
64273+ else
64274+ *to = tmp;
64275+ return res;
64276+}
64277+
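
user_path_create_with_name() duplicates user_path_create() but returns
the struct filename to the caller, so that linkat() below can hand the
user-visible pathname to gr_handle_hardlink()/gr_acl_handle_link() for
logging before releasing it with putname(). Ownership transfers only on
success: the error path frees the name itself. The same
transfer-on-success idiom in plain C (make_name()/create_with_name()
are illustrative stand-ins, not kernel functions):

    /* sketch: transfer-on-success resource handoff, the contract of
     * user_path_create_with_name(); make_name()/create_with_name()
     * are illustrative stand-ins, not kernel functions. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *make_name(const char *s)
    {
        return strdup(s);           /* NULL on failure, like getname() erring */
    }

    static int create_with_name(const char *s, char **out)
    {
        char *tmp = make_name(s);

        if (!tmp)
            return -1;
        if (strchr(tmp, '\n')) {    /* stand-in for kern_path_create() failing */
            free(tmp);              /* error path releases the name itself */
            return -1;
        }
        *out = tmp;                 /* success: the caller now owns it */
        return 0;
    }

    int main(void)
    {
        char *name = NULL;

        if (create_with_name("newlink", &name) == 0) {
            printf("logging against %s\n", name);
            free(name);             /* the caller's putname() */
        }
        return 0;
    }
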
64278 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
64279 {
64280 int error = may_create(dir, dentry);
64281@@ -3446,6 +3555,17 @@ retry:
64282
64283 if (!IS_POSIXACL(path.dentry->d_inode))
64284 mode &= ~current_umask();
64285+
64286+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
64287+ error = -EPERM;
64288+ goto out;
64289+ }
64290+
64291+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
64292+ error = -EACCES;
64293+ goto out;
64294+ }
64295+
64296 error = security_path_mknod(&path, dentry, mode, dev);
64297 if (error)
64298 goto out;
64299@@ -3461,6 +3581,8 @@ retry:
64300 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
64301 break;
64302 }
64303+ if (!error)
64304+ gr_handle_create(dentry, path.mnt);
64305 out:
64306 done_path_create(&path, dentry);
64307 if (retry_estale(error, lookup_flags)) {
64308@@ -3515,9 +3637,16 @@ retry:
64309
64310 if (!IS_POSIXACL(path.dentry->d_inode))
64311 mode &= ~current_umask();
64312+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
64313+ error = -EACCES;
64314+ goto out;
64315+ }
64316 error = security_path_mkdir(&path, dentry, mode);
64317 if (!error)
64318 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
64319+ if (!error)
64320+ gr_handle_create(dentry, path.mnt);
64321+out:
64322 done_path_create(&path, dentry);
64323 if (retry_estale(error, lookup_flags)) {
64324 lookup_flags |= LOOKUP_REVAL;
64325@@ -3601,6 +3730,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
64326 struct filename *name;
64327 struct dentry *dentry;
64328 struct nameidata nd;
64329+ u64 saved_ino = 0;
64330+ dev_t saved_dev = 0;
64331 unsigned int lookup_flags = 0;
64332 retry:
64333 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64334@@ -3633,10 +3764,21 @@ retry:
64335 error = -ENOENT;
64336 goto exit3;
64337 }
64338+
64339+ saved_ino = gr_get_ino_from_dentry(dentry);
64340+ saved_dev = gr_get_dev_from_dentry(dentry);
64341+
64342+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
64343+ error = -EACCES;
64344+ goto exit3;
64345+ }
64346+
64347 error = security_path_rmdir(&nd.path, dentry);
64348 if (error)
64349 goto exit3;
64350 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
64351+ if (!error && (saved_dev || saved_ino))
64352+ gr_handle_delete(saved_ino, saved_dev);
64353 exit3:
64354 dput(dentry);
64355 exit2:
64356@@ -3729,6 +3871,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
64357 struct nameidata nd;
64358 struct inode *inode = NULL;
64359 struct inode *delegated_inode = NULL;
64360+ u64 saved_ino = 0;
64361+ dev_t saved_dev = 0;
64362 unsigned int lookup_flags = 0;
64363 retry:
64364 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64365@@ -3755,10 +3899,22 @@ retry_deleg:
64366 if (d_is_negative(dentry))
64367 goto slashes;
64368 ihold(inode);
64369+
64370+ if (inode->i_nlink <= 1) {
64371+ saved_ino = gr_get_ino_from_dentry(dentry);
64372+ saved_dev = gr_get_dev_from_dentry(dentry);
64373+ }
64374+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
64375+ error = -EACCES;
64376+ goto exit2;
64377+ }
64378+
64379 error = security_path_unlink(&nd.path, dentry);
64380 if (error)
64381 goto exit2;
64382 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
64383+ if (!error && (saved_ino || saved_dev))
64384+ gr_handle_delete(saved_ino, saved_dev);
64385 exit2:
64386 dput(dentry);
64387 }
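
Both do_rmdir() and do_unlinkat() snapshot the victim's inode number and
device with gr_get_ino_from_dentry()/gr_get_dev_from_dentry() before the
vfs_rmdir()/vfs_unlink() call, since after a successful delete the
dentry no longer reliably identifies what was removed; gr_handle_delete()
then logs from the saved copies. do_unlinkat() only bothers when
i_nlink <= 1, i.e. when this unlink actually destroys the inode. A
userspace sketch of the capture-before-destroy idiom:

    /* sketch: capture-before-destroy, as in do_unlinkat() above;
     * record identity while the object still exists, log afterwards. */
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        struct stat st;
        ino_t saved_ino = 0;
        dev_t saved_dev = 0;

        if (argc != 2)
            return 2;
        if (stat(argv[1], &st) == 0 && st.st_nlink <= 1) {
            saved_ino = st.st_ino;  /* gone once the last link drops */
            saved_dev = st.st_dev;
        }
        if (unlink(argv[1]) == 0 && (saved_ino || saved_dev))
            printf("deleted inode %lu on dev %lu\n",
                   (unsigned long)saved_ino, (unsigned long)saved_dev);
        return 0;
    }
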
64388@@ -3847,9 +4003,17 @@ retry:
64389 if (IS_ERR(dentry))
64390 goto out_putname;
64391
64392+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
64393+ error = -EACCES;
64394+ goto out;
64395+ }
64396+
64397 error = security_path_symlink(&path, dentry, from->name);
64398 if (!error)
64399 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
64400+ if (!error)
64401+ gr_handle_create(dentry, path.mnt);
64402+out:
64403 done_path_create(&path, dentry);
64404 if (retry_estale(error, lookup_flags)) {
64405 lookup_flags |= LOOKUP_REVAL;
64406@@ -3953,6 +4117,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
64407 struct dentry *new_dentry;
64408 struct path old_path, new_path;
64409 struct inode *delegated_inode = NULL;
64410+ struct filename *to = NULL;
64411 int how = 0;
64412 int error;
64413
64414@@ -3976,7 +4141,7 @@ retry:
64415 if (error)
64416 return error;
64417
64418- new_dentry = user_path_create(newdfd, newname, &new_path,
64419+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
64420 (how & LOOKUP_REVAL));
64421 error = PTR_ERR(new_dentry);
64422 if (IS_ERR(new_dentry))
64423@@ -3988,11 +4153,28 @@ retry:
64424 error = may_linkat(&old_path);
64425 if (unlikely(error))
64426 goto out_dput;
64427+
64428+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
64429+ old_path.dentry->d_inode,
64430+ old_path.dentry->d_inode->i_mode, to)) {
64431+ error = -EACCES;
64432+ goto out_dput;
64433+ }
64434+
64435+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
64436+ old_path.dentry, old_path.mnt, to)) {
64437+ error = -EACCES;
64438+ goto out_dput;
64439+ }
64440+
64441 error = security_path_link(old_path.dentry, &new_path, new_dentry);
64442 if (error)
64443 goto out_dput;
64444 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
64445+ if (!error)
64446+ gr_handle_create(new_dentry, new_path.mnt);
64447 out_dput:
64448+ putname(to);
64449 done_path_create(&new_path, new_dentry);
64450 if (delegated_inode) {
64451 error = break_deleg_wait(&delegated_inode);
64452@@ -4308,6 +4490,20 @@ retry_deleg:
64453 if (new_dentry == trap)
64454 goto exit5;
64455
64456+ if (gr_bad_chroot_rename(old_dentry, oldnd.path.mnt, new_dentry, newnd.path.mnt)) {
64457+ /* use EXDEV error to cause 'mv' to switch to an alternative
64458+ * method for usability
64459+ */
64460+ error = -EXDEV;
64461+ goto exit5;
64462+ }
64463+
64464+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
64465+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
64466+ to, flags);
64467+ if (error)
64468+ goto exit5;
64469+
64470 error = security_path_rename(&oldnd.path, old_dentry,
64471 &newnd.path, new_dentry, flags);
64472 if (error)
64473@@ -4315,6 +4511,9 @@ retry_deleg:
64474 error = vfs_rename(old_dir->d_inode, old_dentry,
64475 new_dir->d_inode, new_dentry,
64476 &delegated_inode, flags);
64477+ if (!error)
64478+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
64479+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
64480 exit5:
64481 dput(new_dentry);
64482 exit4:
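
As the in-line comment notes, gr_bad_chroot_rename() reports -EXDEV
rather than -EPERM because rename(2) callers such as mv already treat
EXDEV ("cross-device link") as a cue to fall back to copy-then-unlink,
so a rename the chroot policy refuses still completes from the user's
point of view. A sketch of that caller-side fallback (copy_file() here
is a bare-bones helper, not how mv really copies):

    /* sketch: the EXDEV fallback that mv-like tools implement, which
     * is why blocked cross-chroot renames report -EXDEV. */
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static int copy_file(const char *src, const char *dst)
    {
        char buf[4096];
        ssize_t n;
        int in = open(src, O_RDONLY);
        int out = in < 0 ? -1 : open(dst, O_WRONLY | O_CREAT | O_TRUNC, 0600);

        if (in < 0 || out < 0) {
            if (in >= 0)
                close(in);
            return -1;
        }
        while ((n = read(in, buf, sizeof(buf))) > 0) {
            if (write(out, buf, (size_t)n) != n) {
                n = -1;
                break;
            }
        }
        close(in);
        close(out);
        return n < 0 ? -1 : 0;
    }

    static int move(const char *src, const char *dst)
    {
        if (rename(src, dst) == 0)
            return 0;
        if (errno != EXDEV)
            return -1;              /* a real failure: give up */
        if (copy_file(src, dst) != 0)
            return -1;              /* degrade to copy + unlink */
        return unlink(src);
    }

    int main(int argc, char **argv)
    {
        if (argc != 3)
            return 2;
        return move(argv[1], argv[2]) ? 1 : 0;
    }
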
64483@@ -4371,14 +4570,24 @@ EXPORT_SYMBOL(vfs_whiteout);
64484
64485 int readlink_copy(char __user *buffer, int buflen, const char *link)
64486 {
64487+ char tmpbuf[64];
64488+ const char *newlink;
64489 int len = PTR_ERR(link);
64490+
64491 if (IS_ERR(link))
64492 goto out;
64493
64494 len = strlen(link);
64495 if (len > (unsigned) buflen)
64496 len = buflen;
64497- if (copy_to_user(buffer, link, len))
64498+
64499+ if (len < sizeof(tmpbuf)) {
64500+ memcpy(tmpbuf, link, len);
64501+ newlink = tmpbuf;
64502+ } else
64503+ newlink = link;
64504+
64505+ if (copy_to_user(buffer, newlink, len))
64506 len = -EFAULT;
64507 out:
64508 return len;
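
readlink_copy() now bounces link bodies shorter than 64 bytes through a
stack buffer before copy_to_user(). Presumably this keeps the common
case (inline symlink data stored inside filesystem-private structures)
away from hardened user-copy checks that would otherwise need to
whitelist every such source region; a fixed stack buffer is always a
valid copy source, and longer targets still go out directly. A
userspace sketch of the bounce pattern (checked_copy() stands in for
copy_to_user()):

    /* sketch: bounce short payloads through a fixed stack buffer
     * before a copy routine that is picky about its source;
     * checked_copy() stands in for copy_to_user(). */
    #include <stdio.h>
    #include <string.h>

    static unsigned long checked_copy(char *dst, const char *src, size_t len)
    {
        memcpy(dst, src, len);
        return 0;                   /* 0 bytes left uncopied */
    }

    static long bounce_copy(char *dst, size_t buflen, const char *link)
    {
        char tmpbuf[64];
        const char *src = link;
        size_t len = strlen(link);

        if (len > buflen)
            len = buflen;
        if (len < sizeof(tmpbuf)) { /* short target: go via the stack */
            memcpy(tmpbuf, link, len);
            src = tmpbuf;
        }
        return checked_copy(dst, src, len) ? -1 : (long)len;
    }

    int main(void)
    {
        char out[128];
        long n = bounce_copy(out, sizeof(out), "/etc/localtime");

        printf("%.*s (%ld bytes)\n", (int)n, out, n);
        return 0;
    }
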
64509diff --git a/fs/namespace.c b/fs/namespace.c
64510index cd1e968..e64ff16 100644
64511--- a/fs/namespace.c
64512+++ b/fs/namespace.c
64513@@ -1448,6 +1448,9 @@ static int do_umount(struct mount *mnt, int flags)
64514 if (!(sb->s_flags & MS_RDONLY))
64515 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
64516 up_write(&sb->s_umount);
64517+
64518+ gr_log_remount(mnt->mnt_devname, retval);
64519+
64520 return retval;
64521 }
64522
64523@@ -1470,6 +1473,9 @@ static int do_umount(struct mount *mnt, int flags)
64524 }
64525 unlock_mount_hash();
64526 namespace_unlock();
64527+
64528+ gr_log_unmount(mnt->mnt_devname, retval);
64529+
64530 return retval;
64531 }
64532
64533@@ -1520,7 +1526,7 @@ static inline bool may_mount(void)
64534 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
64535 */
64536
64537-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
64538+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
64539 {
64540 struct path path;
64541 struct mount *mnt;
64542@@ -1565,7 +1571,7 @@ out:
64543 /*
64544 * The 2.0 compatible umount. No flags.
64545 */
64546-SYSCALL_DEFINE1(oldumount, char __user *, name)
64547+SYSCALL_DEFINE1(oldumount, const char __user *, name)
64548 {
64549 return sys_umount(name, 0);
64550 }
64551@@ -2631,6 +2637,16 @@ long do_mount(const char *dev_name, const char __user *dir_name,
64552 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
64553 MS_STRICTATIME);
64554
64555+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
64556+ retval = -EPERM;
64557+ goto dput_out;
64558+ }
64559+
64560+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
64561+ retval = -EPERM;
64562+ goto dput_out;
64563+ }
64564+
64565 if (flags & MS_REMOUNT)
64566 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
64567 data_page);
64568@@ -2644,7 +2660,10 @@ long do_mount(const char *dev_name, const char __user *dir_name,
64569 retval = do_new_mount(&path, type_page, flags, mnt_flags,
64570 dev_name, data_page);
64571 dput_out:
64572+ gr_log_mount(dev_name, &path, retval);
64573+
64574 path_put(&path);
64575+
64576 return retval;
64577 }
64578
64579@@ -2662,7 +2681,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
64580 * number incrementing at 10Ghz will take 12,427 years to wrap which
64581 * is effectively never, so we can ignore the possibility.
64582 */
64583-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
64584+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
64585
64586 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64587 {
64588@@ -2678,7 +2697,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64589 return ERR_PTR(ret);
64590 }
64591 new_ns->ns.ops = &mntns_operations;
64592- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
64593+ new_ns->seq = atomic64_add_return_unchecked(1, &mnt_ns_seq);
64594 atomic_set(&new_ns->count, 1);
64595 new_ns->root = NULL;
64596 INIT_LIST_HEAD(&new_ns->list);
64597@@ -2688,7 +2707,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64598 return new_ns;
64599 }
64600
64601-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
64602+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
64603 struct user_namespace *user_ns, struct fs_struct *new_fs)
64604 {
64605 struct mnt_namespace *new_ns;
64606@@ -2809,8 +2828,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
64607 }
64608 EXPORT_SYMBOL(mount_subtree);
64609
64610-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
64611- char __user *, type, unsigned long, flags, void __user *, data)
64612+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
64613+ const char __user *, type, unsigned long, flags, void __user *, data)
64614 {
64615 int ret;
64616 char *kernel_type;
64617@@ -2916,6 +2935,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
64618 if (error)
64619 goto out2;
64620
64621+ if (gr_handle_chroot_pivot()) {
64622+ error = -EPERM;
64623+ goto out2;
64624+ }
64625+
64626 get_fs_root(current->fs, &root);
64627 old_mp = lock_mount(&old);
64628 error = PTR_ERR(old_mp);
64629@@ -3190,7 +3214,7 @@ static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
64630 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
64631 return -EPERM;
64632
64633- if (fs->users != 1)
64634+ if (atomic_read(&fs->users) != 1)
64635 return -EINVAL;
64636
64637 get_mnt_ns(mnt_ns);
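
The mnt_ns_seq change shows the *_unchecked pattern applied throughout
this patch: under PaX REFCOUNT, ordinary atomic_t arithmetic traps on
overflow to stop reference-count wraparounds, so counters where
wrapping is expected or harmless, such as sequence numbers and
statistics (the comment above estimates 12,427 years to wrap here), are
moved to atomic64_unchecked_t/atomic_unchecked_t to keep plain modular
semantics. The fs->users test likewise becomes atomic_read() because
the patch converts fs_struct's users field to an atomic counter
elsewhere. A C11 sketch of the checked/unchecked split (type and
function names are illustrative, not the kernel's):

    /* sketch of the checked vs. unchecked counter split: the checked
     * variant refuses to wrap, as PaX REFCOUNT does by trapping; the
     * unchecked variant keeps modular semantics for sequence numbers
     * and statistics. */
    #include <assert.h>
    #include <limits.h>
    #include <stdatomic.h>
    #include <stdio.h>

    typedef struct { atomic_int v; } refcount_checked_t;
    typedef struct { atomic_int v; } counter_unchecked_t;

    static int checked_inc(refcount_checked_t *a)
    {
        int old = atomic_fetch_add(&a->v, 1);
        assert(old != INT_MAX);     /* a wrap here would be a refcount bug */
        return old + 1;
    }

    static int unchecked_inc(counter_unchecked_t *a)
    {
        return atomic_fetch_add(&a->v, 1) + 1;  /* wrapping is acceptable */
    }

    int main(void)
    {
        refcount_checked_t ref = { 1 };
        counter_unchecked_t seq = { 0 };

        printf("ref=%d seq=%d\n", checked_inc(&ref), unchecked_inc(&seq));
        return 0;
    }
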
64638diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
64639index 02f8d09..a5c25d1 100644
64640--- a/fs/nfs/callback_xdr.c
64641+++ b/fs/nfs/callback_xdr.c
64642@@ -51,7 +51,7 @@ struct callback_op {
64643 callback_decode_arg_t decode_args;
64644 callback_encode_res_t encode_res;
64645 long res_maxsize;
64646-};
64647+} __do_const;
64648
64649 static struct callback_op callback_ops[];
64650
64651diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
64652index 2211f6b..30d0950 100644
64653--- a/fs/nfs/inode.c
64654+++ b/fs/nfs/inode.c
64655@@ -1234,16 +1234,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
64656 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
64657 }
64658
64659-static atomic_long_t nfs_attr_generation_counter;
64660+static atomic_long_unchecked_t nfs_attr_generation_counter;
64661
64662 static unsigned long nfs_read_attr_generation_counter(void)
64663 {
64664- return atomic_long_read(&nfs_attr_generation_counter);
64665+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
64666 }
64667
64668 unsigned long nfs_inc_attr_generation_counter(void)
64669 {
64670- return atomic_long_inc_return(&nfs_attr_generation_counter);
64671+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
64672 }
64673
64674 void nfs_fattr_init(struct nfs_fattr *fattr)
64675diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
64676index ac71d13..a2e590a 100644
64677--- a/fs/nfsd/nfs4proc.c
64678+++ b/fs/nfsd/nfs4proc.c
64679@@ -1237,7 +1237,7 @@ struct nfsd4_operation {
64680 nfsd4op_rsize op_rsize_bop;
64681 stateid_getter op_get_currentstateid;
64682 stateid_setter op_set_currentstateid;
64683-};
64684+} __do_const;
64685
64686 static struct nfsd4_operation nfsd4_ops[];
64687
64688diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
64689index 15f7b73..00e230b 100644
64690--- a/fs/nfsd/nfs4xdr.c
64691+++ b/fs/nfsd/nfs4xdr.c
64692@@ -1560,7 +1560,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
64693
64694 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
64695
64696-static nfsd4_dec nfsd4_dec_ops[] = {
64697+static const nfsd4_dec nfsd4_dec_ops[] = {
64698 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
64699 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
64700 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
64701diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
64702index 83a9694..6b7f928 100644
64703--- a/fs/nfsd/nfscache.c
64704+++ b/fs/nfsd/nfscache.c
64705@@ -537,7 +537,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64706 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
64707 u32 hash;
64708 struct nfsd_drc_bucket *b;
64709- int len;
64710+ long len;
64711 size_t bufsize = 0;
64712
64713 if (!rp)
64714@@ -546,11 +546,14 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64715 hash = nfsd_cache_hash(rp->c_xid);
64716 b = &drc_hashtbl[hash];
64717
64718- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
64719- len >>= 2;
64720+ if (statp) {
64721+ len = (char*)statp - (char*)resv->iov_base;
64722+ len = resv->iov_len - len;
64723+ len >>= 2;
64724+ }
64725
64726 /* Don't cache excessive amounts of data and XDR failures */
64727- if (!statp || len > (256 >> 2)) {
64728+ if (!statp || len > (256 >> 2) || len < 0) {
64729 nfsd_reply_cache_free(b, rp);
64730 return;
64731 }
64732@@ -558,7 +561,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64733 switch (cachetype) {
64734 case RC_REPLSTAT:
64735 if (len != 1)
64736- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
64737+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
64738 rp->c_replstat = *statp;
64739 break;
64740 case RC_REPLBUFF:
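
The nfsd_cache_update() hunk widens len to long and adds a len < 0
test: resv->iov_len is a size_t, and if statp does not point inside the
head iovec the original int expression could wrap to a negative count
that the `len > (256 >> 2)` cap never caught, after which garbage would
be cached. A compact demonstration of the failure mode:

    /* sketch: why the reply-cache length moved from int to long with
     * an explicit negative check. An out-of-range statp makes the old
     * int expression negative, which the `len > 64` cap never
     * catches. (Conversion and right shift of negative values are
     * implementation-defined; arithmetic on common compilers.) */
    #include <stdio.h>

    int main(void)
    {
        char head[1024];
        char *iov_base = head;
        unsigned long iov_len = 256;    /* size_t in the kernel */
        char *statp = head + 512;       /* past the claimed iovec */

        int bad = (int)(iov_len - (unsigned long)(statp - iov_base));
        bad >>= 2;                      /* still negative */

        long good = statp - iov_base;
        good = (long)iov_len - good;
        good >>= 2;

        printf("old check drops it: %s\n",
               bad > (256 >> 2) ? "yes" : "no (garbage cached)");
        printf("new check drops it: %s\n",
               (good > (256 >> 2) || good < 0) ? "yes" : "no");
        return 0;
    }
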
64741diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
64742index 5685c67..73029ef 100644
64743--- a/fs/nfsd/vfs.c
64744+++ b/fs/nfsd/vfs.c
64745@@ -893,7 +893,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
64746
64747 oldfs = get_fs();
64748 set_fs(KERNEL_DS);
64749- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
64750+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
64751 set_fs(oldfs);
64752 return nfsd_finish_read(file, count, host_err);
64753 }
64754@@ -980,7 +980,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
64755
64756 /* Write the data. */
64757 oldfs = get_fs(); set_fs(KERNEL_DS);
64758- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
64759+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
64760 set_fs(oldfs);
64761 if (host_err < 0)
64762 goto out_nfserr;
64763@@ -1525,7 +1525,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
64764 */
64765
64766 oldfs = get_fs(); set_fs(KERNEL_DS);
64767- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
64768+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
64769 set_fs(oldfs);
64770
64771 if (host_err < 0)
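
The __force_user casts in the nfsd read/write/readlink paths accompany
the set_fs(KERNEL_DS) pattern: the code intentionally passes kernel
pointers to interfaces declared __user, and under the patch's stricter
sparse annotations a plain __user cast would still warn, so the force
attribute documents the deliberate address-space change. A minimal
illustration of the sparse mechanism (this shows the annotation idea
only, with locally defined macros rather than the grsecurity headers;
it builds as ordinary C and is checked with `sparse file.c`):

    /* sketch: the sparse address-space mechanism behind __user and
     * __force, using local stand-ins for the kernel macros. */
    #ifdef __CHECKER__
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    static long copy_out(void __user *dst, const void *src, size_t n)
    {
        /* a real copy_to_user() treats user pointers specially; the
         * demo just strips the annotation again */
        memcpy((void __force *)dst, src, n);
        return 0;
    }

    int main(void)
    {
        char kbuf[8] = "kernel";
        char ubuf[8];

        /* passing ubuf directly would draw a sparse warning about
         * mixing address spaces; __force marks the cast deliberate */
        copy_out((void __force __user *)ubuf, kbuf, sizeof(kbuf));
        printf("%s\n", ubuf);
        return 0;
    }
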
64772diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
64773index 52ccd34..7a6b202 100644
64774--- a/fs/nls/nls_base.c
64775+++ b/fs/nls/nls_base.c
64776@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
64777
64778 int __register_nls(struct nls_table *nls, struct module *owner)
64779 {
64780- struct nls_table ** tmp = &tables;
64781+ struct nls_table *tmp = tables;
64782
64783 if (nls->next)
64784 return -EBUSY;
64785
64786- nls->owner = owner;
64787+ pax_open_kernel();
64788+ *(void **)&nls->owner = owner;
64789+ pax_close_kernel();
64790 spin_lock(&nls_lock);
64791- while (*tmp) {
64792- if (nls == *tmp) {
64793+ while (tmp) {
64794+ if (nls == tmp) {
64795 spin_unlock(&nls_lock);
64796 return -EBUSY;
64797 }
64798- tmp = &(*tmp)->next;
64799+ tmp = tmp->next;
64800 }
64801- nls->next = tables;
64802+ pax_open_kernel();
64803+ *(struct nls_table **)&nls->next = tables;
64804+ pax_close_kernel();
64805 tables = nls;
64806 spin_unlock(&nls_lock);
64807 return 0;
64808@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
64809
64810 int unregister_nls(struct nls_table * nls)
64811 {
64812- struct nls_table ** tmp = &tables;
64813+ struct nls_table * const * tmp = &tables;
64814
64815 spin_lock(&nls_lock);
64816 while (*tmp) {
64817 if (nls == *tmp) {
64818- *tmp = nls->next;
64819+ pax_open_kernel();
64820+ *(struct nls_table **)tmp = nls->next;
64821+ pax_close_kernel();
64822 spin_unlock(&nls_lock);
64823 return 0;
64824 }
64825@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
64826 return -EINVAL;
64827 }
64828
64829-static struct nls_table *find_nls(char *charset)
64830+static struct nls_table *find_nls(const char *charset)
64831 {
64832 struct nls_table *nls;
64833 spin_lock(&nls_lock);
64834@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
64835 return nls;
64836 }
64837
64838-struct nls_table *load_nls(char *charset)
64839+struct nls_table *load_nls(const char *charset)
64840 {
64841 return try_then_request_module(find_nls(charset), "nls_%s", charset);
64842 }
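
The NLS changes illustrate the constify idiom used by the patch (see
also the __do_const added to callback_op and nfsd4_operation earlier):
structures written only at registration time become effectively
read-only, the `*(void **)&...` casts store through the now-const
fields, and legitimate writers bracket their stores with
pax_open_kernel()/pax_close_kernel(), which briefly lifts kernel write
protection. A userspace analogue of that open/close window built on
mprotect() (open_table()/close_table() are illustrative names; 4096 is
an assumed page size):

    /* sketch: data that is read-only except inside an explicit
     * open/close window, a userspace analogue of pax_open_kernel()/
     * pax_close_kernel(). */
    #define _DEFAULT_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    static long *table;

    static void open_table(void)  { mprotect(table, 4096, PROT_READ | PROT_WRITE); }
    static void close_table(void) { mprotect(table, 4096, PROT_READ); }

    int main(void)
    {
        table = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (table == MAP_FAILED)
            return 1;

        table[0] = 42;              /* registration-time write */
        close_table();              /* a stray store now faults */

        open_table();               /* legitimate update window */
        table[0] = 43;
        close_table();

        printf("%ld\n", table[0]);  /* reads stay permitted */
        return 0;
    }
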
64843diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
64844index 162b3f1..6076a7c 100644
64845--- a/fs/nls/nls_euc-jp.c
64846+++ b/fs/nls/nls_euc-jp.c
64847@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
64848 p_nls = load_nls("cp932");
64849
64850 if (p_nls) {
64851- table.charset2upper = p_nls->charset2upper;
64852- table.charset2lower = p_nls->charset2lower;
64853+ pax_open_kernel();
64854+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
64855+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
64856+ pax_close_kernel();
64857 return register_nls(&table);
64858 }
64859
64860diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
64861index a80a741..7b96e1b 100644
64862--- a/fs/nls/nls_koi8-ru.c
64863+++ b/fs/nls/nls_koi8-ru.c
64864@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
64865 p_nls = load_nls("koi8-u");
64866
64867 if (p_nls) {
64868- table.charset2upper = p_nls->charset2upper;
64869- table.charset2lower = p_nls->charset2lower;
64870+ pax_open_kernel();
64871+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
64872+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
64873+ pax_close_kernel();
64874 return register_nls(&table);
64875 }
64876
64877diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
64878index bff8567..83281c6 100644
64879--- a/fs/notify/fanotify/fanotify_user.c
64880+++ b/fs/notify/fanotify/fanotify_user.c
64881@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
64882
64883 fd = fanotify_event_metadata.fd;
64884 ret = -EFAULT;
64885- if (copy_to_user(buf, &fanotify_event_metadata,
64886- fanotify_event_metadata.event_len))
64887+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
64888+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
64889 goto out_close_fd;
64890
64891 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
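
The fanotify hunk bounds event_len before copy_to_user(): the length is
read from the metadata structure itself, and if it ever exceeded
sizeof(fanotify_event_metadata) the copy would spill adjacent kernel
stack to userspace. Clamping a self-described length against the size
of the object actually held is a standard infoleak guard:

    /* sketch: never let a length field read from a structure exceed
     * the size of the structure being copied from (the fanotify
     * guard above); memcpy stands in for copy_to_user(). */
    #include <stdio.h>
    #include <string.h>

    struct meta {
        unsigned int event_len;     /* self-describing length */
        unsigned int fd;
    };

    static long copy_event(char *ubuf, size_t ulen, const struct meta *m)
    {
        if (m->event_len > sizeof(*m) || m->event_len > ulen)
            return -1;              /* would leak stack past the object */
        memcpy(ubuf, m, m->event_len);
        return (long)m->event_len;
    }

    int main(void)
    {
        char buf[64];
        struct meta ok = { sizeof(struct meta), 3 };
        struct meta bad = { 4096, 3 };  /* corrupted/oversized claim */

        printf("ok:  %ld\n", copy_event(buf, sizeof(buf), &ok));
        printf("bad: %ld\n", copy_event(buf, sizeof(buf), &bad));
        return 0;
    }
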
64892diff --git a/fs/notify/notification.c b/fs/notify/notification.c
64893index a95d8e0..a91a5fd 100644
64894--- a/fs/notify/notification.c
64895+++ b/fs/notify/notification.c
64896@@ -48,7 +48,7 @@
64897 #include <linux/fsnotify_backend.h>
64898 #include "fsnotify.h"
64899
64900-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
64901+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
64902
64903 /**
64904 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
64905@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
64906 */
64907 u32 fsnotify_get_cookie(void)
64908 {
64909- return atomic_inc_return(&fsnotify_sync_cookie);
64910+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
64911 }
64912 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
64913
64914diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
64915index 9e38daf..5727cae 100644
64916--- a/fs/ntfs/dir.c
64917+++ b/fs/ntfs/dir.c
64918@@ -1310,7 +1310,7 @@ find_next_index_buffer:
64919 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
64920 ~(s64)(ndir->itype.index.block_size - 1)));
64921 /* Bounds checks. */
64922- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
64923+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
64924 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
64925 "inode 0x%lx or driver bug.", vdir->i_ino);
64926 goto err_out;
64927diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
64928index 643faa4..ef9027e 100644
64929--- a/fs/ntfs/file.c
64930+++ b/fs/ntfs/file.c
64931@@ -1280,7 +1280,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
64932 char *addr;
64933 size_t total = 0;
64934 unsigned len;
64935- int left;
64936+ unsigned left;
64937
64938 do {
64939 len = PAGE_CACHE_SIZE - ofs;
64940diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
64941index 9e1e112..241a52a 100644
64942--- a/fs/ntfs/super.c
64943+++ b/fs/ntfs/super.c
64944@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
64945 if (!silent)
64946 ntfs_error(sb, "Primary boot sector is invalid.");
64947 } else if (!silent)
64948- ntfs_error(sb, read_err_str, "primary");
64949+ ntfs_error(sb, read_err_str, "%s", "primary");
64950 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
64951 if (bh_primary)
64952 brelse(bh_primary);
64953@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
64954 goto hotfix_primary_boot_sector;
64955 brelse(bh_backup);
64956 } else if (!silent)
64957- ntfs_error(sb, read_err_str, "backup");
64958+ ntfs_error(sb, read_err_str, "%s", "backup");
64959 /* Try to read NT3.51- backup boot sector. */
64960 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
64961 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
64962@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
64963 "sector.");
64964 brelse(bh_backup);
64965 } else if (!silent)
64966- ntfs_error(sb, read_err_str, "backup");
64967+ ntfs_error(sb, read_err_str, "%s", "backup");
64968 /* We failed. Cleanup and return. */
64969 if (bh_primary)
64970 brelse(bh_primary);
64971diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
64972index 0440134..d52c93a 100644
64973--- a/fs/ocfs2/localalloc.c
64974+++ b/fs/ocfs2/localalloc.c
64975@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
64976 goto bail;
64977 }
64978
64979- atomic_inc(&osb->alloc_stats.moves);
64980+ atomic_inc_unchecked(&osb->alloc_stats.moves);
64981
64982 bail:
64983 if (handle)
64984diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
64985index 7d6b7d0..5fb529a 100644
64986--- a/fs/ocfs2/ocfs2.h
64987+++ b/fs/ocfs2/ocfs2.h
64988@@ -242,11 +242,11 @@ enum ocfs2_vol_state
64989
64990 struct ocfs2_alloc_stats
64991 {
64992- atomic_t moves;
64993- atomic_t local_data;
64994- atomic_t bitmap_data;
64995- atomic_t bg_allocs;
64996- atomic_t bg_extends;
64997+ atomic_unchecked_t moves;
64998+ atomic_unchecked_t local_data;
64999+ atomic_unchecked_t bitmap_data;
65000+ atomic_unchecked_t bg_allocs;
65001+ atomic_unchecked_t bg_extends;
65002 };
65003
65004 enum ocfs2_local_alloc_state
65005diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
65006index 0cb889a..6a26b24 100644
65007--- a/fs/ocfs2/suballoc.c
65008+++ b/fs/ocfs2/suballoc.c
65009@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
65010 mlog_errno(status);
65011 goto bail;
65012 }
65013- atomic_inc(&osb->alloc_stats.bg_extends);
65014+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
65015
65016 /* You should never ask for this much metadata */
65017 BUG_ON(bits_wanted >
65018@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
65019 mlog_errno(status);
65020 goto bail;
65021 }
65022- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65023+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65024
65025 *suballoc_loc = res.sr_bg_blkno;
65026 *suballoc_bit_start = res.sr_bit_offset;
65027@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
65028 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
65029 res->sr_bits);
65030
65031- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65032+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65033
65034 BUG_ON(res->sr_bits != 1);
65035
65036@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
65037 mlog_errno(status);
65038 goto bail;
65039 }
65040- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65041+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65042
65043 BUG_ON(res.sr_bits != 1);
65044
65045@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
65046 cluster_start,
65047 num_clusters);
65048 if (!status)
65049- atomic_inc(&osb->alloc_stats.local_data);
65050+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
65051 } else {
65052 if (min_clusters > (osb->bitmap_cpg - 1)) {
65053 /* The only paths asking for contiguousness
65054@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
65055 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
65056 res.sr_bg_blkno,
65057 res.sr_bit_offset);
65058- atomic_inc(&osb->alloc_stats.bitmap_data);
65059+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
65060 *num_clusters = res.sr_bits;
65061 }
65062 }
65063diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
65064index 8372317..ec86e79 100644
65065--- a/fs/ocfs2/super.c
65066+++ b/fs/ocfs2/super.c
65067@@ -306,11 +306,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
65068 "%10s => GlobalAllocs: %d LocalAllocs: %d "
65069 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
65070 "Stats",
65071- atomic_read(&osb->alloc_stats.bitmap_data),
65072- atomic_read(&osb->alloc_stats.local_data),
65073- atomic_read(&osb->alloc_stats.bg_allocs),
65074- atomic_read(&osb->alloc_stats.moves),
65075- atomic_read(&osb->alloc_stats.bg_extends));
65076+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
65077+ atomic_read_unchecked(&osb->alloc_stats.local_data),
65078+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
65079+ atomic_read_unchecked(&osb->alloc_stats.moves),
65080+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
65081
65082 out += snprintf(buf + out, len - out,
65083 "%10s => State: %u Descriptor: %llu Size: %u bits "
65084@@ -2113,11 +2113,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
65085
65086 mutex_init(&osb->system_file_mutex);
65087
65088- atomic_set(&osb->alloc_stats.moves, 0);
65089- atomic_set(&osb->alloc_stats.local_data, 0);
65090- atomic_set(&osb->alloc_stats.bitmap_data, 0);
65091- atomic_set(&osb->alloc_stats.bg_allocs, 0);
65092- atomic_set(&osb->alloc_stats.bg_extends, 0);
65093+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
65094+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
65095+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
65096+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
65097+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
65098
65099 /* Copy the blockcheck stats from the superblock probe */
65100 osb->osb_ecc_stats = *stats;
65101diff --git a/fs/open.c b/fs/open.c
65102index 813be03..781941d 100644
65103--- a/fs/open.c
65104+++ b/fs/open.c
65105@@ -32,6 +32,8 @@
65106 #include <linux/dnotify.h>
65107 #include <linux/compat.h>
65108
65109+#define CREATE_TRACE_POINTS
65110+#include <trace/events/fs.h>
65111 #include "internal.h"
65112
65113 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
65114@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
65115 error = locks_verify_truncate(inode, NULL, length);
65116 if (!error)
65117 error = security_path_truncate(path);
65118+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
65119+ error = -EACCES;
65120 if (!error)
65121 error = do_truncate(path->dentry, length, 0, NULL);
65122
65123@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
65124 error = locks_verify_truncate(inode, f.file, length);
65125 if (!error)
65126 error = security_path_truncate(&f.file->f_path);
65127+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
65128+ error = -EACCES;
65129 if (!error)
65130 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
65131 sb_end_write(inode->i_sb);
65132@@ -392,6 +398,9 @@ retry:
65133 if (__mnt_is_readonly(path.mnt))
65134 res = -EROFS;
65135
65136+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
65137+ res = -EACCES;
65138+
65139 out_path_release:
65140 path_put(&path);
65141 if (retry_estale(res, lookup_flags)) {
65142@@ -423,6 +432,8 @@ retry:
65143 if (error)
65144 goto dput_and_out;
65145
65146+ gr_log_chdir(path.dentry, path.mnt);
65147+
65148 set_fs_pwd(current->fs, &path);
65149
65150 dput_and_out:
65151@@ -452,6 +463,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
65152 goto out_putf;
65153
65154 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
65155+
65156+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
65157+ error = -EPERM;
65158+
65159+ if (!error)
65160+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
65161+
65162 if (!error)
65163 set_fs_pwd(current->fs, &f.file->f_path);
65164 out_putf:
65165@@ -481,7 +499,13 @@ retry:
65166 if (error)
65167 goto dput_and_out;
65168
65169+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
65170+ goto dput_and_out;
65171+
65172 set_fs_root(current->fs, &path);
65173+
65174+ gr_handle_chroot_chdir(&path);
65175+
65176 error = 0;
65177 dput_and_out:
65178 path_put(&path);
65179@@ -505,6 +529,16 @@ static int chmod_common(struct path *path, umode_t mode)
65180 return error;
65181 retry_deleg:
65182 mutex_lock(&inode->i_mutex);
65183+
65184+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
65185+ error = -EACCES;
65186+ goto out_unlock;
65187+ }
65188+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
65189+ error = -EACCES;
65190+ goto out_unlock;
65191+ }
65192+
65193 error = security_path_chmod(path, mode);
65194 if (error)
65195 goto out_unlock;
65196@@ -570,6 +604,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
65197 uid = make_kuid(current_user_ns(), user);
65198 gid = make_kgid(current_user_ns(), group);
65199
65200+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
65201+ return -EACCES;
65202+
65203 newattrs.ia_valid = ATTR_CTIME;
65204 if (user != (uid_t) -1) {
65205 if (!uid_valid(uid))
65206@@ -1014,6 +1051,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
65207 } else {
65208 fsnotify_open(f);
65209 fd_install(fd, f);
65210+ trace_do_sys_open(tmp->name, flags, mode);
65211 }
65212 }
65213 putname(tmp);
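
The fchdir() hook above (gr_chroot_fchdir() returning -EPERM) targets
the classic chroot breakout: a process that keeps a directory fd from
outside the jail can fchdir() to it after chroot(), climb upward with
chdir(".."), and re-chroot at the real root. A condensed sketch of the
escape being blocked (requires CAP_SYS_CHROOT inside the jail;
demonstration only):

    /* sketch: the fchdir()-based chroot breakout that the
     * gr_chroot_fchdir() hook rejects. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open(".", O_RDONLY | O_DIRECTORY);  /* dir outside the jail */

        if (fd < 0 || chroot("/tmp") < 0 || chdir("/") < 0)
            return 1;
        if (fchdir(fd) < 0) {       /* grsecurity returns -EPERM here */
            perror("fchdir");
            return 1;
        }
        for (int i = 0; i < 64; i++)
            chdir("..");            /* climb above the jail root */
        chroot(".");                /* re-anchor at the real / */
        execl("/bin/sh", "sh", (char *)NULL);
        return 1;
    }
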
65214diff --git a/fs/pipe.c b/fs/pipe.c
65215index 21981e5..3d5f55c 100644
65216--- a/fs/pipe.c
65217+++ b/fs/pipe.c
65218@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
65219
65220 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
65221 {
65222- if (pipe->files)
65223+ if (atomic_read(&pipe->files))
65224 mutex_lock_nested(&pipe->mutex, subclass);
65225 }
65226
65227@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
65228
65229 void pipe_unlock(struct pipe_inode_info *pipe)
65230 {
65231- if (pipe->files)
65232+ if (atomic_read(&pipe->files))
65233 mutex_unlock(&pipe->mutex);
65234 }
65235 EXPORT_SYMBOL(pipe_unlock);
65236@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
65237 }
65238 if (bufs) /* More to do? */
65239 continue;
65240- if (!pipe->writers)
65241+ if (!atomic_read(&pipe->writers))
65242 break;
65243- if (!pipe->waiting_writers) {
65244+ if (!atomic_read(&pipe->waiting_writers)) {
65245 /* syscall merging: Usually we must not sleep
65246 * if O_NONBLOCK is set, or if we got some data.
65247 * But if a writer sleeps in kernel space, then
65248@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65249
65250 __pipe_lock(pipe);
65251
65252- if (!pipe->readers) {
65253+ if (!atomic_read(&pipe->readers)) {
65254 send_sig(SIGPIPE, current, 0);
65255 ret = -EPIPE;
65256 goto out;
65257@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65258 for (;;) {
65259 int bufs;
65260
65261- if (!pipe->readers) {
65262+ if (!atomic_read(&pipe->readers)) {
65263 send_sig(SIGPIPE, current, 0);
65264 if (!ret)
65265 ret = -EPIPE;
65266@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65267 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65268 do_wakeup = 0;
65269 }
65270- pipe->waiting_writers++;
65271+ atomic_inc(&pipe->waiting_writers);
65272 pipe_wait(pipe);
65273- pipe->waiting_writers--;
65274+ atomic_dec(&pipe->waiting_writers);
65275 }
65276 out:
65277 __pipe_unlock(pipe);
65278@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65279 mask = 0;
65280 if (filp->f_mode & FMODE_READ) {
65281 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
65282- if (!pipe->writers && filp->f_version != pipe->w_counter)
65283+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
65284 mask |= POLLHUP;
65285 }
65286
65287@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65288 * Most Unices do not set POLLERR for FIFOs but on Linux they
65289 * behave exactly like pipes for poll().
65290 */
65291- if (!pipe->readers)
65292+ if (!atomic_read(&pipe->readers))
65293 mask |= POLLERR;
65294 }
65295
65296@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
65297 int kill = 0;
65298
65299 spin_lock(&inode->i_lock);
65300- if (!--pipe->files) {
65301+ if (atomic_dec_and_test(&pipe->files)) {
65302 inode->i_pipe = NULL;
65303 kill = 1;
65304 }
65305@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
65306
65307 __pipe_lock(pipe);
65308 if (file->f_mode & FMODE_READ)
65309- pipe->readers--;
65310+ atomic_dec(&pipe->readers);
65311 if (file->f_mode & FMODE_WRITE)
65312- pipe->writers--;
65313+ atomic_dec(&pipe->writers);
65314
65315- if (pipe->readers || pipe->writers) {
65316+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
65317 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
65318 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65319 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
65320@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
65321 kfree(pipe);
65322 }
65323
65324-static struct vfsmount *pipe_mnt __read_mostly;
65325+struct vfsmount *pipe_mnt __read_mostly;
65326
65327 /*
65328 * pipefs_dname() is called from d_path().
65329@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
65330 goto fail_iput;
65331
65332 inode->i_pipe = pipe;
65333- pipe->files = 2;
65334- pipe->readers = pipe->writers = 1;
65335+ atomic_set(&pipe->files, 2);
65336+ atomic_set(&pipe->readers, 1);
65337+ atomic_set(&pipe->writers, 1);
65338 inode->i_fop = &pipefifo_fops;
65339
65340 /*
65341@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
65342 spin_lock(&inode->i_lock);
65343 if (inode->i_pipe) {
65344 pipe = inode->i_pipe;
65345- pipe->files++;
65346+ atomic_inc(&pipe->files);
65347 spin_unlock(&inode->i_lock);
65348 } else {
65349 spin_unlock(&inode->i_lock);
65350 pipe = alloc_pipe_info();
65351 if (!pipe)
65352 return -ENOMEM;
65353- pipe->files = 1;
65354+ atomic_set(&pipe->files, 1);
65355 spin_lock(&inode->i_lock);
65356 if (unlikely(inode->i_pipe)) {
65357- inode->i_pipe->files++;
65358+ atomic_inc(&inode->i_pipe->files);
65359 spin_unlock(&inode->i_lock);
65360 free_pipe_info(pipe);
65361 pipe = inode->i_pipe;
65362@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
65363 * opened, even when there is no process writing the FIFO.
65364 */
65365 pipe->r_counter++;
65366- if (pipe->readers++ == 0)
65367+ if (atomic_inc_return(&pipe->readers) == 1)
65368 wake_up_partner(pipe);
65369
65370- if (!is_pipe && !pipe->writers) {
65371+ if (!is_pipe && !atomic_read(&pipe->writers)) {
65372 if ((filp->f_flags & O_NONBLOCK)) {
65373 /* suppress POLLHUP until we have
65374 * seen a writer */
65375@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
65376 * errno=ENXIO when there is no process reading the FIFO.
65377 */
65378 ret = -ENXIO;
65379- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
65380+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
65381 goto err;
65382
65383 pipe->w_counter++;
65384- if (!pipe->writers++)
65385+ if (atomic_inc_return(&pipe->writers) == 1)
65386 wake_up_partner(pipe);
65387
65388- if (!is_pipe && !pipe->readers) {
65389+ if (!is_pipe && !atomic_read(&pipe->readers)) {
65390 if (wait_for_partner(pipe, &pipe->r_counter))
65391 goto err_wr;
65392 }
65393@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
65394 * the process can at least talk to itself.
65395 */
65396
65397- pipe->readers++;
65398- pipe->writers++;
65399+ atomic_inc(&pipe->readers);
65400+ atomic_inc(&pipe->writers);
65401 pipe->r_counter++;
65402 pipe->w_counter++;
65403- if (pipe->readers == 1 || pipe->writers == 1)
65404+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
65405 wake_up_partner(pipe);
65406 break;
65407
65408@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
65409 return 0;
65410
65411 err_rd:
65412- if (!--pipe->readers)
65413+ if (atomic_dec_and_test(&pipe->readers))
65414 wake_up_interruptible(&pipe->wait);
65415 ret = -ERESTARTSYS;
65416 goto err;
65417
65418 err_wr:
65419- if (!--pipe->writers)
65420+ if (atomic_dec_and_test(&pipe->writers))
65421 wake_up_interruptible(&pipe->wait);
65422 ret = -ERESTARTSYS;
65423 goto err;
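
pipe.c converts files, readers, writers and waiting_writers from plain
ints to atomic_t, presumably so the REFCOUNT overflow protection can
police these lifetime counters and the open-coded arithmetic stays
sound. Note the idiom translation: `if (pipe->readers++ == 0)` becomes
`if (atomic_inc_return(&pipe->readers) == 1)` and `if (!--pipe->files)`
becomes `if (atomic_dec_and_test(&pipe->files))`, which test the same
transitions. A small equivalence sketch in C11 atomics:

    /* sketch: post-increment-was-zero versus inc_return-is-one, the
     * idiom swap used for pipe->readers/writers/files above. */
    #include <stdatomic.h>
    #include <stdio.h>

    int main(void)
    {
        int plain = 0;
        atomic_int counted = 0;

        /* first-opener test: (x++ == 0) == (atomic_inc_return(&x) == 1) */
        int first_plain  = (plain++ == 0);
        int first_atomic = (atomic_fetch_add(&counted, 1) + 1 == 1);
        printf("first opener: %d %d\n", first_plain, first_atomic);

        /* last-closer test: (!--x) == atomic_dec_and_test(&x) */
        int last_plain  = (--plain == 0);
        int last_atomic = (atomic_fetch_sub(&counted, 1) - 1 == 0);
        printf("last closer:  %d %d\n", last_plain, last_atomic);
        return 0;
    }
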
65424diff --git a/fs/posix_acl.c b/fs/posix_acl.c
65425index 0855f77..6787d50 100644
65426--- a/fs/posix_acl.c
65427+++ b/fs/posix_acl.c
65428@@ -20,6 +20,7 @@
65429 #include <linux/xattr.h>
65430 #include <linux/export.h>
65431 #include <linux/user_namespace.h>
65432+#include <linux/grsecurity.h>
65433
65434 struct posix_acl **acl_by_type(struct inode *inode, int type)
65435 {
65436@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
65437 }
65438 }
65439 if (mode_p)
65440- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65441+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65442 return not_equiv;
65443 }
65444 EXPORT_SYMBOL(posix_acl_equiv_mode);
65445@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
65446 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
65447 }
65448
65449- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65450+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65451 return not_equiv;
65452 }
65453
65454@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
65455 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
65456 int err = -ENOMEM;
65457 if (clone) {
65458+ *mode_p &= ~gr_acl_umask();
65459+
65460 err = posix_acl_create_masq(clone, mode_p);
65461 if (err < 0) {
65462 posix_acl_release(clone);
65463@@ -659,11 +662,12 @@ struct posix_acl *
65464 posix_acl_from_xattr(struct user_namespace *user_ns,
65465 const void *value, size_t size)
65466 {
65467- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
65468- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
65469+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
65470+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
65471 int count;
65472 struct posix_acl *acl;
65473 struct posix_acl_entry *acl_e;
65474+ umode_t umask = gr_acl_umask();
65475
65476 if (!value)
65477 return NULL;
65478@@ -689,12 +693,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65479
65480 switch(acl_e->e_tag) {
65481 case ACL_USER_OBJ:
65482+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65483+ break;
65484 case ACL_GROUP_OBJ:
65485 case ACL_MASK:
65486+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65487+ break;
65488 case ACL_OTHER:
65489+ acl_e->e_perm &= ~(umask & S_IRWXO);
65490 break;
65491
65492 case ACL_USER:
65493+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65494 acl_e->e_uid =
65495 make_kuid(user_ns,
65496 le32_to_cpu(entry->e_id));
65497@@ -702,6 +712,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65498 goto fail;
65499 break;
65500 case ACL_GROUP:
65501+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65502 acl_e->e_gid =
65503 make_kgid(user_ns,
65504 le32_to_cpu(entry->e_id));
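
posix_acl_from_xattr() now applies gr_acl_umask() per entry tag as an
ACL is decoded: user entries are clipped by the owner third of the
umask, group and mask entries by the group third, and other entries by
the last third, so a grsecurity-enforced umask cannot be sidestepped by
installing a permissive ACL. The masking arithmetic is just the usual
rwxrwxrwx split:

    /* sketch of the per-tag umask clipping added to
     * posix_acl_from_xattr(): each ACL entry class loses the matching
     * third of the umask. Plain C, no kernel types. */
    #include <stdio.h>

    #define S_IRWXU 0700
    #define S_IRWXG 0070
    #define S_IRWXO 0007

    enum tag { USER_OBJ, GROUP_OBJ, MASK, OTHER };

    static unsigned clip(enum tag t, unsigned perm, unsigned umask)
    {
        switch (t) {
        case USER_OBJ:  return perm & ~((umask & S_IRWXU) >> 6);
        case GROUP_OBJ:
        case MASK:      return perm & ~((umask & S_IRWXG) >> 3);
        case OTHER:     return perm & ~(umask & S_IRWXO);
        }
        return perm;
    }

    int main(void)
    {
        unsigned umask = 0077;      /* deny everything to group/other */

        printf("user  rwx -> %o\n", clip(USER_OBJ, 07, umask));
        printf("group rwx -> %o\n", clip(GROUP_OBJ, 07, umask));
        printf("other rwx -> %o\n", clip(OTHER, 07, umask));
        return 0;
    }
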
65505diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
65506index 2183fcf..3c32a98 100644
65507--- a/fs/proc/Kconfig
65508+++ b/fs/proc/Kconfig
65509@@ -30,7 +30,7 @@ config PROC_FS
65510
65511 config PROC_KCORE
65512 bool "/proc/kcore support" if !ARM
65513- depends on PROC_FS && MMU
65514+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
65515 help
65516 Provides a virtual ELF core file of the live kernel. This can
65517 be read with gdb and other ELF tools. No modifications can be
65518@@ -38,8 +38,8 @@ config PROC_KCORE
65519
65520 config PROC_VMCORE
65521 bool "/proc/vmcore support"
65522- depends on PROC_FS && CRASH_DUMP
65523- default y
65524+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
65525+ default n
65526 help
65527 Exports the dump image of crashed kernel in ELF format.
65528
65529@@ -63,8 +63,8 @@ config PROC_SYSCTL
65530 limited in memory.
65531
65532 config PROC_PAGE_MONITOR
65533- default y
65534- depends on PROC_FS && MMU
65535+ default n
65536+ depends on PROC_FS && MMU && !GRKERNSEC
65537 bool "Enable /proc page monitoring" if EXPERT
65538 help
65539 Various /proc files exist to monitor process memory utilization:
65540diff --git a/fs/proc/array.c b/fs/proc/array.c
65541index bd117d0..e6872d7 100644
65542--- a/fs/proc/array.c
65543+++ b/fs/proc/array.c
65544@@ -60,6 +60,7 @@
65545 #include <linux/tty.h>
65546 #include <linux/string.h>
65547 #include <linux/mman.h>
65548+#include <linux/grsecurity.h>
65549 #include <linux/proc_fs.h>
65550 #include <linux/ioport.h>
65551 #include <linux/uaccess.h>
65552@@ -344,6 +345,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
65553 seq_putc(m, '\n');
65554 }
65555
65556+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65557+static inline void task_pax(struct seq_file *m, struct task_struct *p)
65558+{
65559+ if (p->mm)
65560+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
65561+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
65562+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
65563+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
65564+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
65565+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
65566+ else
65567+ seq_printf(m, "PaX:\t-----\n");
65568+}
65569+#endif
65570+
65571 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
65572 struct pid *pid, struct task_struct *task)
65573 {
65574@@ -362,9 +378,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
65575 task_cpus_allowed(m, task);
65576 cpuset_task_status_allowed(m, task);
65577 task_context_switch_counts(m, task);
65578+
65579+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65580+ task_pax(m, task);
65581+#endif
65582+
65583+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
65584+ task_grsec_rbac(m, task);
65585+#endif
65586+
65587 return 0;
65588 }
65589
65590+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65591+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
65592+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
65593+ _mm->pax_flags & MF_PAX_SEGMEXEC))
65594+#endif
65595+
65596 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65597 struct pid *pid, struct task_struct *task, int whole)
65598 {
65599@@ -386,6 +417,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65600 char tcomm[sizeof(task->comm)];
65601 unsigned long flags;
65602
65603+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65604+ if (current->exec_id != m->exec_id) {
65605+ gr_log_badprocpid("stat");
65606+ return 0;
65607+ }
65608+#endif
65609+
65610 state = *get_task_state(task);
65611 vsize = eip = esp = 0;
65612 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
65613@@ -456,6 +494,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65614 gtime = task_gtime(task);
65615 }
65616
65617+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65618+ if (PAX_RAND_FLAGS(mm)) {
65619+ eip = 0;
65620+ esp = 0;
65621+ wchan = 0;
65622+ }
65623+#endif
65624+#ifdef CONFIG_GRKERNSEC_HIDESYM
65625+ wchan = 0;
65626+	eip = 0;
65627+	esp = 0;
65628+#endif
65629+
65630 /* scale priority and nice values from timeslices to -20..20 */
65631 /* to make it look like a "normal" Unix priority/nice value */
65632 priority = task_prio(task);
65633@@ -487,9 +538,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65634 seq_put_decimal_ull(m, ' ', vsize);
65635 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
65636 seq_put_decimal_ull(m, ' ', rsslim);
65637+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65638+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
65639+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
65640+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
65641+#else
65642 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
65643 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
65644 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
65645+#endif
65646 seq_put_decimal_ull(m, ' ', esp);
65647 seq_put_decimal_ull(m, ' ', eip);
65648 /* The signal information here is obsolete.
65649@@ -511,7 +568,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65650 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
65651 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
65652
65653- if (mm && permitted) {
65654+ if (mm && permitted
65655+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65656+ && !PAX_RAND_FLAGS(mm)
65657+#endif
65658+ ) {
65659 seq_put_decimal_ull(m, ' ', mm->start_data);
65660 seq_put_decimal_ull(m, ' ', mm->end_data);
65661 seq_put_decimal_ull(m, ' ', mm->start_brk);
65662@@ -549,8 +610,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
65663 struct pid *pid, struct task_struct *task)
65664 {
65665 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
65666- struct mm_struct *mm = get_task_mm(task);
65667+ struct mm_struct *mm;
65668
65669+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65670+ if (current->exec_id != m->exec_id) {
65671+ gr_log_badprocpid("statm");
65672+ return 0;
65673+ }
65674+#endif
65675+ mm = get_task_mm(task);
65676 if (mm) {
65677 size = task_statm(mm, &shared, &text, &data, &resident);
65678 mmput(mm);
65679@@ -573,6 +641,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
65680 return 0;
65681 }
65682
65683+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
65684+int proc_pid_ipaddr(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
65685+{
65686+ unsigned long flags;
65687+ u32 curr_ip = 0;
65688+
65689+ if (lock_task_sighand(task, &flags)) {
65690+ curr_ip = task->signal->curr_ip;
65691+ unlock_task_sighand(task, &flags);
65692+ }
65693+ return seq_printf(m, "%pI4\n", &curr_ip);
65694+}
65695+#endif
65696+
65697 #ifdef CONFIG_CHECKPOINT_RESTORE
65698 static struct pid *
65699 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
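
Under GRKERNSEC_PROC_MEMMAP and HIDESYM, do_task_stat() zeroes esp, eip
and wchan and degrades start_code/end_code/start_stack whenever the
target mm is randomized, because those /proc/<pid>/stat fields
otherwise hand ASLR offsets to any local observer. Per proc(5) they are
fields 26-30 of the stat line; the sketch below prints them for the
current process so the exposure is easy to see:

    /* sketch: print the /proc/self/stat fields sanitized above
     * (startcode, endcode, startstack, kstkesp, kstkeip; fields
     * 26-30 per proc(5)). Linux-only demonstration. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[4096];
        FILE *f = fopen("/proc/self/stat", "r");

        if (!f || !fgets(line, sizeof(line), f))
            return 1;
        fclose(f);

        /* comm may contain spaces, so skip past the closing ')' */
        char *p = strrchr(line, ')');
        if (!p)
            return 1;
        int field = 2;
        for (p++; *p; p++) {
            if (*p != ' ')
                continue;
            field++;
            if (field >= 26 && field <= 30)
                printf("field %d: %.*s\n", field,
                       (int)strcspn(p + 1, " \n"), p + 1);
        }
        return 0;
    }
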
65700diff --git a/fs/proc/base.c b/fs/proc/base.c
65701index 3f3d7ae..68de109 100644
65702--- a/fs/proc/base.c
65703+++ b/fs/proc/base.c
65704@@ -113,6 +113,14 @@ struct pid_entry {
65705 union proc_op op;
65706 };
65707
65708+struct getdents_callback {
65709+ struct linux_dirent __user * current_dir;
65710+ struct linux_dirent __user * previous;
65711+ struct file * file;
65712+ int count;
65713+ int error;
65714+};
65715+
65716 #define NOD(NAME, MODE, IOP, FOP, OP) { \
65717 .name = (NAME), \
65718 .len = sizeof(NAME) - 1, \
65719@@ -208,12 +216,28 @@ static int proc_pid_cmdline(struct seq_file *m, struct pid_namespace *ns,
65720 return 0;
65721 }
65722
65723+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65724+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
65725+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
65726+ _mm->pax_flags & MF_PAX_SEGMEXEC))
65727+#endif
65728+
65729 static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
65730 struct pid *pid, struct task_struct *task)
65731 {
65732 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
65733 if (mm && !IS_ERR(mm)) {
65734 unsigned int nwords = 0;
65735+
65736+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65737+ /* allow if we're currently ptracing this task */
65738+ if (PAX_RAND_FLAGS(mm) &&
65739+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
65740+ mmput(mm);
65741+ return 0;
65742+ }
65743+#endif
65744+
65745 do {
65746 nwords += 2;
65747 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
65748@@ -225,7 +249,7 @@ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
65749 }
65750
65751
65752-#ifdef CONFIG_KALLSYMS
65753+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65754 /*
65755 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
65756 * Returns the resolved symbol. If that fails, simply return the address.
65757@@ -265,7 +289,7 @@ static void unlock_trace(struct task_struct *task)
65758 mutex_unlock(&task->signal->cred_guard_mutex);
65759 }
65760
65761-#ifdef CONFIG_STACKTRACE
65762+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65763
65764 #define MAX_STACK_TRACE_DEPTH 64
65765
65766@@ -456,7 +480,7 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
65767 return 0;
65768 }
65769
65770-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
65771+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
65772 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
65773 struct pid *pid, struct task_struct *task)
65774 {
65775@@ -486,7 +510,7 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
65776 /************************************************************************/
65777
65778 /* permission checks */
65779-static int proc_fd_access_allowed(struct inode *inode)
65780+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
65781 {
65782 struct task_struct *task;
65783 int allowed = 0;
65784@@ -496,7 +520,10 @@ static int proc_fd_access_allowed(struct inode *inode)
65785 */
65786 task = get_proc_task(inode);
65787 if (task) {
65788- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
65789+ if (log)
65790+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
65791+ else
65792+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
65793 put_task_struct(task);
65794 }
65795 return allowed;
65796@@ -527,10 +554,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
65797 struct task_struct *task,
65798 int hide_pid_min)
65799 {
65800+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
65801+ return false;
65802+
65803+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65804+ rcu_read_lock();
65805+ {
65806+ const struct cred *tmpcred = current_cred();
65807+ const struct cred *cred = __task_cred(task);
65808+
65809+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
65810+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65811+ || in_group_p(grsec_proc_gid)
65812+#endif
65813+ ) {
65814+ rcu_read_unlock();
65815+ return true;
65816+ }
65817+ }
65818+ rcu_read_unlock();
65819+
65820+ if (!pid->hide_pid)
65821+ return false;
65822+#endif
65823+
65824 if (pid->hide_pid < hide_pid_min)
65825 return true;
65826 if (in_group_p(pid->pid_gid))
65827 return true;
65828+
65829 return ptrace_may_access(task, PTRACE_MODE_READ);
65830 }
65831
65832@@ -548,7 +600,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
65833 put_task_struct(task);
65834
65835 if (!has_perms) {
65836+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65837+ {
65838+#else
65839 if (pid->hide_pid == 2) {
65840+#endif
65841 /*
65842 * Let's make getdents(), stat(), and open()
65843 * consistent with each other. If a process
65844@@ -609,6 +665,10 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
65845
65846 if (task) {
65847 mm = mm_access(task, mode);
65848+ if (!IS_ERR_OR_NULL(mm) && gr_acl_handle_procpidmem(task)) {
65849+ mmput(mm);
65850+ mm = ERR_PTR(-EPERM);
65851+ }
65852 put_task_struct(task);
65853
65854 if (!IS_ERR_OR_NULL(mm)) {
65855@@ -630,6 +690,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
65856 return PTR_ERR(mm);
65857
65858 file->private_data = mm;
65859+
65860+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65861+ file->f_version = current->exec_id;
65862+#endif
65863+
65864 return 0;
65865 }
65866
65867@@ -651,6 +716,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
65868 ssize_t copied;
65869 char *page;
65870
65871+#ifdef CONFIG_GRKERNSEC
65872+ if (write)
65873+ return -EPERM;
65874+#endif
65875+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65876+ if (file->f_version != current->exec_id) {
65877+ gr_log_badprocpid("mem");
65878+ return 0;
65879+ }
65880+#endif
65881+
65882 if (!mm)
65883 return 0;
65884
65885@@ -663,7 +739,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
65886 goto free;
65887
65888 while (count > 0) {
65889- int this_len = min_t(int, count, PAGE_SIZE);
65890+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
65891
65892 if (write && copy_from_user(page, buf, this_len)) {
65893 copied = -EFAULT;
65894@@ -755,6 +831,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
65895 if (!mm)
65896 return 0;
65897
65898+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65899+ if (file->f_version != current->exec_id) {
65900+ gr_log_badprocpid("environ");
65901+ return 0;
65902+ }
65903+#endif
65904+
65905 page = (char *)__get_free_page(GFP_TEMPORARY);
65906 if (!page)
65907 return -ENOMEM;
65908@@ -764,7 +847,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
65909 goto free;
65910 while (count > 0) {
65911 size_t this_len, max_len;
65912- int retval;
65913+ ssize_t retval;
65914
65915 if (src >= (mm->env_end - mm->env_start))
65916 break;
65917@@ -1378,7 +1461,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
65918 int error = -EACCES;
65919
65920 /* Are we allowed to snoop on the tasks file descriptors? */
65921- if (!proc_fd_access_allowed(inode))
65922+ if (!proc_fd_access_allowed(inode, 0))
65923 goto out;
65924
65925 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
65926@@ -1422,8 +1505,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
65927 struct path path;
65928
65929 /* Are we allowed to snoop on the tasks file descriptors? */
65930- if (!proc_fd_access_allowed(inode))
65931- goto out;
65932+ /* logging this is needed for learning on chromium to work properly,
65933+ but we don't want to flood the logs from 'ps' which does a readlink
65934+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
65935+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
65936+ */
65937+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
65938+ if (!proc_fd_access_allowed(inode,0))
65939+ goto out;
65940+ } else {
65941+ if (!proc_fd_access_allowed(inode,1))
65942+ goto out;
65943+ }
65944
65945 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
65946 if (error)
65947@@ -1473,7 +1566,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
65948 rcu_read_lock();
65949 cred = __task_cred(task);
65950 inode->i_uid = cred->euid;
65951+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65952+ inode->i_gid = grsec_proc_gid;
65953+#else
65954 inode->i_gid = cred->egid;
65955+#endif
65956 rcu_read_unlock();
65957 }
65958 security_task_to_inode(task, inode);
65959@@ -1509,10 +1606,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
65960 return -ENOENT;
65961 }
65962 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
65963+#ifdef CONFIG_GRKERNSEC_PROC_USER
65964+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
65965+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65966+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
65967+#endif
65968 task_dumpable(task)) {
65969 cred = __task_cred(task);
65970 stat->uid = cred->euid;
65971+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65972+ stat->gid = grsec_proc_gid;
65973+#else
65974 stat->gid = cred->egid;
65975+#endif
65976 }
65977 }
65978 rcu_read_unlock();
65979@@ -1550,11 +1656,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
65980
65981 if (task) {
65982 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
65983+#ifdef CONFIG_GRKERNSEC_PROC_USER
65984+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
65985+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65986+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
65987+#endif
65988 task_dumpable(task)) {
65989 rcu_read_lock();
65990 cred = __task_cred(task);
65991 inode->i_uid = cred->euid;
65992+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
65993+ inode->i_gid = grsec_proc_gid;
65994+#else
65995 inode->i_gid = cred->egid;
65996+#endif
65997 rcu_read_unlock();
65998 } else {
65999 inode->i_uid = GLOBAL_ROOT_UID;
66000@@ -2085,6 +2200,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
66001 if (!task)
66002 goto out_no_task;
66003
66004+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66005+ goto out;
66006+
66007 /*
66008 * Yes, it does not scale. And it should not. Don't add
66009 * new entries into /proc/<tgid>/ without very good reasons.
66010@@ -2115,6 +2233,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
66011 if (!task)
66012 return -ENOENT;
66013
66014+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66015+ goto out;
66016+
66017 if (!dir_emit_dots(file, ctx))
66018 goto out;
66019
66020@@ -2557,7 +2678,7 @@ static const struct pid_entry tgid_base_stuff[] = {
66021 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
66022 #endif
66023 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
66024-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
66025+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66026 ONE("syscall", S_IRUSR, proc_pid_syscall),
66027 #endif
66028 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
66029@@ -2582,10 +2703,10 @@ static const struct pid_entry tgid_base_stuff[] = {
66030 #ifdef CONFIG_SECURITY
66031 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
66032 #endif
66033-#ifdef CONFIG_KALLSYMS
66034+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66035 ONE("wchan", S_IRUGO, proc_pid_wchan),
66036 #endif
66037-#ifdef CONFIG_STACKTRACE
66038+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66039 ONE("stack", S_IRUSR, proc_pid_stack),
66040 #endif
66041 #ifdef CONFIG_SCHEDSTATS
66042@@ -2619,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
66043 #ifdef CONFIG_HARDWALL
66044 ONE("hardwall", S_IRUGO, proc_pid_hardwall),
66045 #endif
66046+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66047+ ONE("ipaddr", S_IRUSR, proc_pid_ipaddr),
66048+#endif
66049 #ifdef CONFIG_USER_NS
66050 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
66051 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
66052@@ -2751,7 +2875,14 @@ static int proc_pid_instantiate(struct inode *dir,
66053 if (!inode)
66054 goto out;
66055
66056+#ifdef CONFIG_GRKERNSEC_PROC_USER
66057+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
66058+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66059+ inode->i_gid = grsec_proc_gid;
66060+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
66061+#else
66062 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
66063+#endif
66064 inode->i_op = &proc_tgid_base_inode_operations;
66065 inode->i_fop = &proc_tgid_base_operations;
66066 inode->i_flags|=S_IMMUTABLE;
66067@@ -2789,7 +2920,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
66068 if (!task)
66069 goto out;
66070
66071+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66072+ goto out_put_task;
66073+
66074 result = proc_pid_instantiate(dir, dentry, task, NULL);
66075+out_put_task:
66076 put_task_struct(task);
66077 out:
66078 return ERR_PTR(result);
66079@@ -2903,7 +3038,7 @@ static const struct pid_entry tid_base_stuff[] = {
66080 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
66081 #endif
66082 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
66083-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
66084+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66085 ONE("syscall", S_IRUSR, proc_pid_syscall),
66086 #endif
66087 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
66088@@ -2930,10 +3065,10 @@ static const struct pid_entry tid_base_stuff[] = {
66089 #ifdef CONFIG_SECURITY
66090 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
66091 #endif
66092-#ifdef CONFIG_KALLSYMS
66093+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66094 ONE("wchan", S_IRUGO, proc_pid_wchan),
66095 #endif
66096-#ifdef CONFIG_STACKTRACE
66097+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66098 ONE("stack", S_IRUSR, proc_pid_stack),
66099 #endif
66100 #ifdef CONFIG_SCHEDSTATS
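
The fs/proc/base.c changes above layer grsecurity policy on top of the stock /proc visibility logic: has_pid_permissions() first rejects tasks hidden by chroot or ACL policy, and on GRKERNSEC_PROC_USER/USERGROUP builds it then whitelists root, the task owner, and grsec_proc_gid members before the stock hide_pid checks run. A stand-alone model of the resulting decision order for a USERGROUP build, a sketch only (stub booleans stand in for the kernel predicates; on stock builds only the last three checks exist):

/* Control-flow sketch of the patched has_pid_permissions(). */
#include <stdbool.h>
#include <stdio.h>

struct view_ctx {
	bool chrooted_or_hidden;  /* gr_pid_is_chrooted() || gr_check_hidden_task() */
	bool is_root;             /* uid_eq(current uid, GLOBAL_ROOT_UID) */
	bool owns_task;           /* uid_eq(current uid, task euid) */
	bool in_proc_gid;         /* in_group_p(grsec_proc_gid) */
	int  hide_pid;            /* pid namespace hide_pid setting */
	int  hide_pid_min;
	bool in_pid_gid;          /* in_group_p(pid->pid_gid) */
	bool ptrace_ok;           /* ptrace_may_access(task, PTRACE_MODE_READ) */
};

static bool has_pid_permissions(const struct view_ctx *c)
{
	if (c->chrooted_or_hidden)
		return false;
	/* grsec whitelist: root, owner, or the proc group */
	if (c->is_root || c->owns_task || c->in_proc_gid)
		return true;
	/* default-deny unless hide_pid was explicitly configured */
	if (!c->hide_pid)
		return false;
	/* stock logic only runs past the whitelist */
	if (c->hide_pid < c->hide_pid_min)
		return true;
	if (c->in_pid_gid)
		return true;
	return c->ptrace_ok;
}

int main(void)
{
	struct view_ctx other = { .hide_pid = 2, .hide_pid_min = 1 };

	printf("unrelated user sees foreign pid: %s\n",
	       has_pid_permissions(&other) ? "yes" : "no");
	return 0;
}
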
66101diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
66102index cbd82df..c0407d2 100644
66103--- a/fs/proc/cmdline.c
66104+++ b/fs/proc/cmdline.c
66105@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
66106
66107 static int __init proc_cmdline_init(void)
66108 {
66109+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66110+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
66111+#else
66112 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
66113+#endif
66114 return 0;
66115 }
66116 fs_initcall(proc_cmdline_init);
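
Under GRKERNSEC_PROC_ADD, cmdline.c (and devices.c and interrupts.c below) swap proc_create() for proc_create_grsec(). Its definition lives elsewhere in the patch; inferred from these call sites alone, it is a proc_create() variant that applies the PROC_USER/PROC_USERGROUP visibility mode instead of the caller's. A hypothetical reconstruction, an assumption rather than the patch's actual code:

/* Hypothetical shape of proc_create_grsec(), inferred from its call
 * sites only; the real definition is outside this section. */
#include <linux/proc_fs.h>

static inline struct proc_dir_entry *proc_create_grsec(const char *name,
	umode_t mode, struct proc_dir_entry *parent,
	const struct file_operations *fops)
{
#ifdef CONFIG_GRKERNSEC_PROC_USER
	return proc_create(name, S_IRUSR, parent, fops);
#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
	/* the entry's gid would additionally be set to grsec_proc_gid */
	return proc_create(name, S_IRUSR | S_IRGRP, parent, fops);
#else
	return proc_create(name, mode, parent, fops);
#endif
}
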
66117diff --git a/fs/proc/devices.c b/fs/proc/devices.c
66118index 50493ed..248166b 100644
66119--- a/fs/proc/devices.c
66120+++ b/fs/proc/devices.c
66121@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
66122
66123 static int __init proc_devices_init(void)
66124 {
66125+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66126+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
66127+#else
66128 proc_create("devices", 0, NULL, &proc_devinfo_operations);
66129+#endif
66130 return 0;
66131 }
66132 fs_initcall(proc_devices_init);
66133diff --git a/fs/proc/fd.c b/fs/proc/fd.c
66134index 8e5ad83..1f07a8c 100644
66135--- a/fs/proc/fd.c
66136+++ b/fs/proc/fd.c
66137@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
66138 if (!task)
66139 return -ENOENT;
66140
66141- files = get_files_struct(task);
66142+ if (!gr_acl_handle_procpidmem(task))
66143+ files = get_files_struct(task);
66144 put_task_struct(task);
66145
66146 if (files) {
66147@@ -284,11 +285,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
66148 */
66149 int proc_fd_permission(struct inode *inode, int mask)
66150 {
66151+ struct task_struct *task;
66152 int rv = generic_permission(inode, mask);
66153- if (rv == 0)
66154- return 0;
66155+
66156 if (task_tgid(current) == proc_pid(inode))
66157 rv = 0;
66158+
66159+ task = get_proc_task(inode);
66160+ if (task == NULL)
66161+ return rv;
66162+
66163+ if (gr_acl_handle_procpidmem(task))
66164+ rv = -EACCES;
66165+
66166+ put_task_struct(task);
66167+
66168 return rv;
66169 }
66170
66171diff --git a/fs/proc/generic.c b/fs/proc/generic.c
66172index b502bba..849e216 100644
66173--- a/fs/proc/generic.c
66174+++ b/fs/proc/generic.c
66175@@ -22,6 +22,7 @@
66176 #include <linux/bitops.h>
66177 #include <linux/spinlock.h>
66178 #include <linux/completion.h>
66179+#include <linux/grsecurity.h>
66180 #include <asm/uaccess.h>
66181
66182 #include "internal.h"
66183@@ -253,6 +254,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
66184 return proc_lookup_de(PDE(dir), dir, dentry);
66185 }
66186
66187+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
66188+ unsigned int flags)
66189+{
66190+ if (gr_proc_is_restricted())
66191+ return ERR_PTR(-EACCES);
66192+
66193+ return proc_lookup_de(PDE(dir), dir, dentry);
66194+}
66195+
66196 /*
66197 * This returns non-zero if at EOF, so that the /proc
66198 * root directory can use this and check if it should
66199@@ -310,6 +320,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
66200 return proc_readdir_de(PDE(inode), file, ctx);
66201 }
66202
66203+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
66204+{
66205+ struct inode *inode = file_inode(file);
66206+
66207+ if (gr_proc_is_restricted())
66208+ return -EACCES;
66209+
66210+ return proc_readdir_de(PDE(inode), file, ctx);
66211+}
66212+
66213 /*
66214 * These are the generic /proc directory operations. They
66215 * use the in-memory "struct proc_dir_entry" tree to parse
66216@@ -321,6 +341,12 @@ static const struct file_operations proc_dir_operations = {
66217 .iterate = proc_readdir,
66218 };
66219
66220+static const struct file_operations proc_dir_restricted_operations = {
66221+ .llseek = generic_file_llseek,
66222+ .read = generic_read_dir,
66223+ .iterate = proc_readdir_restrict,
66224+};
66225+
66226 /*
66227 * proc directories can do almost nothing..
66228 */
66229@@ -330,6 +356,12 @@ static const struct inode_operations proc_dir_inode_operations = {
66230 .setattr = proc_notify_change,
66231 };
66232
66233+static const struct inode_operations proc_dir_restricted_inode_operations = {
66234+ .lookup = proc_lookup_restrict,
66235+ .getattr = proc_getattr,
66236+ .setattr = proc_notify_change,
66237+};
66238+
66239 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
66240 {
66241 int ret;
66242@@ -339,8 +371,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
66243 return ret;
66244
66245 if (S_ISDIR(dp->mode)) {
66246- dp->proc_fops = &proc_dir_operations;
66247- dp->proc_iops = &proc_dir_inode_operations;
66248+ if (dp->restricted) {
66249+ dp->proc_fops = &proc_dir_restricted_operations;
66250+ dp->proc_iops = &proc_dir_restricted_inode_operations;
66251+ } else {
66252+ dp->proc_fops = &proc_dir_operations;
66253+ dp->proc_iops = &proc_dir_inode_operations;
66254+ }
66255 dir->nlink++;
66256 } else if (S_ISLNK(dp->mode)) {
66257 dp->proc_iops = &proc_link_inode_operations;
66258@@ -453,6 +490,27 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
66259 }
66260 EXPORT_SYMBOL_GPL(proc_mkdir_data);
66261
66262+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
66263+ struct proc_dir_entry *parent, void *data)
66264+{
66265+ struct proc_dir_entry *ent;
66266+
66267+ if (mode == 0)
66268+ mode = S_IRUGO | S_IXUGO;
66269+
66270+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
66271+ if (ent) {
66272+ ent->data = data;
66273+ ent->restricted = 1;
66274+ if (proc_register(parent, ent) < 0) {
66275+ kfree(ent);
66276+ ent = NULL;
66277+ }
66278+ }
66279+ return ent;
66280+}
66281+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
66282+
66283 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
66284 struct proc_dir_entry *parent)
66285 {
66286@@ -467,6 +525,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
66287 }
66288 EXPORT_SYMBOL(proc_mkdir);
66289
66290+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
66291+ struct proc_dir_entry *parent)
66292+{
66293+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
66294+}
66295+EXPORT_SYMBOL(proc_mkdir_restrict);
66296+
66297 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
66298 struct proc_dir_entry *parent,
66299 const struct file_operations *proc_fops,
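
fs/proc/generic.c above grows a parallel "restricted" flavor of the proc directory operations: directories created through proc_mkdir_restrict()/proc_mkdir_data_restrict() set the new pde->restricted flag, and their lookup/iterate callbacks fail with -EACCES whenever gr_proc_is_restricted() flags the caller. A usage sketch from a subsystem's point of view (the directory name is illustrative, not taken from the patch):

/* Sketch: creating a restricted /proc directory with the new API. */
#include <linux/errno.h>
#include <linux/proc_fs.h>

static struct proc_dir_entry *example_dir;

static int __init example_init(void)
{
	example_dir = proc_mkdir_restrict("example", NULL);
	if (!example_dir)
		return -ENOMEM;
	/* entries created below this directory are reached through the
	 * restricted lookup/readdir ops: callers failing
	 * gr_proc_is_restricted() get -EACCES on traversal */
	return 0;
}
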
66300diff --git a/fs/proc/inode.c b/fs/proc/inode.c
66301index 3b0f838..a0e0f63e 100644
66302--- a/fs/proc/inode.c
66303+++ b/fs/proc/inode.c
66304@@ -24,11 +24,17 @@
66305 #include <linux/mount.h>
66306 #include <linux/magic.h>
66307 #include <linux/namei.h>
66308+#include <linux/grsecurity.h>
66309
66310 #include <asm/uaccess.h>
66311
66312 #include "internal.h"
66313
66314+#ifdef CONFIG_PROC_SYSCTL
66315+extern const struct inode_operations proc_sys_inode_operations;
66316+extern const struct inode_operations proc_sys_dir_operations;
66317+#endif
66318+
66319 static void proc_evict_inode(struct inode *inode)
66320 {
66321 struct proc_dir_entry *de;
66322@@ -49,6 +55,13 @@ static void proc_evict_inode(struct inode *inode)
66323 RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
66324 sysctl_head_put(head);
66325 }
66326+
66327+#ifdef CONFIG_PROC_SYSCTL
66328+ if (inode->i_op == &proc_sys_inode_operations ||
66329+ inode->i_op == &proc_sys_dir_operations)
66330+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
66331+#endif
66332+
66333 }
66334
66335 static struct kmem_cache * proc_inode_cachep;
66336@@ -426,7 +439,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
66337 if (de->mode) {
66338 inode->i_mode = de->mode;
66339 inode->i_uid = de->uid;
66340+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66341+ inode->i_gid = grsec_proc_gid;
66342+#else
66343 inode->i_gid = de->gid;
66344+#endif
66345 }
66346 if (de->size)
66347 inode->i_size = de->size;
66348diff --git a/fs/proc/internal.h b/fs/proc/internal.h
66349index c835b94..c9e01a3 100644
66350--- a/fs/proc/internal.h
66351+++ b/fs/proc/internal.h
66352@@ -47,9 +47,10 @@ struct proc_dir_entry {
66353 struct completion *pde_unload_completion;
66354 struct list_head pde_openers; /* who did ->open, but not ->release */
66355 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
66356+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
66357 u8 namelen;
66358 char name[];
66359-};
66360+} __randomize_layout;
66361
66362 union proc_op {
66363 int (*proc_get_link)(struct dentry *, struct path *);
66364@@ -67,7 +68,7 @@ struct proc_inode {
66365 struct ctl_table *sysctl_entry;
66366 const struct proc_ns_operations *ns_ops;
66367 struct inode vfs_inode;
66368-};
66369+} __randomize_layout;
66370
66371 /*
66372 * General functions
66373@@ -155,6 +156,10 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
66374 struct pid *, struct task_struct *);
66375 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
66376 struct pid *, struct task_struct *);
66377+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66378+extern int proc_pid_ipaddr(struct seq_file *, struct pid_namespace *,
66379+ struct pid *, struct task_struct *);
66380+#endif
66381
66382 /*
66383 * base.c
66384@@ -179,9 +184,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
66385 * generic.c
66386 */
66387 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
66388+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
66389 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
66390 struct dentry *);
66391 extern int proc_readdir(struct file *, struct dir_context *);
66392+extern int proc_readdir_restrict(struct file *, struct dir_context *);
66393 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
66394
66395 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
66396diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
66397index a352d57..cb94a5c 100644
66398--- a/fs/proc/interrupts.c
66399+++ b/fs/proc/interrupts.c
66400@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
66401
66402 static int __init proc_interrupts_init(void)
66403 {
66404+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66405+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
66406+#else
66407 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
66408+#endif
66409 return 0;
66410 }
66411 fs_initcall(proc_interrupts_init);
66412diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
66413index 91a4e64..cb007c0 100644
66414--- a/fs/proc/kcore.c
66415+++ b/fs/proc/kcore.c
66416@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66417 * the addresses in the elf_phdr on our list.
66418 */
66419 start = kc_offset_to_vaddr(*fpos - elf_buflen);
66420- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
66421+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
66422+ if (tsz > buflen)
66423 tsz = buflen;
66424-
66425+
66426 while (buflen) {
66427 struct kcore_list *m;
66428
66429@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66430 kfree(elf_buf);
66431 } else {
66432 if (kern_addr_valid(start)) {
66433- unsigned long n;
66434+ char *elf_buf;
66435+ mm_segment_t oldfs;
66436
66437- n = copy_to_user(buffer, (char *)start, tsz);
66438- /*
66439- * We cannot distinguish between fault on source
66440- * and fault on destination. When this happens
66441- * we clear too and hope it will trigger the
66442- * EFAULT again.
66443- */
66444- if (n) {
66445- if (clear_user(buffer + tsz - n,
66446- n))
66447+ elf_buf = kmalloc(tsz, GFP_KERNEL);
66448+ if (!elf_buf)
66449+ return -ENOMEM;
66450+ oldfs = get_fs();
66451+ set_fs(KERNEL_DS);
66452+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
66453+ set_fs(oldfs);
66454+ if (copy_to_user(buffer, elf_buf, tsz)) {
66455+ kfree(elf_buf);
66456 return -EFAULT;
66457+ }
66458 }
66459+ set_fs(oldfs);
66460+ kfree(elf_buf);
66461 } else {
66462 if (clear_user(buffer, tsz))
66463 return -EFAULT;
66464@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66465
66466 static int open_kcore(struct inode *inode, struct file *filp)
66467 {
66468+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
66469+ return -EPERM;
66470+#endif
66471 if (!capable(CAP_SYS_RAWIO))
66472 return -EPERM;
66473 if (kcore_need_update)
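
Two independent hardenings in fs/proc/kcore.c: open_kcore() is short-circuited to -EPERM whenever GRKERNSEC_PROC_ADD or GRKERNSEC_HIDESYM is enabled (the CAP_SYS_RAWIO check below it becomes unreachable on such builds), and the read path stops copying straight from kernel addresses to userspace, bouncing through a kmalloc buffer via set_fs(KERNEL_DS)/__copy_from_user so a fault on the kernel side can no longer be probed through partial copy_to_user() results. A quick userspace probe of the first change, as a sketch:

/* Sketch: /proc/kcore should be unopenable (EPERM, even for root)
 * on a GRKERNSEC_PROC_ADD or GRKERNSEC_HIDESYM kernel. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/kcore", O_RDONLY);

	if (fd < 0) {
		printf("open(/proc/kcore): %s\n",
		       errno == EPERM ? "EPERM (grsec policy)" : "failed");
		return 0;
	}
	puts("opened: stock semantics (CAP_SYS_RAWIO held)");
	close(fd);
	return 0;
}
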
66474diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
66475index d3ebf2e..6ad42d1 100644
66476--- a/fs/proc/meminfo.c
66477+++ b/fs/proc/meminfo.c
66478@@ -194,7 +194,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
66479 vmi.used >> 10,
66480 vmi.largest_chunk >> 10
66481 #ifdef CONFIG_MEMORY_FAILURE
66482- , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66483+ , atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66484 #endif
66485 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
66486 , K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
66487diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
66488index d4a3574..b421ce9 100644
66489--- a/fs/proc/nommu.c
66490+++ b/fs/proc/nommu.c
66491@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
66492
66493 if (file) {
66494 seq_pad(m, ' ');
66495- seq_path(m, &file->f_path, "");
66496+ seq_path(m, &file->f_path, "\n\\");
66497 }
66498
66499 seq_putc(m, '\n');
66500diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
66501index 1bde894..22ac7eb 100644
66502--- a/fs/proc/proc_net.c
66503+++ b/fs/proc/proc_net.c
66504@@ -23,9 +23,27 @@
66505 #include <linux/nsproxy.h>
66506 #include <net/net_namespace.h>
66507 #include <linux/seq_file.h>
66508+#include <linux/grsecurity.h>
66509
66510 #include "internal.h"
66511
66512+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
66513+static struct seq_operations *ipv6_seq_ops_addr;
66514+
66515+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
66516+{
66517+ ipv6_seq_ops_addr = addr;
66518+}
66519+
66520+void unregister_ipv6_seq_ops_addr(void)
66521+{
66522+ ipv6_seq_ops_addr = NULL;
66523+}
66524+
66525+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
66526+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
66527+#endif
66528+
66529 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
66530 {
66531 return pde->parent->data;
66532@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
66533 return maybe_get_net(PDE_NET(PDE(inode)));
66534 }
66535
66536+extern const struct seq_operations dev_seq_ops;
66537+
66538 int seq_open_net(struct inode *ino, struct file *f,
66539 const struct seq_operations *ops, int size)
66540 {
66541@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
66542
66543 BUG_ON(size < sizeof(*p));
66544
66545+ /* only permit access to /proc/net/dev */
66546+ if (
66547+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
66548+ ops != ipv6_seq_ops_addr &&
66549+#endif
66550+ ops != &dev_seq_ops && gr_proc_is_restricted())
66551+ return -EACCES;
66552+
66553 net = get_proc_net(ino);
66554 if (net == NULL)
66555 return -ENXIO;
66556@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
66557 int err;
66558 struct net *net;
66559
66560+ if (gr_proc_is_restricted())
66561+ return -EACCES;
66562+
66563 err = -ENXIO;
66564 net = get_proc_net(inode);
66565 if (net == NULL)
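
With the fs/proc/proc_net.c changes, seq_open_net() and single_open_net() refuse every /proc/net seq file except /proc/net/dev (plus, on IPv6 builds, whatever seq_operations the IPv6 module registers through the new register_ipv6_seq_ops_addr() hook) once gr_proc_is_restricted() flags the caller. A sketch probing the resulting behavior:

/* Sketch: under the /proc/net restriction, /proc/net/dev stays
 * readable while other seq files there return EACCES. */
#include <errno.h>
#include <stdio.h>

static void try_read(const char *path)
{
	FILE *f = fopen(path, "r");

	if (f) {
		printf("%s: readable\n", path);
		fclose(f);
	} else {
		printf("%s: %s\n", path,
		       errno == EACCES ? "EACCES (restricted)" : "open failed");
	}
}

int main(void)
{
	try_read("/proc/net/dev");
	try_read("/proc/net/tcp");	/* not on the allow list above */
	return 0;
}
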
66566diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
66567index f92d5dd..26398ac 100644
66568--- a/fs/proc/proc_sysctl.c
66569+++ b/fs/proc/proc_sysctl.c
66570@@ -11,13 +11,21 @@
66571 #include <linux/namei.h>
66572 #include <linux/mm.h>
66573 #include <linux/module.h>
66574+#include <linux/nsproxy.h>
66575+#ifdef CONFIG_GRKERNSEC
66576+#include <net/net_namespace.h>
66577+#endif
66578 #include "internal.h"
66579
66580+extern int gr_handle_chroot_sysctl(const int op);
66581+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66582+ const int op);
66583+
66584 static const struct dentry_operations proc_sys_dentry_operations;
66585 static const struct file_operations proc_sys_file_operations;
66586-static const struct inode_operations proc_sys_inode_operations;
66587+const struct inode_operations proc_sys_inode_operations;
66588 static const struct file_operations proc_sys_dir_file_operations;
66589-static const struct inode_operations proc_sys_dir_operations;
66590+const struct inode_operations proc_sys_dir_operations;
66591
66592 void proc_sys_poll_notify(struct ctl_table_poll *poll)
66593 {
66594@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
66595
66596 err = NULL;
66597 d_set_d_op(dentry, &proc_sys_dentry_operations);
66598+
66599+ gr_handle_proc_create(dentry, inode);
66600+
66601 d_add(dentry, inode);
66602
66603 out:
66604@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66605 struct inode *inode = file_inode(filp);
66606 struct ctl_table_header *head = grab_header(inode);
66607 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
66608+ int op = write ? MAY_WRITE : MAY_READ;
66609 ssize_t error;
66610 size_t res;
66611
66612@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66613 * and won't be until we finish.
66614 */
66615 error = -EPERM;
66616- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
66617+ if (sysctl_perm(head, table, op))
66618 goto out;
66619
66620 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
66621@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66622 if (!table->proc_handler)
66623 goto out;
66624
66625+#ifdef CONFIG_GRKERNSEC
66626+ error = -EPERM;
66627+ if (gr_handle_chroot_sysctl(op))
66628+ goto out;
66629+ dget(filp->f_path.dentry);
66630+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
66631+ dput(filp->f_path.dentry);
66632+ goto out;
66633+ }
66634+ dput(filp->f_path.dentry);
66635+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
66636+ goto out;
66637+ if (write) {
66638+ if (current->nsproxy->net_ns != table->extra2) {
66639+ if (!capable(CAP_SYS_ADMIN))
66640+ goto out;
66641+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
66642+ goto out;
66643+ }
66644+#endif
66645+
66646 /* careful: calling conventions are nasty here */
66647 res = count;
66648 error = table->proc_handler(table, write, buf, &res, ppos);
66649@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
66650 return false;
66651 } else {
66652 d_set_d_op(child, &proc_sys_dentry_operations);
66653+
66654+ gr_handle_proc_create(child, inode);
66655+
66656 d_add(child, inode);
66657 }
66658 } else {
66659@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, struct ctl_table *table,
66660 if ((*pos)++ < ctx->pos)
66661 return true;
66662
66663+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
66664+ return 0;
66665+
66666 if (unlikely(S_ISLNK(table->mode)))
66667 res = proc_sys_link_fill_cache(file, ctx, head, table);
66668 else
66669@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
66670 if (IS_ERR(head))
66671 return PTR_ERR(head);
66672
66673+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
66674+ return -ENOENT;
66675+
66676 generic_fillattr(inode, stat);
66677 if (table)
66678 stat->mode = (stat->mode & S_IFMT) | table->mode;
66679@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
66680 .llseek = generic_file_llseek,
66681 };
66682
66683-static const struct inode_operations proc_sys_inode_operations = {
66684+const struct inode_operations proc_sys_inode_operations = {
66685 .permission = proc_sys_permission,
66686 .setattr = proc_sys_setattr,
66687 .getattr = proc_sys_getattr,
66688 };
66689
66690-static const struct inode_operations proc_sys_dir_operations = {
66691+const struct inode_operations proc_sys_dir_operations = {
66692 .lookup = proc_sys_lookup,
66693 .permission = proc_sys_permission,
66694 .setattr = proc_sys_setattr,
66695@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
66696 static struct ctl_dir *new_dir(struct ctl_table_set *set,
66697 const char *name, int namelen)
66698 {
66699- struct ctl_table *table;
66700+ ctl_table_no_const *table;
66701 struct ctl_dir *new;
66702 struct ctl_node *node;
66703 char *new_name;
66704@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
66705 return NULL;
66706
66707 node = (struct ctl_node *)(new + 1);
66708- table = (struct ctl_table *)(node + 1);
66709+ table = (ctl_table_no_const *)(node + 1);
66710 new_name = (char *)(table + 2);
66711 memcpy(new_name, name, namelen);
66712 new_name[namelen] = '\0';
66713@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
66714 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
66715 struct ctl_table_root *link_root)
66716 {
66717- struct ctl_table *link_table, *entry, *link;
66718+ ctl_table_no_const *link_table, *link;
66719+ struct ctl_table *entry;
66720 struct ctl_table_header *links;
66721 struct ctl_node *node;
66722 char *link_name;
66723@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
66724 return NULL;
66725
66726 node = (struct ctl_node *)(links + 1);
66727- link_table = (struct ctl_table *)(node + nr_entries);
66728+ link_table = (ctl_table_no_const *)(node + nr_entries);
66729 link_name = (char *)&link_table[nr_entries + 1];
66730
66731 for (link = link_table, entry = table; entry->procname; link++, entry++) {
66732@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66733 struct ctl_table_header ***subheader, struct ctl_table_set *set,
66734 struct ctl_table *table)
66735 {
66736- struct ctl_table *ctl_table_arg = NULL;
66737- struct ctl_table *entry, *files;
66738+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
66739+ struct ctl_table *entry;
66740 int nr_files = 0;
66741 int nr_dirs = 0;
66742 int err = -ENOMEM;
66743@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66744 nr_files++;
66745 }
66746
66747- files = table;
66748 /* If there are mixed files and directories we need a new table */
66749 if (nr_dirs && nr_files) {
66750- struct ctl_table *new;
66751+ ctl_table_no_const *new;
66752 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
66753 GFP_KERNEL);
66754 if (!files)
66755@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66756 /* Register everything except a directory full of subdirectories */
66757 if (nr_files || !nr_dirs) {
66758 struct ctl_table_header *header;
66759- header = __register_sysctl_table(set, path, files);
66760+ header = __register_sysctl_table(set, path, files ? files : table);
66761 if (!header) {
66762 kfree(ctl_table_arg);
66763 goto out;
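
The proc_sysctl.c hunks add ACL hooks at lookup/readdir/getattr and gate sysctl writes by namespace: writing a table that belongs to a foreign net namespace (the hunk compares current->nsproxy->net_ns against table->extra2, which this patch uses as the owning-netns marker) requires global CAP_SYS_ADMIN, while writes inside the caller's own netns require CAP_NET_ADMIN in that namespace's user_ns. A stand-alone model of that gate, a sketch with stub types and control flow only:

/* Sketch of the GRKERNSEC sysctl write gate added above. */
#include <stdbool.h>
#include <stdio.h>

struct netns { int id; };

static bool may_write_sysctl(const struct netns *caller_ns,
			     const struct netns *table_ns,
			     bool cap_sys_admin_global,
			     bool cap_net_admin_in_ns)
{
	if (caller_ns != table_ns)
		return cap_sys_admin_global;	/* foreign netns: global admin */
	return cap_net_admin_in_ns;		/* own netns: netns-local admin */
}

int main(void)
{
	struct netns host = { 0 }, container = { 1 };

	printf("container writing host-owned sysctl: %s\n",
	       may_write_sysctl(&container, &host, false, true)
	       ? "allowed" : "denied");
	return 0;
}
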
66764diff --git a/fs/proc/root.c b/fs/proc/root.c
66765index e74ac9f..35e89f4 100644
66766--- a/fs/proc/root.c
66767+++ b/fs/proc/root.c
66768@@ -188,7 +188,15 @@ void __init proc_root_init(void)
66769 proc_mkdir("openprom", NULL);
66770 #endif
66771 proc_tty_init();
66772+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66773+#ifdef CONFIG_GRKERNSEC_PROC_USER
66774+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
66775+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66776+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
66777+#endif
66778+#else
66779 proc_mkdir("bus", NULL);
66780+#endif
66781 proc_sys_init();
66782 }
66783
66784diff --git a/fs/proc/stat.c b/fs/proc/stat.c
66785index 510413eb..34d9a8c 100644
66786--- a/fs/proc/stat.c
66787+++ b/fs/proc/stat.c
66788@@ -11,6 +11,7 @@
66789 #include <linux/irqnr.h>
66790 #include <linux/cputime.h>
66791 #include <linux/tick.h>
66792+#include <linux/grsecurity.h>
66793
66794 #ifndef arch_irq_stat_cpu
66795 #define arch_irq_stat_cpu(cpu) 0
66796@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
66797 u64 sum_softirq = 0;
66798 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
66799 struct timespec boottime;
66800+ int unrestricted = 1;
66801+
66802+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66803+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66804+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
66805+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66806+ && !in_group_p(grsec_proc_gid)
66807+#endif
66808+ )
66809+ unrestricted = 0;
66810+#endif
66811+#endif
66812
66813 user = nice = system = idle = iowait =
66814 irq = softirq = steal = 0;
66815@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
66816 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
66817 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
66818 idle += get_idle_time(i);
66819- iowait += get_iowait_time(i);
66820- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66821- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66822- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66823- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66824- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66825- sum += kstat_cpu_irqs_sum(i);
66826- sum += arch_irq_stat_cpu(i);
66827+ if (unrestricted) {
66828+ iowait += get_iowait_time(i);
66829+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66830+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66831+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66832+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66833+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66834+ sum += kstat_cpu_irqs_sum(i);
66835+ sum += arch_irq_stat_cpu(i);
66836+ for (j = 0; j < NR_SOFTIRQS; j++) {
66837+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
66838
66839- for (j = 0; j < NR_SOFTIRQS; j++) {
66840- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
66841-
66842- per_softirq_sums[j] += softirq_stat;
66843- sum_softirq += softirq_stat;
66844+ per_softirq_sums[j] += softirq_stat;
66845+ sum_softirq += softirq_stat;
66846+ }
66847 }
66848 }
66849- sum += arch_irq_stat();
66850+ if (unrestricted)
66851+ sum += arch_irq_stat();
66852
66853 seq_puts(p, "cpu ");
66854 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
66855@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
66856 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
66857 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
66858 idle = get_idle_time(i);
66859- iowait = get_iowait_time(i);
66860- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66861- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66862- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66863- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66864- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66865+ if (unrestricted) {
66866+ iowait = get_iowait_time(i);
66867+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
66868+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
66869+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
66870+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
66871+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
66872+ }
66873 seq_printf(p, "cpu%d", i);
66874 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
66875 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
66876@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
66877
66878 /* sum again ? it could be updated? */
66879 for_each_irq_nr(j)
66880- seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
66881+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs_usr(j) : 0ULL);
66882
66883 seq_printf(p,
66884 "\nctxt %llu\n"
66885@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
66886 "processes %lu\n"
66887 "procs_running %lu\n"
66888 "procs_blocked %lu\n",
66889- nr_context_switches(),
66890+ unrestricted ? nr_context_switches() : 0ULL,
66891 (unsigned long)jif,
66892- total_forks,
66893- nr_running(),
66894- nr_iowait());
66895+ unrestricted ? total_forks : 0UL,
66896+ unrestricted ? nr_running() : 0UL,
66897+ unrestricted ? nr_iowait() : 0UL);
66898
66899 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
66900
66901diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
66902index f86e549..3a88fcd 100644
66903--- a/fs/proc/task_mmu.c
66904+++ b/fs/proc/task_mmu.c
66905@@ -13,12 +13,19 @@
66906 #include <linux/swap.h>
66907 #include <linux/swapops.h>
66908 #include <linux/mmu_notifier.h>
66909+#include <linux/grsecurity.h>
66910
66911 #include <asm/elf.h>
66912 #include <asm/uaccess.h>
66913 #include <asm/tlbflush.h>
66914 #include "internal.h"
66915
66916+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66917+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
66918+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
66919+ _mm->pax_flags & MF_PAX_SEGMEXEC))
66920+#endif
66921+
66922 void task_mem(struct seq_file *m, struct mm_struct *mm)
66923 {
66924 unsigned long data, text, lib, swap;
66925@@ -54,8 +61,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
66926 "VmExe:\t%8lu kB\n"
66927 "VmLib:\t%8lu kB\n"
66928 "VmPTE:\t%8lu kB\n"
66929- "VmSwap:\t%8lu kB\n",
66930- hiwater_vm << (PAGE_SHIFT-10),
66931+ "VmSwap:\t%8lu kB\n"
66932+
66933+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66934+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
66935+#endif
66936+
66937+ ,hiwater_vm << (PAGE_SHIFT-10),
66938 total_vm << (PAGE_SHIFT-10),
66939 mm->locked_vm << (PAGE_SHIFT-10),
66940 mm->pinned_vm << (PAGE_SHIFT-10),
66941@@ -65,7 +77,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
66942 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
66943 (PTRS_PER_PTE * sizeof(pte_t) *
66944 atomic_long_read(&mm->nr_ptes)) >> 10,
66945- swap << (PAGE_SHIFT-10));
66946+ swap << (PAGE_SHIFT-10)
66947+
66948+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66949+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66950+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
66951+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
66952+#else
66953+ , mm->context.user_cs_base
66954+ , mm->context.user_cs_limit
66955+#endif
66956+#endif
66957+
66958+ );
66959 }
66960
66961 unsigned long task_vsize(struct mm_struct *mm)
66962@@ -282,13 +306,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66963 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
66964 }
66965
66966- /* We don't show the stack guard page in /proc/maps */
66967+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66968+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
66969+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
66970+#else
66971 start = vma->vm_start;
66972- if (stack_guard_page_start(vma, start))
66973- start += PAGE_SIZE;
66974 end = vma->vm_end;
66975- if (stack_guard_page_end(vma, end))
66976- end -= PAGE_SIZE;
66977+#endif
66978
66979 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
66980 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
66981@@ -298,7 +322,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66982 flags & VM_WRITE ? 'w' : '-',
66983 flags & VM_EXEC ? 'x' : '-',
66984 flags & VM_MAYSHARE ? 's' : 'p',
66985+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66986+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
66987+#else
66988 pgoff,
66989+#endif
66990 MAJOR(dev), MINOR(dev), ino);
66991
66992 /*
66993@@ -307,7 +335,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
66994 */
66995 if (file) {
66996 seq_pad(m, ' ');
66997- seq_path(m, &file->f_path, "\n");
66998+ seq_path(m, &file->f_path, "\n\\");
66999 goto done;
67000 }
67001
67002@@ -338,8 +366,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67003 * Thread stack in /proc/PID/task/TID/maps or
67004 * the main process stack.
67005 */
67006- if (!is_pid || (vma->vm_start <= mm->start_stack &&
67007- vma->vm_end >= mm->start_stack)) {
67008+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
67009+ (vma->vm_start <= mm->start_stack &&
67010+ vma->vm_end >= mm->start_stack)) {
67011 name = "[stack]";
67012 } else {
67013 /* Thread stack in /proc/PID/maps */
67014@@ -359,6 +388,12 @@ done:
67015
67016 static int show_map(struct seq_file *m, void *v, int is_pid)
67017 {
67018+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67019+ if (current->exec_id != m->exec_id) {
67020+ gr_log_badprocpid("maps");
67021+ return 0;
67022+ }
67023+#endif
67024 show_map_vma(m, v, is_pid);
67025 m_cache_vma(m, v);
67026 return 0;
67027@@ -629,12 +664,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
67028 .private = &mss,
67029 };
67030
67031+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67032+ if (current->exec_id != m->exec_id) {
67033+ gr_log_badprocpid("smaps");
67034+ return 0;
67035+ }
67036+#endif
67037 memset(&mss, 0, sizeof mss);
67038- mss.vma = vma;
67039- /* mmap_sem is held in m_start */
67040- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
67041- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
67042-
67043+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67044+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
67045+#endif
67046+ mss.vma = vma;
67047+ /* mmap_sem is held in m_start */
67048+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
67049+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
67050+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67051+ }
67052+#endif
67053 show_map_vma(m, vma, is_pid);
67054
67055 seq_printf(m,
67056@@ -652,7 +698,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
67057 "KernelPageSize: %8lu kB\n"
67058 "MMUPageSize: %8lu kB\n"
67059 "Locked: %8lu kB\n",
67060+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67061+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
67062+#else
67063 (vma->vm_end - vma->vm_start) >> 10,
67064+#endif
67065 mss.resident >> 10,
67066 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
67067 mss.shared_clean >> 10,
67068@@ -1489,6 +1539,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
67069 char buffer[64];
67070 int nid;
67071
67072+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67073+ if (current->exec_id != m->exec_id) {
67074+ gr_log_badprocpid("numa_maps");
67075+ return 0;
67076+ }
67077+#endif
67078+
67079 if (!mm)
67080 return 0;
67081
67082@@ -1510,11 +1567,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
67083 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
67084 }
67085
67086+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67087+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
67088+#else
67089 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
67090+#endif
67091
67092 if (file) {
67093 seq_puts(m, " file=");
67094- seq_path(m, &file->f_path, "\n\t= ");
67095+ seq_path(m, &file->f_path, "\n\t\\= ");
67096 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
67097 seq_puts(m, " heap");
67098 } else {
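
The task_mmu.c changes are the GRKERNSEC_PROC_MEMMAP core: for any mm other than the caller's own, PAX_RAND_FLAGS() (RANDMMAP or SEGMEXEC active) makes maps/smaps/numa_maps print zeroed addresses, sizes, and offsets; the seq_file exec_id check stops descriptors inherited across exec from leaking the new image's layout; and the seq_path() escape set gains the backslash so a crafted mapped-file name cannot forge extra map lines. Observable from userspace roughly like this sketch:

/* Sketch: on a GRKERNSEC_PROC_MEMMAP kernel, another PaX-randomized
 * task's maps show zeroed ranges; your own stay real. */
#include <stdio.h>

int main(int argc, char **argv)
{
	char path[64], line[512];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%s/maps",
		 argc > 1 ? argv[1] : "self");
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	/* "self" prints real addresses; other PIDs print
	 * 00000000-00000000 ranges under the policy above */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
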
67099diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
67100index 599ec2e..f1413ae 100644
67101--- a/fs/proc/task_nommu.c
67102+++ b/fs/proc/task_nommu.c
67103@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67104 else
67105 bytes += kobjsize(mm);
67106
67107- if (current->fs && current->fs->users > 1)
67108+ if (current->fs && atomic_read(&current->fs->users) > 1)
67109 sbytes += kobjsize(current->fs);
67110 else
67111 bytes += kobjsize(current->fs);
67112@@ -180,7 +180,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
67113
67114 if (file) {
67115 seq_pad(m, ' ');
67116- seq_path(m, &file->f_path, "");
67117+ seq_path(m, &file->f_path, "\n\\");
67118 } else if (mm) {
67119 pid_t tid = pid_of_stack(priv, vma, is_pid);
67120
67121diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
67122index a90d6d35..d08047c 100644
67123--- a/fs/proc/vmcore.c
67124+++ b/fs/proc/vmcore.c
67125@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
67126 nr_bytes = count;
67127
67128 /* If pfn is not ram, return zeros for sparse dump files */
67129- if (pfn_is_ram(pfn) == 0)
67130- memset(buf, 0, nr_bytes);
67131- else {
67132+ if (pfn_is_ram(pfn) == 0) {
67133+ if (userbuf) {
67134+ if (clear_user((char __force_user *)buf, nr_bytes))
67135+ return -EFAULT;
67136+ } else
67137+ memset(buf, 0, nr_bytes);
67138+ } else {
67139 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
67140 offset, userbuf);
67141 if (tmp < 0)
67142@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
67143 static int copy_to(void *target, void *src, size_t size, int userbuf)
67144 {
67145 if (userbuf) {
67146- if (copy_to_user((char __user *) target, src, size))
67147+ if (copy_to_user((char __force_user *) target, src, size))
67148 return -EFAULT;
67149 } else {
67150 memcpy(target, src, size);
67151@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
67152 if (*fpos < m->offset + m->size) {
67153 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
67154 start = m->paddr + *fpos - m->offset;
67155- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
67156+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
67157 if (tmp < 0)
67158 return tmp;
67159 buflen -= tsz;
67160@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
67161 static ssize_t read_vmcore(struct file *file, char __user *buffer,
67162 size_t buflen, loff_t *fpos)
67163 {
67164- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
67165+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
67166 }
67167
67168 /*
67169diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
67170index d3fb2b6..43a8140 100644
67171--- a/fs/qnx6/qnx6.h
67172+++ b/fs/qnx6/qnx6.h
67173@@ -74,7 +74,7 @@ enum {
67174 BYTESEX_BE,
67175 };
67176
67177-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67178+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67179 {
67180 if (sbi->s_bytesex == BYTESEX_LE)
67181 return le64_to_cpu((__force __le64)n);
67182@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
67183 return (__force __fs64)cpu_to_be64(n);
67184 }
67185
67186-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67187+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67188 {
67189 if (sbi->s_bytesex == BYTESEX_LE)
67190 return le32_to_cpu((__force __le32)n);
67191diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
67192index bb2869f..d34ada8 100644
67193--- a/fs/quota/netlink.c
67194+++ b/fs/quota/netlink.c
67195@@ -44,7 +44,7 @@ static struct genl_family quota_genl_family = {
67196 void quota_send_warning(struct kqid qid, dev_t dev,
67197 const char warntype)
67198 {
67199- static atomic_t seq;
67200+ static atomic_unchecked_t seq;
67201 struct sk_buff *skb;
67202 void *msg_head;
67203 int ret;
67204@@ -60,7 +60,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
67205 "VFS: Not enough memory to send quota warning.\n");
67206 return;
67207 }
67208- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
67209+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
67210 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
67211 if (!msg_head) {
67212 printk(KERN_ERR
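
The quota/netlink.c conversion (and the reiserfs s_generation_counter conversion below) is the PaX REFCOUNT pattern seen throughout this patch: counters whose wraparound is harmless, like netlink sequence numbers and tree-generation counters, move to atomic_unchecked_t so the overflow detection instrumenting ordinary atomic_t does not trip on them. A simplified stand-in for the idea, a sketch and not the kernel's real types:

/* Sketch: an "unchecked" atomic keeps plain wrapping semantics for
 * benign counters; a checked atomic_t would trap on this overflow. */
#include <limits.h>
#include <stdio.h>

typedef struct { int counter; } atomic_unchecked_t;

static int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
{
	/* wraps silently by design */
	return __atomic_add_fetch(&v->counter, i, __ATOMIC_SEQ_CST);
}

int main(void)
{
	atomic_unchecked_t seq = { INT_MAX };

	printf("sequence wrapped to %d without tripping refcount checks\n",
	       atomic_add_return_unchecked(1, &seq));
	return 0;
}
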
67213diff --git a/fs/read_write.c b/fs/read_write.c
67214index c0805c93..d39f2eb 100644
67215--- a/fs/read_write.c
67216+++ b/fs/read_write.c
67217@@ -507,7 +507,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
67218
67219 old_fs = get_fs();
67220 set_fs(get_ds());
67221- p = (__force const char __user *)buf;
67222+ p = (const char __force_user *)buf;
67223 if (count > MAX_RW_COUNT)
67224 count = MAX_RW_COUNT;
67225 if (file->f_op->write)
67226diff --git a/fs/readdir.c b/fs/readdir.c
67227index ced6791..936687b 100644
67228--- a/fs/readdir.c
67229+++ b/fs/readdir.c
67230@@ -18,6 +18,7 @@
67231 #include <linux/security.h>
67232 #include <linux/syscalls.h>
67233 #include <linux/unistd.h>
67234+#include <linux/namei.h>
67235
67236 #include <asm/uaccess.h>
67237
67238@@ -71,6 +72,7 @@ struct old_linux_dirent {
67239 struct readdir_callback {
67240 struct dir_context ctx;
67241 struct old_linux_dirent __user * dirent;
67242+ struct file * file;
67243 int result;
67244 };
67245
67246@@ -89,6 +91,10 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
67247 buf->result = -EOVERFLOW;
67248 return -EOVERFLOW;
67249 }
67250+
67251+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67252+ return 0;
67253+
67254 buf->result++;
67255 dirent = buf->dirent;
67256 if (!access_ok(VERIFY_WRITE, dirent,
67257@@ -120,6 +126,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
67258 if (!f.file)
67259 return -EBADF;
67260
67261+ buf.file = f.file;
67262 error = iterate_dir(f.file, &buf.ctx);
67263 if (buf.result)
67264 error = buf.result;
67265@@ -145,6 +152,7 @@ struct getdents_callback {
67266 struct dir_context ctx;
67267 struct linux_dirent __user * current_dir;
67268 struct linux_dirent __user * previous;
67269+ struct file * file;
67270 int count;
67271 int error;
67272 };
67273@@ -167,6 +175,10 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
67274 buf->error = -EOVERFLOW;
67275 return -EOVERFLOW;
67276 }
67277+
67278+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67279+ return 0;
67280+
67281 dirent = buf->previous;
67282 if (dirent) {
67283 if (__put_user(offset, &dirent->d_off))
67284@@ -212,6 +224,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
67285 if (!f.file)
67286 return -EBADF;
67287
67288+ buf.file = f.file;
67289 error = iterate_dir(f.file, &buf.ctx);
67290 if (error >= 0)
67291 error = buf.error;
67292@@ -230,6 +243,7 @@ struct getdents_callback64 {
67293 struct dir_context ctx;
67294 struct linux_dirent64 __user * current_dir;
67295 struct linux_dirent64 __user * previous;
67296+ struct file *file;
67297 int count;
67298 int error;
67299 };
67300@@ -246,6 +260,10 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
67301 buf->error = -EINVAL; /* only used if we fail.. */
67302 if (reclen > buf->count)
67303 return -EINVAL;
67304+
67305+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67306+ return 0;
67307+
67308 dirent = buf->previous;
67309 if (dirent) {
67310 if (__put_user(offset, &dirent->d_off))
67311@@ -293,6 +311,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
67312 if (!f.file)
67313 return -EBADF;
67314
67315+ buf.file = f.file;
67316 error = iterate_dir(f.file, &buf.ctx);
67317 if (error >= 0)
67318 error = buf.error;
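
Each getdents flavor in fs/readdir.c gains a struct file back-pointer in its callback struct, filled in at the syscall sites, so gr_acl_handle_filldir() can consult the directory's ACL per entry; returning 0 from the callback drops that entry and the walk continues, so hidden names are silently absent rather than causing an error. From userspace that looks like an ordinary, just shorter, listing, as in this sketch:

/* Sketch: entries suppressed by gr_acl_handle_filldir() never reach
 * userspace; a plain directory walk simply does not see them. */
#include <dirent.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	DIR *d = opendir(argc > 1 ? argv[1] : "/proc");
	struct dirent *e;

	if (!d) {
		perror("opendir");
		return 1;
	}
	while ((e = readdir(d)))	/* hidden entries are absent, not errors */
		puts(e->d_name);
	closedir(d);
	return 0;
}
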
67319diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
67320index 9c02d96..6562c10 100644
67321--- a/fs/reiserfs/do_balan.c
67322+++ b/fs/reiserfs/do_balan.c
67323@@ -1887,7 +1887,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
67324 return;
67325 }
67326
67327- atomic_inc(&fs_generation(tb->tb_sb));
67328+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
67329 do_balance_starts(tb);
67330
67331 /*
67332diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
67333index aca73dd..e3c558d 100644
67334--- a/fs/reiserfs/item_ops.c
67335+++ b/fs/reiserfs/item_ops.c
67336@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
67337 }
67338
67339 static struct item_operations errcatch_ops = {
67340- errcatch_bytes_number,
67341- errcatch_decrement_key,
67342- errcatch_is_left_mergeable,
67343- errcatch_print_item,
67344- errcatch_check_item,
67345+ .bytes_number = errcatch_bytes_number,
67346+ .decrement_key = errcatch_decrement_key,
67347+ .is_left_mergeable = errcatch_is_left_mergeable,
67348+ .print_item = errcatch_print_item,
67349+ .check_item = errcatch_check_item,
67350
67351- errcatch_create_vi,
67352- errcatch_check_left,
67353- errcatch_check_right,
67354- errcatch_part_size,
67355- errcatch_unit_num,
67356- errcatch_print_vi
67357+ .create_vi = errcatch_create_vi,
67358+ .check_left = errcatch_check_left,
67359+ .check_right = errcatch_check_right,
67360+ .part_size = errcatch_part_size,
67361+ .unit_num = errcatch_unit_num,
67362+ .print_vi = errcatch_print_vi
67363 };
67364
67365 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
67366diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
67367index 621b9f3..af527fd 100644
67368--- a/fs/reiserfs/procfs.c
67369+++ b/fs/reiserfs/procfs.c
67370@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
67371 "SMALL_TAILS " : "NO_TAILS ",
67372 replay_only(sb) ? "REPLAY_ONLY " : "",
67373 convert_reiserfs(sb) ? "CONV " : "",
67374- atomic_read(&r->s_generation_counter),
67375+ atomic_read_unchecked(&r->s_generation_counter),
67376 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
67377 SF(s_do_balance), SF(s_unneeded_left_neighbor),
67378 SF(s_good_search_by_key_reada), SF(s_bmaps),
67379diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
67380index bb79cdd..fcf49ef 100644
67381--- a/fs/reiserfs/reiserfs.h
67382+++ b/fs/reiserfs/reiserfs.h
67383@@ -580,7 +580,7 @@ struct reiserfs_sb_info {
67384 /* Comment? -Hans */
67385 wait_queue_head_t s_wait;
67386 /* increased by one every time the tree gets re-balanced */
67387- atomic_t s_generation_counter;
67388+ atomic_unchecked_t s_generation_counter;
67389
67390 /* File system properties. Currently holds on-disk FS format */
67391 unsigned long s_properties;
67392@@ -2301,7 +2301,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
67393 #define REISERFS_USER_MEM 1 /* user memory mode */
67394
67395 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
67396-#define get_generation(s) atomic_read (&fs_generation(s))
67397+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
67398 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
67399 #define __fs_changed(gen,s) (gen != get_generation (s))
67400 #define fs_changed(gen,s) \
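[Editorial sketch] reiserfs's s_generation_counter only ever matters for equality tests, so the patch switches it to PaX's atomic_unchecked_t, which is exempt from reference-count overflow checking. A rough userspace analogue, assuming C11 atomics stand in for the kernel's atomic_*_unchecked() helpers:

    #include <stdatomic.h>
    #include <stdio.h>

    /* A wrapping generation counter: only equality matters, so
     * overflow is intentional and harmless (hence "unchecked"). */
    static atomic_uint generation;

    static unsigned int get_generation(void)
    {
        return atomic_load_explicit(&generation, memory_order_relaxed);
    }

    static int fs_changed(unsigned int snapshot)
    {
        return snapshot != get_generation();
    }

    int main(void)
    {
        unsigned int gen = get_generation();

        atomic_fetch_add_explicit(&generation, 1, memory_order_relaxed);
        printf("changed: %d\n", fs_changed(gen));   /* prints 1 */
        return 0;
    }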
67401diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
67402index 71fbbe3..eff29ba 100644
67403--- a/fs/reiserfs/super.c
67404+++ b/fs/reiserfs/super.c
67405@@ -1868,6 +1868,10 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
67406 sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
67407 sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
67408 sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
67409+#ifdef CONFIG_REISERFS_FS_XATTR
67410+ /* turn on user xattrs by default */
67411+ sbi->s_mount_opt |= (1 << REISERFS_XATTRS_USER);
67412+#endif
67413 /* no preallocation minimum, be smart in reiserfs_file_write instead */
67414 sbi->s_alloc_options.preallocmin = 0;
67415 /* Preallocate by 16 blocks (17-1) at once */
67416diff --git a/fs/select.c b/fs/select.c
67417index 467bb1c..cf9d65a 100644
67418--- a/fs/select.c
67419+++ b/fs/select.c
67420@@ -20,6 +20,7 @@
67421 #include <linux/export.h>
67422 #include <linux/slab.h>
67423 #include <linux/poll.h>
67424+#include <linux/security.h>
67425 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
67426 #include <linux/file.h>
67427 #include <linux/fdtable.h>
67428@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
67429 struct poll_list *walk = head;
67430 unsigned long todo = nfds;
67431
67432+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
67433 if (nfds > rlimit(RLIMIT_NOFILE))
67434 return -EINVAL;
67435
67436diff --git a/fs/seq_file.c b/fs/seq_file.c
67437index dbf3a59..daf023f 100644
67438--- a/fs/seq_file.c
67439+++ b/fs/seq_file.c
67440@@ -12,6 +12,8 @@
67441 #include <linux/slab.h>
67442 #include <linux/cred.h>
67443 #include <linux/mm.h>
67444+#include <linux/sched.h>
67445+#include <linux/grsecurity.h>
67446
67447 #include <asm/uaccess.h>
67448 #include <asm/page.h>
67449@@ -23,16 +25,7 @@ static void seq_set_overflow(struct seq_file *m)
67450
67451 static void *seq_buf_alloc(unsigned long size)
67452 {
67453- void *buf;
67454-
67455- /*
67456- * __GFP_NORETRY to avoid oom-killings with high-order allocations -
67457- * it's better to fall back to vmalloc() than to kill things.
67458- */
67459- buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
67460- if (!buf && size > PAGE_SIZE)
67461- buf = vmalloc(size);
67462- return buf;
67463+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
67464 }
67465
67466 /**
67467@@ -65,6 +58,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
67468 #ifdef CONFIG_USER_NS
67469 p->user_ns = file->f_cred->user_ns;
67470 #endif
67471+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67472+ p->exec_id = current->exec_id;
67473+#endif
67474
67475 /*
67476 * Wrappers around seq_open(e.g. swaps_open) need to be
67477@@ -87,6 +83,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
67478 }
67479 EXPORT_SYMBOL(seq_open);
67480
67481+
67482+int seq_open_restrict(struct file *file, const struct seq_operations *op)
67483+{
67484+ if (gr_proc_is_restricted())
67485+ return -EACCES;
67486+
67487+ return seq_open(file, op);
67488+}
67489+EXPORT_SYMBOL(seq_open_restrict);
67490+
67491 static int traverse(struct seq_file *m, loff_t offset)
67492 {
67493 loff_t pos = 0, index;
67494@@ -158,7 +164,7 @@ Eoverflow:
67495 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
67496 {
67497 struct seq_file *m = file->private_data;
67498- size_t copied = 0;
67499+ ssize_t copied = 0;
67500 loff_t pos;
67501 size_t n;
67502 void *p;
67503@@ -589,7 +595,7 @@ static void single_stop(struct seq_file *p, void *v)
67504 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
67505 void *data)
67506 {
67507- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
67508+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
67509 int res = -ENOMEM;
67510
67511 if (op) {
67512@@ -625,6 +631,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
67513 }
67514 EXPORT_SYMBOL(single_open_size);
67515
67516+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
67517+ void *data)
67518+{
67519+ if (gr_proc_is_restricted())
67520+ return -EACCES;
67521+
67522+ return single_open(file, show, data);
67523+}
67524+EXPORT_SYMBOL(single_open_restrict);
67525+
67526+
67527 int single_release(struct inode *inode, struct file *file)
67528 {
67529 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
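[Editorial sketch] seq_open_restrict() and single_open_restrict() added above are thin fail-closed guards: test a policy predicate, then delegate to the unrestricted open. The shape in isolation; proc_is_restricted() and open_impl() are hypothetical stand-ins:

    #include <errno.h>
    #include <stdio.h>

    static int proc_is_restricted(void) { return 0; }   /* stub policy */

    static int open_impl(const char *name)
    {
        printf("opened %s\n", name);
        return 0;
    }

    /* Same pattern as seq_open_restrict(): deny first, then delegate. */
    static int open_restricted(const char *name)
    {
        if (proc_is_restricted())
            return -EACCES;
        return open_impl(name);
    }

    int main(void)
    {
        return open_restricted("demo");
    }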
67530diff --git a/fs/splice.c b/fs/splice.c
67531index 75c6058..770d40c 100644
67532--- a/fs/splice.c
67533+++ b/fs/splice.c
67534@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67535 pipe_lock(pipe);
67536
67537 for (;;) {
67538- if (!pipe->readers) {
67539+ if (!atomic_read(&pipe->readers)) {
67540 send_sig(SIGPIPE, current, 0);
67541 if (!ret)
67542 ret = -EPIPE;
67543@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67544 page_nr++;
67545 ret += buf->len;
67546
67547- if (pipe->files)
67548+ if (atomic_read(&pipe->files))
67549 do_wakeup = 1;
67550
67551 if (!--spd->nr_pages)
67552@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67553 do_wakeup = 0;
67554 }
67555
67556- pipe->waiting_writers++;
67557+ atomic_inc(&pipe->waiting_writers);
67558 pipe_wait(pipe);
67559- pipe->waiting_writers--;
67560+ atomic_dec(&pipe->waiting_writers);
67561 }
67562
67563 pipe_unlock(pipe);
67564@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
67565 old_fs = get_fs();
67566 set_fs(get_ds());
67567 /* The cast to a user pointer is valid due to the set_fs() */
67568- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
67569+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
67570 set_fs(old_fs);
67571
67572 return res;
67573@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
67574 old_fs = get_fs();
67575 set_fs(get_ds());
67576 /* The cast to a user pointer is valid due to the set_fs() */
67577- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
67578+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
67579 set_fs(old_fs);
67580
67581 return res;
67582@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
67583 goto err;
67584
67585 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
67586- vec[i].iov_base = (void __user *) page_address(page);
67587+ vec[i].iov_base = (void __force_user *) page_address(page);
67588 vec[i].iov_len = this_len;
67589 spd.pages[i] = page;
67590 spd.nr_pages++;
67591@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
67592 ops->release(pipe, buf);
67593 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
67594 pipe->nrbufs--;
67595- if (pipe->files)
67596+ if (atomic_read(&pipe->files))
67597 sd->need_wakeup = true;
67598 }
67599
67600@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
67601 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
67602 {
67603 while (!pipe->nrbufs) {
67604- if (!pipe->writers)
67605+ if (!atomic_read(&pipe->writers))
67606 return 0;
67607
67608- if (!pipe->waiting_writers && sd->num_spliced)
67609+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
67610 return 0;
67611
67612 if (sd->flags & SPLICE_F_NONBLOCK)
67613@@ -1040,7 +1040,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
67614 ops->release(pipe, buf);
67615 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
67616 pipe->nrbufs--;
67617- if (pipe->files)
67618+ if (atomic_read(&pipe->files))
67619 sd.need_wakeup = true;
67620 } else {
67621 buf->offset += ret;
67622@@ -1200,7 +1200,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
67623 * out of the pipe right after the splice_to_pipe(). So set
67624 * PIPE_READERS appropriately.
67625 */
67626- pipe->readers = 1;
67627+ atomic_set(&pipe->readers, 1);
67628
67629 current->splice_pipe = pipe;
67630 }
67631@@ -1497,6 +1497,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
67632
67633 partial[buffers].offset = off;
67634 partial[buffers].len = plen;
67635+ partial[buffers].private = 0;
67636
67637 off = 0;
67638 len -= plen;
67639@@ -1733,9 +1734,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67640 ret = -ERESTARTSYS;
67641 break;
67642 }
67643- if (!pipe->writers)
67644+ if (!atomic_read(&pipe->writers))
67645 break;
67646- if (!pipe->waiting_writers) {
67647+ if (!atomic_read(&pipe->waiting_writers)) {
67648 if (flags & SPLICE_F_NONBLOCK) {
67649 ret = -EAGAIN;
67650 break;
67651@@ -1767,7 +1768,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67652 pipe_lock(pipe);
67653
67654 while (pipe->nrbufs >= pipe->buffers) {
67655- if (!pipe->readers) {
67656+ if (!atomic_read(&pipe->readers)) {
67657 send_sig(SIGPIPE, current, 0);
67658 ret = -EPIPE;
67659 break;
67660@@ -1780,9 +1781,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67661 ret = -ERESTARTSYS;
67662 break;
67663 }
67664- pipe->waiting_writers++;
67665+ atomic_inc(&pipe->waiting_writers);
67666 pipe_wait(pipe);
67667- pipe->waiting_writers--;
67668+ atomic_dec(&pipe->waiting_writers);
67669 }
67670
67671 pipe_unlock(pipe);
67672@@ -1818,14 +1819,14 @@ retry:
67673 pipe_double_lock(ipipe, opipe);
67674
67675 do {
67676- if (!opipe->readers) {
67677+ if (!atomic_read(&opipe->readers)) {
67678 send_sig(SIGPIPE, current, 0);
67679 if (!ret)
67680 ret = -EPIPE;
67681 break;
67682 }
67683
67684- if (!ipipe->nrbufs && !ipipe->writers)
67685+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
67686 break;
67687
67688 /*
67689@@ -1922,7 +1923,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
67690 pipe_double_lock(ipipe, opipe);
67691
67692 do {
67693- if (!opipe->readers) {
67694+ if (!atomic_read(&opipe->readers)) {
67695 send_sig(SIGPIPE, current, 0);
67696 if (!ret)
67697 ret = -EPIPE;
67698@@ -1967,7 +1968,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
67699 * return EAGAIN if we have the potential of some data in the
67700 * future, otherwise just return 0
67701 */
67702- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
67703+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
67704 ret = -EAGAIN;
67705
67706 pipe_unlock(ipipe);
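[Editorial sketch] Throughout fs/splice.c the patch rewrites the pipe readers/writers/waiting_writers bookkeeping as atomic operations, since those fields become atomic_t under PaX's refcount hardening: every test becomes atomic_read() and every ++/-- becomes atomic_inc()/atomic_dec(). A C11 sketch of the waiting-writers bracket around pipe_wait():

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int waiting_writers;

    static void pipe_wait(void) { /* would sleep on a waitqueue */ }

    /* Mirrors the pipe->waiting_writers++ / pipe_wait() / -- bracket,
     * rewritten with atomic ops as in the patch. */
    static void wait_for_reader(void)
    {
        atomic_fetch_add(&waiting_writers, 1);
        pipe_wait();
        atomic_fetch_sub(&waiting_writers, 1);
    }

    int main(void)
    {
        wait_for_reader();
        printf("waiting now: %d\n", atomic_load(&waiting_writers));
        return 0;
    }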
67707diff --git a/fs/stat.c b/fs/stat.c
67708index ae0c3ce..9ee641c 100644
67709--- a/fs/stat.c
67710+++ b/fs/stat.c
67711@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
67712 stat->gid = inode->i_gid;
67713 stat->rdev = inode->i_rdev;
67714 stat->size = i_size_read(inode);
67715- stat->atime = inode->i_atime;
67716- stat->mtime = inode->i_mtime;
67717+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
67718+ stat->atime = inode->i_ctime;
67719+ stat->mtime = inode->i_ctime;
67720+ } else {
67721+ stat->atime = inode->i_atime;
67722+ stat->mtime = inode->i_mtime;
67723+ }
67724 stat->ctime = inode->i_ctime;
67725 stat->blksize = (1 << inode->i_blkbits);
67726 stat->blocks = inode->i_blocks;
67727@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
67728 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
67729 {
67730 struct inode *inode = path->dentry->d_inode;
67731+ int retval;
67732
67733- if (inode->i_op->getattr)
67734- return inode->i_op->getattr(path->mnt, path->dentry, stat);
67735+ if (inode->i_op->getattr) {
67736+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
67737+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
67738+ stat->atime = stat->ctime;
67739+ stat->mtime = stat->ctime;
67740+ }
67741+ return retval;
67742+ }
67743
67744 generic_fillattr(inode, stat);
67745 return 0;
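[Editorial sketch] The fs/stat.c hunks mask atime and mtime with ctime on sidechannel devices for callers lacking CAP_MKNOD, defeating timing probes against nodes like /dev/ptmx. A small userspace observer showing what such a caller would see; the masking itself happens kernel-side:

    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
        struct stat st;

        if (stat("/dev/ptmx", &st) != 0) {
            perror("stat");
            return 1;
        }
        /* With GRKERNSEC_DEVICE_SIDECHANNEL and no CAP_MKNOD, atime
         * and mtime below would simply equal ctime. */
        printf("atime=%ld mtime=%ld ctime=%ld\n",
               (long)st.st_atime, (long)st.st_mtime, (long)st.st_ctime);
        return 0;
    }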
67746diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
67747index 0b45ff4..edf9d3a 100644
67748--- a/fs/sysfs/dir.c
67749+++ b/fs/sysfs/dir.c
67750@@ -33,6 +33,10 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
67751 kfree(buf);
67752 }
67753
67754+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67755+extern int grsec_enable_sysfs_restrict;
67756+#endif
67757+
67758 /**
67759 * sysfs_create_dir_ns - create a directory for an object with a namespace tag
67760 * @kobj: object we're creating directory for
67761@@ -41,9 +45,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
67762 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
67763 {
67764 struct kernfs_node *parent, *kn;
67765+ const char *name;
67766+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
67767+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67768+ const char *parent_name;
67769+#endif
67770
67771 BUG_ON(!kobj);
67772
67773+ name = kobject_name(kobj);
67774+
67775 if (kobj->parent)
67776 parent = kobj->parent->sd;
67777 else
67778@@ -52,11 +63,24 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
67779 if (!parent)
67780 return -ENOENT;
67781
67782- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
67783- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
67784+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67785+ parent_name = parent->name;
67786+ mode = S_IRWXU;
67787+
67788+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
67789+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
67790+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
67791+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
67792+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
67793+ if (!grsec_enable_sysfs_restrict)
67794+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
67795+#endif
67796+
67797+ kn = kernfs_create_dir_ns(parent, name,
67798+ mode, kobj, ns);
67799 if (IS_ERR(kn)) {
67800 if (PTR_ERR(kn) == -EEXIST)
67801- sysfs_warn_dup(parent, kobject_name(kobj));
67802+ sysfs_warn_dup(parent, name);
67803 return PTR_ERR(kn);
67804 }
67805
67806diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
67807index 69d4889..a810bd4 100644
67808--- a/fs/sysv/sysv.h
67809+++ b/fs/sysv/sysv.h
67810@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
67811 #endif
67812 }
67813
67814-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
67815+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
67816 {
67817 if (sbi->s_bytesex == BYTESEX_PDP)
67818 return PDP_swab((__force __u32)n);
67819diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
67820index fb08b0c..65fcc7e 100644
67821--- a/fs/ubifs/io.c
67822+++ b/fs/ubifs/io.c
67823@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
67824 return err;
67825 }
67826
67827-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
67828+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
67829 {
67830 int err;
67831
67832diff --git a/fs/udf/misc.c b/fs/udf/misc.c
67833index c175b4d..8f36a16 100644
67834--- a/fs/udf/misc.c
67835+++ b/fs/udf/misc.c
67836@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
67837
67838 u8 udf_tag_checksum(const struct tag *t)
67839 {
67840- u8 *data = (u8 *)t;
67841+ const u8 *data = (const u8 *)t;
67842 u8 checksum = 0;
67843 int i;
67844 for (i = 0; i < sizeof(struct tag); ++i)
67845diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
67846index 8d974c4..b82f6ec 100644
67847--- a/fs/ufs/swab.h
67848+++ b/fs/ufs/swab.h
67849@@ -22,7 +22,7 @@ enum {
67850 BYTESEX_BE
67851 };
67852
67853-static inline u64
67854+static inline u64 __intentional_overflow(-1)
67855 fs64_to_cpu(struct super_block *sbp, __fs64 n)
67856 {
67857 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
67858@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
67859 return (__force __fs64)cpu_to_be64(n);
67860 }
67861
67862-static inline u32
67863+static inline u32 __intentional_overflow(-1)
67864 fs32_to_cpu(struct super_block *sbp, __fs32 n)
67865 {
67866 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
67867diff --git a/fs/utimes.c b/fs/utimes.c
67868index aa138d6..5f3a811 100644
67869--- a/fs/utimes.c
67870+++ b/fs/utimes.c
67871@@ -1,6 +1,7 @@
67872 #include <linux/compiler.h>
67873 #include <linux/file.h>
67874 #include <linux/fs.h>
67875+#include <linux/security.h>
67876 #include <linux/linkage.h>
67877 #include <linux/mount.h>
67878 #include <linux/namei.h>
67879@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
67880 }
67881 }
67882 retry_deleg:
67883+
67884+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
67885+ error = -EACCES;
67886+ goto mnt_drop_write_and_out;
67887+ }
67888+
67889 mutex_lock(&inode->i_mutex);
67890 error = notify_change(path->dentry, &newattrs, &delegated_inode);
67891 mutex_unlock(&inode->i_mutex);
67892diff --git a/fs/xattr.c b/fs/xattr.c
67893index 4ef6985..a6cd6567 100644
67894--- a/fs/xattr.c
67895+++ b/fs/xattr.c
67896@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
67897 return rc;
67898 }
67899
67900+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
67901+ssize_t
67902+pax_getxattr(struct dentry *dentry, void *value, size_t size)
67903+{
67904+ struct inode *inode = dentry->d_inode;
67905+ ssize_t error;
67906+
67907+ error = inode_permission(inode, MAY_EXEC);
67908+ if (error)
67909+ return error;
67910+
67911+ if (inode->i_op->getxattr)
67912+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
67913+ else
67914+ error = -EOPNOTSUPP;
67915+
67916+ return error;
67917+}
67918+EXPORT_SYMBOL(pax_getxattr);
67919+#endif
67920+
67921 ssize_t
67922 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
67923 {
67924@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
67925 * Extended attribute SET operations
67926 */
67927 static long
67928-setxattr(struct dentry *d, const char __user *name, const void __user *value,
67929+setxattr(struct path *path, const char __user *name, const void __user *value,
67930 size_t size, int flags)
67931 {
67932 int error;
67933@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
67934 posix_acl_fix_xattr_from_user(kvalue, size);
67935 }
67936
67937- error = vfs_setxattr(d, kname, kvalue, size, flags);
67938+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
67939+ error = -EACCES;
67940+ goto out;
67941+ }
67942+
67943+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
67944 out:
67945 if (vvalue)
67946 vfree(vvalue);
67947@@ -376,7 +402,7 @@ retry:
67948 return error;
67949 error = mnt_want_write(path.mnt);
67950 if (!error) {
67951- error = setxattr(path.dentry, name, value, size, flags);
67952+ error = setxattr(&path, name, value, size, flags);
67953 mnt_drop_write(path.mnt);
67954 }
67955 path_put(&path);
67956@@ -412,7 +438,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
67957 audit_file(f.file);
67958 error = mnt_want_write_file(f.file);
67959 if (!error) {
67960- error = setxattr(f.file->f_path.dentry, name, value, size, flags);
67961+ error = setxattr(&f.file->f_path, name, value, size, flags);
67962 mnt_drop_write_file(f.file);
67963 }
67964 fdput(f);
67965@@ -598,7 +624,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
67966 * Extended attribute REMOVE operations
67967 */
67968 static long
67969-removexattr(struct dentry *d, const char __user *name)
67970+removexattr(struct path *path, const char __user *name)
67971 {
67972 int error;
67973 char kname[XATTR_NAME_MAX + 1];
67974@@ -609,7 +635,10 @@ removexattr(struct dentry *d, const char __user *name)
67975 if (error < 0)
67976 return error;
67977
67978- return vfs_removexattr(d, kname);
67979+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
67980+ return -EACCES;
67981+
67982+ return vfs_removexattr(path->dentry, kname);
67983 }
67984
67985 static int path_removexattr(const char __user *pathname,
67986@@ -623,7 +652,7 @@ retry:
67987 return error;
67988 error = mnt_want_write(path.mnt);
67989 if (!error) {
67990- error = removexattr(path.dentry, name);
67991+ error = removexattr(&path, name);
67992 mnt_drop_write(path.mnt);
67993 }
67994 path_put(&path);
67995@@ -649,14 +678,16 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
67996 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
67997 {
67998 struct fd f = fdget(fd);
67999+ struct path *path;
68000 int error = -EBADF;
68001
68002 if (!f.file)
68003 return error;
68004+ path = &f.file->f_path;
68005 audit_file(f.file);
68006 error = mnt_want_write_file(f.file);
68007 if (!error) {
68008- error = removexattr(f.file->f_path.dentry, name);
68009+ error = removexattr(path, name);
68010 mnt_drop_write_file(f.file);
68011 }
68012 fdput(f);
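[Editorial sketch] The setxattr()/removexattr() refactor above widens the helpers from a dentry to a full struct path, because the grsecurity hooks need the vfsmount as well as the dentry. A toy-typed sketch of that refactor; all names here are illustrative, not the kernel's:

    #include <errno.h>
    #include <stdio.h>

    struct mnt    { const char *id; };
    struct dentry { const char *name; };
    struct path   { struct mnt *mnt; struct dentry *dentry; };

    /* Policy hook wanting both halves of the path
     * (cf. gr_acl_handle_setxattr above). */
    static int policy_allows(const struct path *p)
    {
        return p->mnt && p->dentry;   /* trivially permissive demo */
    }

    /* Before: the helper took only the dentry.  After: it takes the
     * path, so the mount is available to the policy check. */
    static int do_setxattr(const struct path *p, const char *name)
    {
        if (!policy_allows(p))
            return -EACCES;
        printf("set %s on %s@%s\n", name, p->dentry->name, p->mnt->id);
        return 0;
    }

    int main(void)
    {
        struct mnt m = { "mnt0" };
        struct dentry d = { "file" };
        struct path p = { &m, &d };

        return do_setxattr(&p, "user.demo");
    }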
68013diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
68014index 4e20fe7..6d1a55a 100644
68015--- a/fs/xfs/libxfs/xfs_bmap.c
68016+++ b/fs/xfs/libxfs/xfs_bmap.c
68017@@ -580,7 +580,7 @@ xfs_bmap_validate_ret(
68018
68019 #else
68020 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
68021-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
68022+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
68023 #endif /* DEBUG */
68024
68025 /*
68026diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
68027index 098cd78..724d3f8 100644
68028--- a/fs/xfs/xfs_dir2_readdir.c
68029+++ b/fs/xfs/xfs_dir2_readdir.c
68030@@ -140,7 +140,12 @@ xfs_dir2_sf_getdents(
68031 ino = dp->d_ops->sf_get_ino(sfp, sfep);
68032 filetype = dp->d_ops->sf_get_ftype(sfep);
68033 ctx->pos = off & 0x7fffffff;
68034- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
68035+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
68036+ char name[sfep->namelen];
68037+ memcpy(name, sfep->name, sfep->namelen);
68038+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
68039+ return 0;
68040+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
68041 xfs_dir3_get_dtype(dp->i_mount, filetype)))
68042 return 0;
68043 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
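[Editorial sketch] The xfs readdir hunk copies each shortform name to a stack buffer before dir_emit() when the entries live in the inode's inline data, because emitting to userspace can block while the shared inline buffer changes underneath. The defensive-copy pattern reduced to userspace; emit() is a hypothetical stand-in for dir_emit(), and a fixed bound replaces the VLA:

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for dir_emit(): may sleep/fault in the real kernel. */
    static int emit(const char *name, size_t len)
    {
        printf("%.*s\n", (int)len, name);
        return 1;
    }

    /* Snapshot a name out of a shared, mutable buffer before handing
     * it to a callback that can block -- the same defensive copy. */
    static int emit_from_shared(const char *shared, size_t len)
    {
        char name[256];   /* fixed bound instead of a VLA */

        if (len > sizeof(name))
            return 0;
        memcpy(name, shared, len);
        return emit(name, len);
    }

    int main(void)
    {
        const char entry[] = "inline-entry";

        return !emit_from_shared(entry, sizeof(entry) - 1);
    }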
68044diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
68045index a183198..6b52f52 100644
68046--- a/fs/xfs/xfs_ioctl.c
68047+++ b/fs/xfs/xfs_ioctl.c
68048@@ -119,7 +119,7 @@ xfs_find_handle(
68049 }
68050
68051 error = -EFAULT;
68052- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
68053+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
68054 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
68055 goto out_put;
68056
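[Editorial sketch] The xfs_find_handle() change adds a bounds check so hsize can never drive copy_to_user() past the end of the on-stack handle. The check-before-copy idiom as a standalone sketch; copy_out() is a hypothetical stand-in for copy_to_user():

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    struct handle { unsigned char data[24]; };

    /* Hypothetical stand-in for copy_to_user(). */
    static int copy_out(void *dst, const void *src, size_t n)
    {
        memcpy(dst, src, n);
        return 0;
    }

    static int export_handle(void *dst, size_t hsize)
    {
        struct handle h = { {0} };

        /* Reject oversized requests first, as the patch does, so the
         * copy can never read beyond the source object. */
        if (hsize > sizeof(h) || copy_out(dst, &h, hsize))
            return -EFAULT;
        return 0;
    }

    int main(void)
    {
        unsigned char buf[64];

        printf("%d %d\n", export_handle(buf, 24), export_handle(buf, 64));
        return 0;
    }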
68057diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
68058index c31d2c2..6ec8f62 100644
68059--- a/fs/xfs/xfs_linux.h
68060+++ b/fs/xfs/xfs_linux.h
68061@@ -234,7 +234,7 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
68062 * of the compiler which do not like us using do_div in the middle
68063 * of large functions.
68064 */
68065-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
68066+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
68067 {
68068 __u32 mod;
68069
68070@@ -290,7 +290,7 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
68071 return 0;
68072 }
68073 #else
68074-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
68075+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
68076 {
68077 __u32 mod;
68078
68079diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
68080new file mode 100644
68081index 0000000..31f8fe4
68082--- /dev/null
68083+++ b/grsecurity/Kconfig
68084@@ -0,0 +1,1182 @@
68085+#
68086+# grsecurity configuration
68087+#
68088+menu "Memory Protections"
68089+depends on GRKERNSEC
68090+
68091+config GRKERNSEC_KMEM
68092+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
68093+ default y if GRKERNSEC_CONFIG_AUTO
68094+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
68095+ help
68096+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to be written
68097+ to or read from, so they cannot be used to modify or leak the contents of
68098+ the running kernel. Opening /dev/port will also be denied, writing to
68099+ /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
68100+ If you have module support disabled, enabling this will close up several
68101+ ways that are currently used to insert malicious code into the running
68102+ kernel.
68103+
68104+ Even with this feature enabled, we still highly recommend that
68105+ you use the RBAC system, as it is still possible for an attacker to
68106+ modify the running kernel through other more obscure methods.
68107+
68108+ It is highly recommended that you say Y here if you meet all the
68109+ conditions above.
68110+
68111+config GRKERNSEC_VM86
68112+ bool "Restrict VM86 mode"
68113+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68114+ depends on X86_32
68115+
68116+ help
68117+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
68118+ make use of a special execution mode on 32bit x86 processors called
68119+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
68120+ video cards and will still work with this option enabled. The purpose
68121+ of the option is to prevent exploitation of emulation errors in
68122+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
68123+ Nearly all users should be able to enable this option.
68124+
68125+config GRKERNSEC_IO
68126+ bool "Disable privileged I/O"
68127+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68128+ depends on X86
68129+ select RTC_CLASS
68130+ select RTC_INTF_DEV
68131+ select RTC_DRV_CMOS
68132+
68133+ help
68134+ If you say Y here, all ioperm and iopl calls will return an error.
68135+ Ioperm and iopl can be used to modify the running kernel.
68136+ Unfortunately, some programs need this access to operate properly,
68137+ the most notable of which are XFree86 and hwclock. The hwclock case can
68138+ be remedied by having RTC support in the kernel, so real-time
68139+ clock support is enabled if this option is enabled, to ensure
68140+ that hwclock operates correctly. If hwclock still does not work,
68141+ either update udev or symlink /dev/rtc to /dev/rtc0.
68142+
68143+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
68144+ you may not be able to boot into a graphical environment with this
68145+ option enabled. In this case, you should use the RBAC system instead.
68146+
68147+config GRKERNSEC_BPF_HARDEN
68148+ bool "Harden BPF interpreter"
68149+ default y if GRKERNSEC_CONFIG_AUTO
68150+ help
68151+ Unlike previous versions of grsecurity that hardened both the BPF
68152+ interpreted code against corruption at rest as well as the JIT code
68153+ against JIT-spray attacks and attacker-controlled immediate values
68154+ for ROP, this feature will enforce disabling of the new eBPF JIT engine
68155+ and will ensure the interpreted code is read-only at rest. This feature
68156+ may be removed at a later time when eBPF stabilizes, to revert entirely
68157+ to the more secure pre-3.16 BPF interpreter/JIT.
68158+
68159+ If you're using KERNEXEC, it's recommended that you enable this option
68160+ to supplement the hardening of the kernel.
68161+
68162+config GRKERNSEC_PERF_HARDEN
68163+ bool "Disable unprivileged PERF_EVENTS usage by default"
68164+ default y if GRKERNSEC_CONFIG_AUTO
68165+ depends on PERF_EVENTS
68166+ help
68167+ If you say Y here, the range of acceptable values for the
68168+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
68169+ default to a new value: 3. When the sysctl is set to this value, no
68170+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
68171+
68172+ Though PERF_EVENTS can be used legitimately for performance monitoring
68173+ and low-level application profiling, it is forced on regardless of
68174+ configuration, has been at fault for several vulnerabilities, and
68175+ creates new opportunities for side channels and other information leaks.
68176+
68177+ This feature puts PERF_EVENTS into a secure default state and permits
68178+ the administrator to change out of it temporarily if unprivileged
68179+ application profiling is needed.
68180+
68181+config GRKERNSEC_RAND_THREADSTACK
68182+ bool "Insert random gaps between thread stacks"
68183+ default y if GRKERNSEC_CONFIG_AUTO
68184+ depends on PAX_RANDMMAP && !PPC
68185+ help
68186+ If you say Y here, a random-sized gap will be enforced between allocated
68187+ thread stacks. Glibc's NPTL and other threading libraries that
68188+ pass MAP_STACK to the kernel for thread stack allocation are supported.
68189+ The implementation currently provides 8 bits of entropy for the gap.
68190+
68191+ Many distributions do not compile threaded remote services with the
68192+ -fstack-check argument to GCC, causing the variable-sized stack-based
68193+ allocator, alloca(), to not probe the stack on allocation. This
68194+ permits an unbounded alloca() to skip over any guard page and potentially
68195+ modify another thread's stack reliably. An enforced random gap
68196+ reduces the reliability of such an attack and increases the chance
68197+ that such a read/write to another thread's stack instead lands in
68198+ an unmapped area, causing a crash and triggering grsecurity's
68199+ anti-bruteforcing logic.
68200+
68201+config GRKERNSEC_PROC_MEMMAP
68202+ bool "Harden ASLR against information leaks and entropy reduction"
68203+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
68204+ depends on PAX_NOEXEC || PAX_ASLR
68205+ help
68206+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
68207+ give no information about the addresses of a task's mappings if
68208+ PaX features that rely on random addresses are enabled on that task.
68209+ In addition to sanitizing this information and disabling other
68210+ dangerous sources of information, this option causes reads of sensitive
68211+ /proc/<pid> entries to be denied where the file descriptor was opened in
68212+ a different task than the one performing the read. Such attempts are logged.
68213+ This option also limits argv/env strings for suid/sgid binaries
68214+ to 512KB to prevent a complete exhaustion of the stack entropy provided
68215+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
68216+ binaries to prevent alternative mmap layouts from being abused.
68217+
68218+ If you use PaX it is essential that you say Y here as it closes up
68219+ several holes that make full ASLR useless locally.
68220+
68221+
68222+config GRKERNSEC_KSTACKOVERFLOW
68223+ bool "Prevent kernel stack overflows"
68224+ default y if GRKERNSEC_CONFIG_AUTO
68225+ depends on !IA64 && 64BIT
68226+ help
68227+ If you say Y here, the kernel's process stacks will be allocated
68228+ with vmalloc instead of the kernel's default allocator. This
68229+ introduces guard pages that in combination with the alloca checking
68230+ of the STACKLEAK feature prevents all forms of kernel process stack
68231+ overflow abuse. Note that this is different from kernel stack
68232+ buffer overflows.
68233+
68234+config GRKERNSEC_BRUTE
68235+ bool "Deter exploit bruteforcing"
68236+ default y if GRKERNSEC_CONFIG_AUTO
68237+ help
68238+ If you say Y here, attempts to bruteforce exploits against forking
68239+ daemons such as apache or sshd, as well as against suid/sgid binaries
68240+ will be deterred. When a child of a forking daemon is killed by PaX
68241+ or crashes due to an illegal instruction or other suspicious signal,
68242+ the parent process will be delayed 30 seconds upon every subsequent
68243+ fork until the administrator is able to assess the situation and
68244+ restart the daemon.
68245+ In the suid/sgid case, the attempt is logged, the user has all their
68246+ existing instances of the suid/sgid binary terminated and will
68247+ be unable to execute any suid/sgid binaries for 15 minutes.
68248+
68249+ It is recommended that you also enable signal logging in the auditing
68250+ section so that logs are generated when a process triggers a suspicious
68251+ signal.
68252+ If the sysctl option is enabled, a sysctl option with name
68253+ "deter_bruteforce" is created.
68254+
68255+config GRKERNSEC_MODHARDEN
68256+ bool "Harden module auto-loading"
68257+ default y if GRKERNSEC_CONFIG_AUTO
68258+ depends on MODULES
68259+ help
68260+ If you say Y here, module auto-loading in response to use of some
68261+ feature implemented by an unloaded module will be restricted to
68262+ root users. Enabling this option helps defend against attacks
68263+ by unprivileged users who abuse the auto-loading behavior to
68264+ cause a vulnerable module to load that is then exploited.
68265+
68266+ If this option prevents a legitimate use of auto-loading for a
68267+ non-root user, the administrator can execute modprobe manually
68268+ with the exact name of the module mentioned in the alert log.
68269+ Alternatively, the administrator can add the module to the list
68270+ of modules loaded at boot by modifying init scripts.
68271+
68272+ Modification of init scripts will most likely be needed on
68273+ Ubuntu servers with encrypted home directory support enabled,
68274+ as the first non-root user logging in will cause the ecb(aes),
68275+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
68276+
68277+config GRKERNSEC_HIDESYM
68278+ bool "Hide kernel symbols"
68279+ default y if GRKERNSEC_CONFIG_AUTO
68280+ select PAX_USERCOPY_SLABS
68281+ help
68282+ If you say Y here, getting information on loaded modules, and
68283+ displaying all kernel symbols through a syscall will be restricted
68284+ to users with CAP_SYS_MODULE. For software compatibility reasons,
68285+ /proc/kallsyms will be restricted to the root user. The RBAC
68286+ system can hide that entry even from root.
68287+
68288+ This option also prevents leaking of kernel addresses through
68289+ several /proc entries.
68290+
68291+ Note that this option is only effective provided the following
68292+ conditions are met:
68293+ 1) The kernel using grsecurity is not precompiled by some distribution
68294+ 2) You have also enabled GRKERNSEC_DMESG
68295+ 3) You are using the RBAC system and hiding other files such as your
68296+ kernel image and System.map. Alternatively, enabling this option
68297+ causes the permissions on /boot, /lib/modules, and the kernel
68298+ source directory to change at compile time to prevent
68299+ reading by non-root users.
68300+ If the above conditions are met, this option will aid in providing a
68301+ useful protection against local kernel exploitation of overflows
68302+ and arbitrary read/write vulnerabilities.
68303+
68304+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
68305+ in addition to this feature.
68306+
68307+config GRKERNSEC_RANDSTRUCT
68308+ bool "Randomize layout of sensitive kernel structures"
68309+ default y if GRKERNSEC_CONFIG_AUTO
68310+ select GRKERNSEC_HIDESYM
68311+ select MODVERSIONS if MODULES
68312+ help
68313+ If you say Y here, the layouts of a number of sensitive kernel
68314+ structures (task, fs, cred, etc) and all structures composed entirely
68315+ of function pointers (aka "ops" structs) will be randomized at compile-time.
68316+ This can introduce the requirement of an additional infoleak
68317+ vulnerability for exploits targeting these structure types.
68318+
68319+ Enabling this feature will introduce some performance impact, slightly
68320+ increase memory usage, and prevent the use of forensic tools like
68321+ Volatility against the system (unless the kernel source tree isn't
68322+ cleaned after kernel installation).
68323+
68324+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
68325+ It remains after a make clean to allow for external modules to be compiled
68326+ with the existing seed and will be removed by a make mrproper or
68327+ make distclean.
68328+
68329+ Note that the implementation requires gcc 4.6.4 or newer. You may need
68330+ to install the supporting headers explicitly in addition to the normal
68331+ gcc package.
68332+
68333+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
68334+ bool "Use cacheline-aware structure randomization"
68335+ depends on GRKERNSEC_RANDSTRUCT
68336+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
68337+ help
68338+ If you say Y here, the RANDSTRUCT randomization will make a best effort
68339+ at restricting randomization to cacheline-sized groups of elements. It
68340+ will further not randomize bitfields in structures. This reduces the
68341+ performance hit of RANDSTRUCT at the cost of weakened randomization.
68342+
68343+config GRKERNSEC_KERN_LOCKOUT
68344+ bool "Active kernel exploit response"
68345+ default y if GRKERNSEC_CONFIG_AUTO
68346+ depends on X86 || ARM || PPC || SPARC
68347+ help
68348+ If you say Y here, when a PaX alert is triggered due to suspicious
68349+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
68350+ or an OOPS occurs due to bad memory accesses, instead of just
68351+ terminating the offending process (and potentially allowing
68352+ a subsequent exploit from the same user), we will take one of two
68353+ actions:
68354+ If the user was root, we will panic the system
68355+ If the user was non-root, we will log the attempt, terminate
68356+ all processes owned by the user, then prevent them from creating
68357+ any new processes until the system is restarted
68358+ This deters repeated kernel exploitation/bruteforcing attempts
68359+ and is useful for later forensics.
68360+
68361+config GRKERNSEC_OLD_ARM_USERLAND
68362+ bool "Old ARM userland compatibility"
68363+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
68364+ help
68365+ If you say Y here, stubs of executable code to perform such operations
68366+ as "compare-exchange" will be placed at fixed locations in the ARM vector
68367+ table. This is unfortunately needed for old ARM userland meant to run
68368+ across a wide range of processors. Without this option enabled,
68369+ the get_tls and data memory barrier stubs will be emulated by the kernel,
68370+ which is enough for Linaro userlands or other userlands designed for v6
68371+ and newer ARM CPUs. It's recommended that you try without this option enabled
68372+ first, and only enable it if your userland does not boot (it will likely fail
68373+ at init time).
68374+
68375+endmenu
68376+menu "Role Based Access Control Options"
68377+depends on GRKERNSEC
68378+
68379+config GRKERNSEC_RBAC_DEBUG
68380+ bool
68381+
68382+config GRKERNSEC_NO_RBAC
68383+ bool "Disable RBAC system"
68384+ help
68385+ If you say Y here, the /dev/grsec device will be removed from the kernel,
68386+ preventing the RBAC system from being enabled. You should only say Y
68387+ here if you have no intention of using the RBAC system, so as to prevent
68388+ an attacker with root access from misusing the RBAC system to hide files
68389+ and processes when loadable module support and /dev/[k]mem have been
68390+ locked down.
68391+
68392+config GRKERNSEC_ACL_HIDEKERN
68393+ bool "Hide kernel processes"
68394+ help
68395+ If you say Y here, all kernel threads will be hidden to all
68396+ processes but those whose subject has the "view hidden processes"
68397+ flag.
68398+
68399+config GRKERNSEC_ACL_MAXTRIES
68400+ int "Maximum tries before password lockout"
68401+ default 3
68402+ help
68403+ This option enforces the maximum number of times a user can attempt
68404+ to authorize themselves with the grsecurity RBAC system before being
68405+ denied the ability to attempt authorization again for a specified time.
68406+ The lower the number, the harder it will be to brute-force a password.
68407+
68408+config GRKERNSEC_ACL_TIMEOUT
68409+ int "Time to wait after max password tries, in seconds"
68410+ default 30
68411+ help
68412+ This option specifies the time the user must wait after attempting to
68413+ authorize to the RBAC system with the maximum number of invalid
68414+ passwords. The higher the number, the harder it will be to brute-force
68415+ a password.
68416+
68417+endmenu
68418+menu "Filesystem Protections"
68419+depends on GRKERNSEC
68420+
68421+config GRKERNSEC_PROC
68422+ bool "Proc restrictions"
68423+ default y if GRKERNSEC_CONFIG_AUTO
68424+ help
68425+ If you say Y here, the permissions of the /proc filesystem
68426+ will be altered to enhance system security and privacy. You MUST
68427+ choose either a user only restriction or a user and group restriction.
68428+ Depending upon the option you choose, you can either restrict users to
68429+ see only the processes they themselves run, or choose a group that can
68430+ view all processes and files normally restricted to root if you choose
68431+ the "restrict to user only" option. NOTE: If you're running identd or
68432+ ntpd as a non-root user, you will have to run it as the group you
68433+ specify here.
68434+
68435+config GRKERNSEC_PROC_USER
68436+ bool "Restrict /proc to user only"
68437+ depends on GRKERNSEC_PROC
68438+ help
68439+ If you say Y here, non-root users will only be able to view their own
68440+ processes, and will be restricted from viewing network-related
68441+ information and from viewing kernel symbol and module information.
68442+
68443+config GRKERNSEC_PROC_USERGROUP
68444+ bool "Allow special group"
68445+ default y if GRKERNSEC_CONFIG_AUTO
68446+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
68447+ help
68448+ If you say Y here, you will be able to select a group that will be
68449+ able to view all processes and network-related information. If you've
68450+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
68451+ remain hidden. This option is useful if you want to run identd as
68452+ a non-root user. The group you select may also be chosen at boot time
68453+ via "grsec_proc_gid=" on the kernel commandline.
68454+
68455+config GRKERNSEC_PROC_GID
68456+ int "GID for special group"
68457+ depends on GRKERNSEC_PROC_USERGROUP
68458+ default 1001
68459+
68460+config GRKERNSEC_PROC_ADD
68461+ bool "Additional restrictions"
68462+ default y if GRKERNSEC_CONFIG_AUTO
68463+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
68464+ help
68465+ If you say Y here, additional restrictions will be placed on
68466+ /proc that keep normal users from viewing device information and
68467+ slabinfo information that could be useful for exploits.
68468+
68469+config GRKERNSEC_LINK
68470+ bool "Linking restrictions"
68471+ default y if GRKERNSEC_CONFIG_AUTO
68472+ help
68473+ If you say Y here, /tmp race exploits will be prevented, since users
68474+ will no longer be able to follow symlinks owned by other users in
68475+ world-writable +t directories (e.g. /tmp), unless the owner of the
68476+ symlink is the owner of the directory. Users will also not be
68477+ able to hardlink to files they do not own. If the sysctl option is
68478+ enabled, a sysctl option with name "linking_restrictions" is created.
68479+
68480+config GRKERNSEC_SYMLINKOWN
68481+ bool "Kernel-enforced SymlinksIfOwnerMatch"
68482+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
68483+ help
68484+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
68485+ that prevents it from being used as a security feature. As Apache
68486+ verifies the symlink by performing a stat() against the target of
68487+ the symlink before it is followed, an attacker can setup a symlink
68488+ to point to a same-owned file, then replace the symlink with one
68489+ that targets another user's file just after Apache "validates" the
68490+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
68491+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
68492+ will be in place for the group you specify. If the sysctl option
68493+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
68494+ created.
68495+
68496+config GRKERNSEC_SYMLINKOWN_GID
68497+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
68498+ depends on GRKERNSEC_SYMLINKOWN
68499+ default 1006
68500+ help
68501+ Setting this GID determines what group kernel-enforced
68502+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
68503+ is enabled, a sysctl option with name "symlinkown_gid" is created.
68504+
68505+config GRKERNSEC_FIFO
68506+ bool "FIFO restrictions"
68507+ default y if GRKERNSEC_CONFIG_AUTO
68508+ help
68509+ If you say Y here, users will not be able to write to FIFOs they don't
68510+ own in world-writable +t directories (e.g. /tmp), unless the owner of
68511+ the FIFO is also the owner of the directory it's held in. If the sysctl
68512+ option is enabled, a sysctl option with name "fifo_restrictions" is
68513+ created.
68514+
68515+config GRKERNSEC_SYSFS_RESTRICT
68516+ bool "Sysfs/debugfs restriction"
68517+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68518+ depends on SYSFS
68519+ help
68520+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
68521+ any filesystem normally mounted under it (e.g. debugfs) will be
68522+ mostly accessible only by root. These filesystems generally provide access
68523+ to hardware and debug information that isn't appropriate for unprivileged
68524+ users of the system. Sysfs and debugfs have also become a large source
68525+ of new vulnerabilities, ranging from infoleaks to local compromise.
68526+ There has been very little oversight with an eye toward security involved
68527+ in adding new exporters of information to these filesystems, so their
68528+ use is discouraged.
68529+ For reasons of compatibility, a few directories have been whitelisted
68530+ for access by non-root users:
68531+ /sys/fs/selinux
68532+ /sys/fs/fuse
68533+ /sys/devices/system/cpu
68534+
68535+config GRKERNSEC_ROFS
68536+ bool "Runtime read-only mount protection"
68537+ depends on SYSCTL
68538+ help
68539+ If you say Y here, a sysctl option with name "romount_protect" will
68540+ be created. By setting this option to 1 at runtime, filesystems
68541+ will be protected in the following ways:
68542+ * No new writable mounts will be allowed
68543+ * Existing read-only mounts won't be able to be remounted read/write
68544+ * Write operations will be denied on all block devices
68545+ This option acts independently of grsec_lock: once it is set to 1,
68546+ it cannot be turned off. Therefore, please be mindful of the resulting
68547+ behavior if this option is enabled in an init script on a read-only
68548+ filesystem.
68549+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
68550+ and GRKERNSEC_IO should be enabled and module loading disabled via
68551+ config or at runtime.
68552+ This feature is mainly intended for secure embedded systems.
68553+
68554+
68555+config GRKERNSEC_DEVICE_SIDECHANNEL
68556+ bool "Eliminate stat/notify-based device sidechannels"
68557+ default y if GRKERNSEC_CONFIG_AUTO
68558+ help
68559+ If you say Y here, timing analyses on block or character
68560+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
68561+ will be thwarted for unprivileged users. If a process without
68562+ CAP_MKNOD stats such a device, the last access and last modify times
68563+ will match the device's create time. No access or modify events
68564+ will be triggered through inotify/dnotify/fanotify for such devices.
68565+ This feature will prevent attacks that could, at a minimum,
68566+ allow an attacker to determine the administrator's password length.
68567+
68568+config GRKERNSEC_CHROOT
68569+ bool "Chroot jail restrictions"
68570+ default y if GRKERNSEC_CONFIG_AUTO
68571+ help
68572+ If you say Y here, you will be able to choose several options that will
68573+ make breaking out of a chrooted jail much more difficult. If you
68574+ encounter no software incompatibilities with the following options, it
68575+ is recommended that you enable each one.
68576+
68577+ Note that the chroot restrictions are not intended to apply to "chroots"
68578+ to directories that are simple bind mounts of the global root filesystem.
68579+ For several other reasons, a user shouldn't expect any significant
68580+ security by performing such a chroot.
68581+
68582+config GRKERNSEC_CHROOT_MOUNT
68583+ bool "Deny mounts"
68584+ default y if GRKERNSEC_CONFIG_AUTO
68585+ depends on GRKERNSEC_CHROOT
68586+ help
68587+ If you say Y here, processes inside a chroot will not be able to
68588+ mount or remount filesystems. If the sysctl option is enabled, a
68589+ sysctl option with name "chroot_deny_mount" is created.
68590+
68591+config GRKERNSEC_CHROOT_DOUBLE
68592+ bool "Deny double-chroots"
68593+ default y if GRKERNSEC_CONFIG_AUTO
68594+ depends on GRKERNSEC_CHROOT
68595+ help
68596+ If you say Y here, processes inside a chroot will not be able to chroot
68597+ again outside the chroot. This is a widely used method of breaking
68598+ out of a chroot jail and should not be allowed. If the sysctl
68599+ option is enabled, a sysctl option with name
68600+ "chroot_deny_chroot" is created.
68601+
68602+config GRKERNSEC_CHROOT_PIVOT
68603+ bool "Deny pivot_root in chroot"
68604+ default y if GRKERNSEC_CONFIG_AUTO
68605+ depends on GRKERNSEC_CHROOT
68606+ help
68607+ If you say Y here, processes inside a chroot will not be able to use
68608+ a function called pivot_root() that was introduced in Linux 2.3.41. It
68609+ works similarly to chroot in that it changes the root filesystem. This
68610+ function could be misused in a chrooted process to attempt to break out
68611+ of the chroot, and therefore should not be allowed. If the sysctl
68612+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
68613+ created.
68614+
68615+config GRKERNSEC_CHROOT_CHDIR
68616+ bool "Enforce chdir(\"/\") on all chroots"
68617+ default y if GRKERNSEC_CONFIG_AUTO
68618+ depends on GRKERNSEC_CHROOT
68619+ help
68620+ If you say Y here, the current working directory of all newly-chrooted
68621+ applications will be set to the root directory of the chroot.
68622+ The man page on chroot(2) states:
68623+ Note that this call does not change the current working
68624+ directory, so that `.' can be outside the tree rooted at
68625+ `/'. In particular, the super-user can escape from a
68626+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
68627+
68628+ It is recommended that you say Y here, since it's not known to break
68629+ any software. If the sysctl option is enabled, a sysctl option with
68630+ name "chroot_enforce_chdir" is created.
68631+
68632+config GRKERNSEC_CHROOT_CHMOD
68633+ bool "Deny (f)chmod +s"
68634+ default y if GRKERNSEC_CONFIG_AUTO
68635+ depends on GRKERNSEC_CHROOT
68636+ help
68637+ If you say Y here, processes inside a chroot will not be able to chmod
68638+ or fchmod files to make them have suid or sgid bits. This protects
68639+ against another published method of breaking a chroot. If the sysctl
68640+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
68641+ created.
68642+
68643+config GRKERNSEC_CHROOT_FCHDIR
68644+ bool "Deny fchdir and fhandle out of chroot"
68645+ default y if GRKERNSEC_CONFIG_AUTO
68646+ depends on GRKERNSEC_CHROOT
68647+ help
68648+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
68649+ to a file descriptor of the chrooting process that points to a directory
68650+ outside the filesystem will be stopped. Additionally, this option prevents
68651+ use of the recently-created syscall for opening files by a guessable "file
68652+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
68653+ with name "chroot_deny_fchdir" is created.
68654+
68655+config GRKERNSEC_CHROOT_MKNOD
68656+ bool "Deny mknod"
68657+ default y if GRKERNSEC_CONFIG_AUTO
68658+ depends on GRKERNSEC_CHROOT
68659+ help
68660+ If you say Y here, processes inside a chroot will not be allowed to
68661+ mknod. The problem with using mknod inside a chroot is that it
68662+ would allow an attacker to create a device entry that is the same
68663+ as one on the physical root of your system, which could be anything
68664+ from the console device to a device for your hard drive (which
68665+ they could then use to wipe the drive or steal data). It is recommended
68666+ that you say Y here, unless you run into software incompatibilities.
68667+ If the sysctl option is enabled, a sysctl option with name
68668+ "chroot_deny_mknod" is created.
68669+
68670+config GRKERNSEC_CHROOT_SHMAT
68671+ bool "Deny shmat() out of chroot"
68672+ default y if GRKERNSEC_CONFIG_AUTO
68673+ depends on GRKERNSEC_CHROOT
68674+ help
68675+ If you say Y here, processes inside a chroot will not be able to attach
68676+ to shared memory segments that were created outside of the chroot jail.
68677+ It is recommended that you say Y here. If the sysctl option is enabled,
68678+ a sysctl option with name "chroot_deny_shmat" is created.
68679+
68680+config GRKERNSEC_CHROOT_UNIX
68681+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
68682+ default y if GRKERNSEC_CONFIG_AUTO
68683+ depends on GRKERNSEC_CHROOT
68684+ help
68685+ If you say Y here, processes inside a chroot will not be able to
68686+ connect to abstract (meaning not belonging to a filesystem) Unix
68687+ domain sockets that were bound outside of a chroot. It is recommended
68688+ that you say Y here. If the sysctl option is enabled, a sysctl option
68689+ with name "chroot_deny_unix" is created.
68690+
68691+config GRKERNSEC_CHROOT_FINDTASK
68692+ bool "Protect outside processes"
68693+ default y if GRKERNSEC_CONFIG_AUTO
68694+ depends on GRKERNSEC_CHROOT
68695+ help
68696+ If you say Y here, processes inside a chroot will not be able to
68697+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
68698+ getsid, or view any process outside of the chroot. If the sysctl
68699+ option is enabled, a sysctl option with name "chroot_findtask" is
68700+ created.
68701+
68702+config GRKERNSEC_CHROOT_NICE
68703+ bool "Restrict priority changes"
68704+ default y if GRKERNSEC_CONFIG_AUTO
68705+ depends on GRKERNSEC_CHROOT
68706+ help
68707+ If you say Y here, processes inside a chroot will not be able to raise
68708+ the priority of processes in the chroot, or alter the priority of
68709+ processes outside the chroot. This provides more security than simply
68710+ removing CAP_SYS_NICE from the process' capability set. If the
68711+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
68712+ is created.
68713+
68714+config GRKERNSEC_CHROOT_SYSCTL
68715+ bool "Deny sysctl writes"
68716+ default y if GRKERNSEC_CONFIG_AUTO
68717+ depends on GRKERNSEC_CHROOT
68718+ help
68719+ If you say Y here, an attacker in a chroot will not be able to
68720+ write to sysctl entries, either by sysctl(2) or through a /proc
68721+ interface. It is strongly recommended that you say Y here. If the
68722+ sysctl option is enabled, a sysctl option with name
68723+ "chroot_deny_sysctl" is created.
68724+
68725+config GRKERNSEC_CHROOT_RENAME
68726+ bool "Deny bad renames"
68727+ default y if GRKERNSEC_CONFIG_AUTO
68728+ depends on GRKERNSEC_CHROOT
68729+ help
68730+ If you say Y here, an attacker in a chroot will not be able to
68731+ abuse the ability to create double chroots to break out of the
68732+ chroot by exploiting a race condition between a rename of a directory
68733+ within a chroot against an open of a symlink with relative path
68734+ components. This feature will likewise prevent an accomplice outside
68735+ a chroot from enabling a user inside the chroot to break out and make
68736+ use of their credentials on the global filesystem. Enabling this
68737+ feature is essential to prevent root users from breaking out of a
68738+ chroot. If the sysctl option is enabled, a sysctl option with name
68739+ "chroot_deny_bad_rename" is created.
68740+
68741+config GRKERNSEC_CHROOT_CAPS
68742+ bool "Capability restrictions"
68743+ default y if GRKERNSEC_CONFIG_AUTO
68744+ depends on GRKERNSEC_CHROOT
68745+ help
68746+ If you say Y here, the capabilities on all processes within a
68747+ chroot jail will be lowered to stop module insertion, raw i/o,
68748+ system and net admin tasks, rebooting the system, modifying immutable
68749+ files, modifying IPC owned by another, and changing the system time.
68750+	  This is left as an option because it can break some apps.  Disable this
68751+ if your chrooted apps are having problems performing those kinds of
68752+ tasks. If the sysctl option is enabled, a sysctl option with
68753+ name "chroot_caps" is created.
68754+
68755+config GRKERNSEC_CHROOT_INITRD
68756+ bool "Exempt initrd tasks from restrictions"
68757+ default y if GRKERNSEC_CONFIG_AUTO
68758+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
68759+ help
68760+ If you say Y here, tasks started prior to init will be exempted from
68761+	  grsecurity's chroot restrictions.  This option is mainly meant to
68762+	  address Plymouth's habit of performing privileged operations
68763+	  unnecessarily in a chroot.
68764+
68765+endmenu
68766+menu "Kernel Auditing"
68767+depends on GRKERNSEC
68768+
68769+config GRKERNSEC_AUDIT_GROUP
68770+ bool "Single group for auditing"
68771+ help
68772+ If you say Y here, the exec and chdir logging features will only operate
68773+ on a group you specify. This option is recommended if you only want to
68774+	  watch certain users instead of having a large number of logs from the
68775+ entire system. If the sysctl option is enabled, a sysctl option with
68776+ name "audit_group" is created.
68777+
68778+config GRKERNSEC_AUDIT_GID
68779+ int "GID for auditing"
68780+ depends on GRKERNSEC_AUDIT_GROUP
68781+ default 1007
68782+
68783+config GRKERNSEC_EXECLOG
68784+ bool "Exec logging"
68785+ help
68786+ If you say Y here, all execve() calls will be logged (since the
68787+ other exec*() calls are frontends to execve(), all execution
68788+ will be logged). Useful for shell-servers that like to keep track
68789+ of their users. If the sysctl option is enabled, a sysctl option with
68790+ name "exec_logging" is created.
68791+ WARNING: This option when enabled will produce a LOT of logs, especially
68792+ on an active system.
68793+
68794+config GRKERNSEC_RESLOG
68795+ bool "Resource logging"
68796+ default y if GRKERNSEC_CONFIG_AUTO
68797+ help
68798+ If you say Y here, all attempts to overstep resource limits will
68799+ be logged with the resource name, the requested size, and the current
68800+ limit. It is highly recommended that you say Y here. If the sysctl
68801+ option is enabled, a sysctl option with name "resource_logging" is
68802+ created. If the RBAC system is enabled, the sysctl value is ignored.
68803+
68804+config GRKERNSEC_CHROOT_EXECLOG
68805+ bool "Log execs within chroot"
68806+ help
68807+ If you say Y here, all executions inside a chroot jail will be logged
68808+ to syslog. This can cause a large amount of logs if certain
68809+	  applications (e.g. djb's daemontools) are installed on the system, and
68810+ is therefore left as an option. If the sysctl option is enabled, a
68811+ sysctl option with name "chroot_execlog" is created.
68812+
68813+config GRKERNSEC_AUDIT_PTRACE
68814+ bool "Ptrace logging"
68815+ help
68816+ If you say Y here, all attempts to attach to a process via ptrace
68817+ will be logged. If the sysctl option is enabled, a sysctl option
68818+ with name "audit_ptrace" is created.
68819+
68820+config GRKERNSEC_AUDIT_CHDIR
68821+ bool "Chdir logging"
68822+ help
68823+ If you say Y here, all chdir() calls will be logged. If the sysctl
68824+ option is enabled, a sysctl option with name "audit_chdir" is created.
68825+
68826+config GRKERNSEC_AUDIT_MOUNT
68827+ bool "(Un)Mount logging"
68828+ help
68829+ If you say Y here, all mounts and unmounts will be logged. If the
68830+ sysctl option is enabled, a sysctl option with name "audit_mount" is
68831+ created.
68832+
68833+config GRKERNSEC_SIGNAL
68834+ bool "Signal logging"
68835+ default y if GRKERNSEC_CONFIG_AUTO
68836+ help
68837+ If you say Y here, certain important signals will be logged, such as
68838+	  SIGSEGV, which will in turn inform you when an error has occurred in
68839+	  a program; in some cases this could indicate an exploit attempt.
68840+ If the sysctl option is enabled, a sysctl option with name
68841+ "signal_logging" is created.
68842+
68843+config GRKERNSEC_FORKFAIL
68844+ bool "Fork failure logging"
68845+ help
68846+ If you say Y here, all failed fork() attempts will be logged.
68847+ This could suggest a fork bomb, or someone attempting to overstep
68848+ their process limit. If the sysctl option is enabled, a sysctl option
68849+ with name "forkfail_logging" is created.
68850+
68851+config GRKERNSEC_TIME
68852+ bool "Time change logging"
68853+ default y if GRKERNSEC_CONFIG_AUTO
68854+ help
68855+ If you say Y here, any changes of the system clock will be logged.
68856+ If the sysctl option is enabled, a sysctl option with name
68857+ "timechange_logging" is created.
68858+
68859+config GRKERNSEC_PROC_IPADDR
68860+ bool "/proc/<pid>/ipaddr support"
68861+ default y if GRKERNSEC_CONFIG_AUTO
68862+ help
68863+ If you say Y here, a new entry will be added to each /proc/<pid>
68864+ directory that contains the IP address of the person using the task.
68865+ The IP is carried across local TCP and AF_UNIX stream sockets.
68866+ This information can be useful for IDS/IPSes to perform remote response
68867+	  to a local attack.  The entry is readable only by the owner of the
68868+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
68869+ the RBAC system), and thus does not create privacy concerns.
68870+
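For illustration, a small reader for the new entry (it assumes the file exposes a single line, presumably a dotted-quad address):

    #include <stdio.h>

    int main(int argc, char **argv)
    {
            char path[64], buf[64];
            FILE *f;

            /* default to our own entry; pass a pid to inspect another task */
            snprintf(path, sizeof(path), "/proc/%s/ipaddr",
                     argc > 1 ? argv[1] : "self");
            f = fopen(path, "r");
            if (!f) {
                    perror(path);
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    fputs(buf, stdout);
            fclose(f);
            return 0;
    }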
68871+config GRKERNSEC_RWXMAP_LOG
68872+	  bool "Denied RWX mmap/mprotect logging"
68873+ default y if GRKERNSEC_CONFIG_AUTO
68874+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
68875+ help
68876+ If you say Y here, calls to mmap() and mprotect() with explicit
68877+ usage of PROT_WRITE and PROT_EXEC together will be logged when
68878+ denied by the PAX_MPROTECT feature. This feature will also
68879+ log other problematic scenarios that can occur when PAX_MPROTECT
68880+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
68881+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
68882+ is created.
68883+
68884+endmenu
68885+
68886+menu "Executable Protections"
68887+depends on GRKERNSEC
68888+
68889+config GRKERNSEC_DMESG
68890+ bool "Dmesg(8) restriction"
68891+ default y if GRKERNSEC_CONFIG_AUTO
68892+ help
68893+ If you say Y here, non-root users will not be able to use dmesg(8)
68894+ to view the contents of the kernel's circular log buffer.
68895+ The kernel's log buffer often contains kernel addresses and other
68896+ identifying information useful to an attacker in fingerprinting a
68897+ system for a targeted exploit.
68898+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
68899+ created.
68900+
68901+config GRKERNSEC_HARDEN_PTRACE
68902+ bool "Deter ptrace-based process snooping"
68903+ default y if GRKERNSEC_CONFIG_AUTO
68904+ help
68905+ If you say Y here, TTY sniffers and other malicious monitoring
68906+ programs implemented through ptrace will be defeated. If you
68907+ have been using the RBAC system, this option has already been
68908+ enabled for several years for all users, with the ability to make
68909+ fine-grained exceptions.
68910+
68911+ This option only affects the ability of non-root users to ptrace
68912+	  processes that are not a descendant of the ptracing process.
68913+ This means that strace ./binary and gdb ./binary will still work,
68914+ but attaching to arbitrary processes will not. If the sysctl
68915+ option is enabled, a sysctl option with name "harden_ptrace" is
68916+ created.
68917+
68918+config GRKERNSEC_PTRACE_READEXEC
68919+ bool "Require read access to ptrace sensitive binaries"
68920+ default y if GRKERNSEC_CONFIG_AUTO
68921+ help
68922+ If you say Y here, unprivileged users will not be able to ptrace unreadable
68923+ binaries. This option is useful in environments that
68924+ remove the read bits (e.g. file mode 4711) from suid binaries to
68925+ prevent infoleaking of their contents. This option adds
68926+	  consistency to the use of that file mode, as the binary's contents
68927+	  could otherwise be dumped by ptracing an unprivileged execution of it.
68928+
68929+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
68930+ is created.
68931+
68932+config GRKERNSEC_SETXID
68933+ bool "Enforce consistent multithreaded privileges"
68934+ default y if GRKERNSEC_CONFIG_AUTO
68935+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
68936+ help
68937+ If you say Y here, a change from a root uid to a non-root uid
68938+ in a multithreaded application will cause the resulting uids,
68939+ gids, supplementary groups, and capabilities in that thread
68940+ to be propagated to the other threads of the process. In most
68941+ cases this is unnecessary, as glibc will emulate this behavior
68942+ on behalf of the application. Other libcs do not act in the
68943+ same way, allowing the other threads of the process to continue
68944+ running with root privileges. If the sysctl option is enabled,
68945+ a sysctl option with name "consistent_setxid" is created.
68946+
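To see why kernel enforcement matters, consider bypassing glibc's emulation with a raw syscall. A Linux-specific sketch (assumes it starts as root, the target uid 1000 is arbitrary, build with -pthread); on an unprotected kernel with a libc lacking the emulation, the worker thread keeps running as root:

    #include <pthread.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static void *worker(void *arg)
    {
            sleep(1);
            /* without glibc's emulation or GRKERNSEC_SETXID, still uid 0 */
            printf("worker uid: %ld\n", syscall(SYS_getuid));
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, worker, NULL);
            /* raw syscall: unlike glibc's wrapper, affects this thread only */
            syscall(SYS_setresuid, 1000, 1000, 1000);
            printf("main uid:   %ld\n", syscall(SYS_getuid));
            pthread_join(t, NULL);
            return 0;
    }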
68947+config GRKERNSEC_HARDEN_IPC
68948+ bool "Disallow access to overly-permissive IPC objects"
68949+ default y if GRKERNSEC_CONFIG_AUTO
68950+ depends on SYSVIPC
68951+ help
68952+ If you say Y here, access to overly-permissive IPC objects (shared
68953+ memory, message queues, and semaphores) will be denied for processes
68954+	  that meet the following criteria beyond normal permission checks:
68955+ 1) If the IPC object is world-accessible and the euid doesn't match
68956+ that of the creator or current uid for the IPC object
68957+ 2) If the IPC object is group-accessible and the egid doesn't
68958+ match that of the creator or current gid for the IPC object
68959+ It's a common error to grant too much permission to these objects,
68960+ with impact ranging from denial of service and information leaking to
68961+ privilege escalation. This feature was developed in response to
68962+ research by Tim Brown:
68963+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
68964+ who found hundreds of such insecure usages. Processes with
68965+ CAP_IPC_OWNER are still permitted to access these IPC objects.
68966+ If the sysctl option is enabled, a sysctl option with name
68967+ "harden_ipc" is created.
68968+
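The two criteria translate roughly to the following predicate (a simplified userspace sketch; the in-kernel hook operates on kernel credential and ipc_perm structures and also honors the CAP_IPC_OWNER exemption noted above):

    #include <sys/types.h>

    /* mode uses IPC permission bits: 0060 = group r/w, 0006 = other r/w */
    static int ipc_denied(unsigned short mode,
                          uid_t cuid, uid_t uid, uid_t euid,
                          gid_t cgid, gid_t gid, gid_t egid)
    {
            /* 1) world-accessible and euid matches neither creator nor owner */
            if ((mode & 0006) && euid != cuid && euid != uid)
                    return 1;
            /* 2) group-accessible and egid matches neither creator nor owner */
            if ((mode & 0060) && egid != cgid && egid != gid)
                    return 1;
            return 0;
    }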
68969+config GRKERNSEC_TPE
68970+ bool "Trusted Path Execution (TPE)"
68971+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
68972+ help
68973+ If you say Y here, you will be able to choose a gid to add to the
68974+ supplementary groups of users you want to mark as "untrusted."
68975+ These users will not be able to execute any files that are not in
68976+ root-owned directories writable only by root. If the sysctl option
68977+ is enabled, a sysctl option with name "tpe" is created.
68978+
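The "root-owned directories writable only by root" test amounts to a simple ownership and mode check. A userspace approximation (illustrative only; the kernel applies the test to the directory containing the file being executed):

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/stat.h>

    static int tpe_trusted_dir(const char *dir)
    {
            struct stat st;

            if (stat(dir, &st) < 0 || !S_ISDIR(st.st_mode))
                    return 0;
            /* owned by root and not group- or world-writable */
            return st.st_uid == 0 && !(st.st_mode & (S_IWGRP | S_IWOTH));
    }

    int main(void)
    {
            printf("/usr/bin trusted: %d\n", tpe_trusted_dir("/usr/bin"));
            printf("/tmp trusted:     %d\n", tpe_trusted_dir("/tmp"));
            return 0;
    }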
68979+config GRKERNSEC_TPE_ALL
68980+ bool "Partially restrict all non-root users"
68981+ depends on GRKERNSEC_TPE
68982+ help
68983+ If you say Y here, all non-root users will be covered under
68984+ a weaker TPE restriction. This is separate from, and in addition to,
68985+ the main TPE options that you have selected elsewhere. Thus, if a
68986+ "trusted" GID is chosen, this restriction applies to even that GID.
68987+ Under this restriction, all non-root users will only be allowed to
68988+ execute files in directories they own that are not group or
68989+ world-writable, or in directories owned by root and writable only by
68990+ root. If the sysctl option is enabled, a sysctl option with name
68991+ "tpe_restrict_all" is created.
68992+
68993+config GRKERNSEC_TPE_INVERT
68994+ bool "Invert GID option"
68995+ depends on GRKERNSEC_TPE
68996+ help
68997+ If you say Y here, the group you specify in the TPE configuration will
68998+ decide what group TPE restrictions will be *disabled* for. This
68999+ option is useful if you want TPE restrictions to be applied to most
69000+ users on the system. If the sysctl option is enabled, a sysctl option
69001+ with name "tpe_invert" is created. Unlike other sysctl options, this
69002+ entry will default to on for backward-compatibility.
69003+
69004+config GRKERNSEC_TPE_GID
69005+ int
69006+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
69007+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
69008+
69009+config GRKERNSEC_TPE_UNTRUSTED_GID
69010+ int "GID for TPE-untrusted users"
69011+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
69012+ default 1005
69013+ help
69014+ Setting this GID determines what group TPE restrictions will be
69015+ *enabled* for. If the sysctl option is enabled, a sysctl option
69016+ with name "tpe_gid" is created.
69017+
69018+config GRKERNSEC_TPE_TRUSTED_GID
69019+ int "GID for TPE-trusted users"
69020+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
69021+ default 1005
69022+ help
69023+ Setting this GID determines what group TPE restrictions will be
69024+ *disabled* for. If the sysctl option is enabled, a sysctl option
69025+ with name "tpe_gid" is created.
69026+
69027+endmenu
69028+menu "Network Protections"
69029+depends on GRKERNSEC
69030+
69031+config GRKERNSEC_BLACKHOLE
69032+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
69033+ default y if GRKERNSEC_CONFIG_AUTO
69034+ depends on NET
69035+ help
69036+ If you say Y here, neither TCP resets nor ICMP
69037+ destination-unreachable packets will be sent in response to packets
69038+ sent to ports for which no associated listening process exists.
69039+ It will also prevent the sending of ICMP protocol unreachable packets
69040+ in response to packets with unknown protocols.
69041+	  This feature supports both IPv4 and IPv6 and exempts the
69042+ loopback interface from blackholing. Enabling this feature
69043+ makes a host more resilient to DoS attacks and reduces network
69044+ visibility against scanners.
69045+
69046+ The blackhole feature as-implemented is equivalent to the FreeBSD
69047+ blackhole feature, as it prevents RST responses to all packets, not
69048+ just SYNs. Under most application behavior this causes no
69049+ problems, but applications (like haproxy) may not close certain
69050+ connections in a way that cleanly terminates them on the remote
69051+ end, leaving the remote host in LAST_ACK state. Because of this
69052+ side-effect and to prevent intentional LAST_ACK DoSes, this
69053+ feature also adds automatic mitigation against such attacks.
69054+ The mitigation drastically reduces the amount of time a socket
69055+ can spend in LAST_ACK state. If you're using haproxy and not
69056+ all servers it connects to have this option enabled, consider
69057+ disabling this feature on the haproxy host.
69058+
69059+ If the sysctl option is enabled, two sysctl options with names
69060+ "ip_blackhole" and "lastack_retries" will be created.
69061+ While "ip_blackhole" takes the standard zero/non-zero on/off
69062+ toggle, "lastack_retries" uses the same kinds of values as
69063+ "tcp_retries1" and "tcp_retries2". The default value of 4
69064+ prevents a socket from lasting more than 45 seconds in LAST_ACK
69065+ state.
69066+
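For intuition on the 45-second figure: assuming the classic 3 s initial TCP retransmission timeout, doubling on each retry (the exact timer base is an assumption and varies with kernel and measured RTT), the 4 permitted retries span 3 + 6 + 12 + 24 = 45 seconds before the socket is torn down.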
69067+config GRKERNSEC_NO_SIMULT_CONNECT
69068+ bool "Disable TCP Simultaneous Connect"
69069+ default y if GRKERNSEC_CONFIG_AUTO
69070+ depends on NET
69071+ help
69072+ If you say Y here, a feature by Willy Tarreau will be enabled that
69073+ removes a weakness in Linux's strict implementation of TCP that
69074+ allows two clients to connect to each other without either entering
69075+ a listening state. The weakness allows an attacker to easily prevent
69076+ a client from connecting to a known server provided the source port
69077+ for the connection is guessed correctly.
69078+
69079+ As the weakness could be used to prevent an antivirus or IPS from
69080+ fetching updates, or prevent an SSL gateway from fetching a CRL,
69081+ it should be eliminated by enabling this option. Though Linux is
69082+ one of few operating systems supporting simultaneous connect, it
69083+ has no legitimate use in practice and is rarely supported by firewalls.
69084+
69085+config GRKERNSEC_SOCKET
69086+ bool "Socket restrictions"
69087+ depends on NET
69088+ help
69089+ If you say Y here, you will be able to choose from several options.
69090+ If you assign a GID on your system and add it to the supplementary
69091+ groups of users you want to restrict socket access to, this patch
69092+	  will enforce up to three restrictions, based on the option(s) you choose.
69093+
69094+config GRKERNSEC_SOCKET_ALL
69095+ bool "Deny any sockets to group"
69096+ depends on GRKERNSEC_SOCKET
69097+ help
69098+	  If you say Y here, you will be able to choose a GID whose users will
69099+ be unable to connect to other hosts from your machine or run server
69100+ applications from your machine. If the sysctl option is enabled, a
69101+ sysctl option with name "socket_all" is created.
69102+
69103+config GRKERNSEC_SOCKET_ALL_GID
69104+ int "GID to deny all sockets for"
69105+ depends on GRKERNSEC_SOCKET_ALL
69106+ default 1004
69107+ help
69108+ Here you can choose the GID to disable socket access for. Remember to
69109+ add the users you want socket access disabled for to the GID
69110+ specified here. If the sysctl option is enabled, a sysctl option
69111+ with name "socket_all_gid" is created.
69112+
69113+config GRKERNSEC_SOCKET_CLIENT
69114+ bool "Deny client sockets to group"
69115+ depends on GRKERNSEC_SOCKET
69116+ help
69117+	  If you say Y here, you will be able to choose a GID whose users will
69118+ be unable to connect to other hosts from your machine, but will be
69119+ able to run servers. If this option is enabled, all users in the group
69120+	  you specify will have to use passive mode when initiating FTP transfers
69121+ from the shell on your machine. If the sysctl option is enabled, a
69122+ sysctl option with name "socket_client" is created.
69123+
69124+config GRKERNSEC_SOCKET_CLIENT_GID
69125+ int "GID to deny client sockets for"
69126+ depends on GRKERNSEC_SOCKET_CLIENT
69127+ default 1003
69128+ help
69129+ Here you can choose the GID to disable client socket access for.
69130+ Remember to add the users you want client socket access disabled for to
69131+ the GID specified here. If the sysctl option is enabled, a sysctl
69132+ option with name "socket_client_gid" is created.
69133+
69134+config GRKERNSEC_SOCKET_SERVER
69135+ bool "Deny server sockets to group"
69136+ depends on GRKERNSEC_SOCKET
69137+ help
69138+	  If you say Y here, you will be able to choose a GID whose users will
69139+ be unable to run server applications from your machine. If the sysctl
69140+ option is enabled, a sysctl option with name "socket_server" is created.
69141+
69142+config GRKERNSEC_SOCKET_SERVER_GID
69143+ int "GID to deny server sockets for"
69144+ depends on GRKERNSEC_SOCKET_SERVER
69145+ default 1002
69146+ help
69147+ Here you can choose the GID to disable server socket access for.
69148+ Remember to add the users you want server socket access disabled for to
69149+ the GID specified here. If the sysctl option is enabled, a sysctl
69150+ option with name "socket_server_gid" is created.
69151+
69152+endmenu
69153+
69154+menu "Physical Protections"
69155+depends on GRKERNSEC
69156+
69157+config GRKERNSEC_DENYUSB
69158+ bool "Deny new USB connections after toggle"
69159+ default y if GRKERNSEC_CONFIG_AUTO
69160+ depends on SYSCTL && USB_SUPPORT
69161+ help
69162+ If you say Y here, a new sysctl option with name "deny_new_usb"
69163+ will be created. Setting its value to 1 will prevent any new
69164+ USB devices from being recognized by the OS. Any attempted USB
69165+ device insertion will be logged. This option is intended to be
69166+ used against custom USB devices designed to exploit vulnerabilities
69167+ in various USB device drivers.
69168+
69169+ For greatest effectiveness, this sysctl should be set after any
69170+ relevant init scripts. This option is safe to enable in distros
69171+ as each user can choose whether or not to toggle the sysctl.
69172+
69173+config GRKERNSEC_DENYUSB_FORCE
69174+ bool "Reject all USB devices not connected at boot"
69175+ select USB
69176+ depends on GRKERNSEC_DENYUSB
69177+ help
69178+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
69179+ that doesn't involve a sysctl entry. This option should only be
69180+ enabled if you're sure you want to deny all new USB connections
69181+ at runtime and don't want to modify init scripts. This should not
69182+ be enabled by distros. It forces the core USB code to be built
69183+ into the kernel image so that all devices connected at boot time
69184+ can be recognized and new USB device connections can be prevented
69185+ prior to init running.
69186+
69187+endmenu
69188+
69189+menu "Sysctl Support"
69190+depends on GRKERNSEC && SYSCTL
69191+
69192+config GRKERNSEC_SYSCTL
69193+ bool "Sysctl support"
69194+ default y if GRKERNSEC_CONFIG_AUTO
69195+ help
69196+ If you say Y here, you will be able to change the options that
69197+ grsecurity runs with at bootup, without having to recompile your
69198+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
69199+ to enable (1) or disable (0) various features. All the sysctl entries
69200+ are mutable until the "grsec_lock" entry is set to a non-zero value.
69201+ All features enabled in the kernel configuration are disabled at boot
69202+ if you do not say Y to the "Turn on features by default" option.
69203+ All options should be set at startup, and the grsec_lock entry should
69204+ be set to a non-zero value after all the options are set.
69205+ *THIS IS EXTREMELY IMPORTANT*
69206+
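Concretely, toggling an entry and then sealing the interface looks like this (the entry name "chroot_deny_shmat" is just one of the options documented above; any file under /proc/sys/kernel/grsecurity works the same way):

    #include <stdio.h>

    static int grsec_set(const char *entry, const char *val)
    {
            char path[128];
            FILE *f;

            snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", entry);
            f = fopen(path, "w");
            if (!f)
                    return -1;
            fputs(val, f);
            return fclose(f);
    }

    int main(void)
    {
            grsec_set("chroot_deny_shmat", "1");
            /* must come last: once grsec_lock is non-zero, no further changes */
            return grsec_set("grsec_lock", "1");
    }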
69207+config GRKERNSEC_SYSCTL_DISTRO
69208+ bool "Extra sysctl support for distro makers (READ HELP)"
69209+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
69210+ help
69211+ If you say Y here, additional sysctl options will be created
69212+ for features that affect processes running as root. Therefore,
69213+ it is critical when using this option that the grsec_lock entry be
69214+	  enabled after boot.  Only distros that ship prebuilt kernel packages
69215+	  with this option enabled and that can ensure grsec_lock is set
69216+	  after boot should use this option.
69217+ *Failure to set grsec_lock after boot makes all grsec features
69218+ this option covers useless*
69219+
69220+ Currently this option creates the following sysctl entries:
69221+ "Disable Privileged I/O": "disable_priv_io"
69222+
69223+config GRKERNSEC_SYSCTL_ON
69224+ bool "Turn on features by default"
69225+ default y if GRKERNSEC_CONFIG_AUTO
69226+ depends on GRKERNSEC_SYSCTL
69227+ help
69228+ If you say Y here, instead of having all features enabled in the
69229+ kernel configuration disabled at boot time, the features will be
69230+ enabled at boot time. It is recommended you say Y here unless
69231+ there is some reason you would want all sysctl-tunable features to
69232+ be disabled by default. As mentioned elsewhere, it is important
69233+ to enable the grsec_lock entry once you have finished modifying
69234+ the sysctl entries.
69235+
69236+endmenu
69237+menu "Logging Options"
69238+depends on GRKERNSEC
69239+
69240+config GRKERNSEC_FLOODTIME
69241+ int "Seconds in between log messages (minimum)"
69242+ default 10
69243+ help
69244+ This option allows you to enforce the number of seconds between
69245+ grsecurity log messages. The default should be suitable for most
69246+	  people; however, if you choose to change it, pick a value small enough
69247+ to allow informative logs to be produced, but large enough to
69248+ prevent flooding.
69249+
69250+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
69251+ any rate limiting on grsecurity log messages.
69252+
69253+config GRKERNSEC_FLOODBURST
69254+ int "Number of messages in a burst (maximum)"
69255+ default 6
69256+ help
69257+ This option allows you to choose the maximum number of messages allowed
69258+ within the flood time interval you chose in a separate option. The
69259+	  default should be suitable for most people; however, if you find that
69260+ many of your logs are being interpreted as flooding, you may want to
69261+ raise this value.
69262+
69263+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
69264+ any rate limiting on grsecurity log messages.
69265+
69266+endmenu
69267diff --git a/grsecurity/Makefile b/grsecurity/Makefile
69268new file mode 100644
69269index 0000000..30ababb
69270--- /dev/null
69271+++ b/grsecurity/Makefile
69272@@ -0,0 +1,54 @@
69273+# grsecurity - access control and security hardening for Linux
69274+# All code in this directory and various hooks located throughout the Linux kernel are
69275+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
69276+# http://www.grsecurity.net spender@grsecurity.net
69277+#
69278+# This program is free software; you can redistribute it and/or
69279+# modify it under the terms of the GNU General Public License version 2
69280+# as published by the Free Software Foundation.
69281+#
69282+# This program is distributed in the hope that it will be useful,
69283+# but WITHOUT ANY WARRANTY; without even the implied warranty of
69284+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
69285+# GNU General Public License for more details.
69286+#
69287+# You should have received a copy of the GNU General Public License
69288+# along with this program; if not, write to the Free Software
69289+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
69290+
69291+KBUILD_CFLAGS += -Werror
69292+
69293+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
69294+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
69295+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
69296+ grsec_usb.o grsec_ipc.o grsec_proc.o
69297+
69298+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
69299+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
69300+ gracl_learn.o grsec_log.o gracl_policy.o
69301+ifdef CONFIG_COMPAT
69302+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
69303+endif
69304+
69305+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
69306+
69307+ifdef CONFIG_NET
69308+obj-y += grsec_sock.o
69309+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
69310+endif
69311+
69312+ifndef CONFIG_GRKERNSEC
69313+obj-y += grsec_disabled.o
69314+endif
69315+
69316+ifdef CONFIG_GRKERNSEC_HIDESYM
69317+extra-y := grsec_hidesym.o
69318+$(obj)/grsec_hidesym.o:
69319+ @-chmod -f 500 /boot
69320+ @-chmod -f 500 /lib/modules
69321+ @-chmod -f 500 /lib64/modules
69322+ @-chmod -f 500 /lib32/modules
69323+ @-chmod -f 700 .
69324+ @-chmod -f 700 $(objtree)
69325+ @echo ' grsec: protected kernel image paths'
69326+endif
69327diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
69328new file mode 100644
69329index 0000000..6c1e154
69330--- /dev/null
69331+++ b/grsecurity/gracl.c
69332@@ -0,0 +1,2749 @@
69333+#include <linux/kernel.h>
69334+#include <linux/module.h>
69335+#include <linux/sched.h>
69336+#include <linux/mm.h>
69337+#include <linux/file.h>
69338+#include <linux/fs.h>
69339+#include <linux/namei.h>
69340+#include <linux/mount.h>
69341+#include <linux/tty.h>
69342+#include <linux/proc_fs.h>
69343+#include <linux/lglock.h>
69344+#include <linux/slab.h>
69345+#include <linux/vmalloc.h>
69346+#include <linux/types.h>
69347+#include <linux/sysctl.h>
69348+#include <linux/netdevice.h>
69349+#include <linux/ptrace.h>
69350+#include <linux/gracl.h>
69351+#include <linux/gralloc.h>
69352+#include <linux/security.h>
69353+#include <linux/grinternal.h>
69354+#include <linux/pid_namespace.h>
69355+#include <linux/stop_machine.h>
69356+#include <linux/fdtable.h>
69357+#include <linux/percpu.h>
69359+#include <linux/hugetlb.h>
69360+#include <linux/posix-timers.h>
69361+#include <linux/prefetch.h>
69362+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69363+#include <linux/magic.h>
69364+#include <linux/pagemap.h>
69365+#include "../fs/btrfs/async-thread.h"
69366+#include "../fs/btrfs/ctree.h"
69367+#include "../fs/btrfs/btrfs_inode.h"
69368+#endif
69369+#include "../fs/mount.h"
69370+
69371+#include <asm/uaccess.h>
69372+#include <asm/errno.h>
69373+#include <asm/mman.h>
69374+
69375+#define FOR_EACH_ROLE_START(role) \
69376+ role = running_polstate.role_list; \
69377+ while (role) {
69378+
69379+#define FOR_EACH_ROLE_END(role) \
69380+ role = role->prev; \
69381+ }
69382+
69383+extern struct path gr_real_root;
69384+
69385+static struct gr_policy_state running_polstate;
69386+struct gr_policy_state *polstate = &running_polstate;
69387+extern struct gr_alloc_state *current_alloc_state;
69388+
69389+extern char *gr_shared_page[4];
69390+DEFINE_RWLOCK(gr_inode_lock);
69391+
69392+static unsigned int gr_status __read_only = GR_STATUS_INIT;
69393+
69394+#ifdef CONFIG_NET
69395+extern struct vfsmount *sock_mnt;
69396+#endif
69397+
69398+extern struct vfsmount *pipe_mnt;
69399+extern struct vfsmount *shm_mnt;
69400+
69401+#ifdef CONFIG_HUGETLBFS
69402+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
69403+#endif
69404+
69405+extern u16 acl_sp_role_value;
69406+extern struct acl_object_label *fakefs_obj_rw;
69407+extern struct acl_object_label *fakefs_obj_rwx;
69408+
69409+int gr_acl_is_enabled(void)
69410+{
69411+ return (gr_status & GR_READY);
69412+}
69413+
69414+void gr_enable_rbac_system(void)
69415+{
69416+ pax_open_kernel();
69417+ gr_status |= GR_READY;
69418+ pax_close_kernel();
69419+}
69420+
69421+int gr_rbac_disable(void *unused)
69422+{
69423+ pax_open_kernel();
69424+ gr_status &= ~GR_READY;
69425+ pax_close_kernel();
69426+
69427+ return 0;
69428+}
69429+
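/*
 * Note: btrfs shares one superblock s_dev across all subvolumes, and inode
 * numbers are only unique within a subvolume, so (s_dev, i_ino) alone is not
 * a unique key there.  The helpers below therefore use the subvolume's
 * anonymous device and btrfs_ino() so RBAC object lookups get a genuinely
 * unique (dev, ino) pair.
 */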
69430+static inline dev_t __get_dev(const struct dentry *dentry)
69431+{
69432+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69433+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69434+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
69435+ else
69436+#endif
69437+ return dentry->d_sb->s_dev;
69438+}
69439+
69440+static inline u64 __get_ino(const struct dentry *dentry)
69441+{
69442+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69443+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69444+ return btrfs_ino(dentry->d_inode);
69445+ else
69446+#endif
69447+ return dentry->d_inode->i_ino;
69448+}
69449+
69450+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
69451+{
69452+ return __get_dev(dentry);
69453+}
69454+
69455+u64 gr_get_ino_from_dentry(struct dentry *dentry)
69456+{
69457+ return __get_ino(dentry);
69458+}
69459+
69460+static char gr_task_roletype_to_char(struct task_struct *task)
69461+{
69462+ switch (task->role->roletype &
69463+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
69464+ GR_ROLE_SPECIAL)) {
69465+ case GR_ROLE_DEFAULT:
69466+ return 'D';
69467+ case GR_ROLE_USER:
69468+ return 'U';
69469+ case GR_ROLE_GROUP:
69470+ return 'G';
69471+ case GR_ROLE_SPECIAL:
69472+ return 'S';
69473+ }
69474+
69475+ return 'X';
69476+}
69477+
69478+char gr_roletype_to_char(void)
69479+{
69480+ return gr_task_roletype_to_char(current);
69481+}
69482+
69483+__inline__ int
69484+gr_acl_tpe_check(void)
69485+{
69486+ if (unlikely(!(gr_status & GR_READY)))
69487+ return 0;
69488+ if (current->role->roletype & GR_ROLE_TPE)
69489+ return 1;
69490+ else
69491+ return 0;
69492+}
69493+
69494+int
69495+gr_handle_rawio(const struct inode *inode)
69496+{
69497+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
69498+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
69499+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
69500+ !capable(CAP_SYS_RAWIO))
69501+ return 1;
69502+#endif
69503+ return 0;
69504+}
69505+
69506+int
69507+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
69508+{
69509+ if (likely(lena != lenb))
69510+ return 0;
69511+
69512+ return !memcmp(a, b, lena);
69513+}
69514+
69515+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
69516+{
69517+ *buflen -= namelen;
69518+ if (*buflen < 0)
69519+ return -ENAMETOOLONG;
69520+ *buffer -= namelen;
69521+ memcpy(*buffer, str, namelen);
69522+ return 0;
69523+}
69524+
69525+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
69526+{
69527+ return prepend(buffer, buflen, name->name, name->len);
69528+}
69529+
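prepend() fills its buffer from the end toward the front, so callers seed the cursor at buf + buflen and the finished string sits at the returned cursor. A minimal userspace trace, assuming prepend() from above is pasted into the same file:

    #include <stdio.h>

    /* paste prepend() from above here */

    int main(void)
    {
            char buf[16];
            char *res = buf + sizeof(buf);
            int len = sizeof(buf);

            prepend(&res, &len, "\0", 1);   /* terminator first */
            prepend(&res, &len, "bin", 3);
            prepend(&res, &len, "/", 1);
            prepend(&res, &len, "usr", 3);
            prepend(&res, &len, "/", 1);
            printf("%s\n", res);            /* prints "/usr/bin" */
            return 0;
    }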
69530+static int prepend_path(const struct path *path, struct path *root,
69531+ char **buffer, int *buflen)
69532+{
69533+ struct dentry *dentry = path->dentry;
69534+ struct vfsmount *vfsmnt = path->mnt;
69535+ struct mount *mnt = real_mount(vfsmnt);
69536+ bool slash = false;
69537+ int error = 0;
69538+
69539+ while (dentry != root->dentry || vfsmnt != root->mnt) {
69540+ struct dentry * parent;
69541+
69542+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
69543+ /* Global root? */
69544+ if (!mnt_has_parent(mnt)) {
69545+ goto out;
69546+ }
69547+ dentry = mnt->mnt_mountpoint;
69548+ mnt = mnt->mnt_parent;
69549+ vfsmnt = &mnt->mnt;
69550+ continue;
69551+ }
69552+ parent = dentry->d_parent;
69553+ prefetch(parent);
69554+ spin_lock(&dentry->d_lock);
69555+ error = prepend_name(buffer, buflen, &dentry->d_name);
69556+ spin_unlock(&dentry->d_lock);
69557+ if (!error)
69558+ error = prepend(buffer, buflen, "/", 1);
69559+ if (error)
69560+ break;
69561+
69562+ slash = true;
69563+ dentry = parent;
69564+ }
69565+
69566+out:
69567+ if (!error && !slash)
69568+ error = prepend(buffer, buflen, "/", 1);
69569+
69570+ return error;
69571+}
69572+
69573+/* this must be called with mount_lock and rename_lock held */
69574+
69575+static char *__our_d_path(const struct path *path, struct path *root,
69576+ char *buf, int buflen)
69577+{
69578+ char *res = buf + buflen;
69579+ int error;
69580+
69581+ prepend(&res, &buflen, "\0", 1);
69582+ error = prepend_path(path, root, &res, &buflen);
69583+ if (error)
69584+ return ERR_PTR(error);
69585+
69586+ return res;
69587+}
69588+
69589+static char *
69590+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
69591+{
69592+ char *retval;
69593+
69594+ retval = __our_d_path(path, root, buf, buflen);
69595+ if (unlikely(IS_ERR(retval)))
69596+ retval = strcpy(buf, "<path too long>");
69597+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
69598+ retval[1] = '\0';
69599+
69600+ return retval;
69601+}
69602+
69603+static char *
69604+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
69605+ char *buf, int buflen)
69606+{
69607+ struct path path;
69608+ char *res;
69609+
69610+ path.dentry = (struct dentry *)dentry;
69611+ path.mnt = (struct vfsmount *)vfsmnt;
69612+
69613+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
69614+ by the RBAC system */
69615+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
69616+
69617+ return res;
69618+}
69619+
69620+static char *
69621+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
69622+ char *buf, int buflen)
69623+{
69624+ char *res;
69625+ struct path path;
69626+ struct path root;
69627+ struct task_struct *reaper = init_pid_ns.child_reaper;
69628+
69629+ path.dentry = (struct dentry *)dentry;
69630+ path.mnt = (struct vfsmount *)vfsmnt;
69631+
69632+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
69633+ get_fs_root(reaper->fs, &root);
69634+
69635+ read_seqlock_excl(&mount_lock);
69636+ write_seqlock(&rename_lock);
69637+ res = gen_full_path(&path, &root, buf, buflen);
69638+ write_sequnlock(&rename_lock);
69639+ read_sequnlock_excl(&mount_lock);
69640+
69641+ path_put(&root);
69642+ return res;
69643+}
69644+
69645+char *
69646+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
69647+{
69648+ char *ret;
69649+ read_seqlock_excl(&mount_lock);
69650+ write_seqlock(&rename_lock);
69651+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
69652+ PAGE_SIZE);
69653+ write_sequnlock(&rename_lock);
69654+ read_sequnlock_excl(&mount_lock);
69655+ return ret;
69656+}
69657+
69658+static char *
69659+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
69660+{
69661+ char *ret;
69662+ char *buf;
69663+ int buflen;
69664+
69665+ read_seqlock_excl(&mount_lock);
69666+ write_seqlock(&rename_lock);
69667+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
69668+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
69669+ buflen = (int)(ret - buf);
69670+ if (buflen >= 5)
69671+ prepend(&ret, &buflen, "/proc", 5);
69672+ else
69673+ ret = strcpy(buf, "<path too long>");
69674+ write_sequnlock(&rename_lock);
69675+ read_sequnlock_excl(&mount_lock);
69676+ return ret;
69677+}
69678+
69679+char *
69680+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
69681+{
69682+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
69683+ PAGE_SIZE);
69684+}
69685+
69686+char *
69687+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
69688+{
69689+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
69690+ PAGE_SIZE);
69691+}
69692+
69693+char *
69694+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
69695+{
69696+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
69697+ PAGE_SIZE);
69698+}
69699+
69700+char *
69701+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
69702+{
69703+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
69704+ PAGE_SIZE);
69705+}
69706+
69707+char *
69708+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
69709+{
69710+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
69711+ PAGE_SIZE);
69712+}
69713+
69714+__inline__ __u32
69715+to_gr_audit(const __u32 reqmode)
69716+{
69717+ /* masks off auditable permission flags, then shifts them to create
69718+ auditing flags, and adds the special case of append auditing if
69719+ we're requesting write */
69720+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
69721+}
69722+
69723+struct acl_role_label *
69724+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
69725+ const gid_t gid)
69726+{
69727+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
69728+ struct acl_role_label *match;
69729+ struct role_allowed_ip *ipp;
69730+ unsigned int x;
69731+ u32 curr_ip = task->signal->saved_ip;
69732+
69733+ match = state->acl_role_set.r_hash[index];
69734+
69735+ while (match) {
69736+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
69737+ for (x = 0; x < match->domain_child_num; x++) {
69738+ if (match->domain_children[x] == uid)
69739+ goto found;
69740+ }
69741+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
69742+ break;
69743+ match = match->next;
69744+ }
69745+found:
69746+ if (match == NULL) {
69747+ try_group:
69748+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
69749+ match = state->acl_role_set.r_hash[index];
69750+
69751+ while (match) {
69752+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
69753+ for (x = 0; x < match->domain_child_num; x++) {
69754+ if (match->domain_children[x] == gid)
69755+ goto found2;
69756+ }
69757+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
69758+ break;
69759+ match = match->next;
69760+ }
69761+found2:
69762+ if (match == NULL)
69763+ match = state->default_role;
69764+ if (match->allowed_ips == NULL)
69765+ return match;
69766+ else {
69767+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
69768+ if (likely
69769+ ((ntohl(curr_ip) & ipp->netmask) ==
69770+ (ntohl(ipp->addr) & ipp->netmask)))
69771+ return match;
69772+ }
69773+ match = state->default_role;
69774+ }
69775+ } else if (match->allowed_ips == NULL) {
69776+ return match;
69777+ } else {
69778+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
69779+ if (likely
69780+ ((ntohl(curr_ip) & ipp->netmask) ==
69781+ (ntohl(ipp->addr) & ipp->netmask)))
69782+ return match;
69783+ }
69784+ goto try_group;
69785+ }
69786+
69787+ return match;
69788+}
69789+
69790+static struct acl_role_label *
69791+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
69792+ const gid_t gid)
69793+{
69794+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
69795+}
69796+
69797+struct acl_subject_label *
69798+lookup_acl_subj_label(const u64 ino, const dev_t dev,
69799+ const struct acl_role_label *role)
69800+{
69801+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
69802+ struct acl_subject_label *match;
69803+
69804+ match = role->subj_hash[index];
69805+
69806+ while (match && (match->inode != ino || match->device != dev ||
69807+ (match->mode & GR_DELETED))) {
69808+ match = match->next;
69809+ }
69810+
69811+ if (match && !(match->mode & GR_DELETED))
69812+ return match;
69813+ else
69814+ return NULL;
69815+}
69816+
69817+struct acl_subject_label *
69818+lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev,
69819+ const struct acl_role_label *role)
69820+{
69821+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
69822+ struct acl_subject_label *match;
69823+
69824+ match = role->subj_hash[index];
69825+
69826+ while (match && (match->inode != ino || match->device != dev ||
69827+ !(match->mode & GR_DELETED))) {
69828+ match = match->next;
69829+ }
69830+
69831+ if (match && (match->mode & GR_DELETED))
69832+ return match;
69833+ else
69834+ return NULL;
69835+}
69836+
69837+static struct acl_object_label *
69838+lookup_acl_obj_label(const u64 ino, const dev_t dev,
69839+ const struct acl_subject_label *subj)
69840+{
69841+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
69842+ struct acl_object_label *match;
69843+
69844+ match = subj->obj_hash[index];
69845+
69846+ while (match && (match->inode != ino || match->device != dev ||
69847+ (match->mode & GR_DELETED))) {
69848+ match = match->next;
69849+ }
69850+
69851+ if (match && !(match->mode & GR_DELETED))
69852+ return match;
69853+ else
69854+ return NULL;
69855+}
69856+
69857+static struct acl_object_label *
69858+lookup_acl_obj_label_create(const u64 ino, const dev_t dev,
69859+ const struct acl_subject_label *subj)
69860+{
69861+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
69862+ struct acl_object_label *match;
69863+
69864+ match = subj->obj_hash[index];
69865+
69866+ while (match && (match->inode != ino || match->device != dev ||
69867+ !(match->mode & GR_DELETED))) {
69868+ match = match->next;
69869+ }
69870+
69871+ if (match && (match->mode & GR_DELETED))
69872+ return match;
69873+
69874+ match = subj->obj_hash[index];
69875+
69876+ while (match && (match->inode != ino || match->device != dev ||
69877+ (match->mode & GR_DELETED))) {
69878+ match = match->next;
69879+ }
69880+
69881+ if (match && !(match->mode & GR_DELETED))
69882+ return match;
69883+ else
69884+ return NULL;
69885+}
69886+
69887+struct name_entry *
69888+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
69889+{
69890+ unsigned int len = strlen(name);
69891+ unsigned int key = full_name_hash(name, len);
69892+ unsigned int index = key % state->name_set.n_size;
69893+ struct name_entry *match;
69894+
69895+ match = state->name_set.n_hash[index];
69896+
69897+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
69898+ match = match->next;
69899+
69900+ return match;
69901+}
69902+
69903+static struct name_entry *
69904+lookup_name_entry(const char *name)
69905+{
69906+ return __lookup_name_entry(&running_polstate, name);
69907+}
69908+
69909+static struct name_entry *
69910+lookup_name_entry_create(const char *name)
69911+{
69912+ unsigned int len = strlen(name);
69913+ unsigned int key = full_name_hash(name, len);
69914+ unsigned int index = key % running_polstate.name_set.n_size;
69915+ struct name_entry *match;
69916+
69917+ match = running_polstate.name_set.n_hash[index];
69918+
69919+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
69920+ !match->deleted))
69921+ match = match->next;
69922+
69923+ if (match && match->deleted)
69924+ return match;
69925+
69926+ match = running_polstate.name_set.n_hash[index];
69927+
69928+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
69929+ match->deleted))
69930+ match = match->next;
69931+
69932+ if (match && !match->deleted)
69933+ return match;
69934+ else
69935+ return NULL;
69936+}
69937+
69938+static struct inodev_entry *
69939+lookup_inodev_entry(const u64 ino, const dev_t dev)
69940+{
69941+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
69942+ struct inodev_entry *match;
69943+
69944+ match = running_polstate.inodev_set.i_hash[index];
69945+
69946+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
69947+ match = match->next;
69948+
69949+ return match;
69950+}
69951+
69952+void
69953+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
69954+{
69955+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
69956+ state->inodev_set.i_size);
69957+ struct inodev_entry **curr;
69958+
69959+ entry->prev = NULL;
69960+
69961+ curr = &state->inodev_set.i_hash[index];
69962+ if (*curr != NULL)
69963+ (*curr)->prev = entry;
69964+
69965+ entry->next = *curr;
69966+ *curr = entry;
69967+
69968+ return;
69969+}
69970+
69971+static void
69972+insert_inodev_entry(struct inodev_entry *entry)
69973+{
69974+ __insert_inodev_entry(&running_polstate, entry);
69975+}
69976+
69977+void
69978+insert_acl_obj_label(struct acl_object_label *obj,
69979+ struct acl_subject_label *subj)
69980+{
69981+ unsigned int index =
69982+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
69983+ struct acl_object_label **curr;
69984+
69985+ obj->prev = NULL;
69986+
69987+ curr = &subj->obj_hash[index];
69988+ if (*curr != NULL)
69989+ (*curr)->prev = obj;
69990+
69991+ obj->next = *curr;
69992+ *curr = obj;
69993+
69994+ return;
69995+}
69996+
69997+void
69998+insert_acl_subj_label(struct acl_subject_label *obj,
69999+ struct acl_role_label *role)
70000+{
70001+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
70002+ struct acl_subject_label **curr;
70003+
70004+ obj->prev = NULL;
70005+
70006+ curr = &role->subj_hash[index];
70007+ if (*curr != NULL)
70008+ (*curr)->prev = obj;
70009+
70010+ obj->next = *curr;
70011+ *curr = obj;
70012+
70013+ return;
70014+}
70015+
70016+/* derived from glibc fnmatch() 0: match, 1: no match*/
70017+
70018+static int
70019+glob_match(const char *p, const char *n)
70020+{
70021+ char c;
70022+
70023+ while ((c = *p++) != '\0') {
70024+ switch (c) {
70025+ case '?':
70026+ if (*n == '\0')
70027+ return 1;
70028+ else if (*n == '/')
70029+ return 1;
70030+ break;
70031+ case '\\':
70032+ if (*n != c)
70033+ return 1;
70034+ break;
70035+ case '*':
70036+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
70037+ if (*n == '/')
70038+ return 1;
70039+ else if (c == '?') {
70040+ if (*n == '\0')
70041+ return 1;
70042+ else
70043+ ++n;
70044+ }
70045+ }
70046+ if (c == '\0') {
70047+ return 0;
70048+ } else {
70049+ const char *endp;
70050+
70051+ if ((endp = strchr(n, '/')) == NULL)
70052+ endp = n + strlen(n);
70053+
70054+ if (c == '[') {
70055+ for (--p; n < endp; ++n)
70056+ if (!glob_match(p, n))
70057+ return 0;
70058+ } else if (c == '/') {
70059+ while (*n != '\0' && *n != '/')
70060+ ++n;
70061+ if (*n == '/' && !glob_match(p, n + 1))
70062+ return 0;
70063+ } else {
70064+ for (--p; n < endp; ++n)
70065+ if (*n == c && !glob_match(p, n))
70066+ return 0;
70067+ }
70068+
70069+ return 1;
70070+ }
70071+ case '[':
70072+ {
70073+ int not;
70074+ char cold;
70075+
70076+ if (*n == '\0' || *n == '/')
70077+ return 1;
70078+
70079+ not = (*p == '!' || *p == '^');
70080+ if (not)
70081+ ++p;
70082+
70083+ c = *p++;
70084+ for (;;) {
70085+ unsigned char fn = (unsigned char)*n;
70086+
70087+ if (c == '\0')
70088+ return 1;
70089+ else {
70090+ if (c == fn)
70091+ goto matched;
70092+ cold = c;
70093+ c = *p++;
70094+
70095+ if (c == '-' && *p != ']') {
70096+ unsigned char cend = *p++;
70097+
70098+ if (cend == '\0')
70099+ return 1;
70100+
70101+ if (cold <= fn && fn <= cend)
70102+ goto matched;
70103+
70104+ c = *p++;
70105+ }
70106+ }
70107+
70108+ if (c == ']')
70109+ break;
70110+ }
70111+ if (!not)
70112+ return 1;
70113+ break;
70114+ matched:
70115+ while (c != ']') {
70116+ if (c == '\0')
70117+ return 1;
70118+
70119+ c = *p++;
70120+ }
70121+ if (not)
70122+ return 1;
70123+ }
70124+ break;
70125+ default:
70126+ if (c != *n)
70127+ return 1;
70128+ }
70129+
70130+ ++n;
70131+ }
70132+
70133+ if (*n == '\0')
70134+ return 0;
70135+
70136+ if (*n == '/')
70137+ return 0;
70138+
70139+ return 1;
70140+}
70141+
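A few concrete cases, following the 0-match/1-no-match convention noted above ('?' matches exactly one character but never '/'; bracket sets behave as in fnmatch). A userspace harness, assuming glob_match() from above is pasted into the same file:

    #include <assert.h>

    /* paste glob_match() from above here */

    int main(void)
    {
            assert(glob_match("/usr/?in", "/usr/bin") == 0);  /* match */
            assert(glob_match("/usr/?in", "/usr//in") == 1);  /* '?' won't cross '/' */
            assert(glob_match("/h[io]me", "/home") == 0);     /* bracket set */
            return 0;
    }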
70142+static struct acl_object_label *
70143+chk_glob_label(struct acl_object_label *globbed,
70144+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
70145+{
70146+ struct acl_object_label *tmp;
70147+
70148+ if (*path == NULL)
70149+ *path = gr_to_filename_nolock(dentry, mnt);
70150+
70151+ tmp = globbed;
70152+
70153+ while (tmp) {
70154+ if (!glob_match(tmp->filename, *path))
70155+ return tmp;
70156+ tmp = tmp->next;
70157+ }
70158+
70159+ return NULL;
70160+}
70161+
70162+static struct acl_object_label *
70163+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
70164+ const u64 curr_ino, const dev_t curr_dev,
70165+ const struct acl_subject_label *subj, char **path, const int checkglob)
70166+{
70167+ struct acl_subject_label *tmpsubj;
70168+ struct acl_object_label *retval;
70169+ struct acl_object_label *retval2;
70170+
70171+ tmpsubj = (struct acl_subject_label *) subj;
70172+ read_lock(&gr_inode_lock);
70173+ do {
70174+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
70175+ if (retval) {
70176+ if (checkglob && retval->globbed) {
70177+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
70178+ if (retval2)
70179+ retval = retval2;
70180+ }
70181+ break;
70182+ }
70183+ } while ((tmpsubj = tmpsubj->parent_subject));
70184+ read_unlock(&gr_inode_lock);
70185+
70186+ return retval;
70187+}
70188+
70189+static __inline__ struct acl_object_label *
70190+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
70191+ struct dentry *curr_dentry,
70192+ const struct acl_subject_label *subj, char **path, const int checkglob)
70193+{
70194+ int newglob = checkglob;
70195+ u64 inode;
70196+ dev_t device;
70197+
70198+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
70199+ as we don't want a / * rule to match instead of the / object
70200+ don't do this for create lookups that call this function though, since they're looking up
70201+ on the parent and thus need globbing checks on all paths
70202+ */
70203+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
70204+ newglob = GR_NO_GLOB;
70205+
70206+ spin_lock(&curr_dentry->d_lock);
70207+ inode = __get_ino(curr_dentry);
70208+ device = __get_dev(curr_dentry);
70209+ spin_unlock(&curr_dentry->d_lock);
70210+
70211+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
70212+}
70213+
70214+#ifdef CONFIG_HUGETLBFS
70215+static inline bool
70216+is_hugetlbfs_mnt(const struct vfsmount *mnt)
70217+{
70218+ int i;
70219+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
70220+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
70221+ return true;
70222+ }
70223+
70224+ return false;
70225+}
70226+#endif
70227+
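/*
 * Object lookup strategy: try the target dentry itself first, then walk
 * toward the RBAC root, hopping across mountpoints to the parent mount, so
 * the most specific policy path wins.  Objects on anonymous filesystems
 * (pipes, sockets, unlinked shmem/hugetlbfs files) have no stable path and
 * short-circuit to the fake rw/rwx objects instead.
 */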
70228+static struct acl_object_label *
70229+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70230+ const struct acl_subject_label *subj, char *path, const int checkglob)
70231+{
70232+ struct dentry *dentry = (struct dentry *) l_dentry;
70233+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70234+ struct mount *real_mnt = real_mount(mnt);
70235+ struct acl_object_label *retval;
70236+ struct dentry *parent;
70237+
70238+ read_seqlock_excl(&mount_lock);
70239+ write_seqlock(&rename_lock);
70240+
70241+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
70242+#ifdef CONFIG_NET
70243+ mnt == sock_mnt ||
70244+#endif
70245+#ifdef CONFIG_HUGETLBFS
70246+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
70247+#endif
70248+ /* ignore Eric Biederman */
70249+ IS_PRIVATE(l_dentry->d_inode))) {
70250+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
70251+ goto out;
70252+ }
70253+
70254+ for (;;) {
70255+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70256+ break;
70257+
70258+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70259+ if (!mnt_has_parent(real_mnt))
70260+ break;
70261+
70262+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70263+ if (retval != NULL)
70264+ goto out;
70265+
70266+ dentry = real_mnt->mnt_mountpoint;
70267+ real_mnt = real_mnt->mnt_parent;
70268+ mnt = &real_mnt->mnt;
70269+ continue;
70270+ }
70271+
70272+ parent = dentry->d_parent;
70273+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70274+ if (retval != NULL)
70275+ goto out;
70276+
70277+ dentry = parent;
70278+ }
70279+
70280+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70281+
70282+ /* gr_real_root is pinned so we don't have to hold a reference */
70283+ if (retval == NULL)
70284+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
70285+out:
70286+ write_sequnlock(&rename_lock);
70287+ read_sequnlock_excl(&mount_lock);
70288+
70289+ BUG_ON(retval == NULL);
70290+
70291+ return retval;
70292+}
70293+
70294+static __inline__ struct acl_object_label *
70295+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70296+ const struct acl_subject_label *subj)
70297+{
70298+ char *path = NULL;
70299+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
70300+}
70301+
70302+static __inline__ struct acl_object_label *
70303+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70304+ const struct acl_subject_label *subj)
70305+{
70306+ char *path = NULL;
70307+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
70308+}
70309+
70310+static __inline__ struct acl_object_label *
70311+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70312+ const struct acl_subject_label *subj, char *path)
70313+{
70314+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
70315+}
70316+
70317+struct acl_subject_label *
70318+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70319+ const struct acl_role_label *role)
70320+{
70321+ struct dentry *dentry = (struct dentry *) l_dentry;
70322+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70323+ struct mount *real_mnt = real_mount(mnt);
70324+ struct acl_subject_label *retval;
70325+ struct dentry *parent;
70326+
70327+ read_seqlock_excl(&mount_lock);
70328+ write_seqlock(&rename_lock);
70329+
70330+ for (;;) {
70331+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70332+ break;
70333+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70334+ if (!mnt_has_parent(real_mnt))
70335+ break;
70336+
70337+ spin_lock(&dentry->d_lock);
70338+ read_lock(&gr_inode_lock);
70339+ retval =
70340+ lookup_acl_subj_label(__get_ino(dentry),
70341+ __get_dev(dentry), role);
70342+ read_unlock(&gr_inode_lock);
70343+ spin_unlock(&dentry->d_lock);
70344+ if (retval != NULL)
70345+ goto out;
70346+
70347+ dentry = real_mnt->mnt_mountpoint;
70348+ real_mnt = real_mnt->mnt_parent;
70349+ mnt = &real_mnt->mnt;
70350+ continue;
70351+ }
70352+
70353+ spin_lock(&dentry->d_lock);
70354+ read_lock(&gr_inode_lock);
70355+ retval = lookup_acl_subj_label(__get_ino(dentry),
70356+ __get_dev(dentry), role);
70357+ read_unlock(&gr_inode_lock);
70358+ parent = dentry->d_parent;
70359+ spin_unlock(&dentry->d_lock);
70360+
70361+ if (retval != NULL)
70362+ goto out;
70363+
70364+ dentry = parent;
70365+ }
70366+
70367+ spin_lock(&dentry->d_lock);
70368+ read_lock(&gr_inode_lock);
70369+ retval = lookup_acl_subj_label(__get_ino(dentry),
70370+ __get_dev(dentry), role);
70371+ read_unlock(&gr_inode_lock);
70372+ spin_unlock(&dentry->d_lock);
70373+
70374+ if (unlikely(retval == NULL)) {
70375+ /* gr_real_root is pinned, we don't need to hold a reference */
70376+ read_lock(&gr_inode_lock);
70377+ retval = lookup_acl_subj_label(__get_ino(gr_real_root.dentry),
70378+ __get_dev(gr_real_root.dentry), role);
70379+ read_unlock(&gr_inode_lock);
70380+ }
70381+out:
70382+ write_sequnlock(&rename_lock);
70383+ read_sequnlock_excl(&mount_lock);
70384+
70385+ BUG_ON(retval == NULL);
70386+
70387+ return retval;
70388+}
70389+
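+/* like the object lookup above, this walks each path component from the
+ dentry up to the namespace root, crossing mountpoints via mnt_mountpoint,
+ looking for a subject label at each step; if nothing matches it falls back
+ to the subject attached to the real root, which every role is expected to
+ provide (hence the BUG_ON above)
+*/
+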
70390+void
70391+assign_special_role(const char *rolename)
70392+{
70393+ struct acl_object_label *obj;
70394+ struct acl_role_label *r;
70395+ struct acl_role_label *assigned = NULL;
70396+ struct task_struct *tsk;
70397+ struct file *filp;
70398+
70399+ FOR_EACH_ROLE_START(r)
70400+ if (!strcmp(rolename, r->rolename) &&
70401+ (r->roletype & GR_ROLE_SPECIAL)) {
70402+ assigned = r;
70403+ break;
70404+ }
70405+ FOR_EACH_ROLE_END(r)
70406+
70407+ if (!assigned)
70408+ return;
70409+
70410+ read_lock(&tasklist_lock);
70411+ read_lock(&grsec_exec_file_lock);
70412+
70413+ tsk = current->real_parent;
70414+ if (tsk == NULL)
70415+ goto out_unlock;
70416+
70417+ filp = tsk->exec_file;
70418+ if (filp == NULL)
70419+ goto out_unlock;
70420+
70421+ tsk->is_writable = 0;
70422+ tsk->inherited = 0;
70423+
70424+ tsk->acl_sp_role = 1;
70425+ tsk->acl_role_id = ++acl_sp_role_value;
70426+ tsk->role = assigned;
70427+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
70428+
70429+ /* ignore additional mmap checks for processes that are writable
70430+ by the default ACL */
70431+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
70432+ if (unlikely(obj->mode & GR_WRITE))
70433+ tsk->is_writable = 1;
70434+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
70435+ if (unlikely(obj->mode & GR_WRITE))
70436+ tsk->is_writable = 1;
70437+
70438+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70439+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
70440+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
70441+#endif
70442+
70443+out_unlock:
70444+ read_unlock(&grsec_exec_file_lock);
70445+ read_unlock(&tasklist_lock);
70446+ return;
70447+}
70448+
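+/* note that the special role is applied to current->real_parent, i.e. the
+ parent of the process performing the role authentication (typically the
+ shell that ran a helper such as gradm), not to that process itself
+*/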
70449+
70450+static void
70451+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
70452+{
70453+ struct task_struct *task = current;
70454+ const struct cred *cred = current_cred();
70455+
70456+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
70457+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70458+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70459+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
70460+
70461+ return;
70462+}
70463+
70464+static void
70465+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
70466+{
70467+ struct task_struct *task = current;
70468+ const struct cred *cred = current_cred();
70469+
70470+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70471+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70472+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70473+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
70474+
70475+ return;
70476+}
70477+
70478+static void
70479+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
70480+{
70481+ struct task_struct *task = current;
70482+ const struct cred *cred = current_cred();
70483+
70484+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70485+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70486+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70487+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
70488+
70489+ return;
70490+}
70491+
70492+static void
70493+gr_set_proc_res(struct task_struct *task)
70494+{
70495+ struct acl_subject_label *proc;
70496+ unsigned short i;
70497+
70498+ proc = task->acl;
70499+
70500+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
70501+ return;
70502+
70503+ for (i = 0; i < RLIM_NLIMITS; i++) {
70504+ unsigned long rlim_cur, rlim_max;
70505+
70506+ if (!(proc->resmask & (1U << i)))
70507+ continue;
70508+
70509+ rlim_cur = proc->res[i].rlim_cur;
70510+ rlim_max = proc->res[i].rlim_max;
70511+
70512+ if (i == RLIMIT_NOFILE) {
70513+ unsigned long saved_sysctl_nr_open = sysctl_nr_open;
70514+ if (rlim_cur > saved_sysctl_nr_open)
70515+ rlim_cur = saved_sysctl_nr_open;
70516+ if (rlim_max > saved_sysctl_nr_open)
70517+ rlim_max = saved_sysctl_nr_open;
70518+ }
70519+
70520+ task->signal->rlim[i].rlim_cur = rlim_cur;
70521+ task->signal->rlim[i].rlim_max = rlim_max;
70522+
70523+ if (i == RLIMIT_CPU)
70524+ update_rlimit_cpu(task, rlim_cur);
70525+ }
70526+
70527+ return;
70528+}
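+
+/* learning subjects are exempted above, presumably so that configured
+ rlimits don't perturb the learning run; RLIMIT_NOFILE is clamped to
+ sysctl_nr_open, matching the limit setrlimit itself enforces
+*/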
70529+
70530+/* both of the below must be called with
70531+ rcu_read_lock();
70532+ read_lock(&tasklist_lock);
70533+ read_lock(&grsec_exec_file_lock);
70534+ except in the case of gr_set_role_label() (for __gr_get_subject_for_task)
70535+*/
70536+
70537+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback)
70538+{
70539+ char *tmpname;
70540+ struct acl_subject_label *tmpsubj;
70541+ struct file *filp;
70542+ struct name_entry *nmatch;
70543+
70544+ filp = task->exec_file;
70545+ if (filp == NULL)
70546+ return NULL;
70547+
70548+ /* the following applies the correct subject to
70549+ binaries already running when the RBAC system
70550+ was enabled, whose on-disk files have been
70551+ replaced or deleted since their execution
70552+ -----
70553+ when the RBAC system starts, the inode/dev
70554+ from exec_file will be one that the RBAC system
70555+ is unaware of.  It only knows the inode/dev
70556+ of the file currently present on disk, or the
70557+ absence of it.
70558+ */
70559+
70560+ if (filename)
70561+ nmatch = __lookup_name_entry(state, filename);
70562+ else {
70563+ preempt_disable();
70564+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
70565+
70566+ nmatch = __lookup_name_entry(state, tmpname);
70567+ preempt_enable();
70568+ }
70569+ tmpsubj = NULL;
70570+ if (nmatch) {
70571+ if (nmatch->deleted)
70572+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
70573+ else
70574+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
70575+ }
70576+ /* this also works for the reload case -- if we don't match a potentially inherited subject
70577+ then we fall back to a normal lookup based on the binary's ino/dev
70578+ */
70579+ if (tmpsubj == NULL && fallback)
70580+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
70581+
70582+ return tmpsubj;
70583+}
70584+
70585+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename, int fallback)
70586+{
70587+ return __gr_get_subject_for_task(&running_polstate, task, filename, fallback);
70588+}
70589+
70590+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
70591+{
70592+ struct acl_object_label *obj;
70593+ struct file *filp;
70594+
70595+ filp = task->exec_file;
70596+
70597+ task->acl = subj;
70598+ task->is_writable = 0;
70599+ /* ignore additional mmap checks for processes that are writable
70600+ by the default ACL */
70601+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
70602+ if (unlikely(obj->mode & GR_WRITE))
70603+ task->is_writable = 1;
70604+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
70605+ if (unlikely(obj->mode & GR_WRITE))
70606+ task->is_writable = 1;
70607+
70608+ gr_set_proc_res(task);
70609+
70610+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70611+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
70612+#endif
70613+}
70614+
70615+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
70616+{
70617+ __gr_apply_subject_to_task(&running_polstate, task, subj);
70618+}
70619+
70620+__u32
70621+gr_search_file(const struct dentry * dentry, const __u32 mode,
70622+ const struct vfsmount * mnt)
70623+{
70624+ __u32 retval = mode;
70625+ struct acl_subject_label *curracl;
70626+ struct acl_object_label *currobj;
70627+
70628+ if (unlikely(!(gr_status & GR_READY)))
70629+ return (mode & ~GR_AUDITS);
70630+
70631+ curracl = current->acl;
70632+
70633+ currobj = chk_obj_label(dentry, mnt, curracl);
70634+ retval = currobj->mode & mode;
70635+
70636+ /* if we're opening a specified transfer file for writing
70637+ (e.g. /dev/initctl), then transfer our role to init
70638+ */
70639+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
70640+ current->role->roletype & GR_ROLE_PERSIST)) {
70641+ struct task_struct *task = init_pid_ns.child_reaper;
70642+
70643+ if (task->role != current->role) {
70644+ struct acl_subject_label *subj;
70645+
70646+ task->acl_sp_role = 0;
70647+ task->acl_role_id = current->acl_role_id;
70648+ task->role = current->role;
70649+ rcu_read_lock();
70650+ read_lock(&grsec_exec_file_lock);
70651+ subj = gr_get_subject_for_task(task, NULL, 1);
70652+ gr_apply_subject_to_task(task, subj);
70653+ read_unlock(&grsec_exec_file_lock);
70654+ rcu_read_unlock();
70655+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
70656+ }
70657+ }
70658+
70659+ if (unlikely
70660+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
70661+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
70662+ __u32 new_mode = mode;
70663+
70664+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
70665+
70666+ retval = new_mode;
70667+
70668+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
70669+ new_mode |= GR_INHERIT;
70670+
70671+ if (!(mode & GR_NOLEARN))
70672+ gr_log_learn(dentry, mnt, new_mode);
70673+ }
70674+
70675+ return retval;
70676+}
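+
+/* when the subject is in learning mode and the access would otherwise be
+ denied, the request is granted (minus the audit/suppress bits) and logged
+ via gr_log_learn so that a permitting policy can later be generated;
+ requests carrying GR_NOLEARN are not logged
+*/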
70677+
70678+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
70679+ const struct dentry *parent,
70680+ const struct vfsmount *mnt)
70681+{
70682+ struct name_entry *match;
70683+ struct acl_object_label *matchpo;
70684+ struct acl_subject_label *curracl;
70685+ char *path;
70686+
70687+ if (unlikely(!(gr_status & GR_READY)))
70688+ return NULL;
70689+
70690+ preempt_disable();
70691+ path = gr_to_filename_rbac(new_dentry, mnt);
70692+ match = lookup_name_entry_create(path);
70693+
70694+ curracl = current->acl;
70695+
70696+ if (match) {
70697+ read_lock(&gr_inode_lock);
70698+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
70699+ read_unlock(&gr_inode_lock);
70700+
70701+ if (matchpo) {
70702+ preempt_enable();
70703+ return matchpo;
70704+ }
70705+ }
70706+
70707+ // lookup parent
70708+
70709+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
70710+
70711+ preempt_enable();
70712+ return matchpo;
70713+}
70714+
70715+__u32
70716+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
70717+ const struct vfsmount * mnt, const __u32 mode)
70718+{
70719+ struct acl_object_label *matchpo;
70720+ __u32 retval;
70721+
70722+ if (unlikely(!(gr_status & GR_READY)))
70723+ return (mode & ~GR_AUDITS);
70724+
70725+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
70726+
70727+ retval = matchpo->mode & mode;
70728+
70729+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
70730+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
70731+ __u32 new_mode = mode;
70732+
70733+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
70734+
70735+ gr_log_learn(new_dentry, mnt, new_mode);
70736+ return new_mode;
70737+ }
70738+
70739+ return retval;
70740+}
70741+
70742+__u32
70743+gr_check_link(const struct dentry * new_dentry,
70744+ const struct dentry * parent_dentry,
70745+ const struct vfsmount * parent_mnt,
70746+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
70747+{
70748+ struct acl_object_label *obj;
70749+ __u32 oldmode, newmode;
70750+ __u32 needmode;
70751+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
70752+ GR_DELETE | GR_INHERIT;
70753+
70754+ if (unlikely(!(gr_status & GR_READY)))
70755+ return (GR_CREATE | GR_LINK);
70756+
70757+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
70758+ oldmode = obj->mode;
70759+
70760+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
70761+ newmode = obj->mode;
70762+
70763+ needmode = newmode & checkmodes;
70764+
70765+ // old name for hardlink must have at least the permissions of the new name
70766+ if ((oldmode & needmode) != needmode)
70767+ goto bad;
70768+
70769+ // if old name had restrictions/auditing, make sure the new name does as well
70770+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
70771+
70772+ // don't allow hardlinking of suid/sgid/fcapped files without permission
70773+ if (is_privileged_binary(old_dentry))
70774+ needmode |= GR_SETID;
70775+
70776+ if ((newmode & needmode) != needmode)
70777+ goto bad;
70778+
70779+ // enforce minimum permissions
70780+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
70781+ return newmode;
70782+bad:
70783+ needmode = oldmode;
70784+ if (is_privileged_binary(old_dentry))
70785+ needmode |= GR_SETID;
70786+
70787+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
70788+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
70789+ return (GR_CREATE | GR_LINK);
70790+ } else if (newmode & GR_SUPPRESS)
70791+ return GR_SUPPRESS;
70792+ else
70793+ return 0;
70794+}
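+
+/* in short: a new hardlink must not grant more access than the subject
+ already had to the old name, must preserve the old name's restriction and
+ audit bits, requires GR_SETID when the target is a privileged
+ (suid/sgid/fcapped) binary, and the new name must permit GR_CREATE and
+ GR_LINK
+*/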
70795+
70796+int
70797+gr_check_hidden_task(const struct task_struct *task)
70798+{
70799+ if (unlikely(!(gr_status & GR_READY)))
70800+ return 0;
70801+
70802+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
70803+ return 1;
70804+
70805+ return 0;
70806+}
70807+
70808+int
70809+gr_check_protected_task(const struct task_struct *task)
70810+{
70811+ if (unlikely(!(gr_status & GR_READY) || !task))
70812+ return 0;
70813+
70814+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
70815+ task->acl != current->acl)
70816+ return 1;
70817+
70818+ return 0;
70819+}
70820+
70821+int
70822+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
70823+{
70824+ struct task_struct *p;
70825+ int ret = 0;
70826+
70827+ if (unlikely(!(gr_status & GR_READY) || !pid))
70828+ return ret;
70829+
70830+ read_lock(&tasklist_lock);
70831+ do_each_pid_task(pid, type, p) {
70832+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
70833+ p->acl != current->acl) {
70834+ ret = 1;
70835+ goto out;
70836+ }
70837+ } while_each_pid_task(pid, type, p);
70838+out:
70839+ read_unlock(&tasklist_lock);
70840+
70841+ return ret;
70842+}
70843+
70844+void
70845+gr_copy_label(struct task_struct *tsk)
70846+{
70847+ struct task_struct *p = current;
70848+
70849+ tsk->inherited = p->inherited;
70850+ tsk->acl_sp_role = 0;
70851+ tsk->acl_role_id = p->acl_role_id;
70852+ tsk->acl = p->acl;
70853+ tsk->role = p->role;
70854+ tsk->signal->used_accept = 0;
70855+ tsk->signal->curr_ip = p->signal->curr_ip;
70856+ tsk->signal->saved_ip = p->signal->saved_ip;
70857+ if (p->exec_file)
70858+ get_file(p->exec_file);
70859+ tsk->exec_file = p->exec_file;
70860+ tsk->is_writable = p->is_writable;
70861+ if (unlikely(p->signal->used_accept)) {
70862+ p->signal->curr_ip = 0;
70863+ p->signal->saved_ip = 0;
70864+ }
70865+
70866+ return;
70867+}
70868+
70869+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
70870+
70871+int
70872+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
70873+{
70874+ unsigned int i;
70875+ __u16 num;
70876+ uid_t *uidlist;
70877+ uid_t curuid;
70878+ int realok = 0;
70879+ int effectiveok = 0;
70880+ int fsok = 0;
70881+ uid_t globalreal, globaleffective, globalfs;
70882+
70883+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
70884+ struct user_struct *user;
70885+
70886+ if (!uid_valid(real))
70887+ goto skipit;
70888+
70889+ /* find user based on global namespace */
70890+
70891+ globalreal = GR_GLOBAL_UID(real);
70892+
70893+ user = find_user(make_kuid(&init_user_ns, globalreal));
70894+ if (user == NULL)
70895+ goto skipit;
70896+
70897+ if (gr_process_kernel_setuid_ban(user)) {
70898+ /* for find_user */
70899+ free_uid(user);
70900+ return 1;
70901+ }
70902+
70903+ /* for find_user */
70904+ free_uid(user);
70905+
70906+skipit:
70907+#endif
70908+
70909+ if (unlikely(!(gr_status & GR_READY)))
70910+ return 0;
70911+
70912+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
70913+ gr_log_learn_uid_change(real, effective, fs);
70914+
70915+ num = current->acl->user_trans_num;
70916+ uidlist = current->acl->user_transitions;
70917+
70918+ if (uidlist == NULL)
70919+ return 0;
70920+
70921+ if (!uid_valid(real)) {
70922+ realok = 1;
70923+ globalreal = (uid_t)-1;
70924+ } else {
70925+ globalreal = GR_GLOBAL_UID(real);
70926+ }
70927+ if (!uid_valid(effective)) {
70928+ effectiveok = 1;
70929+ globaleffective = (uid_t)-1;
70930+ } else {
70931+ globaleffective = GR_GLOBAL_UID(effective);
70932+ }
70933+ if (!uid_valid(fs)) {
70934+ fsok = 1;
70935+ globalfs = (uid_t)-1;
70936+ } else {
70937+ globalfs = GR_GLOBAL_UID(fs);
70938+ }
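+ /* an invalid kuid above means the corresponding id is not being changed
+ by the caller (e.g. a -1 argument to setresuid), so it is treated as
+ allowed */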
70939+
70940+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
70941+ for (i = 0; i < num; i++) {
70942+ curuid = uidlist[i];
70943+ if (globalreal == curuid)
70944+ realok = 1;
70945+ if (globaleffective == curuid)
70946+ effectiveok = 1;
70947+ if (globalfs == curuid)
70948+ fsok = 1;
70949+ }
70950+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
70951+ for (i = 0; i < num; i++) {
70952+ curuid = uidlist[i];
70953+ if (globalreal == curuid)
70954+ break;
70955+ if (globaleffective == curuid)
70956+ break;
70957+ if (globalfs == curuid)
70958+ break;
70959+ }
70960+ /* not in deny list */
70961+ if (i == num) {
70962+ realok = 1;
70963+ effectiveok = 1;
70964+ fsok = 1;
70965+ }
70966+ }
70967+
70968+ if (realok && effectiveok && fsok)
70969+ return 0;
70970+ else {
70971+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
70972+ return 1;
70973+ }
70974+}
70975+
70976+int
70977+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
70978+{
70979+ unsigned int i;
70980+ __u16 num;
70981+ gid_t *gidlist;
70982+ gid_t curgid;
70983+ int realok = 0;
70984+ int effectiveok = 0;
70985+ int fsok = 0;
70986+ gid_t globalreal, globaleffective, globalfs;
70987+
70988+ if (unlikely(!(gr_status & GR_READY)))
70989+ return 0;
70990+
70991+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
70992+ gr_log_learn_gid_change(real, effective, fs);
70993+
70994+ num = current->acl->group_trans_num;
70995+ gidlist = current->acl->group_transitions;
70996+
70997+ if (gidlist == NULL)
70998+ return 0;
70999+
71000+ if (!gid_valid(real)) {
71001+ realok = 1;
71002+ globalreal = (gid_t)-1;
71003+ } else {
71004+ globalreal = GR_GLOBAL_GID(real);
71005+ }
71006+ if (!gid_valid(effective)) {
71007+ effectiveok = 1;
71008+ globaleffective = (gid_t)-1;
71009+ } else {
71010+ globaleffective = GR_GLOBAL_GID(effective);
71011+ }
71012+ if (!gid_valid(fs)) {
71013+ fsok = 1;
71014+ globalfs = (gid_t)-1;
71015+ } else {
71016+ globalfs = GR_GLOBAL_GID(fs);
71017+ }
71018+
71019+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
71020+ for (i = 0; i < num; i++) {
71021+ curgid = gidlist[i];
71022+ if (globalreal == curgid)
71023+ realok = 1;
71024+ if (globaleffective == curgid)
71025+ effectiveok = 1;
71026+ if (globalfs == curgid)
71027+ fsok = 1;
71028+ }
71029+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
71030+ for (i = 0; i < num; i++) {
71031+ curgid = gidlist[i];
71032+ if (globalreal == curgid)
71033+ break;
71034+ if (globaleffective == curgid)
71035+ break;
71036+ if (globalfs == curgid)
71037+ break;
71038+ }
71039+ /* not in deny list */
71040+ if (i == num) {
71041+ realok = 1;
71042+ effectiveok = 1;
71043+ fsok = 1;
71044+ }
71045+ }
71046+
71047+ if (realok && effectiveok && fsok)
71048+ return 0;
71049+ else {
71050+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
71051+ return 1;
71052+ }
71053+}
71054+
71055+extern int gr_acl_is_capable(const int cap);
71056+
71057+void
71058+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
71059+{
71060+ struct acl_role_label *role = task->role;
71061+ struct acl_role_label *origrole = role;
71062+ struct acl_subject_label *subj = NULL;
71063+ struct acl_object_label *obj;
71064+ struct file *filp;
71065+ uid_t uid;
71066+ gid_t gid;
71067+
71068+ if (unlikely(!(gr_status & GR_READY)))
71069+ return;
71070+
71071+ uid = GR_GLOBAL_UID(kuid);
71072+ gid = GR_GLOBAL_GID(kgid);
71073+
71074+ filp = task->exec_file;
71075+
71076+ /* kernel process, we'll give them the kernel role */
71077+ if (unlikely(!filp)) {
71078+ task->role = running_polstate.kernel_role;
71079+ task->acl = running_polstate.kernel_role->root_label;
71080+ return;
71081+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
71082+ /* save the current ip at time of role lookup so that the proper
71083+ IP will be learned for role_allowed_ip */
71084+ task->signal->saved_ip = task->signal->curr_ip;
71085+ role = lookup_acl_role_label(task, uid, gid);
71086+ }
71087+
71088+ /* don't change the role if we're not a privileged process */
71089+ if (role && task->role != role &&
71090+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
71091+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
71092+ return;
71093+
71094+ task->role = role;
71095+
71096+ if (task->inherited) {
71097+ /* if we reached our subject through inheritance, then first see
71098+ if there's a subject of the same name in the new role that has
71099+ an object that would result in the same inherited subject
71100+ */
71101+ subj = gr_get_subject_for_task(task, task->acl->filename, 0);
71102+ if (subj) {
71103+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, subj);
71104+ if (!(obj->mode & GR_INHERIT))
71105+ subj = NULL;
71106+ }
71107+
71108+ }
71109+ if (subj == NULL) {
71110+ /* otherwise:
71111+ perform subject lookup in possibly new role
71112+ we can use this result below in the case where role == task->role
71113+ */
71114+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
71115+ }
71116+
71117+ /* if we changed uid/gid but ended up in the same role
71118+ and are using inheritance, don't lose the inherited subject:
71119+ if the current subject differs from what a normal lookup
71120+ would produce, we arrived at it via inheritance, so keep
71121+ that subject
71122+ */
71123+ if (role != origrole || (!(task->acl->mode & GR_INHERITLEARN) &&
71124+ (subj == task->acl)))
71125+ task->acl = subj;
71126+
71127+ /* leave task->inherited unaffected */
71128+
71129+ task->is_writable = 0;
71130+
71131+ /* ignore additional mmap checks for processes that are writable
71132+ by the default ACL */
71133+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71134+ if (unlikely(obj->mode & GR_WRITE))
71135+ task->is_writable = 1;
71136+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
71137+ if (unlikely(obj->mode & GR_WRITE))
71138+ task->is_writable = 1;
71139+
71140+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71141+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71142+#endif
71143+
71144+ gr_set_proc_res(task);
71145+
71146+ return;
71147+}
71148+
71149+int
71150+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
71151+ const int unsafe_flags)
71152+{
71153+ struct task_struct *task = current;
71154+ struct acl_subject_label *newacl;
71155+ struct acl_object_label *obj;
71156+ __u32 retmode;
71157+
71158+ if (unlikely(!(gr_status & GR_READY)))
71159+ return 0;
71160+
71161+ newacl = chk_subj_label(dentry, mnt, task->role);
71162+
71163+ /* special handling for the case where an strace -f -p <pid> was done
71164+ from an admin role and the pid then did an exec
71165+ */
71166+ rcu_read_lock();
71167+ read_lock(&tasklist_lock);
71168+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
71169+ (task->parent->acl->mode & GR_POVERRIDE))) {
71170+ read_unlock(&tasklist_lock);
71171+ rcu_read_unlock();
71172+ goto skip_check;
71173+ }
71174+ read_unlock(&tasklist_lock);
71175+ rcu_read_unlock();
71176+
71177+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
71178+ !(task->role->roletype & GR_ROLE_GOD) &&
71179+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
71180+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
71181+ if (unsafe_flags & LSM_UNSAFE_SHARE)
71182+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
71183+ else
71184+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
71185+ return -EACCES;
71186+ }
71187+
71188+skip_check:
71189+
71190+ obj = chk_obj_label(dentry, mnt, task->acl);
71191+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
71192+
71193+ if (!(task->acl->mode & GR_INHERITLEARN) &&
71194+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
71195+ if (obj->nested)
71196+ task->acl = obj->nested;
71197+ else
71198+ task->acl = newacl;
71199+ task->inherited = 0;
71200+ } else {
71201+ task->inherited = 1;
71202+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
71203+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
71204+ }
71205+
71206+ task->is_writable = 0;
71207+
71208+ /* ignore additional mmap checks for processes that are writable
71209+ by the default ACL */
71210+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
71211+ if (unlikely(obj->mode & GR_WRITE))
71212+ task->is_writable = 1;
71213+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
71214+ if (unlikely(obj->mode & GR_WRITE))
71215+ task->is_writable = 1;
71216+
71217+ gr_set_proc_res(task);
71218+
71219+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71220+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71221+#endif
71222+ return 0;
71223+}
71224+
71225+/* always called with valid inodev ptr */
71226+static void
71227+do_handle_delete(struct inodev_entry *inodev, const u64 ino, const dev_t dev)
71228+{
71229+ struct acl_object_label *matchpo;
71230+ struct acl_subject_label *matchps;
71231+ struct acl_subject_label *subj;
71232+ struct acl_role_label *role;
71233+ unsigned int x;
71234+
71235+ FOR_EACH_ROLE_START(role)
71236+ FOR_EACH_SUBJECT_START(role, subj, x)
71237+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71238+ matchpo->mode |= GR_DELETED;
71239+ FOR_EACH_SUBJECT_END(subj,x)
71240+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71241+ /* nested subjects aren't in the role's subj_hash table */
71242+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71243+ matchpo->mode |= GR_DELETED;
71244+ FOR_EACH_NESTED_SUBJECT_END(subj)
71245+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
71246+ matchps->mode |= GR_DELETED;
71247+ FOR_EACH_ROLE_END(role)
71248+
71249+ inodev->nentry->deleted = 1;
71250+
71251+ return;
71252+}
71253+
71254+void
71255+gr_handle_delete(const u64 ino, const dev_t dev)
71256+{
71257+ struct inodev_entry *inodev;
71258+
71259+ if (unlikely(!(gr_status & GR_READY)))
71260+ return;
71261+
71262+ write_lock(&gr_inode_lock);
71263+ inodev = lookup_inodev_entry(ino, dev);
71264+ if (inodev != NULL)
71265+ do_handle_delete(inodev, ino, dev);
71266+ write_unlock(&gr_inode_lock);
71267+
71268+ return;
71269+}
71270+
71271+static void
71272+update_acl_obj_label(const u64 oldinode, const dev_t olddevice,
71273+ const u64 newinode, const dev_t newdevice,
71274+ struct acl_subject_label *subj)
71275+{
71276+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
71277+ struct acl_object_label *match;
71278+
71279+ match = subj->obj_hash[index];
71280+
71281+ while (match && (match->inode != oldinode ||
71282+ match->device != olddevice ||
71283+ !(match->mode & GR_DELETED)))
71284+ match = match->next;
71285+
71286+ if (match && (match->inode == oldinode)
71287+ && (match->device == olddevice)
71288+ && (match->mode & GR_DELETED)) {
71289+ if (match->prev == NULL) {
71290+ subj->obj_hash[index] = match->next;
71291+ if (match->next != NULL)
71292+ match->next->prev = NULL;
71293+ } else {
71294+ match->prev->next = match->next;
71295+ if (match->next != NULL)
71296+ match->next->prev = match->prev;
71297+ }
71298+ match->prev = NULL;
71299+ match->next = NULL;
71300+ match->inode = newinode;
71301+ match->device = newdevice;
71302+ match->mode &= ~GR_DELETED;
71303+
71304+ insert_acl_obj_label(match, subj);
71305+ }
71306+
71307+ return;
71308+}
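+
+/* the relink above (mirrored for subjects in update_acl_subj_label below)
+ revives a policy entry when a deleted file's pathname is recreated: the
+ GR_DELETED entry is unlinked from its old hash chain, given the new
+ inode/device, and rehashed
+*/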
71309+
71310+static void
71311+update_acl_subj_label(const u64 oldinode, const dev_t olddevice,
71312+ const u64 newinode, const dev_t newdevice,
71313+ struct acl_role_label *role)
71314+{
71315+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
71316+ struct acl_subject_label *match;
71317+
71318+ match = role->subj_hash[index];
71319+
71320+ while (match && (match->inode != oldinode ||
71321+ match->device != olddevice ||
71322+ !(match->mode & GR_DELETED)))
71323+ match = match->next;
71324+
71325+ if (match && (match->inode == oldinode)
71326+ && (match->device == olddevice)
71327+ && (match->mode & GR_DELETED)) {
71328+ if (match->prev == NULL) {
71329+ role->subj_hash[index] = match->next;
71330+ if (match->next != NULL)
71331+ match->next->prev = NULL;
71332+ } else {
71333+ match->prev->next = match->next;
71334+ if (match->next != NULL)
71335+ match->next->prev = match->prev;
71336+ }
71337+ match->prev = NULL;
71338+ match->next = NULL;
71339+ match->inode = newinode;
71340+ match->device = newdevice;
71341+ match->mode &= ~GR_DELETED;
71342+
71343+ insert_acl_subj_label(match, role);
71344+ }
71345+
71346+ return;
71347+}
71348+
71349+static void
71350+update_inodev_entry(const u64 oldinode, const dev_t olddevice,
71351+ const u64 newinode, const dev_t newdevice)
71352+{
71353+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
71354+ struct inodev_entry *match;
71355+
71356+ match = running_polstate.inodev_set.i_hash[index];
71357+
71358+ while (match && (match->nentry->inode != oldinode ||
71359+ match->nentry->device != olddevice || !match->nentry->deleted))
71360+ match = match->next;
71361+
71362+ if (match && (match->nentry->inode == oldinode)
71363+ && (match->nentry->device == olddevice) &&
71364+ match->nentry->deleted) {
71365+ if (match->prev == NULL) {
71366+ running_polstate.inodev_set.i_hash[index] = match->next;
71367+ if (match->next != NULL)
71368+ match->next->prev = NULL;
71369+ } else {
71370+ match->prev->next = match->next;
71371+ if (match->next != NULL)
71372+ match->next->prev = match->prev;
71373+ }
71374+ match->prev = NULL;
71375+ match->next = NULL;
71376+ match->nentry->inode = newinode;
71377+ match->nentry->device = newdevice;
71378+ match->nentry->deleted = 0;
71379+
71380+ insert_inodev_entry(match);
71381+ }
71382+
71383+ return;
71384+}
71385+
71386+static void
71387+__do_handle_create(const struct name_entry *matchn, u64 ino, dev_t dev)
71388+{
71389+ struct acl_subject_label *subj;
71390+ struct acl_role_label *role;
71391+ unsigned int x;
71392+
71393+ FOR_EACH_ROLE_START(role)
71394+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
71395+
71396+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71397+ if ((subj->inode == ino) && (subj->device == dev)) {
71398+ subj->inode = ino;
71399+ subj->device = dev;
71400+ }
71401+ /* nested subjects aren't in the role's subj_hash table */
71402+ update_acl_obj_label(matchn->inode, matchn->device,
71403+ ino, dev, subj);
71404+ FOR_EACH_NESTED_SUBJECT_END(subj)
71405+ FOR_EACH_SUBJECT_START(role, subj, x)
71406+ update_acl_obj_label(matchn->inode, matchn->device,
71407+ ino, dev, subj);
71408+ FOR_EACH_SUBJECT_END(subj,x)
71409+ FOR_EACH_ROLE_END(role)
71410+
71411+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
71412+
71413+ return;
71414+}
71415+
71416+static void
71417+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
71418+ const struct vfsmount *mnt)
71419+{
71420+ u64 ino = __get_ino(dentry);
71421+ dev_t dev = __get_dev(dentry);
71422+
71423+ __do_handle_create(matchn, ino, dev);
71424+
71425+ return;
71426+}
71427+
71428+void
71429+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
71430+{
71431+ struct name_entry *matchn;
71432+
71433+ if (unlikely(!(gr_status & GR_READY)))
71434+ return;
71435+
71436+ preempt_disable();
71437+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
71438+
71439+ if (unlikely((unsigned long)matchn)) {
71440+ write_lock(&gr_inode_lock);
71441+ do_handle_create(matchn, dentry, mnt);
71442+ write_unlock(&gr_inode_lock);
71443+ }
71444+ preempt_enable();
71445+
71446+ return;
71447+}
71448+
71449+void
71450+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
71451+{
71452+ struct name_entry *matchn;
71453+
71454+ if (unlikely(!(gr_status & GR_READY)))
71455+ return;
71456+
71457+ preempt_disable();
71458+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
71459+
71460+ if (unlikely((unsigned long)matchn)) {
71461+ write_lock(&gr_inode_lock);
71462+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
71463+ write_unlock(&gr_inode_lock);
71464+ }
71465+ preempt_enable();
71466+
71467+ return;
71468+}
71469+
71470+void
71471+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
71472+ struct dentry *old_dentry,
71473+ struct dentry *new_dentry,
71474+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
71475+{
71476+ struct name_entry *matchn;
71477+ struct name_entry *matchn2 = NULL;
71478+ struct inodev_entry *inodev;
71479+ struct inode *inode = new_dentry->d_inode;
71480+ u64 old_ino = __get_ino(old_dentry);
71481+ dev_t old_dev = __get_dev(old_dentry);
71482+ unsigned int exchange = flags & RENAME_EXCHANGE;
71483+
71484+ /* vfs_rename swaps the name and parent link for old_dentry and
71485+ new_dentry
71486+ at this point, old_dentry has the new name, parent link, and inode
71487+ for the renamed file
71488+ if a file is being replaced by a rename, new_dentry has the inode
71489+ and name for the replaced file
71490+ */
71491+
71492+ if (unlikely(!(gr_status & GR_READY)))
71493+ return;
71494+
71495+ preempt_disable();
71496+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
71497+
71498+ /* exchange cases:
71499+ a filename exists for the source, but not dest
71500+ do a recreate on source
71501+ a filename exists for the dest, but not source
71502+ do a recreate on dest
71503+ a filename exists for both source and dest
71504+ delete source and dest, then create source and dest
71505+ a filename exists for neither source nor dest
71506+ no updates needed
71507+
71508+ the name entry lookups get us the old inode/dev associated with
71509+ each name, so do the deletes first (if possible) so that when
71510+ we do the create, we pick up on the right entries
71511+ */
71512+
71513+ if (exchange)
71514+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
71515+
71516+ /* we wouldn't have to check d_inode if it weren't for
71517+ NFS silly-renaming
71518+ */
71519+
71520+ write_lock(&gr_inode_lock);
71521+ if (unlikely((replace || exchange) && inode)) {
71522+ u64 new_ino = __get_ino(new_dentry);
71523+ dev_t new_dev = __get_dev(new_dentry);
71524+
71525+ inodev = lookup_inodev_entry(new_ino, new_dev);
71526+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
71527+ do_handle_delete(inodev, new_ino, new_dev);
71528+ }
71529+
71530+ inodev = lookup_inodev_entry(old_ino, old_dev);
71531+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
71532+ do_handle_delete(inodev, old_ino, old_dev);
71533+
71534+ if (unlikely(matchn != NULL))
71535+ do_handle_create(matchn, old_dentry, mnt);
71536+
71537+ if (unlikely(matchn2 != NULL))
71538+ do_handle_create(matchn2, new_dentry, mnt);
71539+
71540+ write_unlock(&gr_inode_lock);
71541+ preempt_enable();
71542+
71543+ return;
71544+}
71545+
71546+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
71547+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
71548+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
71549+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
71550+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
71551+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
71552+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
71553+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
71554+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
71555+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
71556+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
71557+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
71558+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
71559+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
71560+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
71561+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
71562+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
71563+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
71564+};
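+
+/* each learned limit is bumped by a per-resource constant so that the
+ generated policy leaves headroom above the highest observed use
+*/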
71565+
71566+void
71567+gr_learn_resource(const struct task_struct *task,
71568+ const int res, const unsigned long wanted, const int gt)
71569+{
71570+ struct acl_subject_label *acl;
71571+ const struct cred *cred;
71572+
71573+ if (unlikely((gr_status & GR_READY) &&
71574+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
71575+ goto skip_reslog;
71576+
71577+ gr_log_resource(task, res, wanted, gt);
71578+skip_reslog:
71579+
71580+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
71581+ return;
71582+
71583+ acl = task->acl;
71584+
71585+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
71586+ !(acl->resmask & (1U << (unsigned short) res))))
71587+ return;
71588+
71589+ if (wanted >= acl->res[res].rlim_cur) {
71590+ unsigned long res_add;
71591+
71592+ res_add = wanted + res_learn_bumps[res];
71593+
71594+ acl->res[res].rlim_cur = res_add;
71595+
71596+ if (wanted > acl->res[res].rlim_max)
71597+ acl->res[res].rlim_max = res_add;
71598+
71599+ /* only log the subject filename, since resource logging is supported for
71600+ single-subject learning only */
71601+ rcu_read_lock();
71602+ cred = __task_cred(task);
71603+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
71604+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
71605+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
71606+ "", (unsigned long) res, &task->signal->saved_ip);
71607+ rcu_read_unlock();
71608+ }
71609+
71610+ return;
71611+}
71612+EXPORT_SYMBOL_GPL(gr_learn_resource);
71613+#endif
71614+
71615+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
71616+void
71617+pax_set_initial_flags(struct linux_binprm *bprm)
71618+{
71619+ struct task_struct *task = current;
71620+ struct acl_subject_label *proc;
71621+ unsigned long flags;
71622+
71623+ if (unlikely(!(gr_status & GR_READY)))
71624+ return;
71625+
71626+ flags = pax_get_flags(task);
71627+
71628+ proc = task->acl;
71629+
71630+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
71631+ flags &= ~MF_PAX_PAGEEXEC;
71632+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
71633+ flags &= ~MF_PAX_SEGMEXEC;
71634+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
71635+ flags &= ~MF_PAX_RANDMMAP;
71636+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
71637+ flags &= ~MF_PAX_EMUTRAMP;
71638+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
71639+ flags &= ~MF_PAX_MPROTECT;
71640+
71641+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
71642+ flags |= MF_PAX_PAGEEXEC;
71643+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
71644+ flags |= MF_PAX_SEGMEXEC;
71645+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
71646+ flags |= MF_PAX_RANDMMAP;
71647+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
71648+ flags |= MF_PAX_EMUTRAMP;
71649+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
71650+ flags |= MF_PAX_MPROTECT;
71651+
71652+ pax_set_flags(task, flags);
71653+
71654+ return;
71655+}
71656+#endif
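+
+/* the disable bits above are applied before the enable bits, so a subject
+ that both disables and enables the same PaX flag ends up with it enabled
+*/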
71657+
71658+int
71659+gr_handle_proc_ptrace(struct task_struct *task)
71660+{
71661+ struct file *filp;
71662+ struct task_struct *tmp = task;
71663+ struct task_struct *curtemp = current;
71664+ __u32 retmode;
71665+
71666+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
71667+ if (unlikely(!(gr_status & GR_READY)))
71668+ return 0;
71669+#endif
71670+
71671+ read_lock(&tasklist_lock);
71672+ read_lock(&grsec_exec_file_lock);
71673+ filp = task->exec_file;
71674+
71675+ while (task_pid_nr(tmp) > 0) {
71676+ if (tmp == curtemp)
71677+ break;
71678+ tmp = tmp->real_parent;
71679+ }
71680+
71681+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
71682+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
71683+ read_unlock(&grsec_exec_file_lock);
71684+ read_unlock(&tasklist_lock);
71685+ return 1;
71686+ }
71687+
71688+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71689+ if (!(gr_status & GR_READY)) {
71690+ read_unlock(&grsec_exec_file_lock);
71691+ read_unlock(&tasklist_lock);
71692+ return 0;
71693+ }
71694+#endif
71695+
71696+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
71697+ read_unlock(&grsec_exec_file_lock);
71698+ read_unlock(&tasklist_lock);
71699+
71700+ if (retmode & GR_NOPTRACE)
71701+ return 1;
71702+
71703+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
71704+ && (current->acl != task->acl || (current->acl != current->role->root_label
71705+ && task_pid_nr(current) != task_pid_nr(task))))
71706+ return 1;
71707+
71708+ return 0;
71709+}
71710+
71711+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
71712+{
71713+ if (unlikely(!(gr_status & GR_READY)))
71714+ return;
71715+
71716+ if (!(current->role->roletype & GR_ROLE_GOD))
71717+ return;
71718+
71719+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
71720+ p->role->rolename, gr_task_roletype_to_char(p),
71721+ p->acl->filename);
71722+}
71723+
71724+int
71725+gr_handle_ptrace(struct task_struct *task, const long request)
71726+{
71727+ struct task_struct *tmp = task;
71728+ struct task_struct *curtemp = current;
71729+ __u32 retmode;
71730+
71731+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
71732+ if (unlikely(!(gr_status & GR_READY)))
71733+ return 0;
71734+#endif
71735+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
71736+ read_lock(&tasklist_lock);
71737+ while (task_pid_nr(tmp) > 0) {
71738+ if (tmp == curtemp)
71739+ break;
71740+ tmp = tmp->real_parent;
71741+ }
71742+
71743+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
71744+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
71745+ read_unlock(&tasklist_lock);
71746+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71747+ return 1;
71748+ }
71749+ read_unlock(&tasklist_lock);
71750+ }
71751+
71752+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71753+ if (!(gr_status & GR_READY))
71754+ return 0;
71755+#endif
71756+
71757+ read_lock(&grsec_exec_file_lock);
71758+ if (unlikely(!task->exec_file)) {
71759+ read_unlock(&grsec_exec_file_lock);
71760+ return 0;
71761+ }
71762+
71763+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
71764+ read_unlock(&grsec_exec_file_lock);
71765+
71766+ if (retmode & GR_NOPTRACE) {
71767+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71768+ return 1;
71769+ }
71770+
71771+ if (retmode & GR_PTRACERD) {
71772+ switch (request) {
71773+ case PTRACE_SEIZE:
71774+ case PTRACE_POKETEXT:
71775+ case PTRACE_POKEDATA:
71776+ case PTRACE_POKEUSR:
71777+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
71778+ case PTRACE_SETREGS:
71779+ case PTRACE_SETFPREGS:
71780+#endif
71781+#ifdef CONFIG_X86
71782+ case PTRACE_SETFPXREGS:
71783+#endif
71784+#ifdef CONFIG_ALTIVEC
71785+ case PTRACE_SETVRREGS:
71786+#endif
71787+ return 1;
71788+ default:
71789+ return 0;
71790+ }
71791+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
71792+ !(current->role->roletype & GR_ROLE_GOD) &&
71793+ (current->acl != task->acl)) {
71794+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71795+ return 1;
71796+ }
71797+
71798+ return 0;
71799+}
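+
+/* with GR_PTRACERD the target may be inspected but not modified: requests
+ that write registers or memory, and PTRACE_SEIZE, are denied, while all
+ other requests are allowed
+*/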
71800+
71801+static int is_writable_mmap(const struct file *filp)
71802+{
71803+ struct task_struct *task = current;
71804+ struct acl_object_label *obj, *obj2;
71805+
71806+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
71807+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
71808+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71809+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
71810+ task->role->root_label);
71811+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
71812+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
71813+ return 1;
71814+ }
71815+ }
71816+ return 0;
71817+}
71818+
71819+int
71820+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
71821+{
71822+ __u32 mode;
71823+
71824+ if (unlikely(!file || !(prot & PROT_EXEC)))
71825+ return 1;
71826+
71827+ if (is_writable_mmap(file))
71828+ return 0;
71829+
71830+ mode =
71831+ gr_search_file(file->f_path.dentry,
71832+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
71833+ file->f_path.mnt);
71834+
71835+ if (!gr_tpe_allow(file))
71836+ return 0;
71837+
71838+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
71839+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71840+ return 0;
71841+ } else if (unlikely(!(mode & GR_EXEC))) {
71842+ return 0;
71843+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
71844+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71845+ return 1;
71846+ }
71847+
71848+ return 1;
71849+}
71850+
71851+int
71852+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
71853+{
71854+ __u32 mode;
71855+
71856+ if (unlikely(!file || !(prot & PROT_EXEC)))
71857+ return 1;
71858+
71859+ if (is_writable_mmap(file))
71860+ return 0;
71861+
71862+ mode =
71863+ gr_search_file(file->f_path.dentry,
71864+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
71865+ file->f_path.mnt);
71866+
71867+ if (!gr_tpe_allow(file))
71868+ return 0;
71869+
71870+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
71871+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71872+ return 0;
71873+ } else if (unlikely(!(mode & GR_EXEC))) {
71874+ return 0;
71875+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
71876+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71877+ return 1;
71878+ }
71879+
71880+ return 1;
71881+}
71882+
71883+void
71884+gr_acl_handle_psacct(struct task_struct *task, const long code)
71885+{
71886+ unsigned long runtime, cputime;
71887+ cputime_t utime, stime;
71888+ unsigned int wday, cday;
71889+ __u8 whr, chr;
71890+ __u8 wmin, cmin;
71891+ __u8 wsec, csec;
71892+ struct timespec curtime, starttime;
71893+
71894+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
71895+ !(task->acl->mode & GR_PROCACCT)))
71896+ return;
71897+
71898+ curtime = ns_to_timespec(ktime_get_ns());
71899+ starttime = ns_to_timespec(task->start_time);
71900+ runtime = curtime.tv_sec - starttime.tv_sec;
71901+ wday = runtime / (60 * 60 * 24);
71902+ runtime -= wday * (60 * 60 * 24);
71903+ whr = runtime / (60 * 60);
71904+ runtime -= whr * (60 * 60);
71905+ wmin = runtime / 60;
71906+ runtime -= wmin * 60;
71907+ wsec = runtime;
71908+
71909+ task_cputime(task, &utime, &stime);
71910+ cputime = cputime_to_secs(utime + stime);
71911+ cday = cputime / (60 * 60 * 24);
71912+ cputime -= cday * (60 * 60 * 24);
71913+ chr = cputime / (60 * 60);
71914+ cputime -= chr * (60 * 60);
71915+ cmin = cputime / 60;
71916+ cputime -= cmin * 60;
71917+ csec = cputime;
71918+
71919+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
71920+
71921+ return;
71922+}
71923+
71924+#ifdef CONFIG_TASKSTATS
71925+int gr_is_taskstats_denied(int pid)
71926+{
71927+ struct task_struct *task;
71928+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71929+ const struct cred *cred;
71930+#endif
71931+ int ret = 0;
71932+
71933+ /* restrict taskstats viewing to un-chrooted root users
71934+ who have the 'view' subject flag if the RBAC system is enabled
71935+ */
71936+
71937+ rcu_read_lock();
71938+ read_lock(&tasklist_lock);
71939+ task = find_task_by_vpid(pid);
71940+ if (task) {
71941+#ifdef CONFIG_GRKERNSEC_CHROOT
71942+ if (proc_is_chrooted(task))
71943+ ret = -EACCES;
71944+#endif
71945+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71946+ cred = __task_cred(task);
71947+#ifdef CONFIG_GRKERNSEC_PROC_USER
71948+ if (gr_is_global_nonroot(cred->uid))
71949+ ret = -EACCES;
71950+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71951+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
71952+ ret = -EACCES;
71953+#endif
71954+#endif
71955+ if (gr_status & GR_READY) {
71956+ if (!(task->acl->mode & GR_VIEW))
71957+ ret = -EACCES;
71958+ }
71959+ } else
71960+ ret = -ENOENT;
71961+
71962+ read_unlock(&tasklist_lock);
71963+ rcu_read_unlock();
71964+
71965+ return ret;
71966+}
71967+#endif
71968+
71969+/* AUXV entries are filled via a descendant of search_binary_handler
71970+ after we've already applied the subject for the target
71971+*/
71972+int gr_acl_enable_at_secure(void)
71973+{
71974+ if (unlikely(!(gr_status & GR_READY)))
71975+ return 0;
71976+
71977+ if (current->acl->mode & GR_ATSECURE)
71978+ return 1;
71979+
71980+ return 0;
71981+}
71982+
71983+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const u64 ino)
71984+{
71985+ struct task_struct *task = current;
71986+ struct dentry *dentry = file->f_path.dentry;
71987+ struct vfsmount *mnt = file->f_path.mnt;
71988+ struct acl_object_label *obj, *tmp;
71989+ struct acl_subject_label *subj;
71990+ unsigned int bufsize;
71991+ int is_not_root;
71992+ char *path;
71993+ dev_t dev = __get_dev(dentry);
71994+
71995+ if (unlikely(!(gr_status & GR_READY)))
71996+ return 1;
71997+
71998+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71999+ return 1;
72000+
72001+ /* ignore Eric Biederman */
72002+ if (IS_PRIVATE(dentry->d_inode))
72003+ return 1;
72004+
72005+ subj = task->acl;
72006+ read_lock(&gr_inode_lock);
72007+ do {
72008+ obj = lookup_acl_obj_label(ino, dev, subj);
72009+ if (obj != NULL) {
72010+ read_unlock(&gr_inode_lock);
72011+ return (obj->mode & GR_FIND) ? 1 : 0;
72012+ }
72013+ } while ((subj = subj->parent_subject));
72014+ read_unlock(&gr_inode_lock);
72015+
72016+ /* this is purely an optimization, since we're looking up an object
72017+ for the directory we're doing a readdir on.  If it's possible
72018+ for any globbed object to match the entry we're filling into
72019+ the directory, then the object we find here will be an anchor
72020+ point with attached globbed objects
72021+ */
72022+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
72023+ if (obj->globbed == NULL)
72024+ return (obj->mode & GR_FIND) ? 1 : 0;
72025+
72026+ is_not_root = ((obj->filename[0] == '/') &&
72027+ (obj->filename[1] == '\0')) ? 0 : 1;
72028+ bufsize = PAGE_SIZE - namelen - is_not_root;
72029+
72030+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
72031+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
72032+ return 1;
72033+
72034+ preempt_disable();
72035+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
72036+ bufsize);
72037+
72038+ bufsize = strlen(path);
72039+
72040+ /* if base is "/", don't append an additional slash */
72041+ if (is_not_root)
72042+ *(path + bufsize) = '/';
72043+ memcpy(path + bufsize + is_not_root, name, namelen);
72044+ *(path + bufsize + namelen + is_not_root) = '\0';
72045+
72046+ tmp = obj->globbed;
72047+ while (tmp) {
72048+ if (!glob_match(tmp->filename, path)) {
72049+ preempt_enable();
72050+ return (tmp->mode & GR_FIND) ? 1 : 0;
72051+ }
72052+ tmp = tmp->next;
72053+ }
72054+ preempt_enable();
72055+ return (obj->mode & GR_FIND) ? 1 : 0;
72056+}
72057+
72058+void gr_put_exec_file(struct task_struct *task)
72059+{
72060+ struct file *filp;
72061+
72062+ write_lock(&grsec_exec_file_lock);
72063+ filp = task->exec_file;
72064+ task->exec_file = NULL;
72065+ write_unlock(&grsec_exec_file_lock);
72066+
72067+ if (filp)
72068+ fput(filp);
72069+
72070+ return;
72071+}
72072+
72073+
72074+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
72075+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
72076+#endif
72077+#ifdef CONFIG_SECURITY
72078+EXPORT_SYMBOL_GPL(gr_check_user_change);
72079+EXPORT_SYMBOL_GPL(gr_check_group_change);
72080+#endif
72081+
72082diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
72083new file mode 100644
72084index 0000000..18ffbbd
72085--- /dev/null
72086+++ b/grsecurity/gracl_alloc.c
72087@@ -0,0 +1,105 @@
72088+#include <linux/kernel.h>
72089+#include <linux/mm.h>
72090+#include <linux/slab.h>
72091+#include <linux/vmalloc.h>
72092+#include <linux/gracl.h>
72093+#include <linux/grsecurity.h>
72094+
72095+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
72096+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
72097+
72098+static __inline__ int
72099+alloc_pop(void)
72100+{
72101+ if (current_alloc_state->alloc_stack_next == 1)
72102+ return 0;
72103+
72104+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
72105+
72106+ current_alloc_state->alloc_stack_next--;
72107+
72108+ return 1;
72109+}
72110+
72111+static __inline__ int
72112+alloc_push(void *buf)
72113+{
72114+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
72115+ return 1;
72116+
72117+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
72118+
72119+ current_alloc_state->alloc_stack_next++;
72120+
72121+ return 0;
72122+}
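+
+/* alloc_stack_next is 1-based: live entries occupy slots 0 through
+ alloc_stack_next - 2, so alloc_pop frees the topmost entry and returns 0
+ once the stack is empty, which terminates acl_free_all's loop
+*/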
72123+
72124+void *
72125+acl_alloc(unsigned long len)
72126+{
72127+ void *ret = NULL;
72128+
72129+ if (!len || len > PAGE_SIZE)
72130+ goto out;
72131+
72132+ ret = kmalloc(len, GFP_KERNEL);
72133+
72134+ if (ret) {
72135+ if (alloc_push(ret)) {
72136+ kfree(ret);
72137+ ret = NULL;
72138+ }
72139+ }
72140+
72141+out:
72142+ return ret;
72143+}
72144+
72145+void *
72146+acl_alloc_num(unsigned long num, unsigned long len)
72147+{
72148+ if (!len || (num > (PAGE_SIZE / len)))
72149+ return NULL;
72150+
72151+ return acl_alloc(num * len);
72152+}
72153+
72154+void
72155+acl_free_all(void)
72156+{
72157+ if (!current_alloc_state->alloc_stack)
72158+ return;
72159+
72160+ while (alloc_pop()) ;
72161+
72162+ if (current_alloc_state->alloc_stack) {
72163+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
72164+ kfree(current_alloc_state->alloc_stack);
72165+ else
72166+ vfree(current_alloc_state->alloc_stack);
72167+ }
72168+
72169+ current_alloc_state->alloc_stack = NULL;
72170+ current_alloc_state->alloc_stack_size = 1;
72171+ current_alloc_state->alloc_stack_next = 1;
72172+
72173+ return;
72174+}
72175+
72176+int
72177+acl_alloc_stack_init(unsigned long size)
72178+{
72179+ if ((size * sizeof (void *)) <= PAGE_SIZE)
72180+ current_alloc_state->alloc_stack =
72181+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
72182+ else
72183+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
72184+
72185+ current_alloc_state->alloc_stack_size = size;
72186+ current_alloc_state->alloc_stack_next = 1;
72187+
72188+ if (!current_alloc_state->alloc_stack)
72189+ return 0;
72190+ else
72191+ return 1;
72192+}
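gracl_alloc.c is a one-shot arena: each successful acl_alloc() is pushed onto a pointer stack so that acl_free_all() can tear down an entire parsed policy in a single sweep, and acl_alloc_num() rejects num * len products that could overflow by checking num against PAGE_SIZE / len before multiplying. A minimal user-space model of the same pattern, with malloc()/free() standing in for kmalloc()/kfree() (all names illustrative):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096UL
#define STACK_SLOTS	128

/* Model of the one-shot allocation stack: push on every successful
 * allocation, then pop-and-free everything at teardown. */
static void *stack[STACK_SLOTS];
static unsigned int next = 1;		/* mirrors alloc_stack_next starting at 1 */

static void *acl_alloc(unsigned long len)
{
	void *p;

	if (!len || len > PAGE_SIZE || next > STACK_SLOTS)
		return NULL;
	p = malloc(len);
	if (p) {
		stack[next - 1] = p;	/* alloc_push() */
		next++;
	}
	return p;
}

static void *acl_alloc_num(unsigned long num, unsigned long len)
{
	if (!len || num > PAGE_SIZE / len)	/* overflow guard before num * len */
		return NULL;
	return acl_alloc(num * len);
}

static void acl_free_all(void)
{
	while (next > 1) {			/* alloc_pop() until empty */
		next--;
		free(stack[next - 1]);
	}
}

int main(void)
{
	char *a = acl_alloc(64);
	long *v = acl_alloc_num(8, sizeof(long));

	printf("a=%p v=%p\n", (void *)a, (void *)v);
	acl_free_all();				/* frees both in one sweep */
	return 0;
}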
72193diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
72194new file mode 100644
72195index 0000000..1a94c11
72196--- /dev/null
72197+++ b/grsecurity/gracl_cap.c
72198@@ -0,0 +1,127 @@
72199+#include <linux/kernel.h>
72200+#include <linux/module.h>
72201+#include <linux/sched.h>
72202+#include <linux/gracl.h>
72203+#include <linux/grsecurity.h>
72204+#include <linux/grinternal.h>
72205+
72206+extern const char *captab_log[];
72207+extern int captab_log_entries;
72208+
72209+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
72210+{
72211+ struct acl_subject_label *curracl;
72212+
72213+ if (!gr_acl_is_enabled())
72214+ return 1;
72215+
72216+ curracl = task->acl;
72217+
72218+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
72219+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
72220+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
72221+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
72222+ gr_to_filename(task->exec_file->f_path.dentry,
72223+ task->exec_file->f_path.mnt) : curracl->filename,
72224+ curracl->filename, 0UL,
72225+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
72226+ return 1;
72227+ }
72228+
72229+ return 0;
72230+}
72231+
72232+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
72233+{
72234+ struct acl_subject_label *curracl;
72235+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72236+ kernel_cap_t cap_audit = __cap_empty_set;
72237+
72238+ if (!gr_acl_is_enabled())
72239+ return 1;
72240+
72241+ curracl = task->acl;
72242+
72243+ cap_drop = curracl->cap_lower;
72244+ cap_mask = curracl->cap_mask;
72245+ cap_audit = curracl->cap_invert_audit;
72246+
72247+ while ((curracl = curracl->parent_subject)) {
72248+ /* if the cap isn't specified in the current computed mask but is specified in the
72249+ current level subject, and is lowered in the current level subject, then add
72250+	   it to the set of dropped capabilities;
72251+ otherwise, add the current level subject's mask to the current computed mask
72252+ */
72253+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72254+ cap_raise(cap_mask, cap);
72255+ if (cap_raised(curracl->cap_lower, cap))
72256+ cap_raise(cap_drop, cap);
72257+ if (cap_raised(curracl->cap_invert_audit, cap))
72258+ cap_raise(cap_audit, cap);
72259+ }
72260+ }
72261+
72262+ if (!cap_raised(cap_drop, cap)) {
72263+ if (cap_raised(cap_audit, cap))
72264+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
72265+ return 1;
72266+ }
72267+
72268+ /* only learn the capability use if the process has the capability in the
72269+	   general case; the two uses in sys.c of gr_learn_cap are an exception
72270+ to this rule to ensure any role transition involves what the full-learned
72271+ policy believes in a privileged process
72272+ */
72273+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
72274+ return 1;
72275+
72276+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
72277+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
72278+
72279+ return 0;
72280+}
72281+
72282+int
72283+gr_acl_is_capable(const int cap)
72284+{
72285+ return gr_task_acl_is_capable(current, current_cred(), cap);
72286+}
72287+
72288+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
72289+{
72290+ struct acl_subject_label *curracl;
72291+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72292+
72293+ if (!gr_acl_is_enabled())
72294+ return 1;
72295+
72296+ curracl = task->acl;
72297+
72298+ cap_drop = curracl->cap_lower;
72299+ cap_mask = curracl->cap_mask;
72300+
72301+ while ((curracl = curracl->parent_subject)) {
72302+ /* if the cap isn't specified in the current computed mask but is specified in the
72303+ current level subject, and is lowered in the current level subject, then add
72304+ it to the set of dropped capabilities
72305+ otherwise, add the current level subject's mask to the current computed mask
72306+ */
72307+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72308+ cap_raise(cap_mask, cap);
72309+ if (cap_raised(curracl->cap_lower, cap))
72310+ cap_raise(cap_drop, cap);
72311+ }
72312+ }
72313+
72314+ if (!cap_raised(cap_drop, cap))
72315+ return 1;
72316+
72317+ return 0;
72318+}
72319+
72320+int
72321+gr_acl_is_capable_nolog(const int cap)
72322+{
72323+ return gr_task_acl_is_capable_nolog(current, cap);
72324+}
72325+
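The two capability walks above resolve a capability bottom-up: the leaf subject's cap_mask marks which bits it has an opinion on, and for any bit it leaves unspecified, the nearest ancestor whose cap_mask covers that bit contributes its cap_lower (and audit) setting. A condensed user-space sketch of the inheritance walk using plain 64-bit masks (types and names illustrative; the kernel code uses kernel_cap_t with cap_raised()/cap_raise()):

#include <stdint.h>
#include <stdio.h>

/* Model of the parent-subject walk: the nearest subject whose
 * cap_mask covers a bit decides whether that bit is dropped. */
struct subject {
	uint64_t cap_mask;	/* bits this subject specifies */
	uint64_t cap_lower;	/* bits it drops */
	struct subject *parent;
};

static int cap_allowed(const struct subject *s, int cap)
{
	uint64_t bit = (uint64_t)1 << cap;
	uint64_t mask = s->cap_mask;
	uint64_t drop = s->cap_lower;

	for (s = s->parent; s; s = s->parent) {
		if (!(mask & bit) && (s->cap_mask & bit)) {
			mask |= bit;
			if (s->cap_lower & bit)
				drop |= bit;
		}
	}
	return !(drop & bit);
}

int main(void)
{
	struct subject root = { .cap_mask = 1ULL << 21, .cap_lower = 1ULL << 21 };
	struct subject leaf = { .cap_mask = 0, .cap_lower = 0, .parent = &root };

	printf("cap 21: %d\n", cap_allowed(&leaf, 21));	/* 0: drop inherited from root */
	printf("cap  7: %d\n", cap_allowed(&leaf, 7));	/* 1: no subject drops it */
	return 0;
}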
72326diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
72327new file mode 100644
72328index 0000000..a43dd06
72329--- /dev/null
72330+++ b/grsecurity/gracl_compat.c
72331@@ -0,0 +1,269 @@
72332+#include <linux/kernel.h>
72333+#include <linux/gracl.h>
72334+#include <linux/compat.h>
72335+#include <linux/gracl_compat.h>
72336+
72337+#include <asm/uaccess.h>
72338+
72339+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
72340+{
72341+ struct gr_arg_wrapper_compat uwrapcompat;
72342+
72343+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
72344+ return -EFAULT;
72345+
72346+ if ((uwrapcompat.version != GRSECURITY_VERSION) ||
72347+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
72348+ return -EINVAL;
72349+
72350+ uwrap->arg = compat_ptr(uwrapcompat.arg);
72351+ uwrap->version = uwrapcompat.version;
72352+ uwrap->size = sizeof(struct gr_arg);
72353+
72354+ return 0;
72355+}
72356+
72357+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
72358+{
72359+ struct gr_arg_compat argcompat;
72360+
72361+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
72362+ return -EFAULT;
72363+
72364+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
72365+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
72366+ arg->role_db.num_roles = argcompat.role_db.num_roles;
72367+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
72368+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
72369+ arg->role_db.num_objects = argcompat.role_db.num_objects;
72370+
72371+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
72372+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
72373+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
72374+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
72375+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
72376+ arg->segv_device = argcompat.segv_device;
72377+ arg->segv_inode = argcompat.segv_inode;
72378+ arg->segv_uid = argcompat.segv_uid;
72379+ arg->num_sprole_pws = argcompat.num_sprole_pws;
72380+ arg->mode = argcompat.mode;
72381+
72382+ return 0;
72383+}
72384+
72385+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
72386+{
72387+ struct acl_object_label_compat objcompat;
72388+
72389+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
72390+ return -EFAULT;
72391+
72392+ obj->filename = compat_ptr(objcompat.filename);
72393+ obj->inode = objcompat.inode;
72394+ obj->device = objcompat.device;
72395+ obj->mode = objcompat.mode;
72396+
72397+ obj->nested = compat_ptr(objcompat.nested);
72398+ obj->globbed = compat_ptr(objcompat.globbed);
72399+
72400+ obj->prev = compat_ptr(objcompat.prev);
72401+ obj->next = compat_ptr(objcompat.next);
72402+
72403+ return 0;
72404+}
72405+
72406+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
72407+{
72408+ unsigned int i;
72409+ struct acl_subject_label_compat subjcompat;
72410+
72411+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
72412+ return -EFAULT;
72413+
72414+ subj->filename = compat_ptr(subjcompat.filename);
72415+ subj->inode = subjcompat.inode;
72416+ subj->device = subjcompat.device;
72417+ subj->mode = subjcompat.mode;
72418+ subj->cap_mask = subjcompat.cap_mask;
72419+ subj->cap_lower = subjcompat.cap_lower;
72420+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
72421+
72422+ for (i = 0; i < GR_NLIMITS; i++) {
72423+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
72424+ subj->res[i].rlim_cur = RLIM_INFINITY;
72425+ else
72426+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
72427+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
72428+ subj->res[i].rlim_max = RLIM_INFINITY;
72429+ else
72430+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
72431+ }
72432+ subj->resmask = subjcompat.resmask;
72433+
72434+ subj->user_trans_type = subjcompat.user_trans_type;
72435+ subj->group_trans_type = subjcompat.group_trans_type;
72436+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
72437+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
72438+ subj->user_trans_num = subjcompat.user_trans_num;
72439+ subj->group_trans_num = subjcompat.group_trans_num;
72440+
72441+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
72442+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
72443+ subj->ip_type = subjcompat.ip_type;
72444+ subj->ips = compat_ptr(subjcompat.ips);
72445+ subj->ip_num = subjcompat.ip_num;
72446+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
72447+
72448+ subj->crashes = subjcompat.crashes;
72449+ subj->expires = subjcompat.expires;
72450+
72451+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
72452+ subj->hash = compat_ptr(subjcompat.hash);
72453+ subj->prev = compat_ptr(subjcompat.prev);
72454+ subj->next = compat_ptr(subjcompat.next);
72455+
72456+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
72457+ subj->obj_hash_size = subjcompat.obj_hash_size;
72458+ subj->pax_flags = subjcompat.pax_flags;
72459+
72460+ return 0;
72461+}
72462+
72463+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
72464+{
72465+ struct acl_role_label_compat rolecompat;
72466+
72467+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
72468+ return -EFAULT;
72469+
72470+ role->rolename = compat_ptr(rolecompat.rolename);
72471+ role->uidgid = rolecompat.uidgid;
72472+ role->roletype = rolecompat.roletype;
72473+
72474+ role->auth_attempts = rolecompat.auth_attempts;
72475+ role->expires = rolecompat.expires;
72476+
72477+ role->root_label = compat_ptr(rolecompat.root_label);
72478+ role->hash = compat_ptr(rolecompat.hash);
72479+
72480+ role->prev = compat_ptr(rolecompat.prev);
72481+ role->next = compat_ptr(rolecompat.next);
72482+
72483+ role->transitions = compat_ptr(rolecompat.transitions);
72484+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
72485+ role->domain_children = compat_ptr(rolecompat.domain_children);
72486+ role->domain_child_num = rolecompat.domain_child_num;
72487+
72488+ role->umask = rolecompat.umask;
72489+
72490+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
72491+ role->subj_hash_size = rolecompat.subj_hash_size;
72492+
72493+ return 0;
72494+}
72495+
72496+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
72497+{
72498+ struct role_allowed_ip_compat roleip_compat;
72499+
72500+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
72501+ return -EFAULT;
72502+
72503+ roleip->addr = roleip_compat.addr;
72504+ roleip->netmask = roleip_compat.netmask;
72505+
72506+ roleip->prev = compat_ptr(roleip_compat.prev);
72507+ roleip->next = compat_ptr(roleip_compat.next);
72508+
72509+ return 0;
72510+}
72511+
72512+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
72513+{
72514+ struct role_transition_compat trans_compat;
72515+
72516+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
72517+ return -EFAULT;
72518+
72519+ trans->rolename = compat_ptr(trans_compat.rolename);
72520+
72521+ trans->prev = compat_ptr(trans_compat.prev);
72522+ trans->next = compat_ptr(trans_compat.next);
72523+
72524+ return 0;
72525+
72526+}
72527+
72528+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
72529+{
72530+ struct gr_hash_struct_compat hash_compat;
72531+
72532+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
72533+ return -EFAULT;
72534+
72535+ hash->table = compat_ptr(hash_compat.table);
72536+ hash->nametable = compat_ptr(hash_compat.nametable);
72537+ hash->first = compat_ptr(hash_compat.first);
72538+
72539+ hash->table_size = hash_compat.table_size;
72540+ hash->used_size = hash_compat.used_size;
72541+
72542+ hash->type = hash_compat.type;
72543+
72544+ return 0;
72545+}
72546+
72547+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
72548+{
72549+ compat_uptr_t ptrcompat;
72550+
72551+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
72552+ return -EFAULT;
72553+
72554+ *(void **)ptr = compat_ptr(ptrcompat);
72555+
72556+ return 0;
72557+}
72558+
72559+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
72560+{
72561+ struct acl_ip_label_compat ip_compat;
72562+
72563+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
72564+ return -EFAULT;
72565+
72566+ ip->iface = compat_ptr(ip_compat.iface);
72567+ ip->addr = ip_compat.addr;
72568+ ip->netmask = ip_compat.netmask;
72569+ ip->low = ip_compat.low;
72570+ ip->high = ip_compat.high;
72571+ ip->mode = ip_compat.mode;
72572+ ip->type = ip_compat.type;
72573+
72574+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
72575+
72576+ ip->prev = compat_ptr(ip_compat.prev);
72577+ ip->next = compat_ptr(ip_compat.next);
72578+
72579+ return 0;
72580+}
72581+
72582+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
72583+{
72584+ struct sprole_pw_compat pw_compat;
72585+
72586+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
72587+ return -EFAULT;
72588+
72589+ pw->rolename = compat_ptr(pw_compat.rolename);
72590+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
72591+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
72592+
72593+ return 0;
72594+}
72595+
72596+size_t get_gr_arg_wrapper_size_compat(void)
72597+{
72598+ return sizeof(struct gr_arg_wrapper_compat);
72599+}
72600+
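Every copy_*_compat() helper above follows one recipe: copy_from_user() the packed 32-bit layout into a *_compat struct, then widen it field by field into the native struct, converting each 32-bit pointer slot with compat_ptr(). A small user-space model of the widening step (struct shapes trimmed down; compat_ptr() here is a sketch of the kernel helper, not its real definition):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t compat_uptr_t;

/* Trimmed-down shapes: a packed 32-bit record and its native
 * counterpart.  Only the pointer slot changes representation. */
struct ip_label_compat { compat_uptr_t iface; uint32_t addr; uint16_t low, high; };
struct ip_label        { char *iface;         uint32_t addr; uint16_t low, high; };

static void *compat_ptr(compat_uptr_t u)	/* sketch of the kernel helper */
{
	return (void *)(uintptr_t)u;
}

static void widen(struct ip_label *ip, const struct ip_label_compat *c)
{
	ip->iface = compat_ptr(c->iface);	/* pointer slot: 32-bit -> native */
	ip->addr  = c->addr;			/* scalars copy straight across */
	ip->low   = c->low;
	ip->high  = c->high;
}

int main(void)
{
	struct ip_label_compat c = { 0, 0x7f000001, 1024, 65535 };
	struct ip_label ip;

	widen(&ip, &c);
	printf("%u-%u\n", (unsigned int)ip.low, (unsigned int)ip.high);	/* 1024-65535 */
	return 0;
}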
72601diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
72602new file mode 100644
72603index 0000000..8ee8e4f
72604--- /dev/null
72605+++ b/grsecurity/gracl_fs.c
72606@@ -0,0 +1,447 @@
72607+#include <linux/kernel.h>
72608+#include <linux/sched.h>
72609+#include <linux/types.h>
72610+#include <linux/fs.h>
72611+#include <linux/file.h>
72612+#include <linux/stat.h>
72613+#include <linux/grsecurity.h>
72614+#include <linux/grinternal.h>
72615+#include <linux/gracl.h>
72616+
72617+umode_t
72618+gr_acl_umask(void)
72619+{
72620+ if (unlikely(!gr_acl_is_enabled()))
72621+ return 0;
72622+
72623+ return current->role->umask;
72624+}
72625+
72626+__u32
72627+gr_acl_handle_hidden_file(const struct dentry * dentry,
72628+ const struct vfsmount * mnt)
72629+{
72630+ __u32 mode;
72631+
72632+ if (unlikely(d_is_negative(dentry)))
72633+ return GR_FIND;
72634+
72635+ mode =
72636+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
72637+
72638+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
72639+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
72640+ return mode;
72641+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
72642+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
72643+ return 0;
72644+ } else if (unlikely(!(mode & GR_FIND)))
72645+ return 0;
72646+
72647+ return GR_FIND;
72648+}
72649+
72650+__u32
72651+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
72652+ int acc_mode)
72653+{
72654+ __u32 reqmode = GR_FIND;
72655+ __u32 mode;
72656+
72657+ if (unlikely(d_is_negative(dentry)))
72658+ return reqmode;
72659+
72660+ if (acc_mode & MAY_APPEND)
72661+ reqmode |= GR_APPEND;
72662+ else if (acc_mode & MAY_WRITE)
72663+ reqmode |= GR_WRITE;
72664+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
72665+ reqmode |= GR_READ;
72666+
72667+ mode =
72668+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
72669+ mnt);
72670+
72671+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72672+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
72673+ reqmode & GR_READ ? " reading" : "",
72674+ reqmode & GR_WRITE ? " writing" : reqmode &
72675+ GR_APPEND ? " appending" : "");
72676+ return reqmode;
72677+ } else
72678+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72679+ {
72680+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
72681+ reqmode & GR_READ ? " reading" : "",
72682+ reqmode & GR_WRITE ? " writing" : reqmode &
72683+ GR_APPEND ? " appending" : "");
72684+ return 0;
72685+ } else if (unlikely((mode & reqmode) != reqmode))
72686+ return 0;
72687+
72688+ return reqmode;
72689+}
72690+
72691+__u32
72692+gr_acl_handle_creat(const struct dentry * dentry,
72693+ const struct dentry * p_dentry,
72694+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
72695+ const int imode)
72696+{
72697+ __u32 reqmode = GR_WRITE | GR_CREATE;
72698+ __u32 mode;
72699+
72700+ if (acc_mode & MAY_APPEND)
72701+ reqmode |= GR_APPEND;
72702+ // if a directory was required or the directory already exists, then
72703+ // don't count this open as a read
72704+ if ((acc_mode & MAY_READ) &&
72705+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
72706+ reqmode |= GR_READ;
72707+ if ((open_flags & O_CREAT) &&
72708+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
72709+ reqmode |= GR_SETID;
72710+
72711+ mode =
72712+ gr_check_create(dentry, p_dentry, p_mnt,
72713+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
72714+
72715+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72716+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
72717+ reqmode & GR_READ ? " reading" : "",
72718+ reqmode & GR_WRITE ? " writing" : reqmode &
72719+ GR_APPEND ? " appending" : "");
72720+ return reqmode;
72721+ } else
72722+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72723+ {
72724+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
72725+ reqmode & GR_READ ? " reading" : "",
72726+ reqmode & GR_WRITE ? " writing" : reqmode &
72727+ GR_APPEND ? " appending" : "");
72728+ return 0;
72729+ } else if (unlikely((mode & reqmode) != reqmode))
72730+ return 0;
72731+
72732+ return reqmode;
72733+}
72734+
72735+__u32
72736+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
72737+ const int fmode)
72738+{
72739+ __u32 mode, reqmode = GR_FIND;
72740+
72741+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
72742+ reqmode |= GR_EXEC;
72743+ if (fmode & S_IWOTH)
72744+ reqmode |= GR_WRITE;
72745+ if (fmode & S_IROTH)
72746+ reqmode |= GR_READ;
72747+
72748+ mode =
72749+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
72750+ mnt);
72751+
72752+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72753+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
72754+ reqmode & GR_READ ? " reading" : "",
72755+ reqmode & GR_WRITE ? " writing" : "",
72756+ reqmode & GR_EXEC ? " executing" : "");
72757+ return reqmode;
72758+ } else
72759+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72760+ {
72761+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
72762+ reqmode & GR_READ ? " reading" : "",
72763+ reqmode & GR_WRITE ? " writing" : "",
72764+ reqmode & GR_EXEC ? " executing" : "");
72765+ return 0;
72766+ } else if (unlikely((mode & reqmode) != reqmode))
72767+ return 0;
72768+
72769+ return reqmode;
72770+}
72771+
72772+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
72773+{
72774+ __u32 mode;
72775+
72776+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
72777+
72778+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
72779+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
72780+ return mode;
72781+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
72782+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
72783+ return 0;
72784+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
72785+ return 0;
72786+
72787+ return (reqmode);
72788+}
72789+
72790+__u32
72791+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
72792+{
72793+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
72794+}
72795+
72796+__u32
72797+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
72798+{
72799+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
72800+}
72801+
72802+__u32
72803+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
72804+{
72805+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
72806+}
72807+
72808+__u32
72809+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
72810+{
72811+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
72812+}
72813+
72814+__u32
72815+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
72816+ umode_t *modeptr)
72817+{
72818+ umode_t mode;
72819+
72820+ *modeptr &= ~gr_acl_umask();
72821+ mode = *modeptr;
72822+
72823+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
72824+ return 1;
72825+
72826+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
72827+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
72828+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
72829+ GR_CHMOD_ACL_MSG);
72830+ } else {
72831+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
72832+ }
72833+}
72834+
72835+__u32
72836+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
72837+{
72838+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
72839+}
72840+
72841+__u32
72842+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
72843+{
72844+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
72845+}
72846+
72847+__u32
72848+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
72849+{
72850+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
72851+}
72852+
72853+__u32
72854+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
72855+{
72856+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
72857+}
72858+
72859+__u32
72860+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
72861+{
72862+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
72863+ GR_UNIXCONNECT_ACL_MSG);
72864+}
72865+
72866+/* hardlinks require at minimum create and link permission;
72867+ any additional privilege required is based on the
72868+ privilege of the file being linked to
72869+*/
72870+__u32
72871+gr_acl_handle_link(const struct dentry * new_dentry,
72872+ const struct dentry * parent_dentry,
72873+ const struct vfsmount * parent_mnt,
72874+ const struct dentry * old_dentry,
72875+ const struct vfsmount * old_mnt, const struct filename *to)
72876+{
72877+ __u32 mode;
72878+ __u32 needmode = GR_CREATE | GR_LINK;
72879+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
72880+
72881+ mode =
72882+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
72883+ old_mnt);
72884+
72885+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
72886+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
72887+ return mode;
72888+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
72889+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
72890+ return 0;
72891+ } else if (unlikely((mode & needmode) != needmode))
72892+ return 0;
72893+
72894+ return 1;
72895+}
72896+
72897+__u32
72898+gr_acl_handle_symlink(const struct dentry * new_dentry,
72899+ const struct dentry * parent_dentry,
72900+ const struct vfsmount * parent_mnt, const struct filename *from)
72901+{
72902+ __u32 needmode = GR_WRITE | GR_CREATE;
72903+ __u32 mode;
72904+
72905+ mode =
72906+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
72907+ GR_CREATE | GR_AUDIT_CREATE |
72908+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
72909+
72910+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
72911+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
72912+ return mode;
72913+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
72914+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
72915+ return 0;
72916+ } else if (unlikely((mode & needmode) != needmode))
72917+ return 0;
72918+
72919+ return (GR_WRITE | GR_CREATE);
72920+}
72921+
72922+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
72923+{
72924+ __u32 mode;
72925+
72926+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
72927+
72928+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
72929+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
72930+ return mode;
72931+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
72932+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
72933+ return 0;
72934+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
72935+ return 0;
72936+
72937+ return (reqmode);
72938+}
72939+
72940+__u32
72941+gr_acl_handle_mknod(const struct dentry * new_dentry,
72942+ const struct dentry * parent_dentry,
72943+ const struct vfsmount * parent_mnt,
72944+ const int mode)
72945+{
72946+ __u32 reqmode = GR_WRITE | GR_CREATE;
72947+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
72948+ reqmode |= GR_SETID;
72949+
72950+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
72951+ reqmode, GR_MKNOD_ACL_MSG);
72952+}
72953+
72954+__u32
72955+gr_acl_handle_mkdir(const struct dentry *new_dentry,
72956+ const struct dentry *parent_dentry,
72957+ const struct vfsmount *parent_mnt)
72958+{
72959+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
72960+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
72961+}
72962+
72963+#define RENAME_CHECK_SUCCESS(old, new) \
72964+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
72965+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
72966+
72967+int
72968+gr_acl_handle_rename(struct dentry *new_dentry,
72969+ struct dentry *parent_dentry,
72970+ const struct vfsmount *parent_mnt,
72971+ struct dentry *old_dentry,
72972+ struct inode *old_parent_inode,
72973+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
72974+{
72975+ __u32 comp1, comp2;
72976+ int error = 0;
72977+
72978+ if (unlikely(!gr_acl_is_enabled()))
72979+ return 0;
72980+
72981+ if (flags & RENAME_EXCHANGE) {
72982+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
72983+ GR_AUDIT_READ | GR_AUDIT_WRITE |
72984+ GR_SUPPRESS, parent_mnt);
72985+ comp2 =
72986+ gr_search_file(old_dentry,
72987+ GR_READ | GR_WRITE | GR_AUDIT_READ |
72988+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
72989+ } else if (d_is_negative(new_dentry)) {
72990+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
72991+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
72992+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
72993+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
72994+ GR_DELETE | GR_AUDIT_DELETE |
72995+ GR_AUDIT_READ | GR_AUDIT_WRITE |
72996+ GR_SUPPRESS, old_mnt);
72997+ } else {
72998+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
72999+ GR_CREATE | GR_DELETE |
73000+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
73001+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73002+ GR_SUPPRESS, parent_mnt);
73003+ comp2 =
73004+ gr_search_file(old_dentry,
73005+ GR_READ | GR_WRITE | GR_AUDIT_READ |
73006+ GR_DELETE | GR_AUDIT_DELETE |
73007+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
73008+ }
73009+
73010+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
73011+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
73012+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
73013+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
73014+ && !(comp2 & GR_SUPPRESS)) {
73015+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
73016+ error = -EACCES;
73017+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
73018+ error = -EACCES;
73019+
73020+ return error;
73021+}
73022+
73023+void
73024+gr_acl_handle_exit(void)
73025+{
73026+ u16 id;
73027+ char *rolename;
73028+
73029+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
73030+ !(current->role->roletype & GR_ROLE_PERSIST))) {
73031+ id = current->acl_role_id;
73032+ rolename = current->role->rolename;
73033+ gr_set_acls(1);
73034+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
73035+ }
73036+
73037+ gr_put_exec_file(current);
73038+ return;
73039+}
73040+
73041+int
73042+gr_acl_handle_procpidmem(const struct task_struct *task)
73043+{
73044+ if (unlikely(!gr_acl_is_enabled()))
73045+ return 0;
73046+
73047+ if (task != current && (task->acl->mode & GR_PROTPROCFD) &&
73048+ !(current->acl->mode & GR_POVERRIDE) &&
73049+ !(current->role->roletype & GR_ROLE_GOD))
73050+ return -EACCES;
73051+
73052+ return 0;
73053+}
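Nearly every handler in this file, from the open/creat/access variants down to generic_fs_handler() and generic_fs_create_handler(), reduces to the same three-way ladder over the granted mode: all requested bits present plus an audit bit means log a success audit; a missing bit without GR_SUPPRESS means log a denial; a missing bit with GR_SUPPRESS means deny silently. A compact model of that ladder (bit values illustrative):

#include <stdio.h>

#define GR_SUPPRESS	0x100
#define GR_AUDITS	0x200

/* Model of generic_fs_handler()'s decision ladder: 'mode' is what
 * policy granted (plus audit/suppress bits), 'reqmode' is what the
 * operation needs. */
static unsigned int handle(unsigned int mode, unsigned int reqmode)
{
	if ((mode & reqmode) == reqmode && (mode & GR_AUDITS)) {
		puts("granted (success audited)");
		return reqmode;
	}
	if ((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)) {
		puts("denied (denial logged)");
		return 0;
	}
	if ((mode & reqmode) != reqmode) {
		puts("denied (suppressed)");
		return 0;
	}
	puts("granted (silent)");
	return reqmode;
}

int main(void)
{
	handle(0x3 | GR_AUDITS, 0x3);	/* granted (success audited) */
	handle(0x1, 0x3);		/* denied (denial logged) */
	handle(0x1 | GR_SUPPRESS, 0x3);	/* denied (suppressed) */
	handle(0x3, 0x3);		/* granted (silent) */
	return 0;
}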
73054diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
73055new file mode 100644
73056index 0000000..f056b81
73057--- /dev/null
73058+++ b/grsecurity/gracl_ip.c
73059@@ -0,0 +1,386 @@
73060+#include <linux/kernel.h>
73061+#include <asm/uaccess.h>
73062+#include <asm/errno.h>
73063+#include <net/sock.h>
73064+#include <linux/file.h>
73065+#include <linux/fs.h>
73066+#include <linux/net.h>
73067+#include <linux/in.h>
73068+#include <linux/skbuff.h>
73069+#include <linux/ip.h>
73070+#include <linux/udp.h>
73071+#include <linux/types.h>
73072+#include <linux/sched.h>
73073+#include <linux/netdevice.h>
73074+#include <linux/inetdevice.h>
73075+#include <linux/gracl.h>
73076+#include <linux/grsecurity.h>
73077+#include <linux/grinternal.h>
73078+
73079+#define GR_BIND 0x01
73080+#define GR_CONNECT 0x02
73081+#define GR_INVERT 0x04
73082+#define GR_BINDOVERRIDE 0x08
73083+#define GR_CONNECTOVERRIDE 0x10
73084+#define GR_SOCK_FAMILY 0x20
73085+
73086+static const char * gr_protocols[IPPROTO_MAX] = {
73087+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
73088+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
73089+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
73090+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
73091+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
73092+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
73093+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
73094+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
73095+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
73096+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
73097+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
73098+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
73099+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
73100+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
73101+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
73102+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
73103+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
73104+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
73105+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
73106+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
73107+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
73108+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
73109+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
73110+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
73111+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
73112+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
73113+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
73114+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
73115+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
73116+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
73117+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
73118+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
73119+ };
73120+
73121+static const char * gr_socktypes[SOCK_MAX] = {
73122+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
73123+ "unknown:7", "unknown:8", "unknown:9", "packet"
73124+ };
73125+
73126+static const char * gr_sockfamilies[AF_MAX+1] = {
73127+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
73128+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
73129+	"econet", "atmsvc", "rds", "sna", "irda", "pppox", "wanpipe", "llc", "fam_27", "fam_28",
73130+	"tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "caif"
73131+ };
73132+
73133+const char *
73134+gr_proto_to_name(unsigned char proto)
73135+{
73136+ return gr_protocols[proto];
73137+}
73138+
73139+const char *
73140+gr_socktype_to_name(unsigned char type)
73141+{
73142+ return gr_socktypes[type];
73143+}
73144+
73145+const char *
73146+gr_sockfamily_to_name(unsigned char family)
73147+{
73148+ return gr_sockfamilies[family];
73149+}
73150+
73151+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
73152+
73153+int
73154+gr_search_socket(const int domain, const int type, const int protocol)
73155+{
73156+ struct acl_subject_label *curr;
73157+ const struct cred *cred = current_cred();
73158+
73159+ if (unlikely(!gr_acl_is_enabled()))
73160+ goto exit;
73161+
73162+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
73163+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
73164+ goto exit; // let the kernel handle it
73165+
73166+ curr = current->acl;
73167+
73168+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
73169+		/* the family is allowed; if this is PF_INET, allow it only if
73170+ the extra sock type/protocol checks pass */
73171+ if (domain == PF_INET)
73172+ goto inet_check;
73173+ goto exit;
73174+ } else {
73175+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73176+ __u32 fakeip = 0;
73177+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73178+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73179+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73180+ gr_to_filename(current->exec_file->f_path.dentry,
73181+ current->exec_file->f_path.mnt) :
73182+ curr->filename, curr->filename,
73183+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
73184+ &current->signal->saved_ip);
73185+ goto exit;
73186+ }
73187+ goto exit_fail;
73188+ }
73189+
73190+inet_check:
73191+ /* the rest of this checking is for IPv4 only */
73192+ if (!curr->ips)
73193+ goto exit;
73194+
73195+ if ((curr->ip_type & (1U << type)) &&
73196+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
73197+ goto exit;
73198+
73199+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73200+		/* we don't place ACLs on raw sockets, and sometimes
73201+ dgram/ip sockets are opened for ioctl and not
73202+ bind/connect, so we'll fake a bind learn log */
73203+ if (type == SOCK_RAW || type == SOCK_PACKET) {
73204+ __u32 fakeip = 0;
73205+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73206+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73207+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73208+ gr_to_filename(current->exec_file->f_path.dentry,
73209+ current->exec_file->f_path.mnt) :
73210+ curr->filename, curr->filename,
73211+ &fakeip, 0, type,
73212+ protocol, GR_CONNECT, &current->signal->saved_ip);
73213+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
73214+ __u32 fakeip = 0;
73215+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73216+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73217+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73218+ gr_to_filename(current->exec_file->f_path.dentry,
73219+ current->exec_file->f_path.mnt) :
73220+ curr->filename, curr->filename,
73221+ &fakeip, 0, type,
73222+ protocol, GR_BIND, &current->signal->saved_ip);
73223+ }
73224+ /* we'll log when they use connect or bind */
73225+ goto exit;
73226+ }
73227+
73228+exit_fail:
73229+ if (domain == PF_INET)
73230+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
73231+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
73232+ else if (rcu_access_pointer(net_families[domain]) != NULL)
73233+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
73234+ gr_socktype_to_name(type), protocol);
73235+
73236+ return 0;
73237+exit:
73238+ return 1;
73239+}
73240+
73241+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
73242+{
73243+ if ((ip->mode & mode) &&
73244+ (ip_port >= ip->low) &&
73245+ (ip_port <= ip->high) &&
73246+ ((ntohl(ip_addr) & our_netmask) ==
73247+ (ntohl(our_addr) & our_netmask))
73248+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
73249+ && (ip->type & (1U << type))) {
73250+ if (ip->mode & GR_INVERT)
73251+ return 2; // specifically denied
73252+ else
73253+ return 1; // allowed
73254+ }
73255+
73256+ return 0; // not specifically allowed, may continue parsing
73257+}
73258+
73259+static int
73260+gr_search_connectbind(const int full_mode, struct sock *sk,
73261+ struct sockaddr_in *addr, const int type)
73262+{
73263+ char iface[IFNAMSIZ] = {0};
73264+ struct acl_subject_label *curr;
73265+ struct acl_ip_label *ip;
73266+ struct inet_sock *isk;
73267+ struct net_device *dev;
73268+ struct in_device *idev;
73269+ unsigned long i;
73270+ int ret;
73271+ int mode = full_mode & (GR_BIND | GR_CONNECT);
73272+ __u32 ip_addr = 0;
73273+ __u32 our_addr;
73274+ __u32 our_netmask;
73275+ char *p;
73276+ __u16 ip_port = 0;
73277+ const struct cred *cred = current_cred();
73278+
73279+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
73280+ return 0;
73281+
73282+ curr = current->acl;
73283+ isk = inet_sk(sk);
73284+
73285+	/* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
73286+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
73287+ addr->sin_addr.s_addr = curr->inaddr_any_override;
73288+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
73289+ struct sockaddr_in saddr;
73290+ int err;
73291+
73292+ saddr.sin_family = AF_INET;
73293+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
73294+ saddr.sin_port = isk->inet_sport;
73295+
73296+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73297+ if (err)
73298+ return err;
73299+
73300+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73301+ if (err)
73302+ return err;
73303+ }
73304+
73305+ if (!curr->ips)
73306+ return 0;
73307+
73308+ ip_addr = addr->sin_addr.s_addr;
73309+ ip_port = ntohs(addr->sin_port);
73310+
73311+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73312+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73313+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73314+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73315+ gr_to_filename(current->exec_file->f_path.dentry,
73316+ current->exec_file->f_path.mnt) :
73317+ curr->filename, curr->filename,
73318+ &ip_addr, ip_port, type,
73319+ sk->sk_protocol, mode, &current->signal->saved_ip);
73320+ return 0;
73321+ }
73322+
73323+ for (i = 0; i < curr->ip_num; i++) {
73324+ ip = *(curr->ips + i);
73325+ if (ip->iface != NULL) {
73326+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
73327+ p = strchr(iface, ':');
73328+ if (p != NULL)
73329+ *p = '\0';
73330+ dev = dev_get_by_name(sock_net(sk), iface);
73331+ if (dev == NULL)
73332+ continue;
73333+ idev = in_dev_get(dev);
73334+ if (idev == NULL) {
73335+ dev_put(dev);
73336+ continue;
73337+ }
73338+ rcu_read_lock();
73339+ for_ifa(idev) {
73340+ if (!strcmp(ip->iface, ifa->ifa_label)) {
73341+ our_addr = ifa->ifa_address;
73342+ our_netmask = 0xffffffff;
73343+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73344+ if (ret == 1) {
73345+ rcu_read_unlock();
73346+ in_dev_put(idev);
73347+ dev_put(dev);
73348+ return 0;
73349+ } else if (ret == 2) {
73350+ rcu_read_unlock();
73351+ in_dev_put(idev);
73352+ dev_put(dev);
73353+ goto denied;
73354+ }
73355+ }
73356+ } endfor_ifa(idev);
73357+ rcu_read_unlock();
73358+ in_dev_put(idev);
73359+ dev_put(dev);
73360+ } else {
73361+ our_addr = ip->addr;
73362+ our_netmask = ip->netmask;
73363+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73364+ if (ret == 1)
73365+ return 0;
73366+ else if (ret == 2)
73367+ goto denied;
73368+ }
73369+ }
73370+
73371+denied:
73372+ if (mode == GR_BIND)
73373+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73374+ else if (mode == GR_CONNECT)
73375+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73376+
73377+ return -EACCES;
73378+}
73379+
73380+int
73381+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
73382+{
73383+ /* always allow disconnection of dgram sockets with connect */
73384+ if (addr->sin_family == AF_UNSPEC)
73385+ return 0;
73386+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
73387+}
73388+
73389+int
73390+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
73391+{
73392+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
73393+}
73394+
73395+int gr_search_listen(struct socket *sock)
73396+{
73397+ struct sock *sk = sock->sk;
73398+ struct sockaddr_in addr;
73399+
73400+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73401+ addr.sin_port = inet_sk(sk)->inet_sport;
73402+
73403+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73404+}
73405+
73406+int gr_search_accept(struct socket *sock)
73407+{
73408+ struct sock *sk = sock->sk;
73409+ struct sockaddr_in addr;
73410+
73411+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73412+ addr.sin_port = inet_sk(sk)->inet_sport;
73413+
73414+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73415+}
73416+
73417+int
73418+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
73419+{
73420+ if (addr)
73421+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
73422+ else {
73423+ struct sockaddr_in sin;
73424+ const struct inet_sock *inet = inet_sk(sk);
73425+
73426+ sin.sin_addr.s_addr = inet->inet_daddr;
73427+ sin.sin_port = inet->inet_dport;
73428+
73429+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73430+ }
73431+}
73432+
73433+int
73434+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
73435+{
73436+ struct sockaddr_in sin;
73437+
73438+ if (unlikely(skb->len < sizeof (struct udphdr)))
73439+ return 0; // skip this packet
73440+
73441+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
73442+ sin.sin_port = udp_hdr(skb)->source;
73443+
73444+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73445+}
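check_ip_policy() is deliberately tri-state: 1 means a rule matched and allows, 2 means a GR_INVERT rule matched and explicitly denies, and 0 means the rule has no opinion, so the caller keeps scanning the subject's IP ACLs and denies only if the scan ends with no match. A standalone model of that first-match scan (constants and names illustrative):

#include <stdio.h>

#define MODE_CONNECT	0x02
#define MODE_INVERT	0x04

/* Tri-state rule check modelled after check_ip_policy():
 * 1 = allow, 2 = explicit deny (inverted rule), 0 = no match. */
struct rule {
	unsigned int mode;
	unsigned short low, high;
};

static int rule_check(const struct rule *r, unsigned int mode, unsigned short port)
{
	if ((r->mode & mode) && port >= r->low && port <= r->high)
		return (r->mode & MODE_INVERT) ? 2 : 1;
	return 0;
}

static int scan(const struct rule *rules, int n, unsigned int mode, unsigned short port)
{
	int i;

	for (i = 0; i < n; i++) {
		int ret = rule_check(&rules[i], mode, port);

		if (ret == 1)
			return 0;	/* allowed */
		if (ret == 2)
			break;		/* explicitly denied: stop scanning */
	}
	return -13;			/* -EACCES: nothing allowed it */
}

int main(void)
{
	struct rule rules[] = {
		{ MODE_CONNECT | MODE_INVERT, 22, 22 },	/* never connect to port 22 */
		{ MODE_CONNECT, 1, 65535 },		/* otherwise any port */
	};

	printf("port 80: %d\n", scan(rules, 2, MODE_CONNECT, 80));	/* 0 */
	printf("port 22: %d\n", scan(rules, 2, MODE_CONNECT, 22));	/* -13 */
	return 0;
}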
73446diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
73447new file mode 100644
73448index 0000000..25f54ef
73449--- /dev/null
73450+++ b/grsecurity/gracl_learn.c
73451@@ -0,0 +1,207 @@
73452+#include <linux/kernel.h>
73453+#include <linux/mm.h>
73454+#include <linux/sched.h>
73455+#include <linux/poll.h>
73456+#include <linux/string.h>
73457+#include <linux/file.h>
73458+#include <linux/types.h>
73459+#include <linux/vmalloc.h>
73460+#include <linux/grinternal.h>
73461+
73462+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
73463+ size_t count, loff_t *ppos);
73464+extern int gr_acl_is_enabled(void);
73465+
73466+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
73467+static int gr_learn_attached;
73468+
73469+/* use a 512k buffer */
73470+#define LEARN_BUFFER_SIZE (512 * 1024)
73471+
73472+static DEFINE_SPINLOCK(gr_learn_lock);
73473+static DEFINE_MUTEX(gr_learn_user_mutex);
73474+
73475+/* we need to maintain two buffers, so that the kernel context of grlearn
73476+   uses a mutex around the userspace copying, and the other kernel contexts
73477+ use a spinlock when copying into the buffer, since they cannot sleep
73478+*/
73479+static char *learn_buffer;
73480+static char *learn_buffer_user;
73481+static int learn_buffer_len;
73482+static int learn_buffer_user_len;
73483+
73484+static ssize_t
73485+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
73486+{
73487+ DECLARE_WAITQUEUE(wait, current);
73488+ ssize_t retval = 0;
73489+
73490+ add_wait_queue(&learn_wait, &wait);
73491+ set_current_state(TASK_INTERRUPTIBLE);
73492+ do {
73493+ mutex_lock(&gr_learn_user_mutex);
73494+ spin_lock(&gr_learn_lock);
73495+ if (learn_buffer_len)
73496+ break;
73497+ spin_unlock(&gr_learn_lock);
73498+ mutex_unlock(&gr_learn_user_mutex);
73499+ if (file->f_flags & O_NONBLOCK) {
73500+ retval = -EAGAIN;
73501+ goto out;
73502+ }
73503+ if (signal_pending(current)) {
73504+ retval = -ERESTARTSYS;
73505+ goto out;
73506+ }
73507+
73508+ schedule();
73509+ } while (1);
73510+
73511+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
73512+ learn_buffer_user_len = learn_buffer_len;
73513+ retval = learn_buffer_len;
73514+ learn_buffer_len = 0;
73515+
73516+ spin_unlock(&gr_learn_lock);
73517+
73518+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
73519+ retval = -EFAULT;
73520+
73521+ mutex_unlock(&gr_learn_user_mutex);
73522+out:
73523+ set_current_state(TASK_RUNNING);
73524+ remove_wait_queue(&learn_wait, &wait);
73525+ return retval;
73526+}
73527+
73528+static unsigned int
73529+poll_learn(struct file * file, poll_table * wait)
73530+{
73531+ poll_wait(file, &learn_wait, wait);
73532+
73533+ if (learn_buffer_len)
73534+ return (POLLIN | POLLRDNORM);
73535+
73536+ return 0;
73537+}
73538+
73539+void
73540+gr_clear_learn_entries(void)
73541+{
73542+ char *tmp;
73543+
73544+ mutex_lock(&gr_learn_user_mutex);
73545+ spin_lock(&gr_learn_lock);
73546+ tmp = learn_buffer;
73547+ learn_buffer = NULL;
73548+ spin_unlock(&gr_learn_lock);
73549+ if (tmp)
73550+ vfree(tmp);
73551+ if (learn_buffer_user != NULL) {
73552+ vfree(learn_buffer_user);
73553+ learn_buffer_user = NULL;
73554+ }
73555+ learn_buffer_len = 0;
73556+ mutex_unlock(&gr_learn_user_mutex);
73557+
73558+ return;
73559+}
73560+
73561+void
73562+gr_add_learn_entry(const char *fmt, ...)
73563+{
73564+ va_list args;
73565+ unsigned int len;
73566+
73567+ if (!gr_learn_attached)
73568+ return;
73569+
73570+ spin_lock(&gr_learn_lock);
73571+
73572+ /* leave a gap at the end so we know when it's "full" but don't have to
73573+ compute the exact length of the string we're trying to append
73574+ */
73575+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
73576+ spin_unlock(&gr_learn_lock);
73577+ wake_up_interruptible(&learn_wait);
73578+ return;
73579+ }
73580+ if (learn_buffer == NULL) {
73581+ spin_unlock(&gr_learn_lock);
73582+ return;
73583+ }
73584+
73585+ va_start(args, fmt);
73586+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
73587+ va_end(args);
73588+
73589+ learn_buffer_len += len + 1;
73590+
73591+ spin_unlock(&gr_learn_lock);
73592+ wake_up_interruptible(&learn_wait);
73593+
73594+ return;
73595+}
73596+
73597+static int
73598+open_learn(struct inode *inode, struct file *file)
73599+{
73600+ if (file->f_mode & FMODE_READ && gr_learn_attached)
73601+ return -EBUSY;
73602+ if (file->f_mode & FMODE_READ) {
73603+ int retval = 0;
73604+ mutex_lock(&gr_learn_user_mutex);
73605+ if (learn_buffer == NULL)
73606+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
73607+ if (learn_buffer_user == NULL)
73608+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
73609+ if (learn_buffer == NULL) {
73610+ retval = -ENOMEM;
73611+ goto out_error;
73612+ }
73613+ if (learn_buffer_user == NULL) {
73614+ retval = -ENOMEM;
73615+ goto out_error;
73616+ }
73617+ learn_buffer_len = 0;
73618+ learn_buffer_user_len = 0;
73619+ gr_learn_attached = 1;
73620+out_error:
73621+ mutex_unlock(&gr_learn_user_mutex);
73622+ return retval;
73623+ }
73624+ return 0;
73625+}
73626+
73627+static int
73628+close_learn(struct inode *inode, struct file *file)
73629+{
73630+ if (file->f_mode & FMODE_READ) {
73631+ char *tmp = NULL;
73632+ mutex_lock(&gr_learn_user_mutex);
73633+ spin_lock(&gr_learn_lock);
73634+ tmp = learn_buffer;
73635+ learn_buffer = NULL;
73636+ spin_unlock(&gr_learn_lock);
73637+ if (tmp)
73638+ vfree(tmp);
73639+ if (learn_buffer_user != NULL) {
73640+ vfree(learn_buffer_user);
73641+ learn_buffer_user = NULL;
73642+ }
73643+ learn_buffer_len = 0;
73644+ learn_buffer_user_len = 0;
73645+ gr_learn_attached = 0;
73646+ mutex_unlock(&gr_learn_user_mutex);
73647+ }
73648+
73649+ return 0;
73650+}
73651+
73652+const struct file_operations grsec_fops = {
73653+ .read = read_learn,
73654+ .write = write_grsec_handler,
73655+ .open = open_learn,
73656+ .release = close_learn,
73657+ .poll = poll_learn,
73658+};
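The learn device's two buffers exist to separate lock classes: writers may be in atomic context, so gr_add_learn_entry() appends under a spinlock only, while the single reader snapshots the shared buffer into its private copy under the spinlock and then performs the sleeping copy_to_user() while holding just the mutex. A loose user-space model of that handoff, with pthreads standing in for the two lock classes (names illustrative; compile with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define BUF_SZ 4096

/* Model of the learn log's two-buffer handoff: producers append to
 * 'shared' under the hot (spinlock-class) lock; the reader swaps the
 * contents into 'snapshot' and drops the hot lock before the slow
 * copy-out, which is serialized by the cold (mutex-class) lock. */
static pthread_spinlock_t hot;					/* plays gr_learn_lock */
static pthread_mutex_t cold = PTHREAD_MUTEX_INITIALIZER;	/* plays gr_learn_user_mutex */
static char shared[BUF_SZ], snapshot[BUF_SZ];
static size_t shared_len;

static void add_entry(const char *s)	/* safe from non-sleeping contexts */
{
	size_t n = strlen(s);

	pthread_spin_lock(&hot);
	if (shared_len + n < BUF_SZ) {
		memcpy(shared + shared_len, s, n);
		shared_len += n;
	}
	pthread_spin_unlock(&hot);
}

static size_t read_entries(char *out, size_t cap)
{
	size_t n;

	pthread_mutex_lock(&cold);
	pthread_spin_lock(&hot);
	n = shared_len;
	memcpy(snapshot, shared, n);	/* cheap copy under the hot lock */
	shared_len = 0;
	pthread_spin_unlock(&hot);
	if (n > cap)
		n = cap;
	memcpy(out, snapshot, n);	/* slow copy-out: cold lock only */
	pthread_mutex_unlock(&cold);
	return n;
}

int main(void)
{
	char out[BUF_SZ];
	size_t n;

	pthread_spin_init(&hot, PTHREAD_PROCESS_PRIVATE);
	add_entry("role alpha /bin/sh\n");
	n = read_entries(out, sizeof(out));
	fwrite(out, 1, n, stdout);
	return 0;
}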
73659diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
73660new file mode 100644
73661index 0000000..fd26052
73662--- /dev/null
73663+++ b/grsecurity/gracl_policy.c
73664@@ -0,0 +1,1781 @@
73665+#include <linux/kernel.h>
73666+#include <linux/module.h>
73667+#include <linux/sched.h>
73668+#include <linux/mm.h>
73669+#include <linux/file.h>
73670+#include <linux/fs.h>
73671+#include <linux/namei.h>
73672+#include <linux/mount.h>
73673+#include <linux/tty.h>
73674+#include <linux/proc_fs.h>
73675+#include <linux/lglock.h>
73676+#include <linux/slab.h>
73677+#include <linux/vmalloc.h>
73678+#include <linux/types.h>
73679+#include <linux/sysctl.h>
73680+#include <linux/netdevice.h>
73681+#include <linux/ptrace.h>
73682+#include <linux/gracl.h>
73683+#include <linux/gralloc.h>
73684+#include <linux/security.h>
73685+#include <linux/grinternal.h>
73686+#include <linux/pid_namespace.h>
73687+#include <linux/stop_machine.h>
73688+#include <linux/fdtable.h>
73689+#include <linux/percpu.h>
73690+#include <linux/lglock.h>
73691+#include <linux/hugetlb.h>
73692+#include <linux/posix-timers.h>
73693+#include "../fs/mount.h"
73694+
73695+#include <asm/uaccess.h>
73696+#include <asm/errno.h>
73697+#include <asm/mman.h>
73698+
73699+extern struct gr_policy_state *polstate;
73700+
73701+#define FOR_EACH_ROLE_START(role) \
73702+ role = polstate->role_list; \
73703+ while (role) {
73704+
73705+#define FOR_EACH_ROLE_END(role) \
73706+ role = role->prev; \
73707+ }
73708+
73709+struct path gr_real_root;
73710+
73711+extern struct gr_alloc_state *current_alloc_state;
73712+
73713+u16 acl_sp_role_value;
73714+
73715+static DEFINE_MUTEX(gr_dev_mutex);
73716+
73717+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
73718+extern void gr_clear_learn_entries(void);
73719+
73720+struct gr_arg *gr_usermode __read_only;
73721+unsigned char *gr_system_salt __read_only;
73722+unsigned char *gr_system_sum __read_only;
73723+
73724+static unsigned int gr_auth_attempts = 0;
73725+static unsigned long gr_auth_expires = 0UL;
73726+
73727+struct acl_object_label *fakefs_obj_rw;
73728+struct acl_object_label *fakefs_obj_rwx;
73729+
73730+extern int gr_init_uidset(void);
73731+extern void gr_free_uidset(void);
73732+extern void gr_remove_uid(uid_t uid);
73733+extern int gr_find_uid(uid_t uid);
73734+
73735+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename, int fallback);
73736+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
73737+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
73738+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
73739+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
73740+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
73741+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
73742+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
73743+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
73744+extern struct acl_subject_label *lookup_acl_subj_label(const u64 ino, const dev_t dev, const struct acl_role_label *role);
73745+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const u64 ino, const dev_t dev, const struct acl_role_label *role);
73746+extern void assign_special_role(const char *rolename);
73747+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
73748+extern int gr_rbac_disable(void *unused);
73749+extern void gr_enable_rbac_system(void);
73750+
73751+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
73752+{
73753+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
73754+ return -EFAULT;
73755+
73756+ return 0;
73757+}
73758+
73759+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
73760+{
73761+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
73762+ return -EFAULT;
73763+
73764+ return 0;
73765+}
73766+
73767+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
73768+{
73769+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
73770+ return -EFAULT;
73771+
73772+ return 0;
73773+}
73774+
73775+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
73776+{
73777+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
73778+ return -EFAULT;
73779+
73780+ return 0;
73781+}
73782+
73783+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
73784+{
73785+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
73786+ return -EFAULT;
73787+
73788+ return 0;
73789+}
73790+
73791+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
73792+{
73793+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
73794+ return -EFAULT;
73795+
73796+ return 0;
73797+}
73798+
73799+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
73800+{
73801+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
73802+ return -EFAULT;
73803+
73804+ return 0;
73805+}
73806+
73807+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
73808+{
73809+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
73810+ return -EFAULT;
73811+
73812+ return 0;
73813+}
73814+
73815+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
73816+{
73817+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
73818+ return -EFAULT;
73819+
73820+ return 0;
73821+}
73822+
73823+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
73824+{
73825+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
73826+ return -EFAULT;
73827+
73828+ if ((uwrap->version != GRSECURITY_VERSION) ||
73829+ (uwrap->size != sizeof(struct gr_arg)))
73830+ return -EINVAL;
73831+
73832+ return 0;
73833+}
73834+
73835+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
73836+{
73837+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
73838+ return -EFAULT;
73839+
73840+ return 0;
73841+}
73842+
73843+static size_t get_gr_arg_wrapper_size_normal(void)
73844+{
73845+ return sizeof(struct gr_arg_wrapper);
73846+}
73847+
73848+#ifdef CONFIG_COMPAT
73849+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
73850+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
73851+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
73852+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
73853+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
73854+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
73855+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
73856+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
73857+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
73858+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
73859+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
73860+extern size_t get_gr_arg_wrapper_size_compat(void);
73861+
73862+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
73863+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
73864+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
73865+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
73866+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
73867+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
73868+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
73869+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
73870+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
73871+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
73872+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
73873+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
73874+
73875+#else
73876+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
73877+#define copy_gr_arg copy_gr_arg_normal
73878+#define copy_gr_hash_struct copy_gr_hash_struct_normal
73879+#define copy_acl_object_label copy_acl_object_label_normal
73880+#define copy_acl_subject_label copy_acl_subject_label_normal
73881+#define copy_acl_role_label copy_acl_role_label_normal
73882+#define copy_acl_ip_label copy_acl_ip_label_normal
73883+#define copy_pointer_from_array copy_pointer_from_array_normal
73884+#define copy_sprole_pw copy_sprole_pw_normal
73885+#define copy_role_transition copy_role_transition_normal
73886+#define copy_role_allowed_ip copy_role_allowed_ip_normal
73887+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
73888+#endif
73889+
73890+static struct acl_subject_label *
73891+lookup_subject_map(const struct acl_subject_label *userp)
73892+{
73893+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
73894+ struct subject_map *match;
73895+
73896+ match = polstate->subj_map_set.s_hash[index];
73897+
73898+ while (match && match->user != userp)
73899+ match = match->next;
73900+
73901+ if (match != NULL)
73902+ return match->kernel;
73903+ else
73904+ return NULL;
73905+}
73906+
73907+static void
73908+insert_subj_map_entry(struct subject_map *subjmap)
73909+{
73910+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
73911+ struct subject_map **curr;
73912+
73913+ subjmap->prev = NULL;
73914+
73915+ curr = &polstate->subj_map_set.s_hash[index];
73916+ if (*curr != NULL)
73917+ (*curr)->prev = subjmap;
73918+
73919+ subjmap->next = *curr;
73920+ *curr = subjmap;
73921+
73922+ return;
73923+}
73924+
73925+static void
73926+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
73927+{
73928+ unsigned int index =
73929+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
73930+ struct acl_role_label **curr;
73931+ struct acl_role_label *tmp, *tmp2;
73932+
73933+ curr = &polstate->acl_role_set.r_hash[index];
73934+
73935+ /* simple case, slot is empty, just set it to our role */
73936+ if (*curr == NULL) {
73937+ *curr = role;
73938+ } else {
73939+ /* example:
73940+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
73941+ 2 -> 3
73942+ */
73943+ /* first check to see if we can already be reached via this slot */
73944+ tmp = *curr;
73945+ while (tmp && tmp != role)
73946+ tmp = tmp->next;
73947+ if (tmp == role) {
73948+ /* we don't need to add ourselves to this slot's chain */
73949+ return;
73950+ }
73951+ /* we need to add ourselves to this chain, two cases */
73952+ if (role->next == NULL) {
73953+ /* simple case, append the current chain to our role */
73954+ role->next = *curr;
73955+ *curr = role;
73956+ } else {
73957+ /* 1 -> 2 -> 3 -> 4
73958+ 2 -> 3 -> 4
73959+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
73960+ */
73961+ /* trickier case: walk our role's chain until we find
73962+ the role for the start of the current slot's chain */
73963+ tmp = role;
73964+ tmp2 = *curr;
73965+ while (tmp->next && tmp->next != tmp2)
73966+ tmp = tmp->next;
73967+ if (tmp->next == tmp2) {
73968+ /* from example above, we found 3, so just
73969+ replace this slot's chain with ours */
73970+ *curr = role;
73971+ } else {
73972+ /* we didn't find a subset of our role's chain
73973+ in the current slot's chain, so append their
73974+ chain to ours, and set us as the first role in
73975+ the slot's chain
73976+
73977+ we could fold this case with the case above,
73978+ but making it explicit for clarity
73979+ */
73980+ tmp->next = tmp2;
73981+ *curr = role;
73982+ }
73983+ }
73984+ }
73985+
73986+ return;
73987+}
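
The chain-merging above is the subtlest part of role insertion: because a role's hash chain can share a tail with the chain already in the slot, the function either finds itself already reachable, prepends itself, or splices at the shared suffix. A minimal userspace sketch of the same merge follows; the node type and field names are illustrative stand-ins for struct acl_role_label, not the patch's own types.

#include <stdio.h>
#include <stddef.h>

struct node { int id; struct node *next; };

/* Prepend 'role' to the chain at *slot, reusing any shared tail,
 * mirroring the three cases handled by __insert_acl_role_label(). */
static void chain_insert(struct node **slot, struct node *role)
{
	struct node *tmp;

	if (*slot == NULL) {			/* empty slot */
		*slot = role;
		return;
	}
	for (tmp = *slot; tmp; tmp = tmp->next)
		if (tmp == role)		/* already reachable from this slot */
			return;
	if (role->next == NULL) {		/* single node: append slot's chain */
		role->next = *slot;
		*slot = role;
		return;
	}
	/* walk our chain looking for the head of the slot's chain */
	for (tmp = role; tmp->next && tmp->next != *slot; tmp = tmp->next)
		;
	if (tmp->next != *slot)			/* no shared suffix: splice it on */
		tmp->next = *slot;
	*slot = role;				/* either way, we become the slot head */
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *slot = &b;			/* slot currently holds 2 -> 3 */

	chain_insert(&slot, &a);		/* insert 1 -> 2 -> 3: shares tail at 2 */
	for (struct node *n = slot; n; n = n->next)
		printf("%d ", n->id);		/* prints: 1 2 3 */
	printf("\n");
	return 0;
}
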
73988+
73989+static void
73990+insert_acl_role_label(struct acl_role_label *role)
73991+{
73992+ int i;
73993+
73994+ if (polstate->role_list == NULL) {
73995+ polstate->role_list = role;
73996+ role->prev = NULL;
73997+ } else {
73998+ role->prev = polstate->role_list;
73999+ polstate->role_list = role;
74000+ }
74001+
74002+ /* used for hash chains */
74003+ role->next = NULL;
74004+
74005+ if (role->roletype & GR_ROLE_DOMAIN) {
74006+ for (i = 0; i < role->domain_child_num; i++)
74007+ __insert_acl_role_label(role, role->domain_children[i]);
74008+ } else
74009+ __insert_acl_role_label(role, role->uidgid);
74010+}
74011+
74012+static int
74013+insert_name_entry(char *name, const u64 inode, const dev_t device, __u8 deleted)
74014+{
74015+ struct name_entry **curr, *nentry;
74016+ struct inodev_entry *ientry;
74017+ unsigned int len = strlen(name);
74018+ unsigned int key = full_name_hash(name, len);
74019+ unsigned int index = key % polstate->name_set.n_size;
74020+
74021+ curr = &polstate->name_set.n_hash[index];
74022+
74023+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
74024+ curr = &((*curr)->next);
74025+
74026+ if (*curr != NULL)
74027+ return 1;
74028+
74029+ nentry = acl_alloc(sizeof (struct name_entry));
74030+ if (nentry == NULL)
74031+ return 0;
74032+ ientry = acl_alloc(sizeof (struct inodev_entry));
74033+ if (ientry == NULL)
74034+ return 0;
74035+ ientry->nentry = nentry;
74036+
74037+ nentry->key = key;
74038+ nentry->name = name;
74039+ nentry->inode = inode;
74040+ nentry->device = device;
74041+ nentry->len = len;
74042+ nentry->deleted = deleted;
74043+
74044+ nentry->prev = NULL;
74045+ curr = &polstate->name_set.n_hash[index];
74046+ if (*curr != NULL)
74047+ (*curr)->prev = nentry;
74048+ nentry->next = *curr;
74049+ *curr = nentry;
74050+
74051+ /* insert us into the table searchable by inode/dev */
74052+ __insert_inodev_entry(polstate, ientry);
74053+
74054+ return 1;
74055+}
74056+
74057+/* we allocate chained hash tables, so the optimal size keeps the load factor (lambda) near 1 */
74058+
74059+static void *
74060+create_table(__u32 * len, int elementsize)
74061+{
74062+ unsigned int table_sizes[] = {
74063+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
74064+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
74065+ 4194301, 8388593, 16777213, 33554393, 67108859
74066+ };
74067+ void *newtable = NULL;
74068+ unsigned int pwr = 0;
74069+
74070+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
74071+ table_sizes[pwr] <= *len)
74072+ pwr++;
74073+
74074+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
74075+ return newtable;
74076+
74077+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
74078+ newtable =
74079+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
74080+ else
74081+ newtable = vmalloc(table_sizes[pwr] * elementsize);
74082+
74083+ *len = table_sizes[pwr];
74084+
74085+ return newtable;
74086+}
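
create_table() picks the smallest prime from its fixed list that strictly exceeds the requested element count, then uses kmalloc() for tables that fit in a page and vmalloc() otherwise. A userspace sketch of just the size selection; PAGE_SIZE is assumed to be 4096 here purely for illustration, the real macro is arch-specific.

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed for the example */

/* Return the table size create_table() would choose for 'len' entries,
 * or 0 if 'len' reaches or exceeds the largest supported prime. */
static unsigned long table_size_for(unsigned long len)
{
	static const unsigned long primes[] = {
		7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
		32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
		4194301, 8388593, 16777213, 33554393, 67108859
	};
	unsigned int pwr = 0;

	while (pwr < sizeof(primes) / sizeof(primes[0]) - 1 && primes[pwr] <= len)
		pwr++;
	return primes[pwr] > len ? primes[pwr] : 0;
}

int main(void)
{
	unsigned long n = 1000;
	unsigned long sz = table_size_for(n);

	/* on a 64-bit build prints: 1000 entries -> 1021 buckets (vmalloc) */
	printf("%lu entries -> %lu buckets (%s)\n", n, sz,
	       sz * sizeof(void *) <= PAGE_SIZE ? "kmalloc" : "vmalloc");
	return 0;
}
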
74087+
74088+static int
74089+init_variables(const struct gr_arg *arg, bool reload)
74090+{
74091+ struct task_struct *reaper = init_pid_ns.child_reaper;
74092+ unsigned int stacksize;
74093+
74094+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
74095+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
74096+ polstate->name_set.n_size = arg->role_db.num_objects;
74097+ polstate->inodev_set.i_size = arg->role_db.num_objects;
74098+
74099+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
74100+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
74101+ return 1;
74102+
74103+ if (!reload) {
74104+ if (!gr_init_uidset())
74105+ return 1;
74106+ }
74107+
74108+ /* set up the stack that holds allocation info */
74109+
74110+ stacksize = arg->role_db.num_pointers + 5;
74111+
74112+ if (!acl_alloc_stack_init(stacksize))
74113+ return 1;
74114+
74115+ if (!reload) {
74116+ /* grab reference for the real root dentry and vfsmount */
74117+ get_fs_root(reaper->fs, &gr_real_root);
74118+
74119+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
74120+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
74121+#endif
74122+
74123+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
74124+ if (fakefs_obj_rw == NULL)
74125+ return 1;
74126+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
74127+
74128+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
74129+ if (fakefs_obj_rwx == NULL)
74130+ return 1;
74131+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
74132+ }
74133+
74134+ polstate->subj_map_set.s_hash =
74135+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
74136+ polstate->acl_role_set.r_hash =
74137+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
74138+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
74139+ polstate->inodev_set.i_hash =
74140+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
74141+
74142+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
74143+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
74144+ return 1;
74145+
74146+ memset(polstate->subj_map_set.s_hash, 0,
74147+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
74148+ memset(polstate->acl_role_set.r_hash, 0,
74149+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
74150+ memset(polstate->name_set.n_hash, 0,
74151+ sizeof (struct name_entry *) * polstate->name_set.n_size);
74152+ memset(polstate->inodev_set.i_hash, 0,
74153+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
74154+
74155+ return 0;
74156+}
74157+
74158+/* free information not needed after startup;
74159+ currently this is the user->kernel pointer mappings for subjects
74160+*/
74161+
74162+static void
74163+free_init_variables(void)
74164+{
74165+ __u32 i;
74166+
74167+ if (polstate->subj_map_set.s_hash) {
74168+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
74169+ if (polstate->subj_map_set.s_hash[i]) {
74170+ kfree(polstate->subj_map_set.s_hash[i]);
74171+ polstate->subj_map_set.s_hash[i] = NULL;
74172+ }
74173+ }
74174+
74175+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
74176+ PAGE_SIZE)
74177+ kfree(polstate->subj_map_set.s_hash);
74178+ else
74179+ vfree(polstate->subj_map_set.s_hash);
74180+ }
74181+
74182+ return;
74183+}
74184+
74185+static void
74186+free_variables(bool reload)
74187+{
74188+ struct acl_subject_label *s;
74189+ struct acl_role_label *r;
74190+ struct task_struct *task, *task2;
74191+ unsigned int x;
74192+
74193+ if (!reload) {
74194+ gr_clear_learn_entries();
74195+
74196+ read_lock(&tasklist_lock);
74197+ do_each_thread(task2, task) {
74198+ task->acl_sp_role = 0;
74199+ task->acl_role_id = 0;
74200+ task->inherited = 0;
74201+ task->acl = NULL;
74202+ task->role = NULL;
74203+ } while_each_thread(task2, task);
74204+ read_unlock(&tasklist_lock);
74205+
74206+ kfree(fakefs_obj_rw);
74207+ fakefs_obj_rw = NULL;
74208+ kfree(fakefs_obj_rwx);
74209+ fakefs_obj_rwx = NULL;
74210+
74211+ /* release the reference to the real root dentry and vfsmount */
74212+ path_put(&gr_real_root);
74213+ memset(&gr_real_root, 0, sizeof(gr_real_root));
74214+ }
74215+
74216+ /* free all object hash tables */
74217+
74218+ FOR_EACH_ROLE_START(r)
74219+ if (r->subj_hash == NULL)
74220+ goto next_role;
74221+ FOR_EACH_SUBJECT_START(r, s, x)
74222+ if (s->obj_hash == NULL)
74223+ break;
74224+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74225+ kfree(s->obj_hash);
74226+ else
74227+ vfree(s->obj_hash);
74228+ FOR_EACH_SUBJECT_END(s, x)
74229+ FOR_EACH_NESTED_SUBJECT_START(r, s)
74230+ if (s->obj_hash == NULL)
74231+ break;
74232+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74233+ kfree(s->obj_hash);
74234+ else
74235+ vfree(s->obj_hash);
74236+ FOR_EACH_NESTED_SUBJECT_END(s)
74237+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
74238+ kfree(r->subj_hash);
74239+ else
74240+ vfree(r->subj_hash);
74241+ r->subj_hash = NULL;
74242+next_role:
74243+ FOR_EACH_ROLE_END(r)
74244+
74245+ acl_free_all();
74246+
74247+ if (polstate->acl_role_set.r_hash) {
74248+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
74249+ PAGE_SIZE)
74250+ kfree(polstate->acl_role_set.r_hash);
74251+ else
74252+ vfree(polstate->acl_role_set.r_hash);
74253+ }
74254+ if (polstate->name_set.n_hash) {
74255+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
74256+ PAGE_SIZE)
74257+ kfree(polstate->name_set.n_hash);
74258+ else
74259+ vfree(polstate->name_set.n_hash);
74260+ }
74261+
74262+ if (polstate->inodev_set.i_hash) {
74263+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
74264+ PAGE_SIZE)
74265+ kfree(polstate->inodev_set.i_hash);
74266+ else
74267+ vfree(polstate->inodev_set.i_hash);
74268+ }
74269+
74270+ if (!reload)
74271+ gr_free_uidset();
74272+
74273+ memset(&polstate->name_set, 0, sizeof (struct name_db));
74274+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
74275+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
74276+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
74277+
74278+ polstate->default_role = NULL;
74279+ polstate->kernel_role = NULL;
74280+ polstate->role_list = NULL;
74281+
74282+ return;
74283+}
74284+
74285+static struct acl_subject_label *
74286+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
74287+
74288+static int alloc_and_copy_string(char **name, unsigned int maxlen)
74289+{
74290+ unsigned int len = strnlen_user(*name, maxlen);
74291+ char *tmp;
74292+
74293+ if (!len || len >= maxlen)
74294+ return -EINVAL;
74295+
74296+ if ((tmp = (char *) acl_alloc(len)) == NULL)
74297+ return -ENOMEM;
74298+
74299+ if (copy_from_user(tmp, *name, len))
74300+ return -EFAULT;
74301+
74302+ tmp[len-1] = '\0';
74303+ *name = tmp;
74304+
74305+ return 0;
74306+}
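
A detail worth noting in alloc_and_copy_string(): strnlen_user() counts the terminating NUL, so len already includes room for the terminator, and the final tmp[len-1] = '\0' re-terminates the buffer in case the userspace string changed between the length probe and the copy. A userspace analogue with strnlen(), where that +1 has to be made explicit, is sketched below; names are illustrative.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Bounded duplicate of 'src' into freshly allocated memory; mirrors the
 * validate-allocate-copy-terminate order of alloc_and_copy_string().
 * Returns 0 and stores the copy in *out, or a negative errno value. */
static int dup_bounded(const char *src, size_t maxlen, char **out)
{
	size_t len = strnlen(src, maxlen) + 1;	/* strnlen_user() counts the NUL itself */
	char *tmp;

	if (len == 1 || len >= maxlen)		/* empty, or no NUL within maxlen */
		return -EINVAL;
	tmp = malloc(len);
	if (tmp == NULL)
		return -ENOMEM;
	memcpy(tmp, src, len);
	tmp[len - 1] = '\0';			/* re-terminate even if src raced */
	*out = tmp;
	return 0;
}

int main(void)
{
	char *copy;

	if (dup_bounded("/bin/sh", 4096, &copy) == 0) {
		printf("%s\n", copy);		/* prints: /bin/sh */
		free(copy);
	}
	return 0;
}
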
74307+
74308+static int
74309+copy_user_glob(struct acl_object_label *obj)
74310+{
74311+ struct acl_object_label *g_tmp, **guser;
74312+ int error;
74313+
74314+ if (obj->globbed == NULL)
74315+ return 0;
74316+
74317+ guser = &obj->globbed;
74318+ while (*guser) {
74319+ g_tmp = (struct acl_object_label *)
74320+ acl_alloc(sizeof (struct acl_object_label));
74321+ if (g_tmp == NULL)
74322+ return -ENOMEM;
74323+
74324+ if (copy_acl_object_label(g_tmp, *guser))
74325+ return -EFAULT;
74326+
74327+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
74328+ if (error)
74329+ return error;
74330+
74331+ *guser = g_tmp;
74332+ guser = &(g_tmp->next);
74333+ }
74334+
74335+ return 0;
74336+}
74337+
74338+static int
74339+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
74340+ struct acl_role_label *role)
74341+{
74342+ struct acl_object_label *o_tmp;
74343+ int ret;
74344+
74345+ while (userp) {
74346+ if ((o_tmp = (struct acl_object_label *)
74347+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
74348+ return -ENOMEM;
74349+
74350+ if (copy_acl_object_label(o_tmp, userp))
74351+ return -EFAULT;
74352+
74353+ userp = o_tmp->prev;
74354+
74355+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
74356+ if (ret)
74357+ return ret;
74358+
74359+ insert_acl_obj_label(o_tmp, subj);
74360+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
74361+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
74362+ return -ENOMEM;
74363+
74364+ ret = copy_user_glob(o_tmp);
74365+ if (ret)
74366+ return ret;
74367+
74368+ if (o_tmp->nested) {
74369+ int already_copied;
74370+
74371+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
74372+ if (IS_ERR(o_tmp->nested))
74373+ return PTR_ERR(o_tmp->nested);
74374+
74375+ /* insert into nested subject list if we haven't copied this one yet
74376+ to prevent duplicate entries */
74377+ if (!already_copied) {
74378+ o_tmp->nested->next = role->hash->first;
74379+ role->hash->first = o_tmp->nested;
74380+ }
74381+ }
74382+ }
74383+
74384+ return 0;
74385+}
74386+
74387+static __u32
74388+count_user_subjs(struct acl_subject_label *userp)
74389+{
74390+ struct acl_subject_label s_tmp;
74391+ __u32 num = 0;
74392+
74393+ while (userp) {
74394+ if (copy_acl_subject_label(&s_tmp, userp))
74395+ break;
74396+
74397+ userp = s_tmp.prev; num++; /* count each subject, as count_user_objs() does below */
74398+ }
74399+
74400+ return num;
74401+}
74402+
74403+static int
74404+copy_user_allowedips(struct acl_role_label *rolep)
74405+{
74406+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
74407+
74408+ ruserip = rolep->allowed_ips;
74409+
74410+ while (ruserip) {
74411+ rlast = rtmp;
74412+
74413+ if ((rtmp = (struct role_allowed_ip *)
74414+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
74415+ return -ENOMEM;
74416+
74417+ if (copy_role_allowed_ip(rtmp, ruserip))
74418+ return -EFAULT;
74419+
74420+ ruserip = rtmp->prev;
74421+
74422+ if (!rlast) {
74423+ rtmp->prev = NULL;
74424+ rolep->allowed_ips = rtmp;
74425+ } else {
74426+ rlast->next = rtmp;
74427+ rtmp->prev = rlast;
74428+ }
74429+
74430+ if (!ruserip)
74431+ rtmp->next = NULL;
74432+ }
74433+
74434+ return 0;
74435+}
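
copy_user_allowedips() (and copy_user_transitions() below it) walks the userspace list through its prev pointers while relinking the kernel copies into a properly doubly-linked list in walk order. A self-contained sketch of that relinking, with memcpy() standing in for copy_role_allowed_ip() and a trivial node type in place of struct role_allowed_ip:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ip { int addr; struct ip *prev, *next; };

/* Rebuild a prev-linked "user" list as a kernel-side list with both
 * links valid, the way copy_user_allowedips() does. */
static struct ip *copy_list(const struct ip *userp)
{
	struct ip *head = NULL, *tmp = NULL, *last;

	while (userp) {
		last = tmp;
		tmp = malloc(sizeof(*tmp));
		if (tmp == NULL)
			abort();			/* error paths elided */
		memcpy(tmp, userp, sizeof(*tmp));	/* copy stand-in */
		userp = tmp->prev;			/* follow the user's prev pointer */
		if (last == NULL) {
			tmp->prev = NULL;
			head = tmp;
		} else {
			last->next = tmp;
			tmp->prev = last;
		}
		if (userp == NULL)
			tmp->next = NULL;
	}
	return head;
}

int main(void)
{
	struct ip a = { 1, NULL, NULL }, b = { 2, &a, NULL }, c = { 3, &b, NULL };

	for (struct ip *n = copy_list(&c); n; n = n->next)
		printf("%d ", n->addr);			/* prints: 3 2 1 */
	printf("\n");
	return 0;
}
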
74436+
74437+static int
74438+copy_user_transitions(struct acl_role_label *rolep)
74439+{
74440+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
74441+ int error;
74442+
74443+ rusertp = rolep->transitions;
74444+
74445+ while (rusertp) {
74446+ rlast = rtmp;
74447+
74448+ if ((rtmp = (struct role_transition *)
74449+ acl_alloc(sizeof (struct role_transition))) == NULL)
74450+ return -ENOMEM;
74451+
74452+ if (copy_role_transition(rtmp, rusertp))
74453+ return -EFAULT;
74454+
74455+ rusertp = rtmp->prev;
74456+
74457+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
74458+ if (error)
74459+ return error;
74460+
74461+ if (!rlast) {
74462+ rtmp->prev = NULL;
74463+ rolep->transitions = rtmp;
74464+ } else {
74465+ rlast->next = rtmp;
74466+ rtmp->prev = rlast;
74467+ }
74468+
74469+ if (!rusertp)
74470+ rtmp->next = NULL;
74471+ }
74472+
74473+ return 0;
74474+}
74475+
74476+static __u32 count_user_objs(const struct acl_object_label __user *userp)
74477+{
74478+ struct acl_object_label o_tmp;
74479+ __u32 num = 0;
74480+
74481+ while (userp) {
74482+ if (copy_acl_object_label(&o_tmp, userp))
74483+ break;
74484+
74485+ userp = o_tmp.prev;
74486+ num++;
74487+ }
74488+
74489+ return num;
74490+}
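
Both counting helpers walk the userspace list the same way: snapshot a node into a kernel-side temporary just to read its prev pointer, and let a copy fault simply truncate the count. A userspace sketch of that walk, with memcpy() standing in for copy_acl_object_label() and an illustrative node type:

#include <stdio.h>
#include <string.h>

struct obj { int data; struct obj *prev; };

/* Count list nodes the way count_user_objs() does: snapshot each node,
 * then follow the snapshot's prev pointer. */
static unsigned int count_objs(const struct obj *userp)
{
	struct obj tmp;
	unsigned int num = 0;

	while (userp) {
		memcpy(&tmp, userp, sizeof(tmp));	/* copy_from_user() stand-in */
		userp = tmp.prev;
		num++;
	}
	return num;
}

int main(void)
{
	struct obj a = { 1, NULL }, b = { 2, &a }, c = { 3, &b };

	printf("%u\n", count_objs(&c));			/* prints: 3 */
	return 0;
}
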
74491+
74492+static struct acl_subject_label *
74493+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
74494+{
74495+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
74496+ __u32 num_objs;
74497+ struct acl_ip_label **i_tmp, *i_utmp2;
74498+ struct gr_hash_struct ghash;
74499+ struct subject_map *subjmap;
74500+ unsigned int i_num;
74501+ int err;
74502+
74503+ if (already_copied != NULL)
74504+ *already_copied = 0;
74505+
74506+ s_tmp = lookup_subject_map(userp);
74507+
74508+ /* we've already copied this subject into the kernel; just return
74509+ the reference to it, and don't copy it over again
74510+ */
74511+ if (s_tmp) {
74512+ if (already_copied != NULL)
74513+ *already_copied = 1;
74514+ return s_tmp;
74515+ }
74516+
74517+ if ((s_tmp = (struct acl_subject_label *)
74518+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
74519+ return ERR_PTR(-ENOMEM);
74520+
74521+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
74522+ if (subjmap == NULL)
74523+ return ERR_PTR(-ENOMEM);
74524+
74525+ subjmap->user = userp;
74526+ subjmap->kernel = s_tmp;
74527+ insert_subj_map_entry(subjmap);
74528+
74529+ if (copy_acl_subject_label(s_tmp, userp))
74530+ return ERR_PTR(-EFAULT);
74531+
74532+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
74533+ if (err)
74534+ return ERR_PTR(err);
74535+
74536+ if (!strcmp(s_tmp->filename, "/"))
74537+ role->root_label = s_tmp;
74538+
74539+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
74540+ return ERR_PTR(-EFAULT);
74541+
74542+ /* copy user and group transition tables */
74543+
74544+ if (s_tmp->user_trans_num) {
74545+ uid_t *uidlist;
74546+
74547+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
74548+ if (uidlist == NULL)
74549+ return ERR_PTR(-ENOMEM);
74550+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
74551+ return ERR_PTR(-EFAULT);
74552+
74553+ s_tmp->user_transitions = uidlist;
74554+ }
74555+
74556+ if (s_tmp->group_trans_num) {
74557+ gid_t *gidlist;
74558+
74559+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
74560+ if (gidlist == NULL)
74561+ return ERR_PTR(-ENOMEM);
74562+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
74563+ return ERR_PTR(-EFAULT);
74564+
74565+ s_tmp->group_transitions = gidlist;
74566+ }
74567+
74568+ /* set up object hash table */
74569+ num_objs = count_user_objs(ghash.first);
74570+
74571+ s_tmp->obj_hash_size = num_objs;
74572+ s_tmp->obj_hash =
74573+ (struct acl_object_label **)
74574+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
74575+
74576+ if (!s_tmp->obj_hash)
74577+ return ERR_PTR(-ENOMEM);
74578+
74579+ memset(s_tmp->obj_hash, 0,
74580+ s_tmp->obj_hash_size *
74581+ sizeof (struct acl_object_label *));
74582+
74583+ /* add in objects */
74584+ err = copy_user_objs(ghash.first, s_tmp, role);
74585+
74586+ if (err)
74587+ return ERR_PTR(err);
74588+
74589+ /* set pointer for parent subject */
74590+ if (s_tmp->parent_subject) {
74591+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
74592+
74593+ if (IS_ERR(s_tmp2))
74594+ return s_tmp2;
74595+
74596+ s_tmp->parent_subject = s_tmp2;
74597+ }
74598+
74599+ /* add in ip acls */
74600+
74601+ if (!s_tmp->ip_num) {
74602+ s_tmp->ips = NULL;
74603+ goto insert;
74604+ }
74605+
74606+ i_tmp =
74607+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
74608+ sizeof (struct acl_ip_label *));
74609+
74610+ if (!i_tmp)
74611+ return ERR_PTR(-ENOMEM);
74612+
74613+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
74614+ *(i_tmp + i_num) =
74615+ (struct acl_ip_label *)
74616+ acl_alloc(sizeof (struct acl_ip_label));
74617+ if (!*(i_tmp + i_num))
74618+ return ERR_PTR(-ENOMEM);
74619+
74620+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
74621+ return ERR_PTR(-EFAULT);
74622+
74623+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
74624+ return ERR_PTR(-EFAULT);
74625+
74626+ if ((*(i_tmp + i_num))->iface == NULL)
74627+ continue;
74628+
74629+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
74630+ if (err)
74631+ return ERR_PTR(err);
74632+ }
74633+
74634+ s_tmp->ips = i_tmp;
74635+
74636+insert:
74637+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
74638+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
74639+ return ERR_PTR(-ENOMEM);
74640+
74641+ return s_tmp;
74642+}
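
The subject map gives do_copy_user_subj() memoization: the user->kernel mapping is inserted before any recursive copies (parent subjects, nested subjects), so shared subjects are copied exactly once and reference cycles terminate. A stripped-down sketch of that copy-with-memo pattern, using a flat array where the kernel uses subj_map_set; all names here are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct subj { int id; struct subj *parent; };

/* one user->kernel mapping, as in struct subject_map */
static struct { const struct subj *user; struct subj *kernel; } memo[64];
static int memo_used;

static struct subj *lookup_memo(const struct subj *user)
{
	for (int i = 0; i < memo_used; i++)
		if (memo[i].user == user)
			return memo[i].kernel;
	return NULL;
}

/* Deep-copy with memoization: record the mapping *before* recursing, so
 * shared parents are copied once and cycles cannot recurse forever. */
static struct subj *copy_subj(const struct subj *user)
{
	struct subj *k = lookup_memo(user);

	if (k)
		return k;
	k = malloc(sizeof(*k));
	if (k == NULL)
		abort();			/* error paths elided for brevity */
	memo[memo_used].user = user;
	memo[memo_used].kernel = k;
	memo_used++;
	k->id = user->id;
	k->parent = user->parent ? copy_subj(user->parent) : NULL;
	return k;
}

int main(void)
{
	struct subj root = { 1, NULL };
	struct subj a = { 2, &root }, b = { 3, &root };
	struct subj *ka = copy_subj(&a), *kb = copy_subj(&b);

	printf("shared parent copied once: %d\n", ka->parent == kb->parent);
	return 0;
}
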
74643+
74644+static int
74645+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
74646+{
74647+ struct acl_subject_label s_pre;
74648+ struct acl_subject_label * ret;
74649+ int err;
74650+
74651+ while (userp) {
74652+ if (copy_acl_subject_label(&s_pre, userp))
74653+ return -EFAULT;
74654+
74655+ ret = do_copy_user_subj(userp, role, NULL);
74656+
74657+ err = PTR_ERR(ret);
74658+ if (IS_ERR(ret))
74659+ return err;
74660+
74661+ insert_acl_subj_label(ret, role);
74662+
74663+ userp = s_pre.prev;
74664+ }
74665+
74666+ return 0;
74667+}
74668+
74669+static int
74670+copy_user_acl(struct gr_arg *arg)
74671+{
74672+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
74673+ struct acl_subject_label *subj_list;
74674+ struct sprole_pw *sptmp;
74675+ struct gr_hash_struct *ghash;
74676+ uid_t *domainlist;
74677+ unsigned int r_num;
74678+ int err = 0;
74679+ __u16 i;
74680+ __u32 num_subjs;
74681+
74682+ /* we need a default and kernel role */
74683+ if (arg->role_db.num_roles < 2)
74684+ return -EINVAL;
74685+
74686+ /* copy special role authentication info from userspace */
74687+
74688+ polstate->num_sprole_pws = arg->num_sprole_pws;
74689+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
74690+
74691+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
74692+ return -ENOMEM;
74693+
74694+ for (i = 0; i < polstate->num_sprole_pws; i++) {
74695+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
74696+ if (!sptmp)
74697+ return -ENOMEM;
74698+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
74699+ return -EFAULT;
74700+
74701+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
74702+ if (err)
74703+ return err;
74704+
74705+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
74706+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
74707+#endif
74708+
74709+ polstate->acl_special_roles[i] = sptmp;
74710+ }
74711+
74712+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
74713+
74714+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
74715+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
74716+
74717+ if (!r_tmp)
74718+ return -ENOMEM;
74719+
74720+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
74721+ return -EFAULT;
74722+
74723+ if (copy_acl_role_label(r_tmp, r_utmp2))
74724+ return -EFAULT;
74725+
74726+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
74727+ if (err)
74728+ return err;
74729+
74730+ if (!strcmp(r_tmp->rolename, "default")
74731+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
74732+ polstate->default_role = r_tmp;
74733+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
74734+ polstate->kernel_role = r_tmp;
74735+ }
74736+
74737+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
74738+ return -ENOMEM;
74739+
74740+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
74741+ return -EFAULT;
74742+
74743+ r_tmp->hash = ghash;
74744+
74745+ num_subjs = count_user_subjs(r_tmp->hash->first);
74746+
74747+ r_tmp->subj_hash_size = num_subjs;
74748+ r_tmp->subj_hash =
74749+ (struct acl_subject_label **)
74750+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
74751+
74752+ if (!r_tmp->subj_hash)
74753+ return -ENOMEM;
74754+
74755+ err = copy_user_allowedips(r_tmp);
74756+ if (err)
74757+ return err;
74758+
74759+ /* copy domain info */
74760+ if (r_tmp->domain_children != NULL) {
74761+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
74762+ if (domainlist == NULL)
74763+ return -ENOMEM;
74764+
74765+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
74766+ return -EFAULT;
74767+
74768+ r_tmp->domain_children = domainlist;
74769+ }
74770+
74771+ err = copy_user_transitions(r_tmp);
74772+ if (err)
74773+ return err;
74774+
74775+ memset(r_tmp->subj_hash, 0,
74776+ r_tmp->subj_hash_size *
74777+ sizeof (struct acl_subject_label *));
74778+
74779+ /* acquire the list of subjects, then NULL out
74780+ the list prior to parsing the subjects for this role,
74781+ as during this parsing the list is replaced with a list
74782+ of *nested* subjects for the role
74783+ */
74784+ subj_list = r_tmp->hash->first;
74785+
74786+ /* set nested subject list to null */
74787+ r_tmp->hash->first = NULL;
74788+
74789+ err = copy_user_subjs(subj_list, r_tmp);
74790+
74791+ if (err)
74792+ return err;
74793+
74794+ insert_acl_role_label(r_tmp);
74795+ }
74796+
74797+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
74798+ return -EINVAL;
74799+
74800+ return err;
74801+}
74802+
74803+static int gracl_reload_apply_policies(void *reload)
74804+{
74805+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
74806+ struct task_struct *task, *task2;
74807+ struct acl_role_label *role, *rtmp;
74808+ struct acl_subject_label *subj;
74809+ const struct cred *cred;
74810+ int role_applied;
74811+ int ret = 0;
74812+
74813+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
74814+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
74815+
74816+ /* first make sure we'll be able to apply the new policy cleanly */
74817+ do_each_thread(task2, task) {
74818+ if (task->exec_file == NULL)
74819+ continue;
74820+ role_applied = 0;
74821+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
74822+ /* preserve special roles */
74823+ FOR_EACH_ROLE_START(role)
74824+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
74825+ rtmp = task->role;
74826+ task->role = role;
74827+ role_applied = 1;
74828+ break;
74829+ }
74830+ FOR_EACH_ROLE_END(role)
74831+ }
74832+ if (!role_applied) {
74833+ cred = __task_cred(task);
74834+ rtmp = task->role;
74835+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
74836+ }
74837+ /* this handles non-nested inherited subjects; nested subjects will still
74838+ be dropped currently */
74839+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
74840+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL, 1);
74841+ /* change the role back so that we've made no modifications to the policy */
74842+ task->role = rtmp;
74843+
74844+ if (subj == NULL || task->tmpacl == NULL) {
74845+ ret = -EINVAL;
74846+ goto out;
74847+ }
74848+ } while_each_thread(task2, task);
74849+
74850+ /* now actually apply the policy */
74851+
74852+ do_each_thread(task2, task) {
74853+ if (task->exec_file) {
74854+ role_applied = 0;
74855+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
74856+ /* preserve special roles */
74857+ FOR_EACH_ROLE_START(role)
74858+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
74859+ task->role = role;
74860+ role_applied = 1;
74861+ break;
74862+ }
74863+ FOR_EACH_ROLE_END(role)
74864+ }
74865+ if (!role_applied) {
74866+ cred = __task_cred(task);
74867+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
74868+ }
74869+ /* this handles non-nested inherited subjects; nested subjects will still
74870+ be dropped currently */
74871+ if (!reload_state->oldmode && task->inherited)
74872+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename, 1);
74873+ else {
74874+ /* looked up and tagged to the task previously */
74875+ subj = task->tmpacl;
74876+ }
74877+ /* subj will be non-null */
74878+ __gr_apply_subject_to_task(polstate, task, subj);
74879+ if (reload_state->oldmode) {
74880+ task->acl_role_id = 0;
74881+ task->acl_sp_role = 0;
74882+ task->inherited = 0;
74883+ }
74884+ } else {
74885+ // it's a kernel process
74886+ task->role = polstate->kernel_role;
74887+ task->acl = polstate->kernel_role->root_label;
74888+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
74889+ task->acl->mode &= ~GR_PROCFIND;
74890+#endif
74891+ }
74892+ } while_each_thread(task2, task);
74893+
74894+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
74895+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
74896+
74897+out:
74898+
74899+ return ret;
74900+}
74901+
74902+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
74903+{
74904+ struct gr_reload_state new_reload_state = { };
74905+ int err;
74906+
74907+ new_reload_state.oldpolicy_ptr = polstate;
74908+ new_reload_state.oldalloc_ptr = current_alloc_state;
74909+ new_reload_state.oldmode = oldmode;
74910+
74911+ current_alloc_state = &new_reload_state.newalloc;
74912+ polstate = &new_reload_state.newpolicy;
74913+
74914+ /* everything relevant is now saved off, copy in the new policy */
74915+ if (init_variables(args, true)) {
74916+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
74917+ err = -ENOMEM;
74918+ goto error;
74919+ }
74920+
74921+ err = copy_user_acl(args);
74922+ free_init_variables();
74923+ if (err)
74924+ goto error;
74925+ /* the new policy is copied in, with the old policy available via saved_state.
74926+ First go through applying roles, making sure to preserve special roles;
74927+ then apply new subjects, making sure to preserve inherited and nested subjects,
74928+ though currently only inherited subjects will be preserved
74929+ */
74930+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
74931+ if (err)
74932+ goto error;
74933+
74934+ /* we've now applied the new policy, so restore the old policy state to free it */
74935+ polstate = &new_reload_state.oldpolicy;
74936+ current_alloc_state = &new_reload_state.oldalloc;
74937+ free_variables(true);
74938+
74939+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
74940+ to running_polstate/current_alloc_state inside stop_machine
74941+ */
74942+ err = 0;
74943+ goto out;
74944+error:
74945+ /* on error of loading the new policy, we'll just keep the previous
74946+ policy set around
74947+ */
74948+ free_variables(true);
74949+
74950+ /* doesn't affect runtime, but maintains consistent state */
74951+out:
74952+ polstate = new_reload_state.oldpolicy_ptr;
74953+ current_alloc_state = new_reload_state.oldalloc_ptr;
74954+
74955+ return err;
74956+}
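
gracl_reload() is essentially a pointer-swap transaction: polstate and current_alloc_state are redirected at fresh state while the old state stays parked in the reload struct; on success the swap is published inside stop_machine() and the old state is freed, on failure the new state is freed and the old pointers restored, so the running policy is never left half-replaced. A minimal sketch of that swap-then-free discipline follows; the state type and names are generic, and the kernel's atomicity comes from stop_machine(), not from the plain assignment used here.

#include <stdio.h>
#include <stdlib.h>

struct state { int version; };

static struct state *live;		/* plays the role of polstate */

/* Build a replacement off to the side, publish it with one pointer
 * swap, then free the old state only after the swap. */
static int reload(int new_version)
{
	struct state *fresh = malloc(sizeof(*fresh));
	struct state *old;

	if (fresh == NULL)
		return -1;		/* failure: 'live' was never touched */
	fresh->version = new_version;

	old = live;			/* the only moment of change */
	live = fresh;
	free(old);
	return 0;
}

int main(void)
{
	live = calloc(1, sizeof(*live));
	reload(2);
	printf("live version: %d\n", live->version);	/* prints: live version: 2 */
	free(live);
	return 0;
}
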
74957+
74958+static int
74959+gracl_init(struct gr_arg *args)
74960+{
74961+ int error = 0;
74962+
74963+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
74964+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
74965+
74966+ if (init_variables(args, false)) {
74967+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
74968+ error = -ENOMEM;
74969+ goto out;
74970+ }
74971+
74972+ error = copy_user_acl(args);
74973+ free_init_variables();
74974+ if (error)
74975+ goto out;
74976+
74977+ error = gr_set_acls(0);
74978+ if (error)
74979+ goto out;
74980+
74981+ gr_enable_rbac_system();
74982+
74983+ return 0;
74984+
74985+out:
74986+ free_variables(false);
74987+ return error;
74988+}
74989+
74990+static int
74991+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
74992+ unsigned char **sum)
74993+{
74994+ struct acl_role_label *r;
74995+ struct role_allowed_ip *ipp;
74996+ struct role_transition *trans;
74997+ unsigned int i;
74998+ int found = 0;
74999+ u32 curr_ip = current->signal->curr_ip;
75000+
75001+ current->signal->saved_ip = curr_ip;
75002+
75003+ /* check transition table */
75004+
75005+ for (trans = current->role->transitions; trans; trans = trans->next) {
75006+ if (!strcmp(rolename, trans->rolename)) {
75007+ found = 1;
75008+ break;
75009+ }
75010+ }
75011+
75012+ if (!found)
75013+ return 0;
75014+
75015+ /* handle special roles that do not require authentication,
75016+ and check the requesting ip against the role's allowed ip list */
75017+
75018+ FOR_EACH_ROLE_START(r)
75019+ if (!strcmp(rolename, r->rolename) &&
75020+ (r->roletype & GR_ROLE_SPECIAL)) {
75021+ found = 0;
75022+ if (r->allowed_ips != NULL) {
75023+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
75024+ if ((ntohl(curr_ip) & ipp->netmask) ==
75025+ (ntohl(ipp->addr) & ipp->netmask))
75026+ found = 1;
75027+ }
75028+ } else
75029+ found = 2;
75030+ if (!found)
75031+ return 0;
75032+
75033+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
75034+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
75035+ *salt = NULL;
75036+ *sum = NULL;
75037+ return 1;
75038+ }
75039+ }
75040+ FOR_EACH_ROLE_END(r)
75041+
75042+ for (i = 0; i < polstate->num_sprole_pws; i++) {
75043+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
75044+ *salt = polstate->acl_special_roles[i]->salt;
75045+ *sum = polstate->acl_special_roles[i]->sum;
75046+ return 1;
75047+ }
75048+ }
75049+
75050+ return 0;
75051+}
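
The per-role IP test masks both the client address and the configured address before comparing, so one allowed_ips entry can admit an entire subnet; found is set to 2 when the role has no IP list at all, meaning access is allowed without an address match. A userspace sketch of the same masked comparison; the addresses are illustrative.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Match curr_ip against addr/netmask the way lookup_special_role_auth()
 * does: compare the masked host-order addresses. */
static int ip_allowed(uint32_t curr_ip, uint32_t addr, uint32_t netmask)
{
	return (ntohl(curr_ip) & netmask) == (ntohl(addr) & netmask);
}

int main(void)
{
	uint32_t client = inet_addr("192.168.1.77");
	uint32_t allowed = inet_addr("192.168.1.0");
	uint32_t mask = 0xffffff00;	/* a /24, in host byte order as in the policy */

	printf("%d\n", ip_allowed(client, allowed, mask));	/* prints: 1 */
	return 0;
}
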
75052+
75053+int gr_check_secure_terminal(struct task_struct *task)
75054+{
75055+ struct task_struct *p, *p2, *p3;
75056+ struct files_struct *files;
75057+ struct fdtable *fdt;
75058+ struct file *our_file = NULL, *file;
75059+ int i;
75060+
75061+ if (task->signal->tty == NULL)
75062+ return 1;
75063+
75064+ files = get_files_struct(task);
75065+ if (files != NULL) {
75066+ rcu_read_lock();
75067+ fdt = files_fdtable(files);
75068+ for (i = 0; i < fdt->max_fds; i++) {
75069+ file = fcheck_files(files, i);
75070+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
75071+ get_file(file);
75072+ our_file = file;
75073+ }
75074+ }
75075+ rcu_read_unlock();
75076+ put_files_struct(files);
75077+ }
75078+
75079+ if (our_file == NULL)
75080+ return 1;
75081+
75082+ read_lock(&tasklist_lock);
75083+ do_each_thread(p2, p) {
75084+ files = get_files_struct(p);
75085+ if (files == NULL ||
75086+ (p->signal && p->signal->tty == task->signal->tty)) {
75087+ if (files != NULL)
75088+ put_files_struct(files);
75089+ continue;
75090+ }
75091+ rcu_read_lock();
75092+ fdt = files_fdtable(files);
75093+ for (i = 0; i < fdt->max_fds; i++) {
75094+ file = fcheck_files(files, i);
75095+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
75096+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
75097+ p3 = task;
75098+ while (task_pid_nr(p3) > 0) {
75099+ if (p3 == p)
75100+ break;
75101+ p3 = p3->real_parent;
75102+ }
75103+ if (p3 == p)
75104+ break;
75105+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
75106+ gr_handle_alertkill(p);
75107+ rcu_read_unlock();
75108+ put_files_struct(files);
75109+ read_unlock(&tasklist_lock);
75110+ fput(our_file);
75111+ return 0;
75112+ }
75113+ }
75114+ rcu_read_unlock();
75115+ put_files_struct(files);
75116+ } while_each_thread(p2, p);
75117+ read_unlock(&tasklist_lock);
75118+
75119+ fput(our_file);
75120+ return 1;
75121+}
75122+
75123+ssize_t
75124+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
75125+{
75126+ struct gr_arg_wrapper uwrap;
75127+ unsigned char *sprole_salt = NULL;
75128+ unsigned char *sprole_sum = NULL;
75129+ int error = 0;
75130+ int error2 = 0;
75131+ size_t req_count = 0;
75132+ unsigned char oldmode = 0;
75133+
75134+ mutex_lock(&gr_dev_mutex);
75135+
75136+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
75137+ error = -EPERM;
75138+ goto out;
75139+ }
75140+
75141+#ifdef CONFIG_COMPAT
75142+ pax_open_kernel();
75143+ if (is_compat_task()) {
75144+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
75145+ copy_gr_arg = &copy_gr_arg_compat;
75146+ copy_acl_object_label = &copy_acl_object_label_compat;
75147+ copy_acl_subject_label = &copy_acl_subject_label_compat;
75148+ copy_acl_role_label = &copy_acl_role_label_compat;
75149+ copy_acl_ip_label = &copy_acl_ip_label_compat;
75150+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
75151+ copy_role_transition = &copy_role_transition_compat;
75152+ copy_sprole_pw = &copy_sprole_pw_compat;
75153+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
75154+ copy_pointer_from_array = &copy_pointer_from_array_compat;
75155+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
75156+ } else {
75157+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
75158+ copy_gr_arg = &copy_gr_arg_normal;
75159+ copy_acl_object_label = &copy_acl_object_label_normal;
75160+ copy_acl_subject_label = &copy_acl_subject_label_normal;
75161+ copy_acl_role_label = &copy_acl_role_label_normal;
75162+ copy_acl_ip_label = &copy_acl_ip_label_normal;
75163+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
75164+ copy_role_transition = &copy_role_transition_normal;
75165+ copy_sprole_pw = &copy_sprole_pw_normal;
75166+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
75167+ copy_pointer_from_array = &copy_pointer_from_array_normal;
75168+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
75169+ }
75170+ pax_close_kernel();
75171+#endif
75172+
75173+ req_count = get_gr_arg_wrapper_size();
75174+
75175+ if (count != req_count) {
75176+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
75177+ error = -EINVAL;
75178+ goto out;
75179+ }
75180+
75182+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
75183+ gr_auth_expires = 0;
75184+ gr_auth_attempts = 0;
75185+ }
75186+
75187+ error = copy_gr_arg_wrapper(buf, &uwrap);
75188+ if (error)
75189+ goto out;
75190+
75191+ error = copy_gr_arg(uwrap.arg, gr_usermode);
75192+ if (error)
75193+ goto out;
75194+
75195+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75196+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75197+ time_after(gr_auth_expires, get_seconds())) {
75198+ error = -EBUSY;
75199+ goto out;
75200+ }
75201+
75202+ /* if a non-root user is trying to do anything other than use a special role,
75203+ do not attempt authentication and do not count the attempt towards
75204+ authentication lockout
75205+ */
75206+
75207+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
75208+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75209+ gr_is_global_nonroot(current_uid())) {
75210+ error = -EPERM;
75211+ goto out;
75212+ }
75213+
75214+ /* ensure pw and special role name are null terminated */
75215+
75216+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
75217+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
75218+
75219+ /* Okay.
75220+ * We now have enough of the argument structure (we have yet
75221+ * to copy_from_user the tables themselves). Copy the tables
75222+ * only if we need them, i.e. for loading operations. */
75223+
75224+ switch (gr_usermode->mode) {
75225+ case GR_STATUS:
75226+ if (gr_acl_is_enabled()) {
75227+ error = 1;
75228+ if (!gr_check_secure_terminal(current))
75229+ error = 3;
75230+ } else
75231+ error = 2;
75232+ goto out;
75233+ case GR_SHUTDOWN:
75234+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75235+ stop_machine(gr_rbac_disable, NULL, NULL);
75236+ free_variables(false);
75237+ memset(gr_usermode, 0, sizeof(struct gr_arg));
75238+ memset(gr_system_salt, 0, GR_SALT_LEN);
75239+ memset(gr_system_sum, 0, GR_SHA_LEN);
75240+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
75241+ } else if (gr_acl_is_enabled()) {
75242+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
75243+ error = -EPERM;
75244+ } else {
75245+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
75246+ error = -EAGAIN;
75247+ }
75248+ break;
75249+ case GR_ENABLE:
75250+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
75251+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
75252+ else {
75253+ if (gr_acl_is_enabled())
75254+ error = -EAGAIN;
75255+ else
75256+ error = error2;
75257+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
75258+ }
75259+ break;
75260+ case GR_OLDRELOAD:
75261+ oldmode = 1; /* fall through to GR_RELOAD */
75262+ case GR_RELOAD:
75263+ if (!gr_acl_is_enabled()) {
75264+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
75265+ error = -EAGAIN;
75266+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75267+ error2 = gracl_reload(gr_usermode, oldmode);
75268+ if (!error2)
75269+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
75270+ else {
75271+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75272+ error = error2;
75273+ }
75274+ } else {
75275+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75276+ error = -EPERM;
75277+ }
75278+ break;
75279+ case GR_SEGVMOD:
75280+ if (unlikely(!gr_acl_is_enabled())) {
75281+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
75282+ error = -EAGAIN;
75283+ break;
75284+ }
75285+
75286+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75287+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
75288+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
75289+ struct acl_subject_label *segvacl;
75290+ segvacl =
75291+ lookup_acl_subj_label(gr_usermode->segv_inode,
75292+ gr_usermode->segv_device,
75293+ current->role);
75294+ if (segvacl) {
75295+ segvacl->crashes = 0;
75296+ segvacl->expires = 0;
75297+ }
75298+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
75299+ gr_remove_uid(gr_usermode->segv_uid);
75300+ }
75301+ } else {
75302+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
75303+ error = -EPERM;
75304+ }
75305+ break;
75306+ case GR_SPROLE:
75307+ case GR_SPROLEPAM:
75308+ if (unlikely(!gr_acl_is_enabled())) {
75309+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
75310+ error = -EAGAIN;
75311+ break;
75312+ }
75313+
75314+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
75315+ current->role->expires = 0;
75316+ current->role->auth_attempts = 0;
75317+ }
75318+
75319+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75320+ time_after(current->role->expires, get_seconds())) {
75321+ error = -EBUSY;
75322+ goto out;
75323+ }
75324+
75325+ if (lookup_special_role_auth
75326+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
75327+ && ((!sprole_salt && !sprole_sum)
75328+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
75329+ char *p = "";
75330+ assign_special_role(gr_usermode->sp_role);
75331+ read_lock(&tasklist_lock);
75332+ if (current->real_parent)
75333+ p = current->real_parent->role->rolename;
75334+ read_unlock(&tasklist_lock);
75335+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
75336+ p, acl_sp_role_value);
75337+ } else {
75338+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
75339+ error = -EPERM;
75340+ if (!(current->role->auth_attempts++))
75341+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75342+
75343+ goto out;
75344+ }
75345+ break;
75346+ case GR_UNSPROLE:
75347+ if (unlikely(!gr_acl_is_enabled())) {
75348+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
75349+ error = -EAGAIN;
75350+ break;
75351+ }
75352+
75353+ if (current->role->roletype & GR_ROLE_SPECIAL) {
75354+ char *p = "";
75355+ int i = 0;
75356+
75357+ read_lock(&tasklist_lock);
75358+ if (current->real_parent) {
75359+ p = current->real_parent->role->rolename;
75360+ i = current->real_parent->acl_role_id;
75361+ }
75362+ read_unlock(&tasklist_lock);
75363+
75364+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
75365+ gr_set_acls(1);
75366+ } else {
75367+ error = -EPERM;
75368+ goto out;
75369+ }
75370+ break;
75371+ default:
75372+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
75373+ error = -EINVAL;
75374+ break;
75375+ }
75376+
75377+ if (error != -EPERM)
75378+ goto out;
75379+
75380+ if (!(gr_auth_attempts++))
75381+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75382+
75383+ out:
75384+ mutex_unlock(&gr_dev_mutex);
75385+
75386+ if (!error)
75387+ error = req_count;
75388+
75389+ return error;
75390+}
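
The lockout bookkeeping at the bottom of write_grsec_handler() is shared by every -EPERM path: the post-increment is zero exactly once per window, so only the first failure arms the expiry timer, and once attempts reach CONFIG_GRKERNSEC_ACL_MAXTRIES further requests fail with -EBUSY until the window lapses. A compact sketch of that counter-plus-deadline pattern; the MAXTRIES and TIMEOUT values are illustrative, not the kernel defaults.

#include <stdio.h>
#include <time.h>

#define MAXTRIES 3	/* stands in for CONFIG_GRKERNSEC_ACL_MAXTRIES */
#define TIMEOUT  30	/* seconds; stands in for CONFIG_GRKERNSEC_ACL_TIMEOUT */

static unsigned int attempts;
static time_t expires;

/* Returns 0 if an attempt may proceed, -1 if locked out. */
static int auth_gate(time_t now)
{
	if (expires && now >= expires) {	/* window lapsed: reset */
		expires = 0;
		attempts = 0;
	}
	if (attempts >= MAXTRIES && now < expires)
		return -1;			/* -EBUSY in the kernel */
	return 0;
}

static void auth_failed(time_t now)
{
	if (!(attempts++))			/* first failure arms the timer */
		expires = now + TIMEOUT;
}

int main(void)
{
	time_t now = time(NULL);

	for (int i = 0; i < 5; i++)
		if (auth_gate(now) == 0)
			auth_failed(now);
	printf("locked out: %s\n", auth_gate(now) ? "yes" : "no");	/* yes */
	return 0;
}
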
75391+
75392+int
75393+gr_set_acls(const int type)
75394+{
75395+ struct task_struct *task, *task2;
75396+ struct acl_role_label *role = current->role;
75397+ struct acl_subject_label *subj;
75398+ __u16 acl_role_id = current->acl_role_id;
75399+ const struct cred *cred;
75400+ int ret;
75401+
75402+ rcu_read_lock();
75403+ read_lock(&tasklist_lock);
75404+ read_lock(&grsec_exec_file_lock);
75405+ do_each_thread(task2, task) {
75406+ /* check to see if we're called from the exit handler,
75407+ if so, only replace ACLs that have inherited the admin
75408+ ACL */
75409+
75410+ if (type && (task->role != role ||
75411+ task->acl_role_id != acl_role_id))
75412+ continue;
75413+
75414+ task->acl_role_id = 0;
75415+ task->acl_sp_role = 0;
75416+ task->inherited = 0;
75417+
75418+ if (task->exec_file) {
75419+ cred = __task_cred(task);
75420+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75421+ subj = __gr_get_subject_for_task(polstate, task, NULL, 1);
75422+ if (subj == NULL) {
75423+ ret = -EINVAL;
75424+ read_unlock(&grsec_exec_file_lock);
75425+ read_unlock(&tasklist_lock);
75426+ rcu_read_unlock();
75427+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
75428+ return ret;
75429+ }
75430+ __gr_apply_subject_to_task(polstate, task, subj);
75431+ } else {
75432+ // it's a kernel process
75433+ task->role = polstate->kernel_role;
75434+ task->acl = polstate->kernel_role->root_label;
75435+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
75436+ task->acl->mode &= ~GR_PROCFIND;
75437+#endif
75438+ }
75439+ } while_each_thread(task2, task);
75440+ read_unlock(&grsec_exec_file_lock);
75441+ read_unlock(&tasklist_lock);
75442+ rcu_read_unlock();
75443+
75444+ return 0;
75445+}
75446diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
75447new file mode 100644
75448index 0000000..39645c9
75449--- /dev/null
75450+++ b/grsecurity/gracl_res.c
75451@@ -0,0 +1,68 @@
75452+#include <linux/kernel.h>
75453+#include <linux/sched.h>
75454+#include <linux/gracl.h>
75455+#include <linux/grinternal.h>
75456+
75457+static const char *restab_log[] = {
75458+ [RLIMIT_CPU] = "RLIMIT_CPU",
75459+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
75460+ [RLIMIT_DATA] = "RLIMIT_DATA",
75461+ [RLIMIT_STACK] = "RLIMIT_STACK",
75462+ [RLIMIT_CORE] = "RLIMIT_CORE",
75463+ [RLIMIT_RSS] = "RLIMIT_RSS",
75464+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
75465+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
75466+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
75467+ [RLIMIT_AS] = "RLIMIT_AS",
75468+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
75469+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
75470+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
75471+ [RLIMIT_NICE] = "RLIMIT_NICE",
75472+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
75473+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
75474+ [GR_CRASH_RES] = "RLIMIT_CRASH"
75475+};
75476+
75477+void
75478+gr_log_resource(const struct task_struct *task,
75479+ const int res, const unsigned long wanted, const int gt)
75480+{
75481+ const struct cred *cred;
75482+ unsigned long rlim;
75483+
75484+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
75485+ return;
75486+
75487+ // resource not yet supported (no restab_log entry)
75488+ if (unlikely(!restab_log[res]))
75489+ return;
75490+
75491+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
75492+ rlim = task_rlimit_max(task, res);
75493+ else
75494+ rlim = task_rlimit(task, res);
75495+
75496+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
75497+ return;
75498+
75499+ rcu_read_lock();
75500+ cred = __task_cred(task);
75501+
75502+ if (res == RLIMIT_NPROC &&
75503+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
75504+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
75505+ goto out_rcu_unlock;
75506+ else if (res == RLIMIT_MEMLOCK &&
75507+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
75508+ goto out_rcu_unlock;
75509+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
75510+ goto out_rcu_unlock;
75511+ rcu_read_unlock();
75512+
75513+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
75514+
75515+ return;
75516+out_rcu_unlock:
75517+ rcu_read_unlock();
75518+ return;
75519+}
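
The gt flag passed to gr_log_resource() encodes which comparison the call site already performed: with gt set, only a request strictly above the limit is a violation; without it, reaching the limit already counts. A sketch of that predicate in isolation; RLIM_INFINITY is redefined locally to stand in for the kernel's unlimited marker.

#include <stdio.h>

#define RLIM_INFINITY (~0UL)	/* local stand-in for the kernel constant */

/* Mirror gr_log_resource()'s early return: nonzero means "within
 * limits, nothing to log". */
static int within_limit(unsigned long rlim, unsigned long wanted, int gt)
{
	return rlim == RLIM_INFINITY ||
	       (gt && wanted <= rlim) ||
	       (!gt && wanted < rlim);
}

int main(void)
{
	printf("%d\n", within_limit(100, 100, 1));	/* 1: == allowed when gt */
	printf("%d\n", within_limit(100, 100, 0));	/* 0: == already violates */
	printf("%d\n", within_limit(RLIM_INFINITY, 1 << 30, 0));	/* 1 */
	return 0;
}
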
75520diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
75521new file mode 100644
75522index 0000000..218b66b
75523--- /dev/null
75524+++ b/grsecurity/gracl_segv.c
75525@@ -0,0 +1,324 @@
75526+#include <linux/kernel.h>
75527+#include <linux/mm.h>
75528+#include <asm/uaccess.h>
75529+#include <asm/errno.h>
75530+#include <asm/mman.h>
75531+#include <net/sock.h>
75532+#include <linux/file.h>
75533+#include <linux/fs.h>
75534+#include <linux/net.h>
75535+#include <linux/in.h>
75536+#include <linux/slab.h>
75537+#include <linux/types.h>
75538+#include <linux/sched.h>
75539+#include <linux/timer.h>
75540+#include <linux/gracl.h>
75541+#include <linux/grsecurity.h>
75542+#include <linux/grinternal.h>
75543+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75544+#include <linux/magic.h>
75545+#include <linux/pagemap.h>
75546+#include "../fs/btrfs/async-thread.h"
75547+#include "../fs/btrfs/ctree.h"
75548+#include "../fs/btrfs/btrfs_inode.h"
75549+#endif
75550+
75551+static struct crash_uid *uid_set;
75552+static unsigned short uid_used;
75553+static DEFINE_SPINLOCK(gr_uid_lock);
75554+extern rwlock_t gr_inode_lock;
75555+extern struct acl_subject_label *
75556+ lookup_acl_subj_label(const u64 inode, const dev_t dev,
75557+ struct acl_role_label *role);
75558+
75559+static inline dev_t __get_dev(const struct dentry *dentry)
75560+{
75561+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75562+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
75563+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
75564+ else
75565+#endif
75566+ return dentry->d_sb->s_dev;
75567+}
75568+
75569+static inline u64 __get_ino(const struct dentry *dentry)
75570+{
75571+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75572+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
75573+ return btrfs_ino(dentry->d_inode);
75574+ else
75575+#endif
75576+ return dentry->d_inode->i_ino;
75577+}
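
The two helpers above normalize the (dev, ino) pair used as the ACL lookup key, with a btrfs special case: subvolumes share one superblock device but expose a per-subvolume anonymous device and their own inode numbering. As a rough userspace analogue, stat() reports the same kind of identifier pair; a small probe (illustrative only) prints it:

    #include <stdio.h>
    #include <sys/stat.h>

    /* Userspace view of the (dev, ino) pair the helpers above derive:
     * stat() reports the identifiers that subject lookup keys on.
     * Pass any path as argv[1]; defaults to "/". */
    int main(int argc, char **argv)
    {
        struct stat st;
        const char *p = argc > 1 ? argv[1] : "/";

        if (stat(p, &st) != 0) {
            perror("stat");
            return 1;
        }
        printf("%s: dev=%lu ino=%lu\n", p,
               (unsigned long)st.st_dev, (unsigned long)st.st_ino);
        return 0;
    }
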
75578+
75579+int
75580+gr_init_uidset(void)
75581+{
75582+ uid_set =
75583+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
75584+ uid_used = 0;
75585+
75586+ return uid_set ? 1 : 0;
75587+}
75588+
75589+void
75590+gr_free_uidset(void)
75591+{
75592+ if (uid_set) {
75593+ struct crash_uid *tmpset;
75594+ spin_lock(&gr_uid_lock);
75595+ tmpset = uid_set;
75596+ uid_set = NULL;
75597+ uid_used = 0;
75598+ spin_unlock(&gr_uid_lock);
75599+ if (tmpset)
75600+ kfree(tmpset);
75601+ }
75602+
75603+ return;
75604+}
75605+
75606+int
75607+gr_find_uid(const uid_t uid)
75608+{
75609+ struct crash_uid *tmp = uid_set;
75610+ uid_t buid;
75611+ int low = 0, high = uid_used - 1, mid;
75612+
75613+ while (high >= low) {
75614+ mid = (low + high) >> 1;
75615+ buid = tmp[mid].uid;
75616+ if (buid == uid)
75617+ return mid;
75618+ if (buid > uid)
75619+ high = mid - 1;
75620+ if (buid < uid)
75621+ low = mid + 1;
75622+ }
75623+
75624+ return -1;
75625+}
75626+
75627+static __inline__ void
75628+gr_insertsort(void)
75629+{
75630+ unsigned short i, j;
75631+ struct crash_uid index;
75632+
75633+ for (i = 1; i < uid_used; i++) {
75634+ index = uid_set[i];
75635+ j = i;
75636+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
75637+ uid_set[j] = uid_set[j - 1];
75638+ j--;
75639+ }
75640+ uid_set[j] = index;
75641+ }
75642+
75643+ return;
75644+}
75645+
75646+static __inline__ void
75647+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
75648+{
75649+ int loc;
75650+ uid_t uid = GR_GLOBAL_UID(kuid);
75651+
75652+ if (uid_used == GR_UIDTABLE_MAX)
75653+ return;
75654+
75655+ loc = gr_find_uid(uid);
75656+
75657+ if (loc >= 0) {
75658+ uid_set[loc].expires = expires;
75659+ return;
75660+ }
75661+
75662+ uid_set[uid_used].uid = uid;
75663+ uid_set[uid_used].expires = expires;
75664+ uid_used++;
75665+
75666+ gr_insertsort();
75667+
75668+ return;
75669+}
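
The table maintenance above pairs a binary search for lookup with an append plus insertion-sort pass to keep the entries ordered by uid, so duplicates are refreshed in place. A self-contained userspace mirror (hypothetical fixed-size table, simplified types) demonstrates the same pattern:

    #include <stdio.h>

    /* Userspace mirror of the crash-uid bookkeeping above: binary
     * search for lookup, append followed by an insertion-sort pass
     * to keep the table ordered by uid. */
    struct entry { unsigned uid; unsigned long expires; };

    static struct entry set[8];
    static unsigned short used;

    static int find_uid(unsigned uid)
    {
        int low = 0, high = (int)used - 1;

        while (high >= low) {
            int mid = (low + high) >> 1;
            if (set[mid].uid == uid)
                return mid;
            if (set[mid].uid > uid)
                high = mid - 1;
            else
                low = mid + 1;
        }
        return -1;
    }

    static void insert_uid(unsigned uid, unsigned long expires)
    {
        int loc = find_uid(uid);
        unsigned short j;

        if (loc >= 0) {            /* already present: refresh the expiry */
            set[loc].expires = expires;
            return;
        }
        if (used == 8)             /* table full, like GR_UIDTABLE_MAX */
            return;
        set[used].uid = uid;
        set[used].expires = expires;
        used++;
        for (j = used - 1; j > 0 && set[j - 1].uid > set[j].uid; j--) {
            struct entry t = set[j];
            set[j] = set[j - 1];
            set[j - 1] = t;
        }
    }

    int main(void)
    {
        insert_uid(1000, 60);
        insert_uid(500, 30);
        insert_uid(1000, 90);      /* duplicate: updates expiry in place */
        printf("used=%u first=%u expires=%lu\n", (unsigned)used,
               set[0].uid, set[find_uid(1000)].expires);
        return 0;                  /* prints: used=2 first=500 expires=90 */
    }
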
75670+
75671+void
75672+gr_remove_uid(const unsigned short loc)
75673+{
75674+ unsigned short i;
75675+
75676+ for (i = loc + 1; i < uid_used; i++)
75677+ uid_set[i - 1] = uid_set[i];
75678+
75679+ uid_used--;
75680+
75681+ return;
75682+}
75683+
75684+int
75685+gr_check_crash_uid(const kuid_t kuid)
75686+{
75687+ int loc;
75688+ int ret = 0;
75689+ uid_t uid;
75690+
75691+ if (unlikely(!gr_acl_is_enabled()))
75692+ return 0;
75693+
75694+ uid = GR_GLOBAL_UID(kuid);
75695+
75696+ spin_lock(&gr_uid_lock);
75697+ loc = gr_find_uid(uid);
75698+
75699+ if (loc < 0)
75700+ goto out_unlock;
75701+
75702+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
75703+ gr_remove_uid(loc);
75704+ else
75705+ ret = 1;
75706+
75707+out_unlock:
75708+ spin_unlock(&gr_uid_lock);
75709+ return ret;
75710+}
75711+
75712+static __inline__ int
75713+proc_is_setxid(const struct cred *cred)
75714+{
75715+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
75716+ !uid_eq(cred->uid, cred->fsuid))
75717+ return 1;
75718+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
75719+ !gid_eq(cred->gid, cred->fsgid))
75720+ return 1;
75721+
75722+ return 0;
75723+}
75724+
75725+extern int gr_fake_force_sig(int sig, struct task_struct *t);
75726+
75727+void
75728+gr_handle_crash(struct task_struct *task, const int sig)
75729+{
75730+ struct acl_subject_label *curr;
75731+ struct task_struct *tsk, *tsk2;
75732+ const struct cred *cred;
75733+ const struct cred *cred2;
75734+
75735+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
75736+ return;
75737+
75738+ if (unlikely(!gr_acl_is_enabled()))
75739+ return;
75740+
75741+ curr = task->acl;
75742+
75743+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
75744+ return;
75745+
75746+ if (time_before_eq(curr->expires, get_seconds())) {
75747+ curr->expires = 0;
75748+ curr->crashes = 0;
75749+ }
75750+
75751+ curr->crashes++;
75752+
75753+ if (!curr->expires)
75754+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
75755+
75756+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
75757+ time_after(curr->expires, get_seconds())) {
75758+ rcu_read_lock();
75759+ cred = __task_cred(task);
75760+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
75761+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
75762+ spin_lock(&gr_uid_lock);
75763+ gr_insert_uid(cred->uid, curr->expires);
75764+ spin_unlock(&gr_uid_lock);
75765+ curr->expires = 0;
75766+ curr->crashes = 0;
75767+ read_lock(&tasklist_lock);
75768+ do_each_thread(tsk2, tsk) {
75769+ cred2 = __task_cred(tsk);
75770+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
75771+ gr_fake_force_sig(SIGKILL, tsk);
75772+ } while_each_thread(tsk2, tsk);
75773+ read_unlock(&tasklist_lock);
75774+ } else {
75775+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
75776+ read_lock(&tasklist_lock);
75777+ read_lock(&grsec_exec_file_lock);
75778+ do_each_thread(tsk2, tsk) {
75779+ if (likely(tsk != task)) {
75780+ // if this thread has the same subject as the one that triggered
75781+ // RES_CRASH and it's the same binary, kill it
75782+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
75783+ gr_fake_force_sig(SIGKILL, tsk);
75784+ }
75785+ } while_each_thread(tsk2, tsk);
75786+ read_unlock(&grsec_exec_file_lock);
75787+ read_unlock(&tasklist_lock);
75788+ }
75789+ rcu_read_unlock();
75790+ }
75791+
75792+ return;
75793+}
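
The accounting above reads as a sliding penalty window: the first qualifying crash opens a window of rlim_max seconds, and once rlim_cur crashes accumulate inside it the kill logic fires. A standalone model of that state machine, with a hypothetical limit of 3 crashes per 30 seconds, traces it:

    #include <stdio.h>

    /* Sketch of the RES_CRASH accounting above (hypothetical values:
     * at most 3 crashes per 30-second window before tripping). */
    int main(void)
    {
        unsigned long expires = 0;
        unsigned crashes = 0;
        const unsigned limit = 3;          /* res[GR_CRASH_RES].rlim_cur */
        const unsigned long window = 30;   /* res[GR_CRASH_RES].rlim_max */
        unsigned long times[] = { 1000, 1010, 1020, 1060 };

        for (int i = 0; i < 4; i++) {
            unsigned long now = times[i];

            if (expires && expires <= now) {   /* window lapsed: reset */
                expires = 0;
                crashes = 0;
            }
            crashes++;
            if (!expires)
                expires = now + window;
            printf("t=%lu crashes=%u trip=%d\n", now, crashes,
                   crashes >= limit && expires > now);
        }
        return 0; /* trips only at t=1020; t=1060 starts a fresh window */
    }
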
75794+
75795+int
75796+gr_check_crash_exec(const struct file *filp)
75797+{
75798+ struct acl_subject_label *curr;
75799+ struct dentry *dentry;
75800+
75801+ if (unlikely(!gr_acl_is_enabled()))
75802+ return 0;
75803+
75804+ read_lock(&gr_inode_lock);
75805+ dentry = filp->f_path.dentry;
75806+ curr = lookup_acl_subj_label(__get_ino(dentry), __get_dev(dentry),
75807+ current->role);
75808+ read_unlock(&gr_inode_lock);
75809+
75810+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
75811+ (!curr->crashes && !curr->expires))
75812+ return 0;
75813+
75814+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
75815+ time_after(curr->expires, get_seconds()))
75816+ return 1;
75817+ else if (time_before_eq(curr->expires, get_seconds())) {
75818+ curr->crashes = 0;
75819+ curr->expires = 0;
75820+ }
75821+
75822+ return 0;
75823+}
75824+
75825+void
75826+gr_handle_alertkill(struct task_struct *task)
75827+{
75828+ struct acl_subject_label *curracl;
75829+ __u32 curr_ip;
75830+ struct task_struct *p, *p2;
75831+
75832+ if (unlikely(!gr_acl_is_enabled()))
75833+ return;
75834+
75835+ curracl = task->acl;
75836+ curr_ip = task->signal->curr_ip;
75837+
75838+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
75839+ read_lock(&tasklist_lock);
75840+ do_each_thread(p2, p) {
75841+ if (p->signal->curr_ip == curr_ip)
75842+ gr_fake_force_sig(SIGKILL, p);
75843+ } while_each_thread(p2, p);
75844+ read_unlock(&tasklist_lock);
75845+ } else if (curracl->mode & GR_KILLPROC)
75846+ gr_fake_force_sig(SIGKILL, task);
75847+
75848+ return;
75849+}
75850diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
75851new file mode 100644
75852index 0000000..6b0c9cc
75853--- /dev/null
75854+++ b/grsecurity/gracl_shm.c
75855@@ -0,0 +1,40 @@
75856+#include <linux/kernel.h>
75857+#include <linux/mm.h>
75858+#include <linux/sched.h>
75859+#include <linux/file.h>
75860+#include <linux/ipc.h>
75861+#include <linux/gracl.h>
75862+#include <linux/grsecurity.h>
75863+#include <linux/grinternal.h>
75864+
75865+int
75866+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
75867+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
75868+{
75869+ struct task_struct *task;
75870+
75871+ if (!gr_acl_is_enabled())
75872+ return 1;
75873+
75874+ rcu_read_lock();
75875+ read_lock(&tasklist_lock);
75876+
75877+ task = find_task_by_vpid(shm_cprid);
75878+
75879+ if (unlikely(!task))
75880+ task = find_task_by_vpid(shm_lapid);
75881+
75882+ if (unlikely(task && (time_before_eq64(task->start_time, shm_createtime) ||
75883+ (task_pid_nr(task) == shm_lapid)) &&
75884+ (task->acl->mode & GR_PROTSHM) &&
75885+ (task->acl != current->acl))) {
75886+ read_unlock(&tasklist_lock);
75887+ rcu_read_unlock();
75888+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
75889+ return 0;
75890+ }
75891+ read_unlock(&tasklist_lock);
75892+ rcu_read_unlock();
75893+
75894+ return 1;
75895+}
75896diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
75897new file mode 100644
75898index 0000000..bc0be01
75899--- /dev/null
75900+++ b/grsecurity/grsec_chdir.c
75901@@ -0,0 +1,19 @@
75902+#include <linux/kernel.h>
75903+#include <linux/sched.h>
75904+#include <linux/fs.h>
75905+#include <linux/file.h>
75906+#include <linux/grsecurity.h>
75907+#include <linux/grinternal.h>
75908+
75909+void
75910+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
75911+{
75912+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
75913+ if ((grsec_enable_chdir && grsec_enable_group &&
75914+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
75915+ !grsec_enable_group)) {
75916+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
75917+ }
75918+#endif
75919+ return;
75920+}
75921diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
75922new file mode 100644
75923index 0000000..114ea4f
75924--- /dev/null
75925+++ b/grsecurity/grsec_chroot.c
75926@@ -0,0 +1,467 @@
75927+#include <linux/kernel.h>
75928+#include <linux/module.h>
75929+#include <linux/sched.h>
75930+#include <linux/file.h>
75931+#include <linux/fs.h>
75932+#include <linux/mount.h>
75933+#include <linux/types.h>
75934+#include "../fs/mount.h"
75935+#include <linux/grsecurity.h>
75936+#include <linux/grinternal.h>
75937+
75938+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
75939+int gr_init_ran;
75940+#endif
75941+
75942+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
75943+{
75944+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75945+ struct dentry *tmpd = dentry;
75946+
75947+ read_seqlock_excl(&mount_lock);
75948+ write_seqlock(&rename_lock);
75949+
75950+ while (tmpd != mnt->mnt_root) {
75951+ atomic_inc(&tmpd->chroot_refcnt);
75952+ tmpd = tmpd->d_parent;
75953+ }
75954+ atomic_inc(&tmpd->chroot_refcnt);
75955+
75956+ write_sequnlock(&rename_lock);
75957+ read_sequnlock_excl(&mount_lock);
75958+#endif
75959+}
75960+
75961+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt)
75962+{
75963+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75964+ struct dentry *tmpd = dentry;
75965+
75966+ read_seqlock_excl(&mount_lock);
75967+ write_seqlock(&rename_lock);
75968+
75969+ while (tmpd != mnt->mnt_root) {
75970+ atomic_dec(&tmpd->chroot_refcnt);
75971+ tmpd = tmpd->d_parent;
75972+ }
75973+ atomic_dec(&tmpd->chroot_refcnt);
75974+
75975+ write_sequnlock(&rename_lock);
75976+ read_sequnlock_excl(&mount_lock);
75977+#endif
75978+}
75979+
75980+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
75981+static struct dentry *get_closest_chroot(struct dentry *dentry)
75982+{
75983+ write_seqlock(&rename_lock);
75984+ do {
75985+ if (atomic_read(&dentry->chroot_refcnt)) {
75986+ write_sequnlock(&rename_lock);
75987+ return dentry;
75988+ }
75989+ dentry = dentry->d_parent;
75990+ } while (!IS_ROOT(dentry));
75991+ write_sequnlock(&rename_lock);
75992+ return NULL;
75993+}
75994+#endif
75995+
75996+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
75997+ struct dentry *newdentry, struct vfsmount *newmnt)
75998+{
75999+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
76000+ struct dentry *chroot;
76001+
76002+ if (unlikely(!grsec_enable_chroot_rename))
76003+ return 0;
76004+
76005+ if (likely(!proc_is_chrooted(current) && gr_is_global_root(current_uid())))
76006+ return 0;
76007+
76008+ chroot = get_closest_chroot(olddentry);
76009+
76010+ if (chroot == NULL)
76011+ return 0;
76012+
76013+ if (is_subdir(newdentry, chroot))
76014+ return 0;
76015+
76016+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_RENAME_MSG, olddentry, oldmnt);
76017+
76018+ return 1;
76019+#else
76020+ return 0;
76021+#endif
76022+}
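
The rule enforced above is that a rename must not move a directory serving as some task's chroot outside of that chroot, which is what the is_subdir() test on the closest chroot ancestor expresses. A simplified userspace illustration, with path strings standing in for dentries, shows the shape of the check:

    #include <stdio.h>
    #include <string.h>

    /* String-based stand-in for the dentry is_subdir() test above:
     * a child stays inside the root if the root is a whole-component
     * prefix of it. */
    static int is_subdir(const char *child, const char *root)
    {
        size_t n = strlen(root);

        return strncmp(child, root, n) == 0 &&
               (child[n] == '/' || child[n] == '\0');
    }

    int main(void)
    {
        const char *chroot = "/srv/jail";

        printf("within jail rejected: %d\n", !is_subdir("/srv/jail/app2", chroot)); /* 0 */
        printf("out of jail rejected: %d\n", !is_subdir("/srv/escape", chroot));    /* 1 */
        return 0;
    }
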
76023+
76024+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
76025+{
76026+#ifdef CONFIG_GRKERNSEC
76027+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
76028+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
76029+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76030+ && gr_init_ran
76031+#endif
76032+ )
76033+ task->gr_is_chrooted = 1;
76034+ else {
76035+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76036+ if (task_pid_nr(task) == 1 && !gr_init_ran)
76037+ gr_init_ran = 1;
76038+#endif
76039+ task->gr_is_chrooted = 0;
76040+ }
76041+
76042+ task->gr_chroot_dentry = path->dentry;
76043+#endif
76044+ return;
76045+}
76046+
76047+void gr_clear_chroot_entries(struct task_struct *task)
76048+{
76049+#ifdef CONFIG_GRKERNSEC
76050+ task->gr_is_chrooted = 0;
76051+ task->gr_chroot_dentry = NULL;
76052+#endif
76053+ return;
76054+}
76055+
76056+int
76057+gr_handle_chroot_unix(const pid_t pid)
76058+{
76059+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
76060+ struct task_struct *p;
76061+
76062+ if (unlikely(!grsec_enable_chroot_unix))
76063+ return 1;
76064+
76065+ if (likely(!proc_is_chrooted(current)))
76066+ return 1;
76067+
76068+ rcu_read_lock();
76069+ read_lock(&tasklist_lock);
76070+ p = find_task_by_vpid_unrestricted(pid);
76071+ if (unlikely(p && !have_same_root(current, p))) {
76072+ read_unlock(&tasklist_lock);
76073+ rcu_read_unlock();
76074+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
76075+ return 0;
76076+ }
76077+ read_unlock(&tasklist_lock);
76078+ rcu_read_unlock();
76079+#endif
76080+ return 1;
76081+}
76082+
76083+int
76084+gr_handle_chroot_nice(void)
76085+{
76086+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76087+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
76088+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
76089+ return -EPERM;
76090+ }
76091+#endif
76092+ return 0;
76093+}
76094+
76095+int
76096+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
76097+{
76098+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76099+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
76100+ && proc_is_chrooted(current)) {
76101+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
76102+ return -EACCES;
76103+ }
76104+#endif
76105+ return 0;
76106+}
76107+
76108+int
76109+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
76110+{
76111+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76112+ struct task_struct *p;
76113+ int ret = 0;
76114+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
76115+ return ret;
76116+
76117+ read_lock(&tasklist_lock);
76118+ do_each_pid_task(pid, type, p) {
76119+ if (!have_same_root(current, p)) {
76120+ ret = 1;
76121+ goto out;
76122+ }
76123+ } while_each_pid_task(pid, type, p);
76124+out:
76125+ read_unlock(&tasklist_lock);
76126+ return ret;
76127+#endif
76128+ return 0;
76129+}
76130+
76131+int
76132+gr_pid_is_chrooted(struct task_struct *p)
76133+{
76134+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76135+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
76136+ return 0;
76137+
76138+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
76139+ !have_same_root(current, p)) {
76140+ return 1;
76141+ }
76142+#endif
76143+ return 0;
76144+}
76145+
76146+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
76147+
76148+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
76149+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
76150+{
76151+ struct path path, currentroot;
76152+ int ret = 0;
76153+
76154+ path.dentry = (struct dentry *)u_dentry;
76155+ path.mnt = (struct vfsmount *)u_mnt;
76156+ get_fs_root(current->fs, &currentroot);
76157+ if (path_is_under(&path, &currentroot))
76158+ ret = 1;
76159+ path_put(&currentroot);
76160+
76161+ return ret;
76162+}
76163+#endif
76164+
76165+int
76166+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
76167+{
76168+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76169+ if (!grsec_enable_chroot_fchdir)
76170+ return 1;
76171+
76172+ if (!proc_is_chrooted(current))
76173+ return 1;
76174+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
76175+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
76176+ return 0;
76177+ }
76178+#endif
76179+ return 1;
76180+}
76181+
76182+int
76183+gr_chroot_fhandle(void)
76184+{
76185+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76186+ if (!grsec_enable_chroot_fchdir)
76187+ return 1;
76188+
76189+ if (!proc_is_chrooted(current))
76190+ return 1;
76191+ else {
76192+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
76193+ return 0;
76194+ }
76195+#endif
76196+ return 1;
76197+}
76198+
76199+int
76200+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76201+ const u64 shm_createtime)
76202+{
76203+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
76204+ struct task_struct *p;
76205+
76206+ if (unlikely(!grsec_enable_chroot_shmat))
76207+ return 1;
76208+
76209+ if (likely(!proc_is_chrooted(current)))
76210+ return 1;
76211+
76212+ rcu_read_lock();
76213+ read_lock(&tasklist_lock);
76214+
76215+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
76216+ if (time_before_eq64(p->start_time, shm_createtime)) {
76217+ if (have_same_root(current, p)) {
76218+ goto allow;
76219+ } else {
76220+ read_unlock(&tasklist_lock);
76221+ rcu_read_unlock();
76222+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76223+ return 0;
76224+ }
76225+ }
76226+ /* creator exited, pid reuse, fall through to next check */
76227+ }
76228+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
76229+ if (unlikely(!have_same_root(current, p))) {
76230+ read_unlock(&tasklist_lock);
76231+ rcu_read_unlock();
76232+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76233+ return 0;
76234+ }
76235+ }
76236+
76237+allow:
76238+ read_unlock(&tasklist_lock);
76239+ rcu_read_unlock();
76240+#endif
76241+ return 1;
76242+}
76243+
76244+void
76245+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
76246+{
76247+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
76248+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
76249+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
76250+#endif
76251+ return;
76252+}
76253+
76254+int
76255+gr_handle_chroot_mknod(const struct dentry *dentry,
76256+ const struct vfsmount *mnt, const int mode)
76257+{
76258+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
76259+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
76260+ proc_is_chrooted(current)) {
76261+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
76262+ return -EPERM;
76263+ }
76264+#endif
76265+ return 0;
76266+}
76267+
76268+int
76269+gr_handle_chroot_mount(const struct dentry *dentry,
76270+ const struct vfsmount *mnt, const char *dev_name)
76271+{
76272+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
76273+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
76274+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
76275+ return -EPERM;
76276+ }
76277+#endif
76278+ return 0;
76279+}
76280+
76281+int
76282+gr_handle_chroot_pivot(void)
76283+{
76284+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
76285+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
76286+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
76287+ return -EPERM;
76288+ }
76289+#endif
76290+ return 0;
76291+}
76292+
76293+int
76294+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
76295+{
76296+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
76297+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
76298+ !gr_is_outside_chroot(dentry, mnt)) {
76299+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
76300+ return -EPERM;
76301+ }
76302+#endif
76303+ return 0;
76304+}
76305+
76306+extern const char *captab_log[];
76307+extern int captab_log_entries;
76308+
76309+int
76310+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
76311+{
76312+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76313+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76314+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76315+ if (cap_raised(chroot_caps, cap)) {
76316+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
76317+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
76318+ }
76319+ return 0;
76320+ }
76321+ }
76322+#endif
76323+ return 1;
76324+}
76325+
76326+int
76327+gr_chroot_is_capable(const int cap)
76328+{
76329+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76330+ return gr_task_chroot_is_capable(current, current_cred(), cap);
76331+#endif
76332+ return 1;
76333+}
76334+
76335+int
76336+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
76337+{
76338+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76339+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76340+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76341+ if (cap_raised(chroot_caps, cap)) {
76342+ return 0;
76343+ }
76344+ }
76345+#endif
76346+ return 1;
76347+}
76348+
76349+int
76350+gr_chroot_is_capable_nolog(const int cap)
76351+{
76352+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76353+ return gr_task_chroot_is_capable_nolog(current, cap);
76354+#endif
76355+ return 1;
76356+}
76357+
76358+int
76359+gr_handle_chroot_sysctl(const int op)
76360+{
76361+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
76362+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
76363+ proc_is_chrooted(current))
76364+ return -EACCES;
76365+#endif
76366+ return 0;
76367+}
76368+
76369+void
76370+gr_handle_chroot_chdir(const struct path *path)
76371+{
76372+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
76373+ if (grsec_enable_chroot_chdir)
76374+ set_fs_pwd(current->fs, path);
76375+#endif
76376+ return;
76377+}
76378+
76379+int
76380+gr_handle_chroot_chmod(const struct dentry *dentry,
76381+ const struct vfsmount *mnt, const int mode)
76382+{
76383+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
76384+ /* allow chmod +s on directories, but not files */
76385+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
76386+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
76387+ proc_is_chrooted(current)) {
76388+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
76389+ return -EPERM;
76390+ }
76391+#endif
76392+ return 0;
76393+}
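
The mode predicate above is worth unpacking: setuid always counts, but setgid counts only together with group-execute, because setgid without group-execute marks mandatory locking on regular files and must not be refused. A tiny table of inputs (standalone sketch, standard mode macros) makes the behavior concrete:

    #include <stdio.h>
    #include <sys/stat.h>

    /* The setid predicate used in gr_handle_chroot_chmod() above. */
    static int is_setid(int mode)
    {
        return (mode & S_ISUID) ||
               ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
    }

    int main(void)
    {
        printf("04755 -> %d\n", is_setid(04755)); /* 1: setuid */
        printf("02755 -> %d\n", is_setid(02755)); /* 1: setgid + group exec */
        printf("02644 -> %d\n", is_setid(02644)); /* 0: mandatory-locking bit */
        return 0;
    }
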
76394diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
76395new file mode 100644
76396index 0000000..946f750
76397--- /dev/null
76398+++ b/grsecurity/grsec_disabled.c
76399@@ -0,0 +1,445 @@
76400+#include <linux/kernel.h>
76401+#include <linux/module.h>
76402+#include <linux/sched.h>
76403+#include <linux/file.h>
76404+#include <linux/fs.h>
76405+#include <linux/kdev_t.h>
76406+#include <linux/net.h>
76407+#include <linux/in.h>
76408+#include <linux/ip.h>
76409+#include <linux/skbuff.h>
76410+#include <linux/sysctl.h>
76411+
76412+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
76413+void
76414+pax_set_initial_flags(struct linux_binprm *bprm)
76415+{
76416+ return;
76417+}
76418+#endif
76419+
76420+#ifdef CONFIG_SYSCTL
76421+__u32
76422+gr_handle_sysctl(const struct ctl_table * table, const int op)
76423+{
76424+ return 0;
76425+}
76426+#endif
76427+
76428+#ifdef CONFIG_TASKSTATS
76429+int gr_is_taskstats_denied(int pid)
76430+{
76431+ return 0;
76432+}
76433+#endif
76434+
76435+int
76436+gr_acl_is_enabled(void)
76437+{
76438+ return 0;
76439+}
76440+
76441+int
76442+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
76443+{
76444+ return 0;
76445+}
76446+
76447+void
76448+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
76449+{
76450+ return;
76451+}
76452+
76453+int
76454+gr_handle_rawio(const struct inode *inode)
76455+{
76456+ return 0;
76457+}
76458+
76459+void
76460+gr_acl_handle_psacct(struct task_struct *task, const long code)
76461+{
76462+ return;
76463+}
76464+
76465+int
76466+gr_handle_ptrace(struct task_struct *task, const long request)
76467+{
76468+ return 0;
76469+}
76470+
76471+int
76472+gr_handle_proc_ptrace(struct task_struct *task)
76473+{
76474+ return 0;
76475+}
76476+
76477+int
76478+gr_set_acls(const int type)
76479+{
76480+ return 0;
76481+}
76482+
76483+int
76484+gr_check_hidden_task(const struct task_struct *tsk)
76485+{
76486+ return 0;
76487+}
76488+
76489+int
76490+gr_check_protected_task(const struct task_struct *task)
76491+{
76492+ return 0;
76493+}
76494+
76495+int
76496+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
76497+{
76498+ return 0;
76499+}
76500+
76501+void
76502+gr_copy_label(struct task_struct *tsk)
76503+{
76504+ return;
76505+}
76506+
76507+void
76508+gr_set_pax_flags(struct task_struct *task)
76509+{
76510+ return;
76511+}
76512+
76513+int
76514+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
76515+ const int unsafe_share)
76516+{
76517+ return 0;
76518+}
76519+
76520+void
76521+gr_handle_delete(const u64 ino, const dev_t dev)
76522+{
76523+ return;
76524+}
76525+
76526+void
76527+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
76528+{
76529+ return;
76530+}
76531+
76532+void
76533+gr_handle_crash(struct task_struct *task, const int sig)
76534+{
76535+ return;
76536+}
76537+
76538+int
76539+gr_check_crash_exec(const struct file *filp)
76540+{
76541+ return 0;
76542+}
76543+
76544+int
76545+gr_check_crash_uid(const kuid_t uid)
76546+{
76547+ return 0;
76548+}
76549+
76550+void
76551+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
76552+ struct dentry *old_dentry,
76553+ struct dentry *new_dentry,
76554+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
76555+{
76556+ return;
76557+}
76558+
76559+int
76560+gr_search_socket(const int family, const int type, const int protocol)
76561+{
76562+ return 1;
76563+}
76564+
76565+int
76566+gr_search_connectbind(const int mode, const struct socket *sock,
76567+ const struct sockaddr_in *addr)
76568+{
76569+ return 0;
76570+}
76571+
76572+void
76573+gr_handle_alertkill(struct task_struct *task)
76574+{
76575+ return;
76576+}
76577+
76578+__u32
76579+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
76580+{
76581+ return 1;
76582+}
76583+
76584+__u32
76585+gr_acl_handle_hidden_file(const struct dentry * dentry,
76586+ const struct vfsmount * mnt)
76587+{
76588+ return 1;
76589+}
76590+
76591+__u32
76592+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
76593+ int acc_mode)
76594+{
76595+ return 1;
76596+}
76597+
76598+__u32
76599+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
76600+{
76601+ return 1;
76602+}
76603+
76604+__u32
76605+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
76606+{
76607+ return 1;
76608+}
76609+
76610+int
76611+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
76612+ unsigned int *vm_flags)
76613+{
76614+ return 1;
76615+}
76616+
76617+__u32
76618+gr_acl_handle_truncate(const struct dentry * dentry,
76619+ const struct vfsmount * mnt)
76620+{
76621+ return 1;
76622+}
76623+
76624+__u32
76625+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
76626+{
76627+ return 1;
76628+}
76629+
76630+__u32
76631+gr_acl_handle_access(const struct dentry * dentry,
76632+ const struct vfsmount * mnt, const int fmode)
76633+{
76634+ return 1;
76635+}
76636+
76637+__u32
76638+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
76639+ umode_t *mode)
76640+{
76641+ return 1;
76642+}
76643+
76644+__u32
76645+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
76646+{
76647+ return 1;
76648+}
76649+
76650+__u32
76651+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
76652+{
76653+ return 1;
76654+}
76655+
76656+__u32
76657+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
76658+{
76659+ return 1;
76660+}
76661+
76662+void
76663+grsecurity_init(void)
76664+{
76665+ return;
76666+}
76667+
76668+umode_t gr_acl_umask(void)
76669+{
76670+ return 0;
76671+}
76672+
76673+__u32
76674+gr_acl_handle_mknod(const struct dentry * new_dentry,
76675+ const struct dentry * parent_dentry,
76676+ const struct vfsmount * parent_mnt,
76677+ const int mode)
76678+{
76679+ return 1;
76680+}
76681+
76682+__u32
76683+gr_acl_handle_mkdir(const struct dentry * new_dentry,
76684+ const struct dentry * parent_dentry,
76685+ const struct vfsmount * parent_mnt)
76686+{
76687+ return 1;
76688+}
76689+
76690+__u32
76691+gr_acl_handle_symlink(const struct dentry * new_dentry,
76692+ const struct dentry * parent_dentry,
76693+ const struct vfsmount * parent_mnt, const struct filename *from)
76694+{
76695+ return 1;
76696+}
76697+
76698+__u32
76699+gr_acl_handle_link(const struct dentry * new_dentry,
76700+ const struct dentry * parent_dentry,
76701+ const struct vfsmount * parent_mnt,
76702+ const struct dentry * old_dentry,
76703+ const struct vfsmount * old_mnt, const struct filename *to)
76704+{
76705+ return 1;
76706+}
76707+
76708+int
76709+gr_acl_handle_rename(const struct dentry *new_dentry,
76710+ const struct dentry *parent_dentry,
76711+ const struct vfsmount *parent_mnt,
76712+ const struct dentry *old_dentry,
76713+ const struct inode *old_parent_inode,
76714+ const struct vfsmount *old_mnt, const struct filename *newname,
76715+ unsigned int flags)
76716+{
76717+ return 0;
76718+}
76719+
76720+int
76721+gr_acl_handle_filldir(const struct file *file, const char *name,
76722+ const int namelen, const u64 ino)
76723+{
76724+ return 1;
76725+}
76726+
76727+int
76728+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76729+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
76730+{
76731+ return 1;
76732+}
76733+
76734+int
76735+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
76736+{
76737+ return 0;
76738+}
76739+
76740+int
76741+gr_search_accept(const struct socket *sock)
76742+{
76743+ return 0;
76744+}
76745+
76746+int
76747+gr_search_listen(const struct socket *sock)
76748+{
76749+ return 0;
76750+}
76751+
76752+int
76753+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
76754+{
76755+ return 0;
76756+}
76757+
76758+__u32
76759+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
76760+{
76761+ return 1;
76762+}
76763+
76764+__u32
76765+gr_acl_handle_creat(const struct dentry * dentry,
76766+ const struct dentry * p_dentry,
76767+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
76768+ const int imode)
76769+{
76770+ return 1;
76771+}
76772+
76773+void
76774+gr_acl_handle_exit(void)
76775+{
76776+ return;
76777+}
76778+
76779+int
76780+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
76781+{
76782+ return 1;
76783+}
76784+
76785+void
76786+gr_set_role_label(const kuid_t uid, const kgid_t gid)
76787+{
76788+ return;
76789+}
76790+
76791+int
76792+gr_acl_handle_procpidmem(const struct task_struct *task)
76793+{
76794+ return 0;
76795+}
76796+
76797+int
76798+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
76799+{
76800+ return 0;
76801+}
76802+
76803+int
76804+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
76805+{
76806+ return 0;
76807+}
76808+
76809+int
76810+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
76811+{
76812+ return 0;
76813+}
76814+
76815+int
76816+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
76817+{
76818+ return 0;
76819+}
76820+
76821+int gr_acl_enable_at_secure(void)
76822+{
76823+ return 0;
76824+}
76825+
76826+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
76827+{
76828+ return dentry->d_sb->s_dev;
76829+}
76830+
76831+u64 gr_get_ino_from_dentry(struct dentry *dentry)
76832+{
76833+ return dentry->d_inode->i_ino;
76834+}
76835+
76836+void gr_put_exec_file(struct task_struct *task)
76837+{
76838+ return;
76839+}
76840+
76841+#ifdef CONFIG_SECURITY
76842+EXPORT_SYMBOL_GPL(gr_check_user_change);
76843+EXPORT_SYMBOL_GPL(gr_check_group_change);
76844+#endif
76845diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
76846new file mode 100644
76847index 0000000..fb7531e
76848--- /dev/null
76849+++ b/grsecurity/grsec_exec.c
76850@@ -0,0 +1,189 @@
76851+#include <linux/kernel.h>
76852+#include <linux/sched.h>
76853+#include <linux/file.h>
76854+#include <linux/binfmts.h>
76855+#include <linux/fs.h>
76856+#include <linux/types.h>
76857+#include <linux/grdefs.h>
76858+#include <linux/grsecurity.h>
76859+#include <linux/grinternal.h>
76860+#include <linux/capability.h>
76861+#include <linux/module.h>
76862+#include <linux/compat.h>
76863+
76864+#include <asm/uaccess.h>
76865+
76866+#ifdef CONFIG_GRKERNSEC_EXECLOG
76867+static char gr_exec_arg_buf[132];
76868+static DEFINE_MUTEX(gr_exec_arg_mutex);
76869+#endif
76870+
76871+struct user_arg_ptr {
76872+#ifdef CONFIG_COMPAT
76873+ bool is_compat;
76874+#endif
76875+ union {
76876+ const char __user *const __user *native;
76877+#ifdef CONFIG_COMPAT
76878+ const compat_uptr_t __user *compat;
76879+#endif
76880+ } ptr;
76881+};
76882+
76883+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
76884+
76885+void
76886+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
76887+{
76888+#ifdef CONFIG_GRKERNSEC_EXECLOG
76889+ char *grarg = gr_exec_arg_buf;
76890+ unsigned int i, x, execlen = 0;
76891+ char c;
76892+
76893+ if (!((grsec_enable_execlog && grsec_enable_group &&
76894+ in_group_p(grsec_audit_gid))
76895+ || (grsec_enable_execlog && !grsec_enable_group)))
76896+ return;
76897+
76898+ mutex_lock(&gr_exec_arg_mutex);
76899+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
76900+
76901+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
76902+ const char __user *p;
76903+ unsigned int len;
76904+
76905+ p = get_user_arg_ptr(argv, i);
76906+ if (IS_ERR(p))
76907+ goto log;
76908+
76909+ len = strnlen_user(p, 128 - execlen);
76910+ if (len > 128 - execlen)
76911+ len = 128 - execlen;
76912+ else if (len > 0)
76913+ len--;
76914+ if (copy_from_user(grarg + execlen, p, len))
76915+ goto log;
76916+
76917+ /* rewrite unprintable characters */
76918+ for (x = 0; x < len; x++) {
76919+ c = *(grarg + execlen + x);
76920+ if (c < 32 || c > 126)
76921+ *(grarg + execlen + x) = ' ';
76922+ }
76923+
76924+ execlen += len;
76925+ *(grarg + execlen) = ' ';
76926+ *(grarg + execlen + 1) = '\0';
76927+ execlen++;
76928+ }
76929+
76930+ log:
76931+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
76932+ bprm->file->f_path.mnt, grarg);
76933+ mutex_unlock(&gr_exec_arg_mutex);
76934+#endif
76935+ return;
76936+}
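
The loop above builds the logged command line defensively: arguments are concatenated space-separated into a fixed buffer capped at 128 payload bytes, and unprintable bytes are rewritten so a crafted argument cannot corrupt or spoof the log line. A userspace restatement of the same loop (illustrative only) can be run against arbitrary argv:

    #include <stdio.h>
    #include <string.h>

    /* Userspace restatement of the argument logging loop above. */
    int main(int argc, char **argv)
    {
        char buf[132] = "";
        unsigned int execlen = 0, x;

        for (int i = 0; i < argc && execlen < 128; i++) {
            unsigned int len = strlen(argv[i]);

            if (len > 128 - execlen)
                len = 128 - execlen;
            memcpy(buf + execlen, argv[i], len);
            /* rewrite unprintable characters, as the kernel loop does */
            for (x = 0; x < len; x++)
                if (buf[execlen + x] < 32 || buf[execlen + x] > 126)
                    buf[execlen + x] = ' ';
            execlen += len;
            buf[execlen++] = ' ';
            buf[execlen] = '\0';
        }
        printf("exec log: %s\n", buf);
        return 0;
    }
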
76937+
76938+#ifdef CONFIG_GRKERNSEC
76939+extern int gr_acl_is_capable(const int cap);
76940+extern int gr_acl_is_capable_nolog(const int cap);
76941+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
76942+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
76943+extern int gr_chroot_is_capable(const int cap);
76944+extern int gr_chroot_is_capable_nolog(const int cap);
76945+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
76946+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
76947+#endif
76948+
76949+const char *captab_log[] = {
76950+ "CAP_CHOWN",
76951+ "CAP_DAC_OVERRIDE",
76952+ "CAP_DAC_READ_SEARCH",
76953+ "CAP_FOWNER",
76954+ "CAP_FSETID",
76955+ "CAP_KILL",
76956+ "CAP_SETGID",
76957+ "CAP_SETUID",
76958+ "CAP_SETPCAP",
76959+ "CAP_LINUX_IMMUTABLE",
76960+ "CAP_NET_BIND_SERVICE",
76961+ "CAP_NET_BROADCAST",
76962+ "CAP_NET_ADMIN",
76963+ "CAP_NET_RAW",
76964+ "CAP_IPC_LOCK",
76965+ "CAP_IPC_OWNER",
76966+ "CAP_SYS_MODULE",
76967+ "CAP_SYS_RAWIO",
76968+ "CAP_SYS_CHROOT",
76969+ "CAP_SYS_PTRACE",
76970+ "CAP_SYS_PACCT",
76971+ "CAP_SYS_ADMIN",
76972+ "CAP_SYS_BOOT",
76973+ "CAP_SYS_NICE",
76974+ "CAP_SYS_RESOURCE",
76975+ "CAP_SYS_TIME",
76976+ "CAP_SYS_TTY_CONFIG",
76977+ "CAP_MKNOD",
76978+ "CAP_LEASE",
76979+ "CAP_AUDIT_WRITE",
76980+ "CAP_AUDIT_CONTROL",
76981+ "CAP_SETFCAP",
76982+ "CAP_MAC_OVERRIDE",
76983+ "CAP_MAC_ADMIN",
76984+ "CAP_SYSLOG",
76985+ "CAP_WAKE_ALARM",
76986+ "CAP_BLOCK_SUSPEND",
76987+ "CAP_AUDIT_READ"
76988+};
76989+
76990+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
76991+
76992+int gr_is_capable(const int cap)
76993+{
76994+#ifdef CONFIG_GRKERNSEC
76995+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
76996+ return 1;
76997+ return 0;
76998+#else
76999+ return 1;
77000+#endif
77001+}
77002+
77003+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
77004+{
77005+#ifdef CONFIG_GRKERNSEC
77006+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
77007+ return 1;
77008+ return 0;
77009+#else
77010+ return 1;
77011+#endif
77012+}
77013+
77014+int gr_is_capable_nolog(const int cap)
77015+{
77016+#ifdef CONFIG_GRKERNSEC
77017+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
77018+ return 1;
77019+ return 0;
77020+#else
77021+ return 1;
77022+#endif
77023+}
77024+
77025+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
77026+{
77027+#ifdef CONFIG_GRKERNSEC
77028+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
77029+ return 1;
77030+ return 0;
77031+#else
77032+ return 1;
77033+#endif
77034+}
77035+
77036+EXPORT_SYMBOL_GPL(gr_is_capable);
77037+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
77038+EXPORT_SYMBOL_GPL(gr_task_is_capable);
77039+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
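A note on the capability table above: the numeric capability value indexes captab_log directly, guarded by captab_log_entries so capabilities newer than the table are simply not named. A trimmed sketch (three entries only, for brevity) of that consumption pattern:

    #include <stdio.h>

    /* Sketch of indexing a capability-name table like captab_log. */
    static const char *tab[] = {
        "CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_DAC_READ_SEARCH"
    };
    static const int entries = sizeof(tab) / sizeof(tab[0]);

    int main(void)
    {
        int cap = 2; /* numeric capability value */

        if (cap < entries)
            printf("denied capability: %s\n", tab[cap]);
        return 0;
    }
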
77040diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
77041new file mode 100644
77042index 0000000..06cc6ea
77043--- /dev/null
77044+++ b/grsecurity/grsec_fifo.c
77045@@ -0,0 +1,24 @@
77046+#include <linux/kernel.h>
77047+#include <linux/sched.h>
77048+#include <linux/fs.h>
77049+#include <linux/file.h>
77050+#include <linux/grinternal.h>
77051+
77052+int
77053+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
77054+ const struct dentry *dir, const int flag, const int acc_mode)
77055+{
77056+#ifdef CONFIG_GRKERNSEC_FIFO
77057+ const struct cred *cred = current_cred();
77058+
77059+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
77060+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
77061+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
77062+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
77063+ if (!inode_permission(dentry->d_inode, acc_mode))
77064+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
77065+ return -EACCES;
77066+ }
77067+#endif
77068+ return 0;
77069+}
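
A quick behavioral probe for the restriction above (the FIFO path is hypothetical): opening a FIFO owned by another user, sitting in a sticky world-writable directory, without O_EXCL, should fail with EACCES while the protection is enabled.

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/tmp/other-users-fifo", O_RDONLY | O_NONBLOCK);

        if (fd == -1) {
            printf("open: %s\n", strerror(errno)); /* expect: Permission denied */
        } else {
            puts("open succeeded (protection off, or the FIFO is yours)");
            close(fd);
        }
        return 0;
    }
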
77070diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
77071new file mode 100644
77072index 0000000..8ca18bf
77073--- /dev/null
77074+++ b/grsecurity/grsec_fork.c
77075@@ -0,0 +1,23 @@
77076+#include <linux/kernel.h>
77077+#include <linux/sched.h>
77078+#include <linux/grsecurity.h>
77079+#include <linux/grinternal.h>
77080+#include <linux/errno.h>
77081+
77082+void
77083+gr_log_forkfail(const int retval)
77084+{
77085+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77086+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
77087+ switch (retval) {
77088+ case -EAGAIN:
77089+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
77090+ break;
77091+ case -ENOMEM:
77092+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
77093+ break;
77094+ }
77095+ }
77096+#endif
77097+ return;
77098+}
77099diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
77100new file mode 100644
77101index 0000000..4ed9e7d
77102--- /dev/null
77103+++ b/grsecurity/grsec_init.c
77104@@ -0,0 +1,290 @@
77105+#include <linux/kernel.h>
77106+#include <linux/sched.h>
77107+#include <linux/mm.h>
77108+#include <linux/gracl.h>
77109+#include <linux/slab.h>
77110+#include <linux/vmalloc.h>
77111+#include <linux/percpu.h>
77112+#include <linux/module.h>
77113+
77114+int grsec_enable_ptrace_readexec;
77115+int grsec_enable_setxid;
77116+int grsec_enable_symlinkown;
77117+kgid_t grsec_symlinkown_gid;
77118+int grsec_enable_brute;
77119+int grsec_enable_link;
77120+int grsec_enable_dmesg;
77121+int grsec_enable_harden_ptrace;
77122+int grsec_enable_harden_ipc;
77123+int grsec_enable_fifo;
77124+int grsec_enable_execlog;
77125+int grsec_enable_signal;
77126+int grsec_enable_forkfail;
77127+int grsec_enable_audit_ptrace;
77128+int grsec_enable_time;
77129+int grsec_enable_group;
77130+kgid_t grsec_audit_gid;
77131+int grsec_enable_chdir;
77132+int grsec_enable_mount;
77133+int grsec_enable_rofs;
77134+int grsec_deny_new_usb;
77135+int grsec_enable_chroot_findtask;
77136+int grsec_enable_chroot_mount;
77137+int grsec_enable_chroot_shmat;
77138+int grsec_enable_chroot_fchdir;
77139+int grsec_enable_chroot_double;
77140+int grsec_enable_chroot_pivot;
77141+int grsec_enable_chroot_chdir;
77142+int grsec_enable_chroot_chmod;
77143+int grsec_enable_chroot_mknod;
77144+int grsec_enable_chroot_nice;
77145+int grsec_enable_chroot_execlog;
77146+int grsec_enable_chroot_caps;
77147+int grsec_enable_chroot_rename;
77148+int grsec_enable_chroot_sysctl;
77149+int grsec_enable_chroot_unix;
77150+int grsec_enable_tpe;
77151+kgid_t grsec_tpe_gid;
77152+int grsec_enable_blackhole;
77153+#ifdef CONFIG_IPV6_MODULE
77154+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
77155+#endif
77156+int grsec_lastack_retries;
77157+int grsec_enable_tpe_all;
77158+int grsec_enable_tpe_invert;
77159+int grsec_enable_socket_all;
77160+kgid_t grsec_socket_all_gid;
77161+int grsec_enable_socket_client;
77162+kgid_t grsec_socket_client_gid;
77163+int grsec_enable_socket_server;
77164+kgid_t grsec_socket_server_gid;
77165+int grsec_resource_logging;
77166+int grsec_disable_privio;
77167+int grsec_enable_log_rwxmaps;
77168+int grsec_lock;
77169+
77170+DEFINE_SPINLOCK(grsec_alert_lock);
77171+unsigned long grsec_alert_wtime = 0;
77172+unsigned long grsec_alert_fyet = 0;
77173+
77174+DEFINE_SPINLOCK(grsec_audit_lock);
77175+
77176+DEFINE_RWLOCK(grsec_exec_file_lock);
77177+
77178+char *gr_shared_page[4];
77179+
77180+char *gr_alert_log_fmt;
77181+char *gr_audit_log_fmt;
77182+char *gr_alert_log_buf;
77183+char *gr_audit_log_buf;
77184+
77185+extern struct gr_arg *gr_usermode;
77186+extern unsigned char *gr_system_salt;
77187+extern unsigned char *gr_system_sum;
77188+
77189+void __init
77190+grsecurity_init(void)
77191+{
77192+ int j;
77193+ /* create the per-cpu shared pages */
77194+
77195+#ifdef CONFIG_X86
77196+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
77197+#endif
77198+
77199+ for (j = 0; j < 4; j++) {
77200+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
77201+ if (gr_shared_page[j] == NULL) {
77202+ panic("Unable to allocate grsecurity shared page");
77203+ return;
77204+ }
77205+ }
77206+
77207+ /* allocate log buffers */
77208+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
77209+ if (!gr_alert_log_fmt) {
77210+ panic("Unable to allocate grsecurity alert log format buffer");
77211+ return;
77212+ }
77213+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
77214+ if (!gr_audit_log_fmt) {
77215+ panic("Unable to allocate grsecurity audit log format buffer");
77216+ return;
77217+ }
77218+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77219+ if (!gr_alert_log_buf) {
77220+ panic("Unable to allocate grsecurity alert log buffer");
77221+ return;
77222+ }
77223+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77224+ if (!gr_audit_log_buf) {
77225+ panic("Unable to allocate grsecurity audit log buffer");
77226+ return;
77227+ }
77228+
77229+ /* allocate memory for authentication structure */
77230+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
77231+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
77232+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
77233+
77234+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
77235+ panic("Unable to allocate grsecurity authentication structure");
77236+ return;
77237+ }
77238+
77239+#ifdef CONFIG_GRKERNSEC_IO
77240+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
77241+ grsec_disable_privio = 1;
77242+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77243+ grsec_disable_privio = 1;
77244+#else
77245+ grsec_disable_privio = 0;
77246+#endif
77247+#endif
77248+
77249+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
77250+ /* for backward compatibility, tpe_invert always defaults to on
77251+    if enabled in the kernel
77252+ */
77253+ grsec_enable_tpe_invert = 1;
77254+#endif
77255+
77256+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77257+#ifndef CONFIG_GRKERNSEC_SYSCTL
77258+ grsec_lock = 1;
77259+#endif
77260+
77261+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77262+ grsec_enable_log_rwxmaps = 1;
77263+#endif
77264+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
77265+ grsec_enable_group = 1;
77266+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
77267+#endif
77268+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
77269+ grsec_enable_ptrace_readexec = 1;
77270+#endif
77271+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
77272+ grsec_enable_chdir = 1;
77273+#endif
77274+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
77275+ grsec_enable_harden_ptrace = 1;
77276+#endif
77277+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77278+ grsec_enable_harden_ipc = 1;
77279+#endif
77280+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77281+ grsec_enable_mount = 1;
77282+#endif
77283+#ifdef CONFIG_GRKERNSEC_LINK
77284+ grsec_enable_link = 1;
77285+#endif
77286+#ifdef CONFIG_GRKERNSEC_BRUTE
77287+ grsec_enable_brute = 1;
77288+#endif
77289+#ifdef CONFIG_GRKERNSEC_DMESG
77290+ grsec_enable_dmesg = 1;
77291+#endif
77292+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77293+ grsec_enable_blackhole = 1;
77294+ grsec_lastack_retries = 4;
77295+#endif
77296+#ifdef CONFIG_GRKERNSEC_FIFO
77297+ grsec_enable_fifo = 1;
77298+#endif
77299+#ifdef CONFIG_GRKERNSEC_EXECLOG
77300+ grsec_enable_execlog = 1;
77301+#endif
77302+#ifdef CONFIG_GRKERNSEC_SETXID
77303+ grsec_enable_setxid = 1;
77304+#endif
77305+#ifdef CONFIG_GRKERNSEC_SIGNAL
77306+ grsec_enable_signal = 1;
77307+#endif
77308+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77309+ grsec_enable_forkfail = 1;
77310+#endif
77311+#ifdef CONFIG_GRKERNSEC_TIME
77312+ grsec_enable_time = 1;
77313+#endif
77314+#ifdef CONFIG_GRKERNSEC_RESLOG
77315+ grsec_resource_logging = 1;
77316+#endif
77317+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77318+ grsec_enable_chroot_findtask = 1;
77319+#endif
77320+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
77321+ grsec_enable_chroot_unix = 1;
77322+#endif
77323+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
77324+ grsec_enable_chroot_mount = 1;
77325+#endif
77326+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77327+ grsec_enable_chroot_fchdir = 1;
77328+#endif
77329+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
77330+ grsec_enable_chroot_shmat = 1;
77331+#endif
77332+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
77333+ grsec_enable_audit_ptrace = 1;
77334+#endif
77335+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
77336+ grsec_enable_chroot_double = 1;
77337+#endif
77338+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
77339+ grsec_enable_chroot_pivot = 1;
77340+#endif
77341+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
77342+ grsec_enable_chroot_chdir = 1;
77343+#endif
77344+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
77345+ grsec_enable_chroot_chmod = 1;
77346+#endif
77347+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
77348+ grsec_enable_chroot_mknod = 1;
77349+#endif
77350+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
77351+ grsec_enable_chroot_nice = 1;
77352+#endif
77353+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
77354+ grsec_enable_chroot_execlog = 1;
77355+#endif
77356+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77357+ grsec_enable_chroot_caps = 1;
77358+#endif
77359+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
77360+ grsec_enable_chroot_rename = 1;
77361+#endif
77362+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
77363+ grsec_enable_chroot_sysctl = 1;
77364+#endif
77365+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77366+ grsec_enable_symlinkown = 1;
77367+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
77368+#endif
77369+#ifdef CONFIG_GRKERNSEC_TPE
77370+ grsec_enable_tpe = 1;
77371+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
77372+#ifdef CONFIG_GRKERNSEC_TPE_ALL
77373+ grsec_enable_tpe_all = 1;
77374+#endif
77375+#endif
77376+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
77377+ grsec_enable_socket_all = 1;
77378+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
77379+#endif
77380+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
77381+ grsec_enable_socket_client = 1;
77382+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
77383+#endif
77384+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
77385+ grsec_enable_socket_server = 1;
77386+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
77387+#endif
77388+#endif
77389+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
77390+ grsec_deny_new_usb = 1;
77391+#endif
77392+
77393+ return;
77394+}
77395diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
77396new file mode 100644
77397index 0000000..1773300
77398--- /dev/null
77399+++ b/grsecurity/grsec_ipc.c
77400@@ -0,0 +1,48 @@
77401+#include <linux/kernel.h>
77402+#include <linux/mm.h>
77403+#include <linux/sched.h>
77404+#include <linux/file.h>
77405+#include <linux/ipc.h>
77406+#include <linux/ipc_namespace.h>
77407+#include <linux/grsecurity.h>
77408+#include <linux/grinternal.h>
77409+
77410+int
77411+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
77412+{
77413+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77414+ int write;
77415+ int orig_granted_mode;
77416+ kuid_t euid;
77417+ kgid_t egid;
77418+
77419+ if (!grsec_enable_harden_ipc)
77420+ return 1;
77421+
77422+ euid = current_euid();
77423+ egid = current_egid();
77424+
77425+ write = requested_mode & 00002;
77426+ orig_granted_mode = ipcp->mode;
77427+
77428+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
77429+ orig_granted_mode >>= 6;
77430+ else {
77431+ /* if the permissions look misconfigured, lock access down to the owner */
77432+ if (orig_granted_mode & 0007)
77433+ orig_granted_mode = 0;
77434+ /* otherwise do an egid-only check */
77435+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
77436+ orig_granted_mode >>= 3;
77437+ /* otherwise, no access */
77438+ else
77439+ orig_granted_mode = 0;
77440+ }
77441+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
77442+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
77443+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
77444+ return 0;
77445+ }
77446+#endif
77447+ return 1;
77448+}
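
The shifting above follows classic Unix permission triads: owner bits live six places up, group bits three, others in the low three. A worked example for an IPC mode of 0660:

    #include <stdio.h>

    /* The mode arithmetic used in gr_ipc_permitted() above. */
    int main(void)
    {
        int mode = 0660;

        printf("owner=%o group=%o other=%o\n",
               (mode >> 6) & 7, (mode >> 3) & 7, mode & 7);
        return 0; /* prints: owner=6 group=6 other=0 */
    }
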
77449diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
77450new file mode 100644
77451index 0000000..5e05e20
77452--- /dev/null
77453+++ b/grsecurity/grsec_link.c
77454@@ -0,0 +1,58 @@
77455+#include <linux/kernel.h>
77456+#include <linux/sched.h>
77457+#include <linux/fs.h>
77458+#include <linux/file.h>
77459+#include <linux/grinternal.h>
77460+
77461+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
77462+{
77463+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77464+ const struct inode *link_inode = link->dentry->d_inode;
77465+
77466+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
77467+ /* ignore root-owned links, e.g. /proc/self */
77468+ gr_is_global_nonroot(link_inode->i_uid) && target &&
77469+ !uid_eq(link_inode->i_uid, target->i_uid)) {
77470+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
77471+ return 1;
77472+ }
77473+#endif
77474+ return 0;
77475+}
77476+
77477+int
77478+gr_handle_follow_link(const struct inode *parent,
77479+ const struct inode *inode,
77480+ const struct dentry *dentry, const struct vfsmount *mnt)
77481+{
77482+#ifdef CONFIG_GRKERNSEC_LINK
77483+ const struct cred *cred = current_cred();
77484+
77485+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
77486+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
77487+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
77488+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
77489+ return -EACCES;
77490+ }
77491+#endif
77492+ return 0;
77493+}
77494+
77495+int
77496+gr_handle_hardlink(const struct dentry *dentry,
77497+ const struct vfsmount *mnt,
77498+ struct inode *inode, const int mode, const struct filename *to)
77499+{
77500+#ifdef CONFIG_GRKERNSEC_LINK
77501+ const struct cred *cred = current_cred();
77502+
77503+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
77504+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
77505+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
77506+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
77507+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
77508+ return -EPERM;
77509+ }
77510+#endif
77511+ return 0;
77512+}
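
A behavioral probe for the hardlink restriction above (paths are hypothetical): an unprivileged user linking to a file it neither owns nor can read and write should see EPERM while the protection is enabled. EXDEV would merely mean source and target sit on different filesystems, so pick a target on the same one.

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <unistd.h>

    int main(void)
    {
        if (link("/etc/shadow", "./shadow.ln") == -1)
            printf("link: %s\n", strerror(errno)); /* expect: Operation not permitted */
        else
            puts("link succeeded (protection off or running as root)");
        return 0;
    }
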
77513diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
77514new file mode 100644
77515index 0000000..dbe0a6b
77516--- /dev/null
77517+++ b/grsecurity/grsec_log.c
77518@@ -0,0 +1,341 @@
77519+#include <linux/kernel.h>
77520+#include <linux/sched.h>
77521+#include <linux/file.h>
77522+#include <linux/tty.h>
77523+#include <linux/fs.h>
77524+#include <linux/mm.h>
77525+#include <linux/grinternal.h>
77526+
77527+#ifdef CONFIG_TREE_PREEMPT_RCU
77528+#define DISABLE_PREEMPT() preempt_disable()
77529+#define ENABLE_PREEMPT() preempt_enable()
77530+#else
77531+#define DISABLE_PREEMPT()
77532+#define ENABLE_PREEMPT()
77533+#endif
77534+
77535+#define BEGIN_LOCKS(x) \
77536+ DISABLE_PREEMPT(); \
77537+ rcu_read_lock(); \
77538+ read_lock(&tasklist_lock); \
77539+ read_lock(&grsec_exec_file_lock); \
77540+ if (x != GR_DO_AUDIT) \
77541+ spin_lock(&grsec_alert_lock); \
77542+ else \
77543+ spin_lock(&grsec_audit_lock)
77544+
77545+#define END_LOCKS(x) \
77546+ if (x != GR_DO_AUDIT) \
77547+ spin_unlock(&grsec_alert_lock); \
77548+ else \
77549+ spin_unlock(&grsec_audit_lock); \
77550+ read_unlock(&grsec_exec_file_lock); \
77551+ read_unlock(&tasklist_lock); \
77552+ rcu_read_unlock(); \
77553+ ENABLE_PREEMPT(); \
77554+ if (x == GR_DONT_AUDIT) \
77555+ gr_handle_alertkill(current)
77556+
77557+enum {
77558+ FLOODING,
77559+ NO_FLOODING
77560+};
77561+
77562+extern char *gr_alert_log_fmt;
77563+extern char *gr_audit_log_fmt;
77564+extern char *gr_alert_log_buf;
77565+extern char *gr_audit_log_buf;
77566+
77567+static int gr_log_start(int audit)
77568+{
77569+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
77570+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
77571+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77572+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
77573+ unsigned long curr_secs = get_seconds();
77574+
77575+ if (audit == GR_DO_AUDIT)
77576+ goto set_fmt;
77577+
77578+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
77579+ grsec_alert_wtime = curr_secs;
77580+ grsec_alert_fyet = 0;
77581+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
77582+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
77583+ grsec_alert_fyet++;
77584+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
77585+ grsec_alert_wtime = curr_secs;
77586+ grsec_alert_fyet++;
77587+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
77588+ return FLOODING;
77589+ }
77590+ else return FLOODING;
77591+
77592+set_fmt:
77593+#endif
77594+ memset(buf, 0, PAGE_SIZE);
77595+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
77596+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
77597+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
77598+ } else if (current->signal->curr_ip) {
77599+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
77600+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
77601+ } else if (gr_acl_is_enabled()) {
77602+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
77603+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
77604+ } else {
77605+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
77606+ strcpy(buf, fmt);
77607+ }
77608+
77609+ return NO_FLOODING;
77610+}
77611+
77612+static void gr_log_middle(int audit, const char *msg, va_list ap)
77613+ __attribute__ ((format (printf, 2, 0)));
77614+
77615+static void gr_log_middle(int audit, const char *msg, va_list ap)
77616+{
77617+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77618+ unsigned int len = strlen(buf);
77619+
77620+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
77621+
77622+ return;
77623+}
77624+
77625+static void gr_log_middle_varargs(int audit, const char *msg, ...)
77626+ __attribute__ ((format (printf, 2, 3)));
77627+
77628+static void gr_log_middle_varargs(int audit, const char *msg, ...)
77629+{
77630+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77631+ unsigned int len = strlen(buf);
77632+ va_list ap;
77633+
77634+ va_start(ap, msg);
77635+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
77636+ va_end(ap);
77637+
77638+ return;
77639+}
77640+
77641+static void gr_log_end(int audit, int append_default)
77642+{
77643+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77644+ if (append_default) {
77645+ struct task_struct *task = current;
77646+ struct task_struct *parent = task->real_parent;
77647+ const struct cred *cred = __task_cred(task);
77648+ const struct cred *pcred = __task_cred(parent);
77649+ unsigned int len = strlen(buf);
77650+
77651+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77652+ }
77653+
77654+ printk("%s\n", buf);
77655+
77656+ return;
77657+}
77658+
77659+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
77660+{
77661+ int logtype;
77662+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
77663+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
77664+ void *voidptr = NULL;
77665+ int num1 = 0, num2 = 0;
77666+ unsigned long ulong1 = 0, ulong2 = 0;
77667+ struct dentry *dentry = NULL;
77668+ struct vfsmount *mnt = NULL;
77669+ struct file *file = NULL;
77670+ struct task_struct *task = NULL;
77671+ struct vm_area_struct *vma = NULL;
77672+ const struct cred *cred, *pcred;
77673+ va_list ap;
77674+
77675+ BEGIN_LOCKS(audit);
77676+ logtype = gr_log_start(audit);
77677+ if (logtype == FLOODING) {
77678+ END_LOCKS(audit);
77679+ return;
77680+ }
77681+ va_start(ap, argtypes);
77682+ switch (argtypes) {
77683+ case GR_TTYSNIFF:
77684+ task = va_arg(ap, struct task_struct *);
77685+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
77686+ break;
77687+ case GR_SYSCTL_HIDDEN:
77688+ str1 = va_arg(ap, char *);
77689+ gr_log_middle_varargs(audit, msg, result, str1);
77690+ break;
77691+ case GR_RBAC:
77692+ dentry = va_arg(ap, struct dentry *);
77693+ mnt = va_arg(ap, struct vfsmount *);
77694+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
77695+ break;
77696+ case GR_RBAC_STR:
77697+ dentry = va_arg(ap, struct dentry *);
77698+ mnt = va_arg(ap, struct vfsmount *);
77699+ str1 = va_arg(ap, char *);
77700+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
77701+ break;
77702+ case GR_STR_RBAC:
77703+ str1 = va_arg(ap, char *);
77704+ dentry = va_arg(ap, struct dentry *);
77705+ mnt = va_arg(ap, struct vfsmount *);
77706+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
77707+ break;
77708+ case GR_RBAC_MODE2:
77709+ dentry = va_arg(ap, struct dentry *);
77710+ mnt = va_arg(ap, struct vfsmount *);
77711+ str1 = va_arg(ap, char *);
77712+ str2 = va_arg(ap, char *);
77713+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
77714+ break;
77715+ case GR_RBAC_MODE3:
77716+ dentry = va_arg(ap, struct dentry *);
77717+ mnt = va_arg(ap, struct vfsmount *);
77718+ str1 = va_arg(ap, char *);
77719+ str2 = va_arg(ap, char *);
77720+ str3 = va_arg(ap, char *);
77721+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
77722+ break;
77723+ case GR_FILENAME:
77724+ dentry = va_arg(ap, struct dentry *);
77725+ mnt = va_arg(ap, struct vfsmount *);
77726+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
77727+ break;
77728+ case GR_STR_FILENAME:
77729+ str1 = va_arg(ap, char *);
77730+ dentry = va_arg(ap, struct dentry *);
77731+ mnt = va_arg(ap, struct vfsmount *);
77732+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
77733+ break;
77734+ case GR_FILENAME_STR:
77735+ dentry = va_arg(ap, struct dentry *);
77736+ mnt = va_arg(ap, struct vfsmount *);
77737+ str1 = va_arg(ap, char *);
77738+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
77739+ break;
77740+ case GR_FILENAME_TWO_INT:
77741+ dentry = va_arg(ap, struct dentry *);
77742+ mnt = va_arg(ap, struct vfsmount *);
77743+ num1 = va_arg(ap, int);
77744+ num2 = va_arg(ap, int);
77745+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
77746+ break;
77747+ case GR_FILENAME_TWO_INT_STR:
77748+ dentry = va_arg(ap, struct dentry *);
77749+ mnt = va_arg(ap, struct vfsmount *);
77750+ num1 = va_arg(ap, int);
77751+ num2 = va_arg(ap, int);
77752+ str1 = va_arg(ap, char *);
77753+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
77754+ break;
77755+ case GR_TEXTREL:
77756+ file = va_arg(ap, struct file *);
77757+ ulong1 = va_arg(ap, unsigned long);
77758+ ulong2 = va_arg(ap, unsigned long);
77759+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
77760+ break;
77761+ case GR_PTRACE:
77762+ task = va_arg(ap, struct task_struct *);
77763+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
77764+ break;
77765+ case GR_RESOURCE:
77766+ task = va_arg(ap, struct task_struct *);
77767+ cred = __task_cred(task);
77768+ pcred = __task_cred(task->real_parent);
77769+ ulong1 = va_arg(ap, unsigned long);
77770+ str1 = va_arg(ap, char *);
77771+ ulong2 = va_arg(ap, unsigned long);
77772+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77773+ break;
77774+ case GR_CAP:
77775+ task = va_arg(ap, struct task_struct *);
77776+ cred = __task_cred(task);
77777+ pcred = __task_cred(task->real_parent);
77778+ str1 = va_arg(ap, char *);
77779+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77780+ break;
77781+ case GR_SIG:
77782+ str1 = va_arg(ap, char *);
77783+ voidptr = va_arg(ap, void *);
77784+ gr_log_middle_varargs(audit, msg, str1, voidptr);
77785+ break;
77786+ case GR_SIG2:
77787+ task = va_arg(ap, struct task_struct *);
77788+ cred = __task_cred(task);
77789+ pcred = __task_cred(task->real_parent);
77790+ num1 = va_arg(ap, int);
77791+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77792+ break;
77793+ case GR_CRASH1:
77794+ task = va_arg(ap, struct task_struct *);
77795+ cred = __task_cred(task);
77796+ pcred = __task_cred(task->real_parent);
77797+ ulong1 = va_arg(ap, unsigned long);
77798+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
77799+ break;
77800+ case GR_CRASH2:
77801+ task = va_arg(ap, struct task_struct *);
77802+ cred = __task_cred(task);
77803+ pcred = __task_cred(task->real_parent);
77804+ ulong1 = va_arg(ap, unsigned long);
77805+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
77806+ break;
77807+ case GR_RWXMAP:
77808+ file = va_arg(ap, struct file *);
77809+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
77810+ break;
77811+ case GR_RWXMAPVMA:
77812+ vma = va_arg(ap, struct vm_area_struct *);
77813+ if (vma->vm_file)
77814+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
77815+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
77816+ str1 = "<stack>";
77817+ else if (vma->vm_start <= current->mm->brk &&
77818+ vma->vm_end >= current->mm->start_brk)
77819+ str1 = "<heap>";
77820+ else
77821+ str1 = "<anonymous mapping>";
77822+ gr_log_middle_varargs(audit, msg, str1);
77823+ break;
77824+ case GR_PSACCT:
77825+ {
77826+ unsigned int wday, cday;
77827+ __u8 whr, chr;
77828+ __u8 wmin, cmin;
77829+ __u8 wsec, csec;
77830+ char cur_tty[64] = { 0 };
77831+ char parent_tty[64] = { 0 };
77832+
77833+ task = va_arg(ap, struct task_struct *);
77834+ wday = va_arg(ap, unsigned int);
77835+ cday = va_arg(ap, unsigned int);
77836+ whr = va_arg(ap, int);
77837+ chr = va_arg(ap, int);
77838+ wmin = va_arg(ap, int);
77839+ cmin = va_arg(ap, int);
77840+ wsec = va_arg(ap, int);
77841+ csec = va_arg(ap, int);
77842+ ulong1 = va_arg(ap, unsigned long);
77843+ cred = __task_cred(task);
77844+ pcred = __task_cred(task->real_parent);
77845+
77846+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77847+ }
77848+ break;
77849+ default:
77850+ gr_log_middle(audit, msg, ap);
77851+ }
77852+ va_end(ap);
77853+ // these don't need DEFAULTSECARGS printed on the end
77854+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
77855+ gr_log_end(audit, 0);
77856+ else
77857+ gr_log_end(audit, 1);
77858+ END_LOCKS(audit);
77859+}
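
The flood control at the top of gr_log_start() admits a burst of alerts per time window and then silences logging until the window rolls over. Below is a minimal userspace sketch of the same windowed limiter; the FLOODTIME/FLOODBURST values of 10 seconds and 6 messages are illustrative stand-ins for the CONFIG_GRKERNSEC_FLOODTIME/FLOODBURST Kconfig settings, and all names are local to the sketch.

#include <stdio.h>
#include <time.h>

#define FLOODTIME  10   /* seconds per window (illustrative) */
#define FLOODBURST 6    /* extra messages allowed per window (illustrative) */

static time_t window_start;
static int sent_in_window;

/* Returns 1 if the message may be logged, 0 if it is suppressed. */
static int flood_check(time_t now)
{
	if (window_start == 0 || now > window_start + FLOODTIME) {
		window_start = now;	/* open a fresh window */
		sent_in_window = 0;
	} else if (sent_in_window < FLOODBURST) {
		sent_in_window++;	/* still under the burst limit */
	} else if (sent_in_window == FLOODBURST) {
		sent_in_window++;	/* emit the suppression notice once */
		window_start = now;	/* the silence lasts a full window */
		printf("more alerts, logging disabled for %d seconds\n", FLOODTIME);
		return 0;
	} else {
		return 0;		/* already silenced in this window */
	}
	return 1;
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		if (flood_check(time(NULL)))
			printf("alert %d logged\n", i);
	return 0;
}

As in the kernel code, the first message of a window plus FLOODBURST more get through, the suppression notice itself restarts the window, and everything else is dropped.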
77860diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
77861new file mode 100644
77862index 0000000..0e39d8c
77863--- /dev/null
77864+++ b/grsecurity/grsec_mem.c
77865@@ -0,0 +1,48 @@
77866+#include <linux/kernel.h>
77867+#include <linux/sched.h>
77868+#include <linux/mm.h>
77869+#include <linux/mman.h>
77870+#include <linux/module.h>
77871+#include <linux/grinternal.h>
77872+
77873+void gr_handle_msr_write(void)
77874+{
77875+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
77876+ return;
77877+}
77878+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
77879+
77880+void
77881+gr_handle_ioperm(void)
77882+{
77883+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
77884+ return;
77885+}
77886+
77887+void
77888+gr_handle_iopl(void)
77889+{
77890+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
77891+ return;
77892+}
77893+
77894+void
77895+gr_handle_mem_readwrite(u64 from, u64 to)
77896+{
77897+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
77898+ return;
77899+}
77900+
77901+void
77902+gr_handle_vm86(void)
77903+{
77904+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
77905+ return;
77906+}
77907+
77908+void
77909+gr_log_badprocpid(const char *entry)
77910+{
77911+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
77912+ return;
77913+}
77914diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
77915new file mode 100644
77916index 0000000..6f9eb73
77917--- /dev/null
77918+++ b/grsecurity/grsec_mount.c
77919@@ -0,0 +1,65 @@
77920+#include <linux/kernel.h>
77921+#include <linux/sched.h>
77922+#include <linux/mount.h>
77923+#include <linux/major.h>
77924+#include <linux/grsecurity.h>
77925+#include <linux/grinternal.h>
77926+
77927+void
77928+gr_log_remount(const char *devname, const int retval)
77929+{
77930+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77931+ if (grsec_enable_mount && (retval >= 0))
77932+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
77933+#endif
77934+ return;
77935+}
77936+
77937+void
77938+gr_log_unmount(const char *devname, const int retval)
77939+{
77940+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77941+ if (grsec_enable_mount && (retval >= 0))
77942+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
77943+#endif
77944+ return;
77945+}
77946+
77947+void
77948+gr_log_mount(const char *from, struct path *to, const int retval)
77949+{
77950+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77951+ if (grsec_enable_mount && (retval >= 0))
77952+ gr_log_str_fs(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to->dentry, to->mnt);
77953+#endif
77954+ return;
77955+}
77956+
77957+int
77958+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
77959+{
77960+#ifdef CONFIG_GRKERNSEC_ROFS
77961+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
77962+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
77963+ return -EPERM;
77964+ } else
77965+ return 0;
77966+#endif
77967+ return 0;
77968+}
77969+
77970+int
77971+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
77972+{
77973+#ifdef CONFIG_GRKERNSEC_ROFS
77974+ struct inode *inode = dentry->d_inode;
77975+
77976+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
77977+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
77978+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
77979+ return -EPERM;
77980+ } else
77981+ return 0;
77982+#endif
77983+ return 0;
77984+}
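
gr_handle_rofs_blockwrite() refuses write opens of block devices, and of character devices on the raw driver's major, once romount_protect is enabled. A userspace sketch of the same classification over stat(2); RAW_MAJOR is assumed to be 162 as in the kernel's linux/major.h.

#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

#define RAW_MAJOR 162	/* value from linux/major.h (assumed) */

/* Returns 1 if a write open of this node would be refused under ROFS. */
static int rofs_would_deny_write(const char *path)
{
	struct stat st;

	if (stat(path, &st) != 0)
		return 0;	/* nonexistent: nothing to protect */
	if (S_ISBLK(st.st_mode))
		return 1;	/* all block devices are covered */
	if (S_ISCHR(st.st_mode) && major(st.st_rdev) == RAW_MAJOR)
		return 1;	/* raw char devices alias block devices */
	return 0;
}

int main(int argc, char **argv)
{
	for (int i = 1; i < argc; i++)
		printf("%s: %s\n", argv[i],
		       rofs_would_deny_write(argv[i]) ? "write denied" : "allowed");
	return 0;
}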
77985diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
77986new file mode 100644
77987index 0000000..6ee9d50
77988--- /dev/null
77989+++ b/grsecurity/grsec_pax.c
77990@@ -0,0 +1,45 @@
77991+#include <linux/kernel.h>
77992+#include <linux/sched.h>
77993+#include <linux/mm.h>
77994+#include <linux/file.h>
77995+#include <linux/grinternal.h>
77996+#include <linux/grsecurity.h>
77997+
77998+void
77999+gr_log_textrel(struct vm_area_struct * vma)
78000+{
78001+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78002+ if (grsec_enable_log_rwxmaps)
78003+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
78004+#endif
78005+ return;
78006+}
78007+
78008+void gr_log_ptgnustack(struct file *file)
78009+{
78010+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78011+ if (grsec_enable_log_rwxmaps)
78012+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
78013+#endif
78014+ return;
78015+}
78016+
78017+void
78018+gr_log_rwxmmap(struct file *file)
78019+{
78020+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78021+ if (grsec_enable_log_rwxmaps)
78022+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
78023+#endif
78024+ return;
78025+}
78026+
78027+void
78028+gr_log_rwxmprotect(struct vm_area_struct *vma)
78029+{
78030+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78031+ if (grsec_enable_log_rwxmaps)
78032+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
78033+#endif
78034+ return;
78035+}
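
All four hooks above report the same underlying condition: a mapping that ends up writable and executable at once. A trivial sketch of that predicate over the standard PROT_* flags; the kernel checks VMA flags after the fact rather than an mmap request, so this is only an analogue.

#include <stdio.h>
#include <sys/mman.h>

/* Returns 1 when a mapping request is simultaneously writable and
 * executable -- the condition the RWXMAP_LOG hooks report on. */
static int is_rwx_request(int prot)
{
	return (prot & PROT_WRITE) && (prot & PROT_EXEC);
}

int main(void)
{
	printf("rw : %d\n", is_rwx_request(PROT_READ | PROT_WRITE));
	printf("rx : %d\n", is_rwx_request(PROT_READ | PROT_EXEC));
	printf("rwx: %d\n", is_rwx_request(PROT_READ | PROT_WRITE | PROT_EXEC));
	return 0;
}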
78036diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
78037new file mode 100644
78038index 0000000..2005a3a
78039--- /dev/null
78040+++ b/grsecurity/grsec_proc.c
78041@@ -0,0 +1,20 @@
78042+#include <linux/kernel.h>
78043+#include <linux/sched.h>
78044+#include <linux/grsecurity.h>
78045+#include <linux/grinternal.h>
78046+
78047+int gr_proc_is_restricted(void)
78048+{
78049+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78050+ const struct cred *cred = current_cred();
78051+#endif
78052+
78053+#ifdef CONFIG_GRKERNSEC_PROC_USER
78054+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
78055+ return -EACCES;
78056+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78057+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
78058+ return -EACCES;
78059+#endif
78060+ return 0;
78061+}
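
gr_proc_is_restricted() lets root through unconditionally and, in the USERGROUP variant, also anyone in grsec_proc_gid. A userspace analogue over geteuid() and the supplementary group list; the gid value 10 below is an arbitrary stand-in for the grsec_proc_gid sysctl, and euid stands in for the kernel's fsuid check.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>

#define PROC_GID ((gid_t)10)	/* arbitrary stand-in for grsec_proc_gid */

/* Check the supplementary group list plus the effective gid. */
static int in_group(gid_t gid)
{
	int n = getgroups(0, NULL);	/* query how many groups we have */
	gid_t *groups;
	int found = 0;

	if (n <= 0)
		return getegid() == gid;
	groups = calloc(n, sizeof(*groups));
	if (groups && getgroups(n, groups) > 0)
		for (int i = 0; i < n; i++)
			if (groups[i] == gid)
				found = 1;
	free(groups);
	return found || getegid() == gid;
}

/* Mirrors the USERGROUP policy: root passes, others need the group. */
static int proc_is_restricted(void)
{
	if (geteuid() == 0)
		return 0;
	return in_group(PROC_GID) ? 0 : -13;	/* -EACCES */
}

int main(void)
{
	printf("restricted: %d\n", proc_is_restricted());
	return 0;
}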
78062diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
78063new file mode 100644
78064index 0000000..f7f29aa
78065--- /dev/null
78066+++ b/grsecurity/grsec_ptrace.c
78067@@ -0,0 +1,30 @@
78068+#include <linux/kernel.h>
78069+#include <linux/sched.h>
78070+#include <linux/grinternal.h>
78071+#include <linux/security.h>
78072+
78073+void
78074+gr_audit_ptrace(struct task_struct *task)
78075+{
78076+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78077+ if (grsec_enable_audit_ptrace)
78078+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
78079+#endif
78080+ return;
78081+}
78082+
78083+int
78084+gr_ptrace_readexec(struct file *file, int unsafe_flags)
78085+{
78086+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78087+ const struct dentry *dentry = file->f_path.dentry;
78088+ const struct vfsmount *mnt = file->f_path.mnt;
78089+
78090+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
78091+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
78092+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
78093+ return -EACCES;
78094+ }
78095+#endif
78096+ return 0;
78097+}
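
gr_ptrace_readexec() blocks a traced process from exec'ing a binary that neither DAC nor the RBAC policy would let its tracer read, closing the hole where ptrace is used to dump otherwise unreadable executables. A coarse userspace analogue using access(2), assuming the caller's credentials stand in for the tracer's.

#include <stdio.h>
#include <unistd.h>

/* Deny exec-under-ptrace of binaries the (assumed) tracer cannot read. */
static int ptrace_readexec_ok(const char *binary, int traced)
{
	if (!traced)
		return 1;	/* only traced execs are screened */
	return access(binary, R_OK) == 0;
}

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/bin/sh";

	printf("%s: %s\n", path,
	       ptrace_readexec_ok(path, 1) ? "exec allowed" : "exec denied");
	return 0;
}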
78098diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
78099new file mode 100644
78100index 0000000..3860c7e
78101--- /dev/null
78102+++ b/grsecurity/grsec_sig.c
78103@@ -0,0 +1,236 @@
78104+#include <linux/kernel.h>
78105+#include <linux/sched.h>
78106+#include <linux/fs.h>
78107+#include <linux/delay.h>
78108+#include <linux/grsecurity.h>
78109+#include <linux/grinternal.h>
78110+#include <linux/hardirq.h>
78111+
78112+char *signames[] = {
78113+ [SIGSEGV] = "Segmentation fault",
78114+ [SIGILL] = "Illegal instruction",
78115+ [SIGABRT] = "Abort",
78116+ [SIGBUS] = "Invalid alignment/Bus error"
78117+};
78118+
78119+void
78120+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
78121+{
78122+#ifdef CONFIG_GRKERNSEC_SIGNAL
78123+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
78124+ (sig == SIGABRT) || (sig == SIGBUS))) {
78125+ if (task_pid_nr(t) == task_pid_nr(current)) {
78126+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
78127+ } else {
78128+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
78129+ }
78130+ }
78131+#endif
78132+ return;
78133+}
78134+
78135+int
78136+gr_handle_signal(const struct task_struct *p, const int sig)
78137+{
78138+#ifdef CONFIG_GRKERNSEC
78139+ /* ignore the 0 signal for protected task checks */
78140+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
78141+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
78142+ return -EPERM;
78143+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
78144+ return -EPERM;
78145+ }
78146+#endif
78147+ return 0;
78148+}
78149+
78150+#ifdef CONFIG_GRKERNSEC
78151+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
78152+
78153+int gr_fake_force_sig(int sig, struct task_struct *t)
78154+{
78155+ unsigned long int flags;
78156+ int ret, blocked, ignored;
78157+ struct k_sigaction *action;
78158+
78159+ spin_lock_irqsave(&t->sighand->siglock, flags);
78160+ action = &t->sighand->action[sig-1];
78161+ ignored = action->sa.sa_handler == SIG_IGN;
78162+ blocked = sigismember(&t->blocked, sig);
78163+ if (blocked || ignored) {
78164+ action->sa.sa_handler = SIG_DFL;
78165+ if (blocked) {
78166+ sigdelset(&t->blocked, sig);
78167+ recalc_sigpending_and_wake(t);
78168+ }
78169+ }
78170+ if (action->sa.sa_handler == SIG_DFL)
78171+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
78172+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
78173+
78174+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
78175+
78176+ return ret;
78177+}
78178+#endif
78179+
78180+#define GR_USER_BAN_TIME (15 * 60)
78181+#define GR_DAEMON_BRUTE_TIME (30 * 60)
78182+
78183+void gr_handle_brute_attach(int dumpable)
78184+{
78185+#ifdef CONFIG_GRKERNSEC_BRUTE
78186+ struct task_struct *p = current;
78187+ kuid_t uid = GLOBAL_ROOT_UID;
78188+ int daemon = 0;
78189+
78190+ if (!grsec_enable_brute)
78191+ return;
78192+
78193+ rcu_read_lock();
78194+ read_lock(&tasklist_lock);
78195+ read_lock(&grsec_exec_file_lock);
78196+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
78197+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
78198+ p->real_parent->brute = 1;
78199+ daemon = 1;
78200+ } else {
78201+ const struct cred *cred = __task_cred(p), *cred2;
78202+ struct task_struct *tsk, *tsk2;
78203+
78204+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
78205+ struct user_struct *user;
78206+
78207+ uid = cred->uid;
78208+
78209+			/* the find_user() reference taken here is put (free_uid) on exec once the ban expires */
78210+ user = find_user(uid);
78211+ if (user == NULL)
78212+ goto unlock;
78213+ user->suid_banned = 1;
78214+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
78215+ if (user->suid_ban_expires == ~0UL)
78216+ user->suid_ban_expires--;
78217+
78218+ /* only kill other threads of the same binary, from the same user */
78219+ do_each_thread(tsk2, tsk) {
78220+ cred2 = __task_cred(tsk);
78221+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
78222+ gr_fake_force_sig(SIGKILL, tsk);
78223+ } while_each_thread(tsk2, tsk);
78224+ }
78225+ }
78226+unlock:
78227+ read_unlock(&grsec_exec_file_lock);
78228+ read_unlock(&tasklist_lock);
78229+ rcu_read_unlock();
78230+
78231+ if (gr_is_global_nonroot(uid))
78232+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
78233+ else if (daemon)
78234+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
78235+
78236+#endif
78237+ return;
78238+}
78239+
78240+void gr_handle_brute_check(void)
78241+{
78242+#ifdef CONFIG_GRKERNSEC_BRUTE
78243+ struct task_struct *p = current;
78244+
78245+ if (unlikely(p->brute)) {
78246+ if (!grsec_enable_brute)
78247+ p->brute = 0;
78248+ else if (time_before(get_seconds(), p->brute_expires))
78249+ msleep(30 * 1000);
78250+ }
78251+#endif
78252+ return;
78253+}
78254+
78255+void gr_handle_kernel_exploit(void)
78256+{
78257+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78258+ const struct cred *cred;
78259+ struct task_struct *tsk, *tsk2;
78260+ struct user_struct *user;
78261+ kuid_t uid;
78262+
78263+ if (in_irq() || in_serving_softirq() || in_nmi())
78264+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
78265+
78266+ uid = current_uid();
78267+
78268+ if (gr_is_global_root(uid))
78269+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
78270+ else {
78271+		/* kill all the processes of this user, hold a reference
78272+		   to their user_struct, and prevent them from creating
78273+ another process until system reset
78274+ */
78275+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
78276+ GR_GLOBAL_UID(uid));
78277+ /* we intentionally leak this ref */
78278+ user = get_uid(current->cred->user);
78279+ if (user)
78280+ user->kernel_banned = 1;
78281+
78282+ /* kill all processes of this user */
78283+ read_lock(&tasklist_lock);
78284+ do_each_thread(tsk2, tsk) {
78285+ cred = __task_cred(tsk);
78286+ if (uid_eq(cred->uid, uid))
78287+ gr_fake_force_sig(SIGKILL, tsk);
78288+ } while_each_thread(tsk2, tsk);
78289+ read_unlock(&tasklist_lock);
78290+ }
78291+#endif
78292+}
78293+
78294+#ifdef CONFIG_GRKERNSEC_BRUTE
78295+static bool suid_ban_expired(struct user_struct *user)
78296+{
78297+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
78298+ user->suid_banned = 0;
78299+ user->suid_ban_expires = 0;
78300+ free_uid(user);
78301+ return true;
78302+ }
78303+
78304+ return false;
78305+}
78306+#endif
78307+
78308+int gr_process_kernel_exec_ban(void)
78309+{
78310+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78311+ if (unlikely(current->cred->user->kernel_banned))
78312+ return -EPERM;
78313+#endif
78314+ return 0;
78315+}
78316+
78317+int gr_process_kernel_setuid_ban(struct user_struct *user)
78318+{
78319+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78320+ if (unlikely(user->kernel_banned))
78321+ gr_fake_force_sig(SIGKILL, current);
78322+#endif
78323+ return 0;
78324+}
78325+
78326+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
78327+{
78328+#ifdef CONFIG_GRKERNSEC_BRUTE
78329+ struct user_struct *user = current->cred->user;
78330+ if (unlikely(user->suid_banned)) {
78331+ if (suid_ban_expired(user))
78332+ return 0;
78333+ /* disallow execution of suid binaries only */
78334+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
78335+ return -EPERM;
78336+ }
78337+#endif
78338+ return 0;
78339+}
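
The brute-force ban bookkeeping stores an expiry timestamp in seconds, reserving ~0UL as a "banned until reboot" sentinel -- which is why gr_handle_brute_attach() decrements the value if the addition happens to land on it. The same arithmetic in isolation, with GR_USER_BAN_TIME's 15-minute value carried over:

#include <stdio.h>
#include <time.h>

#define USER_BAN_TIME (15 * 60)	/* seconds, as in GR_USER_BAN_TIME */

struct user {
	int banned;
	unsigned long ban_expires;	/* ~0UL is reserved: banned forever */
};

static void ban(struct user *u, unsigned long now)
{
	u->banned = 1;
	u->ban_expires = now + USER_BAN_TIME;
	if (u->ban_expires == ~0UL)	/* avoid the permanent-ban sentinel */
		u->ban_expires--;
}

static int ban_expired(struct user *u, unsigned long now)
{
	if (u->ban_expires != ~0UL && now >= u->ban_expires) {
		u->banned = 0;
		u->ban_expires = 0;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct user u = {0};
	unsigned long now = (unsigned long)time(NULL);

	ban(&u, now);
	printf("banned now: %d\n", !ban_expired(&u, now));
	printf("banned after 16 min: %d\n", !ban_expired(&u, now + 16 * 60));
	return 0;
}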
78340diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
78341new file mode 100644
78342index 0000000..e3650b6
78343--- /dev/null
78344+++ b/grsecurity/grsec_sock.c
78345@@ -0,0 +1,244 @@
78346+#include <linux/kernel.h>
78347+#include <linux/module.h>
78348+#include <linux/sched.h>
78349+#include <linux/file.h>
78350+#include <linux/net.h>
78351+#include <linux/in.h>
78352+#include <linux/ip.h>
78353+#include <net/sock.h>
78354+#include <net/inet_sock.h>
78355+#include <linux/grsecurity.h>
78356+#include <linux/grinternal.h>
78357+#include <linux/gracl.h>
78358+
78359+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
78360+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
78361+
78362+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
78363+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
78364+
78365+#ifdef CONFIG_UNIX_MODULE
78366+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
78367+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
78368+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
78369+EXPORT_SYMBOL_GPL(gr_handle_create);
78370+#endif
78371+
78372+#ifdef CONFIG_GRKERNSEC
78373+#define gr_conn_table_size 32749
78374+struct conn_table_entry {
78375+ struct conn_table_entry *next;
78376+ struct signal_struct *sig;
78377+};
78378+
78379+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
78380+DEFINE_SPINLOCK(gr_conn_table_lock);
78381+
78382+extern const char * gr_socktype_to_name(unsigned char type);
78383+extern const char * gr_proto_to_name(unsigned char proto);
78384+extern const char * gr_sockfamily_to_name(unsigned char family);
78385+
78386+static __inline__ int
78387+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
78388+{
78389+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
78390+}
78391+
78392+static __inline__ int
78393+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
78394+ __u16 sport, __u16 dport)
78395+{
78396+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
78397+ sig->gr_sport == sport && sig->gr_dport == dport))
78398+ return 1;
78399+ else
78400+ return 0;
78401+}
78402+
78403+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
78404+{
78405+ struct conn_table_entry **match;
78406+ unsigned int index;
78407+
78408+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78409+ sig->gr_sport, sig->gr_dport,
78410+ gr_conn_table_size);
78411+
78412+ newent->sig = sig;
78413+
78414+ match = &gr_conn_table[index];
78415+ newent->next = *match;
78416+ *match = newent;
78417+
78418+ return;
78419+}
78420+
78421+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
78422+{
78423+ struct conn_table_entry *match, *last = NULL;
78424+ unsigned int index;
78425+
78426+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78427+ sig->gr_sport, sig->gr_dport,
78428+ gr_conn_table_size);
78429+
78430+ match = gr_conn_table[index];
78431+ while (match && !conn_match(match->sig,
78432+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
78433+ sig->gr_dport)) {
78434+ last = match;
78435+ match = match->next;
78436+ }
78437+
78438+ if (match) {
78439+ if (last)
78440+ last->next = match->next;
78441+ else
78442+ gr_conn_table[index] = NULL;
78443+ kfree(match);
78444+ }
78445+
78446+ return;
78447+}
78448+
78449+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
78450+ __u16 sport, __u16 dport)
78451+{
78452+ struct conn_table_entry *match;
78453+ unsigned int index;
78454+
78455+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
78456+
78457+ match = gr_conn_table[index];
78458+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
78459+ match = match->next;
78460+
78461+ if (match)
78462+ return match->sig;
78463+ else
78464+ return NULL;
78465+}
78466+
78467+#endif
78468+
78469+void gr_update_task_in_ip_table(const struct inet_sock *inet)
78470+{
78471+#ifdef CONFIG_GRKERNSEC
78472+ struct signal_struct *sig = current->signal;
78473+ struct conn_table_entry *newent;
78474+
78475+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
78476+ if (newent == NULL)
78477+ return;
78478+ /* no bh lock needed since we are called with bh disabled */
78479+ spin_lock(&gr_conn_table_lock);
78480+ gr_del_task_from_ip_table_nolock(sig);
78481+ sig->gr_saddr = inet->inet_rcv_saddr;
78482+ sig->gr_daddr = inet->inet_daddr;
78483+ sig->gr_sport = inet->inet_sport;
78484+ sig->gr_dport = inet->inet_dport;
78485+ gr_add_to_task_ip_table_nolock(sig, newent);
78486+ spin_unlock(&gr_conn_table_lock);
78487+#endif
78488+ return;
78489+}
78490+
78491+void gr_del_task_from_ip_table(struct task_struct *task)
78492+{
78493+#ifdef CONFIG_GRKERNSEC
78494+ spin_lock_bh(&gr_conn_table_lock);
78495+ gr_del_task_from_ip_table_nolock(task->signal);
78496+ spin_unlock_bh(&gr_conn_table_lock);
78497+#endif
78498+ return;
78499+}
78500+
78501+void
78502+gr_attach_curr_ip(const struct sock *sk)
78503+{
78504+#ifdef CONFIG_GRKERNSEC
78505+ struct signal_struct *p, *set;
78506+ const struct inet_sock *inet = inet_sk(sk);
78507+
78508+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
78509+ return;
78510+
78511+ set = current->signal;
78512+
78513+ spin_lock_bh(&gr_conn_table_lock);
78514+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
78515+ inet->inet_dport, inet->inet_sport);
78516+ if (unlikely(p != NULL)) {
78517+ set->curr_ip = p->curr_ip;
78518+ set->used_accept = 1;
78519+ gr_del_task_from_ip_table_nolock(p);
78520+ spin_unlock_bh(&gr_conn_table_lock);
78521+ return;
78522+ }
78523+ spin_unlock_bh(&gr_conn_table_lock);
78524+
78525+ set->curr_ip = inet->inet_daddr;
78526+ set->used_accept = 1;
78527+#endif
78528+ return;
78529+}
78530+
78531+int
78532+gr_handle_sock_all(const int family, const int type, const int protocol)
78533+{
78534+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78535+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
78536+ (family != AF_UNIX)) {
78537+ if (family == AF_INET)
78538+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
78539+ else
78540+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
78541+ return -EACCES;
78542+ }
78543+#endif
78544+ return 0;
78545+}
78546+
78547+int
78548+gr_handle_sock_server(const struct sockaddr *sck)
78549+{
78550+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78551+ if (grsec_enable_socket_server &&
78552+ in_group_p(grsec_socket_server_gid) &&
78553+ sck && (sck->sa_family != AF_UNIX) &&
78554+ (sck->sa_family != AF_LOCAL)) {
78555+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
78556+ return -EACCES;
78557+ }
78558+#endif
78559+ return 0;
78560+}
78561+
78562+int
78563+gr_handle_sock_server_other(const struct sock *sck)
78564+{
78565+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78566+ if (grsec_enable_socket_server &&
78567+ in_group_p(grsec_socket_server_gid) &&
78568+ sck && (sck->sk_family != AF_UNIX) &&
78569+ (sck->sk_family != AF_LOCAL)) {
78570+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
78571+ return -EACCES;
78572+ }
78573+#endif
78574+ return 0;
78575+}
78576+
78577+int
78578+gr_handle_sock_client(const struct sockaddr *sck)
78579+{
78580+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78581+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
78582+ sck && (sck->sa_family != AF_UNIX) &&
78583+ (sck->sa_family != AF_LOCAL)) {
78584+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
78585+ return -EACCES;
78586+ }
78587+#endif
78588+ return 0;
78589+}
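
gr_conn_table is a chained hash from a TCP 4-tuple to the signal_struct of the task that last owned the connection, which is how gr_attach_curr_ip() propagates curr_ip across accept(). A self-contained sketch of the same table, reusing the patch's hash mix and its 32749-bucket sizing; owner_ip stands in for signal_struct::curr_ip, and locking is omitted.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define TABLE_SIZE 32749	/* prime bucket count, as in the patch */

struct entry {
	struct entry *next;
	uint32_t saddr, daddr;
	uint16_t sport, dport;
	uint32_t owner_ip;	/* stands in for signal_struct::curr_ip */
};

static struct entry *table[TABLE_SIZE];

/* Same mixing as conn_hash(): fold the 4-tuple into one bucket index. */
static unsigned int conn_hash(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp)
{
	return (d + s + ((uint32_t)sp << 8) + ((uint32_t)dp << 16)) % TABLE_SIZE;
}

static void add_conn(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp,
		     uint32_t ip)
{
	unsigned int h = conn_hash(s, d, sp, dp);
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return;
	*e = (struct entry){ table[h], s, d, sp, dp, ip };	/* chain at head */
	table[h] = e;
}

static struct entry *lookup(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp)
{
	struct entry *e = table[conn_hash(s, d, sp, dp)];

	while (e && !(e->saddr == s && e->daddr == d &&
		      e->sport == sp && e->dport == dp))
		e = e->next;
	return e;
}

int main(void)
{
	add_conn(0x0100007f, 0x0200007f, 80, 4242, 0x0100007f);
	printf("found: %s\n",
	       lookup(0x0100007f, 0x0200007f, 80, 4242) ? "yes" : "no");
	return 0;
}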
78590diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
78591new file mode 100644
78592index 0000000..cce889e
78593--- /dev/null
78594+++ b/grsecurity/grsec_sysctl.c
78595@@ -0,0 +1,488 @@
78596+#include <linux/kernel.h>
78597+#include <linux/sched.h>
78598+#include <linux/sysctl.h>
78599+#include <linux/grsecurity.h>
78600+#include <linux/grinternal.h>
78601+
78602+int
78603+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
78604+{
78605+#ifdef CONFIG_GRKERNSEC_SYSCTL
78606+ if (dirname == NULL || name == NULL)
78607+ return 0;
78608+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
78609+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
78610+ return -EACCES;
78611+ }
78612+#endif
78613+ return 0;
78614+}
78615+
78616+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
78617+static int __maybe_unused __read_only one = 1;
78618+#endif
78619+
78620+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
78621+ defined(CONFIG_GRKERNSEC_DENYUSB)
78622+struct ctl_table grsecurity_table[] = {
78623+#ifdef CONFIG_GRKERNSEC_SYSCTL
78624+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
78625+#ifdef CONFIG_GRKERNSEC_IO
78626+ {
78627+ .procname = "disable_priv_io",
78628+ .data = &grsec_disable_privio,
78629+ .maxlen = sizeof(int),
78630+ .mode = 0600,
78631+ .proc_handler = &proc_dointvec,
78632+ },
78633+#endif
78634+#endif
78635+#ifdef CONFIG_GRKERNSEC_LINK
78636+ {
78637+ .procname = "linking_restrictions",
78638+ .data = &grsec_enable_link,
78639+ .maxlen = sizeof(int),
78640+ .mode = 0600,
78641+ .proc_handler = &proc_dointvec,
78642+ },
78643+#endif
78644+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78645+ {
78646+ .procname = "enforce_symlinksifowner",
78647+ .data = &grsec_enable_symlinkown,
78648+ .maxlen = sizeof(int),
78649+ .mode = 0600,
78650+ .proc_handler = &proc_dointvec,
78651+ },
78652+ {
78653+ .procname = "symlinkown_gid",
78654+ .data = &grsec_symlinkown_gid,
78655+ .maxlen = sizeof(int),
78656+ .mode = 0600,
78657+ .proc_handler = &proc_dointvec,
78658+ },
78659+#endif
78660+#ifdef CONFIG_GRKERNSEC_BRUTE
78661+ {
78662+ .procname = "deter_bruteforce",
78663+ .data = &grsec_enable_brute,
78664+ .maxlen = sizeof(int),
78665+ .mode = 0600,
78666+ .proc_handler = &proc_dointvec,
78667+ },
78668+#endif
78669+#ifdef CONFIG_GRKERNSEC_FIFO
78670+ {
78671+ .procname = "fifo_restrictions",
78672+ .data = &grsec_enable_fifo,
78673+ .maxlen = sizeof(int),
78674+ .mode = 0600,
78675+ .proc_handler = &proc_dointvec,
78676+ },
78677+#endif
78678+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78679+ {
78680+ .procname = "ptrace_readexec",
78681+ .data = &grsec_enable_ptrace_readexec,
78682+ .maxlen = sizeof(int),
78683+ .mode = 0600,
78684+ .proc_handler = &proc_dointvec,
78685+ },
78686+#endif
78687+#ifdef CONFIG_GRKERNSEC_SETXID
78688+ {
78689+ .procname = "consistent_setxid",
78690+ .data = &grsec_enable_setxid,
78691+ .maxlen = sizeof(int),
78692+ .mode = 0600,
78693+ .proc_handler = &proc_dointvec,
78694+ },
78695+#endif
78696+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
78697+ {
78698+ .procname = "ip_blackhole",
78699+ .data = &grsec_enable_blackhole,
78700+ .maxlen = sizeof(int),
78701+ .mode = 0600,
78702+ .proc_handler = &proc_dointvec,
78703+ },
78704+ {
78705+ .procname = "lastack_retries",
78706+ .data = &grsec_lastack_retries,
78707+ .maxlen = sizeof(int),
78708+ .mode = 0600,
78709+ .proc_handler = &proc_dointvec,
78710+ },
78711+#endif
78712+#ifdef CONFIG_GRKERNSEC_EXECLOG
78713+ {
78714+ .procname = "exec_logging",
78715+ .data = &grsec_enable_execlog,
78716+ .maxlen = sizeof(int),
78717+ .mode = 0600,
78718+ .proc_handler = &proc_dointvec,
78719+ },
78720+#endif
78721+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78722+ {
78723+ .procname = "rwxmap_logging",
78724+ .data = &grsec_enable_log_rwxmaps,
78725+ .maxlen = sizeof(int),
78726+ .mode = 0600,
78727+ .proc_handler = &proc_dointvec,
78728+ },
78729+#endif
78730+#ifdef CONFIG_GRKERNSEC_SIGNAL
78731+ {
78732+ .procname = "signal_logging",
78733+ .data = &grsec_enable_signal,
78734+ .maxlen = sizeof(int),
78735+ .mode = 0600,
78736+ .proc_handler = &proc_dointvec,
78737+ },
78738+#endif
78739+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78740+ {
78741+ .procname = "forkfail_logging",
78742+ .data = &grsec_enable_forkfail,
78743+ .maxlen = sizeof(int),
78744+ .mode = 0600,
78745+ .proc_handler = &proc_dointvec,
78746+ },
78747+#endif
78748+#ifdef CONFIG_GRKERNSEC_TIME
78749+ {
78750+ .procname = "timechange_logging",
78751+ .data = &grsec_enable_time,
78752+ .maxlen = sizeof(int),
78753+ .mode = 0600,
78754+ .proc_handler = &proc_dointvec,
78755+ },
78756+#endif
78757+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
78758+ {
78759+ .procname = "chroot_deny_shmat",
78760+ .data = &grsec_enable_chroot_shmat,
78761+ .maxlen = sizeof(int),
78762+ .mode = 0600,
78763+ .proc_handler = &proc_dointvec,
78764+ },
78765+#endif
78766+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
78767+ {
78768+ .procname = "chroot_deny_unix",
78769+ .data = &grsec_enable_chroot_unix,
78770+ .maxlen = sizeof(int),
78771+ .mode = 0600,
78772+ .proc_handler = &proc_dointvec,
78773+ },
78774+#endif
78775+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
78776+ {
78777+ .procname = "chroot_deny_mount",
78778+ .data = &grsec_enable_chroot_mount,
78779+ .maxlen = sizeof(int),
78780+ .mode = 0600,
78781+ .proc_handler = &proc_dointvec,
78782+ },
78783+#endif
78784+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
78785+ {
78786+ .procname = "chroot_deny_fchdir",
78787+ .data = &grsec_enable_chroot_fchdir,
78788+ .maxlen = sizeof(int),
78789+ .mode = 0600,
78790+ .proc_handler = &proc_dointvec,
78791+ },
78792+#endif
78793+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
78794+ {
78795+ .procname = "chroot_deny_chroot",
78796+ .data = &grsec_enable_chroot_double,
78797+ .maxlen = sizeof(int),
78798+ .mode = 0600,
78799+ .proc_handler = &proc_dointvec,
78800+ },
78801+#endif
78802+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
78803+ {
78804+ .procname = "chroot_deny_pivot",
78805+ .data = &grsec_enable_chroot_pivot,
78806+ .maxlen = sizeof(int),
78807+ .mode = 0600,
78808+ .proc_handler = &proc_dointvec,
78809+ },
78810+#endif
78811+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
78812+ {
78813+ .procname = "chroot_enforce_chdir",
78814+ .data = &grsec_enable_chroot_chdir,
78815+ .maxlen = sizeof(int),
78816+ .mode = 0600,
78817+ .proc_handler = &proc_dointvec,
78818+ },
78819+#endif
78820+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
78821+ {
78822+ .procname = "chroot_deny_chmod",
78823+ .data = &grsec_enable_chroot_chmod,
78824+ .maxlen = sizeof(int),
78825+ .mode = 0600,
78826+ .proc_handler = &proc_dointvec,
78827+ },
78828+#endif
78829+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
78830+ {
78831+ .procname = "chroot_deny_mknod",
78832+ .data = &grsec_enable_chroot_mknod,
78833+ .maxlen = sizeof(int),
78834+ .mode = 0600,
78835+ .proc_handler = &proc_dointvec,
78836+ },
78837+#endif
78838+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
78839+ {
78840+ .procname = "chroot_restrict_nice",
78841+ .data = &grsec_enable_chroot_nice,
78842+ .maxlen = sizeof(int),
78843+ .mode = 0600,
78844+ .proc_handler = &proc_dointvec,
78845+ },
78846+#endif
78847+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
78848+ {
78849+ .procname = "chroot_execlog",
78850+ .data = &grsec_enable_chroot_execlog,
78851+ .maxlen = sizeof(int),
78852+ .mode = 0600,
78853+ .proc_handler = &proc_dointvec,
78854+ },
78855+#endif
78856+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
78857+ {
78858+ .procname = "chroot_caps",
78859+ .data = &grsec_enable_chroot_caps,
78860+ .maxlen = sizeof(int),
78861+ .mode = 0600,
78862+ .proc_handler = &proc_dointvec,
78863+ },
78864+#endif
78865+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
78866+ {
78867+ .procname = "chroot_deny_bad_rename",
78868+ .data = &grsec_enable_chroot_rename,
78869+ .maxlen = sizeof(int),
78870+ .mode = 0600,
78871+ .proc_handler = &proc_dointvec,
78872+ },
78873+#endif
78874+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
78875+ {
78876+ .procname = "chroot_deny_sysctl",
78877+ .data = &grsec_enable_chroot_sysctl,
78878+ .maxlen = sizeof(int),
78879+ .mode = 0600,
78880+ .proc_handler = &proc_dointvec,
78881+ },
78882+#endif
78883+#ifdef CONFIG_GRKERNSEC_TPE
78884+ {
78885+ .procname = "tpe",
78886+ .data = &grsec_enable_tpe,
78887+ .maxlen = sizeof(int),
78888+ .mode = 0600,
78889+ .proc_handler = &proc_dointvec,
78890+ },
78891+ {
78892+ .procname = "tpe_gid",
78893+ .data = &grsec_tpe_gid,
78894+ .maxlen = sizeof(int),
78895+ .mode = 0600,
78896+ .proc_handler = &proc_dointvec,
78897+ },
78898+#endif
78899+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
78900+ {
78901+ .procname = "tpe_invert",
78902+ .data = &grsec_enable_tpe_invert,
78903+ .maxlen = sizeof(int),
78904+ .mode = 0600,
78905+ .proc_handler = &proc_dointvec,
78906+ },
78907+#endif
78908+#ifdef CONFIG_GRKERNSEC_TPE_ALL
78909+ {
78910+ .procname = "tpe_restrict_all",
78911+ .data = &grsec_enable_tpe_all,
78912+ .maxlen = sizeof(int),
78913+ .mode = 0600,
78914+ .proc_handler = &proc_dointvec,
78915+ },
78916+#endif
78917+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78918+ {
78919+ .procname = "socket_all",
78920+ .data = &grsec_enable_socket_all,
78921+ .maxlen = sizeof(int),
78922+ .mode = 0600,
78923+ .proc_handler = &proc_dointvec,
78924+ },
78925+ {
78926+ .procname = "socket_all_gid",
78927+ .data = &grsec_socket_all_gid,
78928+ .maxlen = sizeof(int),
78929+ .mode = 0600,
78930+ .proc_handler = &proc_dointvec,
78931+ },
78932+#endif
78933+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78934+ {
78935+ .procname = "socket_client",
78936+ .data = &grsec_enable_socket_client,
78937+ .maxlen = sizeof(int),
78938+ .mode = 0600,
78939+ .proc_handler = &proc_dointvec,
78940+ },
78941+ {
78942+ .procname = "socket_client_gid",
78943+ .data = &grsec_socket_client_gid,
78944+ .maxlen = sizeof(int),
78945+ .mode = 0600,
78946+ .proc_handler = &proc_dointvec,
78947+ },
78948+#endif
78949+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78950+ {
78951+ .procname = "socket_server",
78952+ .data = &grsec_enable_socket_server,
78953+ .maxlen = sizeof(int),
78954+ .mode = 0600,
78955+ .proc_handler = &proc_dointvec,
78956+ },
78957+ {
78958+ .procname = "socket_server_gid",
78959+ .data = &grsec_socket_server_gid,
78960+ .maxlen = sizeof(int),
78961+ .mode = 0600,
78962+ .proc_handler = &proc_dointvec,
78963+ },
78964+#endif
78965+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
78966+ {
78967+ .procname = "audit_group",
78968+ .data = &grsec_enable_group,
78969+ .maxlen = sizeof(int),
78970+ .mode = 0600,
78971+ .proc_handler = &proc_dointvec,
78972+ },
78973+ {
78974+ .procname = "audit_gid",
78975+ .data = &grsec_audit_gid,
78976+ .maxlen = sizeof(int),
78977+ .mode = 0600,
78978+ .proc_handler = &proc_dointvec,
78979+ },
78980+#endif
78981+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
78982+ {
78983+ .procname = "audit_chdir",
78984+ .data = &grsec_enable_chdir,
78985+ .maxlen = sizeof(int),
78986+ .mode = 0600,
78987+ .proc_handler = &proc_dointvec,
78988+ },
78989+#endif
78990+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78991+ {
78992+ .procname = "audit_mount",
78993+ .data = &grsec_enable_mount,
78994+ .maxlen = sizeof(int),
78995+ .mode = 0600,
78996+ .proc_handler = &proc_dointvec,
78997+ },
78998+#endif
78999+#ifdef CONFIG_GRKERNSEC_DMESG
79000+ {
79001+ .procname = "dmesg",
79002+ .data = &grsec_enable_dmesg,
79003+ .maxlen = sizeof(int),
79004+ .mode = 0600,
79005+ .proc_handler = &proc_dointvec,
79006+ },
79007+#endif
79008+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
79009+ {
79010+ .procname = "chroot_findtask",
79011+ .data = &grsec_enable_chroot_findtask,
79012+ .maxlen = sizeof(int),
79013+ .mode = 0600,
79014+ .proc_handler = &proc_dointvec,
79015+ },
79016+#endif
79017+#ifdef CONFIG_GRKERNSEC_RESLOG
79018+ {
79019+ .procname = "resource_logging",
79020+ .data = &grsec_resource_logging,
79021+ .maxlen = sizeof(int),
79022+ .mode = 0600,
79023+ .proc_handler = &proc_dointvec,
79024+ },
79025+#endif
79026+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
79027+ {
79028+ .procname = "audit_ptrace",
79029+ .data = &grsec_enable_audit_ptrace,
79030+ .maxlen = sizeof(int),
79031+ .mode = 0600,
79032+ .proc_handler = &proc_dointvec,
79033+ },
79034+#endif
79035+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
79036+ {
79037+ .procname = "harden_ptrace",
79038+ .data = &grsec_enable_harden_ptrace,
79039+ .maxlen = sizeof(int),
79040+ .mode = 0600,
79041+ .proc_handler = &proc_dointvec,
79042+ },
79043+#endif
79044+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
79045+ {
79046+ .procname = "harden_ipc",
79047+ .data = &grsec_enable_harden_ipc,
79048+ .maxlen = sizeof(int),
79049+ .mode = 0600,
79050+ .proc_handler = &proc_dointvec,
79051+ },
79052+#endif
79053+ {
79054+ .procname = "grsec_lock",
79055+ .data = &grsec_lock,
79056+ .maxlen = sizeof(int),
79057+ .mode = 0600,
79058+ .proc_handler = &proc_dointvec,
79059+ },
79060+#endif
79061+#ifdef CONFIG_GRKERNSEC_ROFS
79062+ {
79063+ .procname = "romount_protect",
79064+ .data = &grsec_enable_rofs,
79065+ .maxlen = sizeof(int),
79066+ .mode = 0600,
79067+ .proc_handler = &proc_dointvec_minmax,
79068+ .extra1 = &one,
79069+ .extra2 = &one,
79070+ },
79071+#endif
79072+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
79073+ {
79074+ .procname = "deny_new_usb",
79075+ .data = &grsec_deny_new_usb,
79076+ .maxlen = sizeof(int),
79077+ .mode = 0600,
79078+ .proc_handler = &proc_dointvec,
79079+ },
79080+#endif
79081+ { }
79082+};
79083+#endif
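
Every entry above surfaces as a mode-0600 integer under /proc/sys/kernel/grsecurity, parsed by proc_dointvec(), and once grsec_lock is set, gr_handle_sysctl_mod() refuses all further writes to the tree. A small sketch of toggling one of these from userspace, assuming the default procfs mount point:

#include <stdio.h>

/* Write "1" to a grsecurity sysctl; path assumes the default proc mount. */
static int set_sysctl(const char *name)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/proc/sys/kernel/grsecurity/%s", name);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fputs("1\n", f);	/* proc_dointvec parses a decimal int */
	return fclose(f);
}

int main(void)
{
	/* After this succeeds, later writes anywhere in the tree fail
	 * with EACCES via gr_handle_sysctl_mod(). */
	return set_sysctl("grsec_lock") ? 1 : 0;
}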
79084diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
79085new file mode 100644
79086index 0000000..61b514e
79087--- /dev/null
79088+++ b/grsecurity/grsec_time.c
79089@@ -0,0 +1,16 @@
79090+#include <linux/kernel.h>
79091+#include <linux/sched.h>
79092+#include <linux/grinternal.h>
79093+#include <linux/module.h>
79094+
79095+void
79096+gr_log_timechange(void)
79097+{
79098+#ifdef CONFIG_GRKERNSEC_TIME
79099+ if (grsec_enable_time)
79100+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
79101+#endif
79102+ return;
79103+}
79104+
79105+EXPORT_SYMBOL_GPL(gr_log_timechange);
79106diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
79107new file mode 100644
79108index 0000000..d1953de
79109--- /dev/null
79110+++ b/grsecurity/grsec_tpe.c
79111@@ -0,0 +1,78 @@
79112+#include <linux/kernel.h>
79113+#include <linux/sched.h>
79114+#include <linux/file.h>
79115+#include <linux/fs.h>
79116+#include <linux/grinternal.h>
79117+
79118+extern int gr_acl_tpe_check(void);
79119+
79120+int
79121+gr_tpe_allow(const struct file *file)
79122+{
79123+#ifdef CONFIG_GRKERNSEC
79124+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
79125+ struct inode *file_inode = file->f_path.dentry->d_inode;
79126+ const struct cred *cred = current_cred();
79127+ char *msg = NULL;
79128+ char *msg2 = NULL;
79129+
79130+ // never restrict root
79131+ if (gr_is_global_root(cred->uid))
79132+ return 1;
79133+
79134+ if (grsec_enable_tpe) {
79135+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
79136+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
79137+ msg = "not being in trusted group";
79138+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
79139+ msg = "being in untrusted group";
79140+#else
79141+ if (in_group_p(grsec_tpe_gid))
79142+ msg = "being in untrusted group";
79143+#endif
79144+ }
79145+ if (!msg && gr_acl_tpe_check())
79146+ msg = "being in untrusted role";
79147+
79148+ // not in any affected group/role
79149+ if (!msg)
79150+ goto next_check;
79151+
79152+ if (gr_is_global_nonroot(inode->i_uid))
79153+ msg2 = "file in non-root-owned directory";
79154+ else if (inode->i_mode & S_IWOTH)
79155+ msg2 = "file in world-writable directory";
79156+ else if (inode->i_mode & S_IWGRP)
79157+ msg2 = "file in group-writable directory";
79158+ else if (file_inode->i_mode & S_IWOTH)
79159+ msg2 = "file is world-writable";
79160+
79161+ if (msg && msg2) {
79162+ char fullmsg[70] = {0};
79163+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
79164+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
79165+ return 0;
79166+ }
79167+ msg = NULL;
79168+next_check:
79169+#ifdef CONFIG_GRKERNSEC_TPE_ALL
79170+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
79171+ return 1;
79172+
79173+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
79174+ msg = "directory not owned by user";
79175+ else if (inode->i_mode & S_IWOTH)
79176+ msg = "file in world-writable directory";
79177+ else if (inode->i_mode & S_IWGRP)
79178+ msg = "file in group-writable directory";
79179+ else if (file_inode->i_mode & S_IWOTH)
79180+ msg = "file is world-writable";
79181+
79182+ if (msg) {
79183+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
79184+ return 0;
79185+ }
79186+#endif
79187+#endif
79188+ return 1;
79189+}
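
gr_tpe_allow() walks a fixed cascade: is the parent directory non-root-owned, world-writable, or group-writable, or is the file itself world-writable? A userspace predicate applying the same tests, in the same order, via stat(2); it covers only the base policy, not the TPE_ALL ownership variant or the group/role gating above.

#include <stdio.h>
#include <string.h>
#include <libgen.h>
#include <sys/stat.h>

/* Returns a reason string if TPE-style checks would block 'path', else NULL. */
static const char *tpe_reason(const char *path)
{
	char buf[4096];
	struct stat dir, file;

	strncpy(buf, path, sizeof(buf) - 1);	/* dirname() may modify its arg */
	buf[sizeof(buf) - 1] = '\0';
	if (stat(dirname(buf), &dir) || stat(path, &file))
		return "stat failed";

	if (dir.st_uid != 0)
		return "file in non-root-owned directory";
	if (dir.st_mode & S_IWOTH)
		return "file in world-writable directory";
	if (dir.st_mode & S_IWGRP)
		return "file in group-writable directory";
	if (file.st_mode & S_IWOTH)
		return "file is world-writable";
	return NULL;
}

int main(int argc, char **argv)
{
	const char *p = argc > 1 ? argv[1] : "/bin/sh";
	const char *why = tpe_reason(p);

	printf("%s: %s\n", p, why ? why : "allowed");
	return 0;
}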
79190diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
79191new file mode 100644
79192index 0000000..ae02d8e
79193--- /dev/null
79194+++ b/grsecurity/grsec_usb.c
79195@@ -0,0 +1,15 @@
79196+#include <linux/kernel.h>
79197+#include <linux/grinternal.h>
79198+#include <linux/module.h>
79199+
79200+int gr_handle_new_usb(void)
79201+{
79202+#ifdef CONFIG_GRKERNSEC_DENYUSB
79203+ if (grsec_deny_new_usb) {
79204+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
79205+ return 1;
79206+ }
79207+#endif
79208+ return 0;
79209+}
79210+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
79211diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
79212new file mode 100644
79213index 0000000..158b330
79214--- /dev/null
79215+++ b/grsecurity/grsum.c
79216@@ -0,0 +1,64 @@
79217+#include <linux/err.h>
79218+#include <linux/kernel.h>
79219+#include <linux/sched.h>
79220+#include <linux/mm.h>
79221+#include <linux/scatterlist.h>
79222+#include <linux/crypto.h>
79223+#include <linux/gracl.h>
79224+
79225+
79226+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
79227+#error "crypto and sha256 must be built into the kernel"
79228+#endif
79229+
79230+int
79231+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
79232+{
79233+ struct crypto_hash *tfm;
79234+ struct hash_desc desc;
79235+ struct scatterlist sg[2];
79236+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
79237+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
79238+ unsigned long *sumptr = (unsigned long *)sum;
79239+ int cryptres;
79240+ int retval = 1;
79241+ volatile int mismatched = 0;
79242+ volatile int dummy = 0;
79243+ unsigned int i;
79244+
79245+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
79246+ if (IS_ERR(tfm)) {
79247+ /* should never happen, since sha256 should be built in */
79248+ memset(entry->pw, 0, GR_PW_LEN);
79249+ return 1;
79250+ }
79251+
79252+ sg_init_table(sg, 2);
79253+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
79254+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
79255+
79256+ desc.tfm = tfm;
79257+ desc.flags = 0;
79258+
79259+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
79260+ temp_sum);
79261+
79262+ memset(entry->pw, 0, GR_PW_LEN);
79263+
79264+ if (cryptres)
79265+ goto out;
79266+
79267+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
79268+ if (sumptr[i] != tmpsumptr[i])
79269+ mismatched = 1;
79270+ else
79271+ dummy = 1; // waste a cycle
79272+
79273+ if (!mismatched)
79274+ retval = dummy - 1;
79275+
79276+out:
79277+ crypto_free_hash(tfm);
79278+
79279+ return retval;
79280+}
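
The volatile mismatched/dummy pair in chkpw() keeps the hash comparison's timing independent of where the first differing word sits, so the check leaks nothing about the stored sum. A common branch-free variant of the same idea, accumulating differences with XOR instead of volatile flags:

#include <stdio.h>
#include <stddef.h>

/* Constant-time comparison: examine every byte, never exit early. */
static int ct_memcmp(const unsigned char *a, const unsigned char *b, size_t n)
{
	unsigned char diff = 0;

	for (size_t i = 0; i < n; i++)
		diff |= a[i] ^ b[i];	/* accumulate instead of branching */
	return diff != 0;		/* 0 on match, 1 on any difference */
}

int main(void)
{
	unsigned char x[4] = { 1, 2, 3, 4 };
	unsigned char y[4] = { 1, 2, 9, 4 };

	printf("equal:   %d\n", ct_memcmp(x, x, 4));
	printf("unequal: %d\n", ct_memcmp(x, y, 4));
	return 0;
}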
79281diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
79282index 77ff547..181834f 100644
79283--- a/include/asm-generic/4level-fixup.h
79284+++ b/include/asm-generic/4level-fixup.h
79285@@ -13,8 +13,10 @@
79286 #define pmd_alloc(mm, pud, address) \
79287 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
79288 NULL: pmd_offset(pud, address))
79289+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
79290
79291 #define pud_alloc(mm, pgd, address) (pgd)
79292+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
79293 #define pud_offset(pgd, start) (pgd)
79294 #define pud_none(pud) 0
79295 #define pud_bad(pud) 0
79296diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
79297index b7babf0..1e4b4f1 100644
79298--- a/include/asm-generic/atomic-long.h
79299+++ b/include/asm-generic/atomic-long.h
79300@@ -22,6 +22,12 @@
79301
79302 typedef atomic64_t atomic_long_t;
79303
79304+#ifdef CONFIG_PAX_REFCOUNT
79305+typedef atomic64_unchecked_t atomic_long_unchecked_t;
79306+#else
79307+typedef atomic64_t atomic_long_unchecked_t;
79308+#endif
79309+
79310 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
79311
79312 static inline long atomic_long_read(atomic_long_t *l)
79313@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79314 return (long)atomic64_read(v);
79315 }
79316
79317+#ifdef CONFIG_PAX_REFCOUNT
79318+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79319+{
79320+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79321+
79322+ return (long)atomic64_read_unchecked(v);
79323+}
79324+#endif
79325+
79326 static inline void atomic_long_set(atomic_long_t *l, long i)
79327 {
79328 atomic64_t *v = (atomic64_t *)l;
79329@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79330 atomic64_set(v, i);
79331 }
79332
79333+#ifdef CONFIG_PAX_REFCOUNT
79334+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79335+{
79336+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79337+
79338+ atomic64_set_unchecked(v, i);
79339+}
79340+#endif
79341+
79342 static inline void atomic_long_inc(atomic_long_t *l)
79343 {
79344 atomic64_t *v = (atomic64_t *)l;
79345@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79346 atomic64_inc(v);
79347 }
79348
79349+#ifdef CONFIG_PAX_REFCOUNT
79350+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79351+{
79352+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79353+
79354+ atomic64_inc_unchecked(v);
79355+}
79356+#endif
79357+
79358 static inline void atomic_long_dec(atomic_long_t *l)
79359 {
79360 atomic64_t *v = (atomic64_t *)l;
79361@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79362 atomic64_dec(v);
79363 }
79364
79365+#ifdef CONFIG_PAX_REFCOUNT
79366+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79367+{
79368+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79369+
79370+ atomic64_dec_unchecked(v);
79371+}
79372+#endif
79373+
79374 static inline void atomic_long_add(long i, atomic_long_t *l)
79375 {
79376 atomic64_t *v = (atomic64_t *)l;
79377@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79378 atomic64_add(i, v);
79379 }
79380
79381+#ifdef CONFIG_PAX_REFCOUNT
79382+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79383+{
79384+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79385+
79386+ atomic64_add_unchecked(i, v);
79387+}
79388+#endif
79389+
79390 static inline void atomic_long_sub(long i, atomic_long_t *l)
79391 {
79392 atomic64_t *v = (atomic64_t *)l;
79393@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79394 atomic64_sub(i, v);
79395 }
79396
79397+#ifdef CONFIG_PAX_REFCOUNT
79398+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
79399+{
79400+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79401+
79402+ atomic64_sub_unchecked(i, v);
79403+}
79404+#endif
79405+
79406 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
79407 {
79408 atomic64_t *v = (atomic64_t *)l;
79409@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
79410 return atomic64_add_negative(i, v);
79411 }
79412
79413-static inline long atomic_long_add_return(long i, atomic_long_t *l)
79414+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
79415 {
79416 atomic64_t *v = (atomic64_t *)l;
79417
79418 return (long)atomic64_add_return(i, v);
79419 }
79420
79421+#ifdef CONFIG_PAX_REFCOUNT
79422+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
79423+{
79424+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79425+
79426+ return (long)atomic64_add_return_unchecked(i, v);
79427+}
79428+#endif
79429+
79430 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
79431 {
79432 atomic64_t *v = (atomic64_t *)l;
79433@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
79434 return (long)atomic64_inc_return(v);
79435 }
79436
79437+#ifdef CONFIG_PAX_REFCOUNT
79438+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
79439+{
79440+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79441+
79442+ return (long)atomic64_inc_return_unchecked(v);
79443+}
79444+#endif
79445+
79446 static inline long atomic_long_dec_return(atomic_long_t *l)
79447 {
79448 atomic64_t *v = (atomic64_t *)l;
79449@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
79450
79451 typedef atomic_t atomic_long_t;
79452
79453+#ifdef CONFIG_PAX_REFCOUNT
79454+typedef atomic_unchecked_t atomic_long_unchecked_t;
79455+#else
79456+typedef atomic_t atomic_long_unchecked_t;
79457+#endif
79458+
79459 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
79460 static inline long atomic_long_read(atomic_long_t *l)
79461 {
79462@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79463 return (long)atomic_read(v);
79464 }
79465
79466+#ifdef CONFIG_PAX_REFCOUNT
79467+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79468+{
79469+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79470+
79471+ return (long)atomic_read_unchecked(v);
79472+}
79473+#endif
79474+
79475 static inline void atomic_long_set(atomic_long_t *l, long i)
79476 {
79477 atomic_t *v = (atomic_t *)l;
79478@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79479 atomic_set(v, i);
79480 }
79481
79482+#ifdef CONFIG_PAX_REFCOUNT
79483+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79484+{
79485+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79486+
79487+ atomic_set_unchecked(v, i);
79488+}
79489+#endif
79490+
79491 static inline void atomic_long_inc(atomic_long_t *l)
79492 {
79493 atomic_t *v = (atomic_t *)l;
79494@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79495 atomic_inc(v);
79496 }
79497
79498+#ifdef CONFIG_PAX_REFCOUNT
79499+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79500+{
79501+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79502+
79503+ atomic_inc_unchecked(v);
79504+}
79505+#endif
79506+
79507 static inline void atomic_long_dec(atomic_long_t *l)
79508 {
79509 atomic_t *v = (atomic_t *)l;
79510@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79511 atomic_dec(v);
79512 }
79513
79514+#ifdef CONFIG_PAX_REFCOUNT
79515+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79516+{
79517+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79518+
79519+ atomic_dec_unchecked(v);
79520+}
79521+#endif
79522+
79523 static inline void atomic_long_add(long i, atomic_long_t *l)
79524 {
79525 atomic_t *v = (atomic_t *)l;
79526@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79527 atomic_add(i, v);
79528 }
79529
79530+#ifdef CONFIG_PAX_REFCOUNT
79531+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79532+{
79533+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79534+
79535+ atomic_add_unchecked(i, v);
79536+}
79537+#endif
79538+
79539 static inline void atomic_long_sub(long i, atomic_long_t *l)
79540 {
79541 atomic_t *v = (atomic_t *)l;
79542@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79543 atomic_sub(i, v);
79544 }
79545
79546+#ifdef CONFIG_PAX_REFCOUNT
79547+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
79548+{
79549+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79550+
79551+ atomic_sub_unchecked(i, v);
79552+}
79553+#endif
79554+
79555 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
79556 {
79557 atomic_t *v = (atomic_t *)l;
79558@@ -211,13 +349,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
79559 return atomic_add_negative(i, v);
79560 }
79561
79562-static inline long atomic_long_add_return(long i, atomic_long_t *l)
79563+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
79564 {
79565 atomic_t *v = (atomic_t *)l;
79566
79567 return (long)atomic_add_return(i, v);
79568 }
79569
79570+#ifdef CONFIG_PAX_REFCOUNT
79571+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
79572+{
79573+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79574+
79575+ return (long)atomic_add_return_unchecked(i, v);
79576+}
79577+
79579+
79580 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
79581 {
79582 atomic_t *v = (atomic_t *)l;
79583@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
79584 return (long)atomic_inc_return(v);
79585 }
79586
79587+#ifdef CONFIG_PAX_REFCOUNT
79588+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
79589+{
79590+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79591+
79592+ return (long)atomic_inc_return_unchecked(v);
79593+}
79594+#endif
79595+
79596 static inline long atomic_long_dec_return(atomic_long_t *l)
79597 {
79598 atomic_t *v = (atomic_t *)l;
79599@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
79600
79601 #endif /* BITS_PER_LONG == 64 */
79602
79603+#ifdef CONFIG_PAX_REFCOUNT
79604+static inline void pax_refcount_needs_these_functions(void)
79605+{
79606+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
79607+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
79608+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
79609+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
79610+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
79611+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
79612+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
79613+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
79614+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
79615+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
79616+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
79617+#ifdef CONFIG_X86
79618+ atomic_clear_mask_unchecked(0, NULL);
79619+ atomic_set_mask_unchecked(0, NULL);
79620+#endif
79621+
79622+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
79623+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
79624+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
79625+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
79626+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
79627+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
79628+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
79629+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
79630+}
79631+#else
79632+#define atomic_read_unchecked(v) atomic_read(v)
79633+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
79634+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
79635+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
79636+#define atomic_inc_unchecked(v) atomic_inc(v)
79637+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
79638+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
79639+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
79640+#define atomic_dec_unchecked(v) atomic_dec(v)
79641+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
79642+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
79643+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
79644+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
79645+
79646+#define atomic_long_read_unchecked(v) atomic_long_read(v)
79647+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
79648+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
79649+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
79650+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
79651+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
79652+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
79653+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
79654+#endif
79655+
79656 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
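
Under CONFIG_PAX_REFCOUNT the regular atomic types trap on overflow to stop reference-count wraparound exploits; the *_unchecked variants introduced above are the opt-out for counters where wrapping is harmless, such as statistics. A hedged kernel-style sketch of the intended usage (the counter is illustrative and assumes a PaX-patched tree):

#include <linux/atomic.h>

/* A packet counter that may legitimately wrap: the unchecked flavour
 * keeps PAX_REFCOUNT's overflow detection from firing on it. */
static atomic_long_unchecked_t rx_packets = ATOMIC_LONG_INIT(0);

static void count_rx(void)
{
	atomic_long_inc_unchecked(&rx_packets);
}

static long rx_total(void)
{
	return atomic_long_read_unchecked(&rx_packets);
}

When PAX_REFCOUNT is disabled, the typedefs and fallback defines above collapse all of this back to the ordinary atomic_long_t API at no cost.
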
79657diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
79658index 30ad9c8..c70c170 100644
79659--- a/include/asm-generic/atomic64.h
79660+++ b/include/asm-generic/atomic64.h
79661@@ -16,6 +16,8 @@ typedef struct {
79662 long long counter;
79663 } atomic64_t;
79664
79665+typedef atomic64_t atomic64_unchecked_t;
79666+
79667 #define ATOMIC64_INIT(i) { (i) }
79668
79669 extern long long atomic64_read(const atomic64_t *v);
79670@@ -51,4 +53,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
79671 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
79672 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
79673
79674+#define atomic64_read_unchecked(v) atomic64_read(v)
79675+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
79676+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
79677+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
79678+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
79679+#define atomic64_inc_unchecked(v) atomic64_inc(v)
79680+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
79681+#define atomic64_dec_unchecked(v) atomic64_dec(v)
79682+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
79683+
79684 #endif /* _ASM_GENERIC_ATOMIC64_H */
79685diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
79686index f5c40b0..e902f9d 100644
79687--- a/include/asm-generic/barrier.h
79688+++ b/include/asm-generic/barrier.h
79689@@ -82,7 +82,7 @@
79690 do { \
79691 compiletime_assert_atomic_type(*p); \
79692 smp_mb(); \
79693- ACCESS_ONCE(*p) = (v); \
79694+ ACCESS_ONCE_RW(*p) = (v); \
79695 } while (0)
79696
79697 #define smp_load_acquire(p) \
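
The barrier.h hunk only swaps the store inside smp_store_release() to ACCESS_ONCE_RW(): as the compiler.h hunk later in this patch shows, ACCESS_ONCE() is changed to yield a const-qualified lvalue, so any macro that must write through it needs the _RW variant. The release/acquire pairing itself is unchanged; schematically (a sketch assuming kernel context, names illustrative):

static int shared_data;
static int data_ready;

static void publish(int v)
{
	shared_data = v;			/* ordinary store */
	smp_store_release(&data_ready, 1);	/* orders the store above before the flag */
}

static int try_consume(void)
{
	if (!smp_load_acquire(&data_ready))	/* orders the read below after the flag */
		return -1;
	return shared_data;			/* guaranteed to observe v */
}
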
79698diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
79699index a60a7cc..0fe12f2 100644
79700--- a/include/asm-generic/bitops/__fls.h
79701+++ b/include/asm-generic/bitops/__fls.h
79702@@ -9,7 +9,7 @@
79703 *
79704 * Undefined if no set bit exists, so code should check against 0 first.
79705 */
79706-static __always_inline unsigned long __fls(unsigned long word)
79707+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
79708 {
79709 int num = BITS_PER_LONG - 1;
79710
79711diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
79712index 0576d1f..dad6c71 100644
79713--- a/include/asm-generic/bitops/fls.h
79714+++ b/include/asm-generic/bitops/fls.h
79715@@ -9,7 +9,7 @@
79716 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
79717 */
79718
79719-static __always_inline int fls(int x)
79720+static __always_inline int __intentional_overflow(-1) fls(int x)
79721 {
79722 int r = 32;
79723
79724diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
79725index b097cf8..3d40e14 100644
79726--- a/include/asm-generic/bitops/fls64.h
79727+++ b/include/asm-generic/bitops/fls64.h
79728@@ -15,7 +15,7 @@
79729 * at position 64.
79730 */
79731 #if BITS_PER_LONG == 32
79732-static __always_inline int fls64(__u64 x)
79733+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
79734 {
79735 __u32 h = x >> 32;
79736 if (h)
79737@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
79738 return fls(x);
79739 }
79740 #elif BITS_PER_LONG == 64
79741-static __always_inline int fls64(__u64 x)
79742+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
79743 {
79744 if (x == 0)
79745 return 0;
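
fls() returns the 1-based index of the most significant set bit, with fls(0) == 0, as the comment in fls.h notes; the __intentional_overflow(-1) annotation only tells the size_overflow plugin that the arithmetic inside may wrap by design. A standalone illustration of the contract (my_fls is an illustrative reimplementation, not kernel code):

#include <stdio.h>

/* Same contract as the kernel's fls(): 1-based index of the highest
 * set bit, 0 for an all-zero input. */
static int my_fls(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	printf("%d %d %d\n", my_fls(0), my_fls(1), my_fls(0x80000000u));
	/* prints: 0 1 32 */
	return 0;
}
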
79746diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
79747index 1bfcfe5..e04c5c9 100644
79748--- a/include/asm-generic/cache.h
79749+++ b/include/asm-generic/cache.h
79750@@ -6,7 +6,7 @@
79751 * cache lines need to provide their own cache.h.
79752 */
79753
79754-#define L1_CACHE_SHIFT 5
79755-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
79756+#define L1_CACHE_SHIFT 5UL
79757+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
79758
79759 #endif /* __ASM_GENERIC_CACHE_H */
79760diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
79761index 0d68a1e..b74a761 100644
79762--- a/include/asm-generic/emergency-restart.h
79763+++ b/include/asm-generic/emergency-restart.h
79764@@ -1,7 +1,7 @@
79765 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
79766 #define _ASM_GENERIC_EMERGENCY_RESTART_H
79767
79768-static inline void machine_emergency_restart(void)
79769+static inline __noreturn void machine_emergency_restart(void)
79770 {
79771 machine_restart(NULL);
79772 }
79773diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
79774index 90f99c7..00ce236 100644
79775--- a/include/asm-generic/kmap_types.h
79776+++ b/include/asm-generic/kmap_types.h
79777@@ -2,9 +2,9 @@
79778 #define _ASM_GENERIC_KMAP_TYPES_H
79779
79780 #ifdef __WITH_KM_FENCE
79781-# define KM_TYPE_NR 41
79782+# define KM_TYPE_NR 42
79783 #else
79784-# define KM_TYPE_NR 20
79785+# define KM_TYPE_NR 21
79786 #endif
79787
79788 #endif
79789diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
79790index 9ceb03b..62b0b8f 100644
79791--- a/include/asm-generic/local.h
79792+++ b/include/asm-generic/local.h
79793@@ -23,24 +23,37 @@ typedef struct
79794 atomic_long_t a;
79795 } local_t;
79796
79797+typedef struct {
79798+ atomic_long_unchecked_t a;
79799+} local_unchecked_t;
79800+
79801 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
79802
79803 #define local_read(l) atomic_long_read(&(l)->a)
79804+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
79805 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
79806+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
79807 #define local_inc(l) atomic_long_inc(&(l)->a)
79808+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
79809 #define local_dec(l) atomic_long_dec(&(l)->a)
79810+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
79811 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
79812+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
79813 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
79814+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
79815
79816 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
79817 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
79818 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
79819 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
79820 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
79821+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
79822 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
79823 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
79824+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
79825
79826 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
79827+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
79828 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
79829 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
79830 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
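
local_t is a thin wrapper around atomic_long_t for counters that are only ever written by their owning CPU, which is why the patch has to mirror the whole API with *_unchecked variants once atomic_long_t grows overflow checking (the hunk also adds a local_dec_return() helper). A hedged sketch of typical local_t use on a per-cpu counter (the counter name is illustrative):

#include <asm/local.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(local_t, evt_count);

static void note_event(void)
{
	/* get_cpu_var() disables preemption, so only the owning CPU
	 * ever updates its copy; that is what makes local_inc() cheap. */
	local_inc(&get_cpu_var(evt_count));
	put_cpu_var(evt_count);
}

static long total_events(void)
{
	long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += local_read(&per_cpu(evt_count, cpu));
	return sum;
}
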
79831diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
79832index 725612b..9cc513a 100644
79833--- a/include/asm-generic/pgtable-nopmd.h
79834+++ b/include/asm-generic/pgtable-nopmd.h
79835@@ -1,14 +1,19 @@
79836 #ifndef _PGTABLE_NOPMD_H
79837 #define _PGTABLE_NOPMD_H
79838
79839-#ifndef __ASSEMBLY__
79840-
79841 #include <asm-generic/pgtable-nopud.h>
79842
79843-struct mm_struct;
79844-
79845 #define __PAGETABLE_PMD_FOLDED
79846
79847+#define PMD_SHIFT PUD_SHIFT
79848+#define PTRS_PER_PMD 1
79849+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
79850+#define PMD_MASK (~(PMD_SIZE-1))
79851+
79852+#ifndef __ASSEMBLY__
79853+
79854+struct mm_struct;
79855+
79856 /*
79857 * Having the pmd type consist of a pud gets the size right, and allows
79858 * us to conceptually access the pud entry that this pmd is folded into
79859@@ -16,11 +21,6 @@ struct mm_struct;
79860 */
79861 typedef struct { pud_t pud; } pmd_t;
79862
79863-#define PMD_SHIFT PUD_SHIFT
79864-#define PTRS_PER_PMD 1
79865-#define PMD_SIZE (1UL << PMD_SHIFT)
79866-#define PMD_MASK (~(PMD_SIZE-1))
79867-
79868 /*
79869 * The "pud_xxx()" functions here are trivial for a folded two-level
79870 * setup: the pmd is never bad, and a pmd always exists (as it's folded
79871diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
79872index 810431d..0ec4804f 100644
79873--- a/include/asm-generic/pgtable-nopud.h
79874+++ b/include/asm-generic/pgtable-nopud.h
79875@@ -1,10 +1,15 @@
79876 #ifndef _PGTABLE_NOPUD_H
79877 #define _PGTABLE_NOPUD_H
79878
79879-#ifndef __ASSEMBLY__
79880-
79881 #define __PAGETABLE_PUD_FOLDED
79882
79883+#define PUD_SHIFT PGDIR_SHIFT
79884+#define PTRS_PER_PUD 1
79885+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
79886+#define PUD_MASK (~(PUD_SIZE-1))
79887+
79888+#ifndef __ASSEMBLY__
79889+
79890 /*
79891 * Having the pud type consist of a pgd gets the size right, and allows
79892 * us to conceptually access the pgd entry that this pud is folded into
79893@@ -12,11 +17,6 @@
79894 */
79895 typedef struct { pgd_t pgd; } pud_t;
79896
79897-#define PUD_SHIFT PGDIR_SHIFT
79898-#define PTRS_PER_PUD 1
79899-#define PUD_SIZE (1UL << PUD_SHIFT)
79900-#define PUD_MASK (~(PUD_SIZE-1))
79901-
79902 /*
79903 * The "pgd_xxx()" functions here are trivial for a folded two-level
79904 * setup: the pud is never bad, and a pud always exists (as it's folded
79905@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
79906 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
79907
79908 #define pgd_populate(mm, pgd, pud) do { } while (0)
79909+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
79910 /*
79911 * (puds are folded into pgds so this doesn't get actually called,
79912 * but the define is needed for a generic inline function.)
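
Hoisting the PMD_*/PUD_* definitions out of the #ifndef __ASSEMBLY__ guard, here and in the nopmd.h hunk above, is what forces 1UL to become _AC(1,UL): the assembler cannot parse a UL suffix, and _AC() appends it only when compiling C. The upstream macro (simplified from include/uapi/linux/const.h) works like this:

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler sees a bare constant */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* C sees the pasted suffix, e.g. (1UL) */
#endif

So PUD_SIZE expands to (1 << PUD_SHIFT) in assembly sources and to ((1UL) << PUD_SHIFT) in C, keeping one definition usable from both.
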
79913diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
79914index 177d597..2826237 100644
79915--- a/include/asm-generic/pgtable.h
79916+++ b/include/asm-generic/pgtable.h
79917@@ -839,6 +839,22 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
79918 }
79919 #endif /* CONFIG_NUMA_BALANCING */
79920
79921+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
79922+#ifdef CONFIG_PAX_KERNEXEC
79923+#error KERNEXEC requires pax_open_kernel
79924+#else
79925+static inline unsigned long pax_open_kernel(void) { return 0; }
79926+#endif
79927+#endif
79928+
79929+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
79930+#ifdef CONFIG_PAX_KERNEXEC
79931+#error KERNEXEC requires pax_close_kernel
79932+#else
79933+static inline unsigned long pax_close_kernel(void) { return 0; }
79934+#endif
79935+#endif
79936+
79937 #endif /* CONFIG_MMU */
79938
79939 #endif /* !__ASSEMBLY__ */
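
pax_open_kernel()/pax_close_kernel() bracket the few legitimate runtime writes to data that KERNEXEC otherwise maps read-only; an architecture that enables KERNEXEC must supply real implementations (hence the #error), while everything else gets the no-op stubs above. A hedged sketch of the calling pattern (the variable is illustrative and assumes a PaX-patched tree):

static int policy_flag __read_only;	/* placed in .data..read_only */

static void set_policy_flag(int v)
{
	pax_open_kernel();	/* temporarily permit the write */
	policy_flag = v;
	pax_close_kernel();	/* restore write protection */
}

__read_only itself is supplied by the linux/cache.h hunk later in this patch, with the same stub-or-#error structure.
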
79940diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
79941index 72d8803..cb9749c 100644
79942--- a/include/asm-generic/uaccess.h
79943+++ b/include/asm-generic/uaccess.h
79944@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
79945 return __clear_user(to, n);
79946 }
79947
79948+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
79949+#ifdef CONFIG_PAX_MEMORY_UDEREF
79950+#error UDEREF requires pax_open_userland
79951+#else
79952+static inline unsigned long pax_open_userland(void) { return 0; }
79953+#endif
79954+#endif
79955+
79956+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
79957+#ifdef CONFIG_PAX_MEMORY_UDEREF
79958+#error UDEREF requires pax_close_userland
79959+#else
79960+static inline unsigned long pax_close_userland(void) { return 0; }
79961+#endif
79962+#endif
79963+
79964 #endif /* __ASM_GENERIC_UACCESS_H */
79965diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
79966index bee5d68..8d362d1 100644
79967--- a/include/asm-generic/vmlinux.lds.h
79968+++ b/include/asm-generic/vmlinux.lds.h
79969@@ -234,6 +234,7 @@
79970 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
79971 VMLINUX_SYMBOL(__start_rodata) = .; \
79972 *(.rodata) *(.rodata.*) \
79973+ *(.data..read_only) \
79974 *(__vermagic) /* Kernel version magic */ \
79975 . = ALIGN(8); \
79976 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
79977@@ -726,17 +727,18 @@
79978 * section in the linker script will go there too. @phdr should have
79979 * a leading colon.
79980 *
79981- * Note that this macros defines __per_cpu_load as an absolute symbol.
79982+ * Note that this macro defines per_cpu_load as an absolute symbol.
79983 * If there is no need to put the percpu section at a predetermined
79984 * address, use PERCPU_SECTION.
79985 */
79986 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
79987- VMLINUX_SYMBOL(__per_cpu_load) = .; \
79988- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
79989+ per_cpu_load = .; \
79990+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
79991 - LOAD_OFFSET) { \
79992+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
79993 PERCPU_INPUT(cacheline) \
79994 } phdr \
79995- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
79996+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
79997
79998 /**
79999 * PERCPU_SECTION - define output section for percpu area, simple version
80000diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
80001index 623a59c..1e79ab9 100644
80002--- a/include/crypto/algapi.h
80003+++ b/include/crypto/algapi.h
80004@@ -34,7 +34,7 @@ struct crypto_type {
80005 unsigned int maskclear;
80006 unsigned int maskset;
80007 unsigned int tfmsize;
80008-};
80009+} __do_const;
80010
80011 struct crypto_instance {
80012 struct crypto_alg alg;
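
__do_const is a constify-plugin attribute: every instance of the tagged struct is forced const and placed in read-only memory, so the function pointers it holds cannot be overwritten by an attacker; __no_const is the escape hatch for instances that genuinely must change at runtime. Without the plugin, the closest manual equivalent is simply declaring the ops table const (widget_ops is an illustrative example, not from the patch):

struct widget_ops {
	int (*probe)(void);
	void (*remove)(void);
};

static int widget_probe(void) { return 0; }
static void widget_remove(void) { }

/* A const definition lands in .rodata, so the pointers are immutable
 * after boot; __do_const automates this for all instances. */
static const struct widget_ops widget_ops = {
	.probe	= widget_probe,
	.remove	= widget_remove,
};
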
80013diff --git a/include/drm/drmP.h b/include/drm/drmP.h
80014index e1b2e8b..2697bd2 100644
80015--- a/include/drm/drmP.h
80016+++ b/include/drm/drmP.h
80017@@ -59,6 +59,7 @@
80018
80019 #include <asm/mman.h>
80020 #include <asm/pgalloc.h>
80021+#include <asm/local.h>
80022 #include <asm/uaccess.h>
80023
80024 #include <uapi/drm/drm.h>
80025@@ -223,10 +224,12 @@ void drm_err(const char *format, ...);
80026 * \param cmd command.
80027 * \param arg argument.
80028 */
80029-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
80030+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
80031+ struct drm_file *file_priv);
80032+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
80033 struct drm_file *file_priv);
80034
80035-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
80036+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
80037 unsigned long arg);
80038
80039 #define DRM_IOCTL_NR(n) _IOC_NR(n)
80040@@ -242,10 +245,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
80041 struct drm_ioctl_desc {
80042 unsigned int cmd;
80043 int flags;
80044- drm_ioctl_t *func;
80045+ drm_ioctl_t func;
80046 unsigned int cmd_drv;
80047 const char *name;
80048-};
80049+} __do_const;
80050
80051 /**
80052 * Creates a driver or general drm_ioctl_desc array entry for the given
80053@@ -629,7 +632,8 @@ struct drm_info_list {
80054 int (*show)(struct seq_file*, void*); /** show callback */
80055 u32 driver_features; /**< Required driver features for this entry */
80056 void *data;
80057-};
80058+} __do_const;
80059+typedef struct drm_info_list __no_const drm_info_list_no_const;
80060
80061 /**
80062 * debugfs node structure. This structure represents a debugfs file.
80063@@ -713,7 +717,7 @@ struct drm_device {
80064
80065 /** \name Usage Counters */
80066 /*@{ */
80067- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
80068+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
80069 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
80070 int buf_use; /**< Buffers in use -- cannot alloc */
80071 atomic_t buf_alloc; /**< Buffer allocation in progress */
80072diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
80073index 7adbb65..2a1eb1f 100644
80074--- a/include/drm/drm_crtc_helper.h
80075+++ b/include/drm/drm_crtc_helper.h
80076@@ -116,7 +116,7 @@ struct drm_encoder_helper_funcs {
80077 struct drm_connector *connector);
80078 /* disable encoder when not in use - more explicit than dpms off */
80079 void (*disable)(struct drm_encoder *encoder);
80080-};
80081+} __no_const;
80082
80083 /**
80084 * drm_connector_helper_funcs - helper operations for connectors
80085diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
80086index d016dc5..3951fe0 100644
80087--- a/include/drm/i915_pciids.h
80088+++ b/include/drm/i915_pciids.h
80089@@ -37,7 +37,7 @@
80090 */
80091 #define INTEL_VGA_DEVICE(id, info) { \
80092 0x8086, id, \
80093- ~0, ~0, \
80094+ PCI_ANY_ID, PCI_ANY_ID, \
80095 0x030000, 0xff0000, \
80096 (unsigned long) info }
80097
80098diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
80099index 72dcbe8..8db58d7 100644
80100--- a/include/drm/ttm/ttm_memory.h
80101+++ b/include/drm/ttm/ttm_memory.h
80102@@ -48,7 +48,7 @@
80103
80104 struct ttm_mem_shrink {
80105 int (*do_shrink) (struct ttm_mem_shrink *);
80106-};
80107+} __no_const;
80108
80109 /**
80110 * struct ttm_mem_global - Global memory accounting structure.
80111diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
80112index 49a8284..9643967 100644
80113--- a/include/drm/ttm/ttm_page_alloc.h
80114+++ b/include/drm/ttm/ttm_page_alloc.h
80115@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
80116 */
80117 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
80118
80119+struct device;
80120 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
80121 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
80122
80123diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
80124index 4b840e8..155d235 100644
80125--- a/include/keys/asymmetric-subtype.h
80126+++ b/include/keys/asymmetric-subtype.h
80127@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
80128 /* Verify the signature on a key of this subtype (optional) */
80129 int (*verify_signature)(const struct key *key,
80130 const struct public_key_signature *sig);
80131-};
80132+} __do_const;
80133
80134 /**
80135 * asymmetric_key_subtype - Get the subtype from an asymmetric key
80136diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
80137index c1da539..1dcec55 100644
80138--- a/include/linux/atmdev.h
80139+++ b/include/linux/atmdev.h
80140@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
80141 #endif
80142
80143 struct k_atm_aal_stats {
80144-#define __HANDLE_ITEM(i) atomic_t i
80145+#define __HANDLE_ITEM(i) atomic_unchecked_t i
80146 __AAL_STAT_ITEMS
80147 #undef __HANDLE_ITEM
80148 };
80149@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
80150 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
80151 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
80152 struct module *owner;
80153-};
80154+} __do_const;
80155
80156 struct atmphy_ops {
80157 int (*start)(struct atm_dev *dev);
80158diff --git a/include/linux/atomic.h b/include/linux/atomic.h
80159index 5b08a85..60922fb 100644
80160--- a/include/linux/atomic.h
80161+++ b/include/linux/atomic.h
80162@@ -12,7 +12,7 @@
80163 * Atomically adds @a to @v, so long as @v was not already @u.
80164 * Returns non-zero if @v was not @u, and zero otherwise.
80165 */
80166-static inline int atomic_add_unless(atomic_t *v, int a, int u)
80167+static inline int __intentional_overflow(-1) atomic_add_unless(atomic_t *v, int a, int u)
80168 {
80169 return __atomic_add_unless(v, a, u) != u;
80170 }
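
atomic_add_unless() is the primitive behind "increment unless already at a forbidden value"; its best-known user is atomic_inc_not_zero(), which upstream defines as atomic_add_unless(v, 1, 0) and which refcounting code uses to take a reference only while the object is still alive. Schematically (obj and refcnt are illustrative):

/* Returns non-zero if the count was raised, zero if it was already 0. */
if (!atomic_add_unless(&obj->refcnt, 1, 0))
	return NULL;	/* object is already being torn down */

The __intentional_overflow(-1) annotation again only exempts the internal arithmetic from the size_overflow plugin's checking.
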
80171diff --git a/include/linux/audit.h b/include/linux/audit.h
80172index af84234..4177a40 100644
80173--- a/include/linux/audit.h
80174+++ b/include/linux/audit.h
80175@@ -225,7 +225,7 @@ static inline void audit_ptrace(struct task_struct *t)
80176 extern unsigned int audit_serial(void);
80177 extern int auditsc_get_stamp(struct audit_context *ctx,
80178 struct timespec *t, unsigned int *serial);
80179-extern int audit_set_loginuid(kuid_t loginuid);
80180+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
80181
80182 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
80183 {
80184diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
80185index 576e463..28fd926 100644
80186--- a/include/linux/binfmts.h
80187+++ b/include/linux/binfmts.h
80188@@ -44,7 +44,7 @@ struct linux_binprm {
80189 unsigned interp_flags;
80190 unsigned interp_data;
80191 unsigned long loader, exec;
80192-};
80193+} __randomize_layout;
80194
80195 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
80196 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
80197@@ -77,8 +77,10 @@ struct linux_binfmt {
80198 int (*load_binary)(struct linux_binprm *);
80199 int (*load_shlib)(struct file *);
80200 int (*core_dump)(struct coredump_params *cprm);
80201+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
80202+ void (*handle_mmap)(struct file *);
80203 unsigned long min_coredump; /* minimal dump size */
80204-};
80205+} __do_const __randomize_layout;
80206
80207 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
80208
80209diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
80210index 202e403..16e6617 100644
80211--- a/include/linux/bitmap.h
80212+++ b/include/linux/bitmap.h
80213@@ -302,7 +302,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
80214 return __bitmap_full(src, nbits);
80215 }
80216
80217-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
80218+static inline int __intentional_overflow(-1) bitmap_weight(const unsigned long *src, unsigned int nbits)
80219 {
80220 if (small_const_nbits(nbits))
80221 return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
80222diff --git a/include/linux/bitops.h b/include/linux/bitops.h
80223index 5d858e0..336c1d9 100644
80224--- a/include/linux/bitops.h
80225+++ b/include/linux/bitops.h
80226@@ -105,7 +105,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
80227 * @word: value to rotate
80228 * @shift: bits to roll
80229 */
80230-static inline __u32 rol32(__u32 word, unsigned int shift)
80231+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
80232 {
80233 return (word << shift) | (word >> (32 - shift));
80234 }
80235@@ -115,7 +115,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
80236 * @word: value to rotate
80237 * @shift: bits to roll
80238 */
80239-static inline __u32 ror32(__u32 word, unsigned int shift)
80240+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
80241 {
80242 return (word >> shift) | (word << (32 - shift));
80243 }
80244@@ -171,7 +171,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
80245 return (__s32)(value << shift) >> shift;
80246 }
80247
80248-static inline unsigned fls_long(unsigned long l)
80249+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
80250 {
80251 if (sizeof(l) == 4)
80252 return fls(l);
80253diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
80254index 92f4b4b..483d537 100644
80255--- a/include/linux/blkdev.h
80256+++ b/include/linux/blkdev.h
80257@@ -1613,7 +1613,7 @@ struct block_device_operations {
80258 /* this callback is with swap_lock and sometimes page table lock held */
80259 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
80260 struct module *owner;
80261-};
80262+} __do_const;
80263
80264 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
80265 unsigned long);
80266diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
80267index afc1343..9735539 100644
80268--- a/include/linux/blktrace_api.h
80269+++ b/include/linux/blktrace_api.h
80270@@ -25,7 +25,7 @@ struct blk_trace {
80271 struct dentry *dropped_file;
80272 struct dentry *msg_file;
80273 struct list_head running_list;
80274- atomic_t dropped;
80275+ atomic_unchecked_t dropped;
80276 };
80277
80278 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
80279diff --git a/include/linux/cache.h b/include/linux/cache.h
80280index 17e7e82..1d7da26 100644
80281--- a/include/linux/cache.h
80282+++ b/include/linux/cache.h
80283@@ -16,6 +16,14 @@
80284 #define __read_mostly
80285 #endif
80286
80287+#ifndef __read_only
80288+#ifdef CONFIG_PAX_KERNEXEC
80289+#error KERNEXEC requires __read_only
80290+#else
80291+#define __read_only __read_mostly
80292+#endif
80293+#endif
80294+
80295 #ifndef ____cacheline_aligned
80296 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
80297 #endif
80298diff --git a/include/linux/capability.h b/include/linux/capability.h
80299index aa93e5e..985a1b0 100644
80300--- a/include/linux/capability.h
80301+++ b/include/linux/capability.h
80302@@ -214,9 +214,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
80303 extern bool capable(int cap);
80304 extern bool ns_capable(struct user_namespace *ns, int cap);
80305 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
80306+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
80307 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
80308+extern bool capable_nolog(int cap);
80309+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
80310
80311 /* audit system wants to get cap info from files as well */
80312 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
80313
80314+extern int is_privileged_binary(const struct dentry *dentry);
80315+
80316 #endif /* !_LINUX_CAPABILITY_H */
80317diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
80318index 8609d57..86e4d79 100644
80319--- a/include/linux/cdrom.h
80320+++ b/include/linux/cdrom.h
80321@@ -87,7 +87,6 @@ struct cdrom_device_ops {
80322
80323 /* driver specifications */
80324 const int capability; /* capability flags */
80325- int n_minors; /* number of active minor devices */
80326 /* handle uniform packets for scsi type devices (scsi,atapi) */
80327 int (*generic_packet) (struct cdrom_device_info *,
80328 struct packet_command *);
80329diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
80330index 4ce9056..86caac6 100644
80331--- a/include/linux/cleancache.h
80332+++ b/include/linux/cleancache.h
80333@@ -31,7 +31,7 @@ struct cleancache_ops {
80334 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
80335 void (*invalidate_inode)(int, struct cleancache_filekey);
80336 void (*invalidate_fs)(int);
80337-};
80338+} __no_const;
80339
80340 extern struct cleancache_ops *
80341 cleancache_register_ops(struct cleancache_ops *ops);
80342diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
80343index d936409..ce9f842 100644
80344--- a/include/linux/clk-provider.h
80345+++ b/include/linux/clk-provider.h
80346@@ -191,6 +191,7 @@ struct clk_ops {
80347 void (*init)(struct clk_hw *hw);
80348 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
80349 };
80350+typedef struct clk_ops __no_const clk_ops_no_const;
80351
80352 /**
80353 * struct clk_init_data - holds init data that's common to all clocks and is
80354diff --git a/include/linux/compat.h b/include/linux/compat.h
80355index 7450ca2..a824b81 100644
80356--- a/include/linux/compat.h
80357+++ b/include/linux/compat.h
80358@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
80359 compat_size_t __user *len_ptr);
80360
80361 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
80362-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
80363+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
80364 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
80365 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
80366 compat_ssize_t msgsz, int msgflg);
80367@@ -439,7 +439,7 @@ extern int compat_ptrace_request(struct task_struct *child,
80368 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
80369 compat_ulong_t addr, compat_ulong_t data);
80370 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
80371- compat_long_t addr, compat_long_t data);
80372+ compat_ulong_t addr, compat_ulong_t data);
80373
80374 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
80375 /*
80376diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
80377index d1a5582..4424efa 100644
80378--- a/include/linux/compiler-gcc4.h
80379+++ b/include/linux/compiler-gcc4.h
80380@@ -39,9 +39,34 @@
80381 # define __compiletime_warning(message) __attribute__((warning(message)))
80382 # define __compiletime_error(message) __attribute__((error(message)))
80383 #endif /* __CHECKER__ */
80384+
80385+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
80386+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
80387+#define __bos0(ptr) __bos((ptr), 0)
80388+#define __bos1(ptr) __bos((ptr), 1)
80389 #endif /* GCC_VERSION >= 40300 */
80390
80391 #if GCC_VERSION >= 40500
80392+
80393+#ifdef RANDSTRUCT_PLUGIN
80394+#define __randomize_layout __attribute__((randomize_layout))
80395+#define __no_randomize_layout __attribute__((no_randomize_layout))
80396+#endif
80397+
80398+#ifdef CONSTIFY_PLUGIN
80399+#define __no_const __attribute__((no_const))
80400+#define __do_const __attribute__((do_const))
80401+#endif
80402+
80403+#ifdef SIZE_OVERFLOW_PLUGIN
80404+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
80405+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
80406+#endif
80407+
80408+#ifdef LATENT_ENTROPY_PLUGIN
80409+#define __latent_entropy __attribute__((latent_entropy))
80410+#endif
80411+
80412 /*
80413 * Mark a position in code as unreachable. This can be used to
80414 * suppress control flow warnings after asm blocks that transfer
80415diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
80416index c8c5659..d09f2ad 100644
80417--- a/include/linux/compiler-gcc5.h
80418+++ b/include/linux/compiler-gcc5.h
80419@@ -28,6 +28,28 @@
80420 # define __compiletime_error(message) __attribute__((error(message)))
80421 #endif /* __CHECKER__ */
80422
80423+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
80424+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
80425+#define __bos0(ptr) __bos((ptr), 0)
80426+#define __bos1(ptr) __bos((ptr), 1)
80427+
80428+#ifdef CONSTIFY_PLUGIN
80429+#error not yet
80430+#define __no_const __attribute__((no_const))
80431+#define __do_const __attribute__((do_const))
80432+#endif
80433+
80434+#ifdef SIZE_OVERFLOW_PLUGIN
80435+#error not yet
80436+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
80437+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
80438+#endif
80439+
80440+#ifdef LATENT_ENTROPY_PLUGIN
80441+#error not yet
80442+#define __latent_entropy __attribute__((latent_entropy))
80443+#endif
80444+
80445 /*
80446 * Mark a position in code as unreachable. This can be used to
80447 * suppress control flow warnings after asm blocks that transfer
80448diff --git a/include/linux/compiler.h b/include/linux/compiler.h
80449index fa6a314..a1b01ad 100644
80450--- a/include/linux/compiler.h
80451+++ b/include/linux/compiler.h
80452@@ -5,11 +5,14 @@
80453
80454 #ifdef __CHECKER__
80455 # define __user __attribute__((noderef, address_space(1)))
80456+# define __force_user __force __user
80457 # define __kernel __attribute__((address_space(0)))
80458+# define __force_kernel __force __kernel
80459 # define __safe __attribute__((safe))
80460 # define __force __attribute__((force))
80461 # define __nocast __attribute__((nocast))
80462 # define __iomem __attribute__((noderef, address_space(2)))
80463+# define __force_iomem __force __iomem
80464 # define __must_hold(x) __attribute__((context(x,1,1)))
80465 # define __acquires(x) __attribute__((context(x,0,1)))
80466 # define __releases(x) __attribute__((context(x,1,0)))
80467@@ -17,20 +20,37 @@
80468 # define __release(x) __context__(x,-1)
80469 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
80470 # define __percpu __attribute__((noderef, address_space(3)))
80471+# define __force_percpu __force __percpu
80472 #ifdef CONFIG_SPARSE_RCU_POINTER
80473 # define __rcu __attribute__((noderef, address_space(4)))
80474+# define __force_rcu __force __rcu
80475 #else
80476 # define __rcu
80477+# define __force_rcu
80478 #endif
80479 extern void __chk_user_ptr(const volatile void __user *);
80480 extern void __chk_io_ptr(const volatile void __iomem *);
80481 #else
80482-# define __user
80483-# define __kernel
80484+# ifdef CHECKER_PLUGIN
80485+//# define __user
80486+//# define __force_user
80487+//# define __kernel
80488+//# define __force_kernel
80489+# else
80490+# ifdef STRUCTLEAK_PLUGIN
80491+# define __user __attribute__((user))
80492+# else
80493+# define __user
80494+# endif
80495+# define __force_user
80496+# define __kernel
80497+# define __force_kernel
80498+# endif
80499 # define __safe
80500 # define __force
80501 # define __nocast
80502 # define __iomem
80503+# define __force_iomem
80504 # define __chk_user_ptr(x) (void)0
80505 # define __chk_io_ptr(x) (void)0
80506 # define __builtin_warning(x, y...) (1)
80507@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
80508 # define __release(x) (void)0
80509 # define __cond_lock(x,c) (c)
80510 # define __percpu
80511+# define __force_percpu
80512 # define __rcu
80513+# define __force_rcu
80514 #endif
80515
80516 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
80517@@ -201,32 +223,32 @@ static __always_inline void data_access_exceeds_word_size(void)
80518 static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
80519 {
80520 switch (size) {
80521- case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
80522- case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
80523- case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
80524+ case 1: *(__u8 *)res = *(const volatile __u8 *)p; break;
80525+ case 2: *(__u16 *)res = *(const volatile __u16 *)p; break;
80526+ case 4: *(__u32 *)res = *(const volatile __u32 *)p; break;
80527 #ifdef CONFIG_64BIT
80528- case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
80529+ case 8: *(__u64 *)res = *(const volatile __u64 *)p; break;
80530 #endif
80531 default:
80532 barrier();
80533- __builtin_memcpy((void *)res, (const void *)p, size);
80534+ __builtin_memcpy(res, (const void *)p, size);
80535 data_access_exceeds_word_size();
80536 barrier();
80537 }
80538 }
80539
80540-static __always_inline void __write_once_size(volatile void *p, void *res, int size)
80541+static __always_inline void __write_once_size(volatile void *p, const void *res, int size)
80542 {
80543 switch (size) {
80544- case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
80545- case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
80546- case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
80547+ case 1: *(volatile __u8 *)p = *(const __u8 *)res; break;
80548+ case 2: *(volatile __u16 *)p = *(const __u16 *)res; break;
80549+ case 4: *(volatile __u32 *)p = *(const __u32 *)res; break;
80550 #ifdef CONFIG_64BIT
80551- case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
80552+ case 8: *(volatile __u64 *)p = *(const __u64 *)res; break;
80553 #endif
80554 default:
80555 barrier();
80556- __builtin_memcpy((void *)p, (const void *)res, size);
80557+ __builtin_memcpy((void *)p, res, size);
80558 data_access_exceeds_word_size();
80559 barrier();
80560 }
80561@@ -360,6 +382,34 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80562 # define __attribute_const__ /* unimplemented */
80563 #endif
80564
80565+#ifndef __randomize_layout
80566+# define __randomize_layout
80567+#endif
80568+
80569+#ifndef __no_randomize_layout
80570+# define __no_randomize_layout
80571+#endif
80572+
80573+#ifndef __no_const
80574+# define __no_const
80575+#endif
80576+
80577+#ifndef __do_const
80578+# define __do_const
80579+#endif
80580+
80581+#ifndef __size_overflow
80582+# define __size_overflow(...)
80583+#endif
80584+
80585+#ifndef __intentional_overflow
80586+# define __intentional_overflow(...)
80587+#endif
80588+
80589+#ifndef __latent_entropy
80590+# define __latent_entropy
80591+#endif
80592+
80593 /*
80594 * Tell gcc if a function is cold. The compiler will assume any path
80595 * directly leading to the call is unlikely.
80596@@ -369,6 +419,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80597 #define __cold
80598 #endif
80599
80600+#ifndef __alloc_size
80601+#define __alloc_size(...)
80602+#endif
80603+
80604+#ifndef __bos
80605+#define __bos(ptr, arg)
80606+#endif
80607+
80608+#ifndef __bos0
80609+#define __bos0(ptr)
80610+#endif
80611+
80612+#ifndef __bos1
80613+#define __bos1(ptr)
80614+#endif
80615+
80616 /* Simple shorthand for a section definition */
80617 #ifndef __section
80618 # define __section(S) __attribute__ ((__section__(#S)))
80619@@ -383,6 +449,8 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80620 # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
80621 #endif
80622
80623+#define __type_is_unsigned(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
80624+
80625 /* Is this type a native word size -- useful for atomic operations */
80626 #ifndef __native_word
80627 # define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
80628@@ -462,8 +530,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
80629 */
80630 #define __ACCESS_ONCE(x) ({ \
80631 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
80632- (volatile typeof(x) *)&(x); })
80633+ (volatile const typeof(x) *)&(x); })
80634 #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
80635+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
80636
80637 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
80638 #ifdef CONFIG_KPROBES
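
__alloc_size() tells GCC how large the buffer returned by an allocator is, and __bos()/__builtin_object_size() let fortified wrappers compare a copy length against what the compiler knows about the destination. A standalone taste of the mechanism (plain userspace C; compile with -O2 so the builtin can resolve):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[16];

	/* GCC statically knows the object is 16 bytes here. */
	printf("%zu\n", __builtin_object_size(buf, 0));	/* prints 16 */

	/* A fortified memcpy checks its length against that value and
	 * traps on overflow; this copy is within bounds. */
	memcpy(buf, "hello", 6);
	return 0;
}
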
80639diff --git a/include/linux/completion.h b/include/linux/completion.h
80640index 5d5aaae..0ea9b84 100644
80641--- a/include/linux/completion.h
80642+++ b/include/linux/completion.h
80643@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
80644
80645 extern void wait_for_completion(struct completion *);
80646 extern void wait_for_completion_io(struct completion *);
80647-extern int wait_for_completion_interruptible(struct completion *x);
80648-extern int wait_for_completion_killable(struct completion *x);
80649+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
80650+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
80651 extern unsigned long wait_for_completion_timeout(struct completion *x,
80652- unsigned long timeout);
80653+ unsigned long timeout) __intentional_overflow(-1);
80654 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
80655- unsigned long timeout);
80656+ unsigned long timeout) __intentional_overflow(-1);
80657 extern long wait_for_completion_interruptible_timeout(
80658- struct completion *x, unsigned long timeout);
80659+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
80660 extern long wait_for_completion_killable_timeout(
80661- struct completion *x, unsigned long timeout);
80662+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
80663 extern bool try_wait_for_completion(struct completion *x);
80664 extern bool completion_done(struct completion *x);
80665
80666diff --git a/include/linux/configfs.h b/include/linux/configfs.h
80667index 34025df..d94bbbc 100644
80668--- a/include/linux/configfs.h
80669+++ b/include/linux/configfs.h
80670@@ -125,7 +125,7 @@ struct configfs_attribute {
80671 const char *ca_name;
80672 struct module *ca_owner;
80673 umode_t ca_mode;
80674-};
80675+} __do_const;
80676
80677 /*
80678 * Users often need to create attribute structures for their configurable
80679diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
80680index 4d078ce..c970f4d 100644
80681--- a/include/linux/cpufreq.h
80682+++ b/include/linux/cpufreq.h
80683@@ -206,6 +206,7 @@ struct global_attr {
80684 ssize_t (*store)(struct kobject *a, struct attribute *b,
80685 const char *c, size_t count);
80686 };
80687+typedef struct global_attr __no_const global_attr_no_const;
80688
80689 #define define_one_global_ro(_name) \
80690 static struct global_attr _name = \
80691@@ -277,7 +278,7 @@ struct cpufreq_driver {
80692 bool boost_supported;
80693 bool boost_enabled;
80694 int (*set_boost)(int state);
80695-};
80696+} __do_const;
80697
80698 /* flags */
80699 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
80700diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
80701index 948df62..4602717 100644
80702--- a/include/linux/cpuidle.h
80703+++ b/include/linux/cpuidle.h
80704@@ -50,7 +50,8 @@ struct cpuidle_state {
80705 int index);
80706
80707 int (*enter_dead) (struct cpuidle_device *dev, int index);
80708-};
80709+} __do_const;
80710+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
80711
80712 /* Idle State Flags */
80713 #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
80714@@ -205,7 +206,7 @@ struct cpuidle_governor {
80715 void (*reflect) (struct cpuidle_device *dev, int index);
80716
80717 struct module *owner;
80718-};
80719+} __do_const;
80720
80721 #ifdef CONFIG_CPU_IDLE
80722 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
80723diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
80724index b950e9d..63810aa 100644
80725--- a/include/linux/cpumask.h
80726+++ b/include/linux/cpumask.h
80727@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
80728 }
80729
80730 /* Valid inputs for n are -1 and 0. */
80731-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80732+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
80733 {
80734 return n+1;
80735 }
80736
80737-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80738+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
80739 {
80740 return n+1;
80741 }
80742
80743-static inline unsigned int cpumask_next_and(int n,
80744+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
80745 const struct cpumask *srcp,
80746 const struct cpumask *andp)
80747 {
80748@@ -174,7 +174,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
80749 *
80750 * Returns >= nr_cpu_ids if no further cpus set.
80751 */
80752-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80753+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
80754 {
80755 /* -1 is a legal arg here. */
80756 if (n != -1)
80757@@ -189,7 +189,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80758 *
80759 * Returns >= nr_cpu_ids if no further cpus unset.
80760 */
80761-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80762+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
80763 {
80764 /* -1 is a legal arg here. */
80765 if (n != -1)
80766@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80767 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
80768 }
80769
80770-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
80771+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
80772 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
80773 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
80774
80775@@ -464,7 +464,7 @@ static inline bool cpumask_full(const struct cpumask *srcp)
80776 * cpumask_weight - Count of bits in *srcp
80777 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
80778 */
80779-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
80780+static inline unsigned int __intentional_overflow(-1) cpumask_weight(const struct cpumask *srcp)
80781 {
80782 return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
80783 }
80784diff --git a/include/linux/cred.h b/include/linux/cred.h
80785index 2fb2ca2..d6a3340 100644
80786--- a/include/linux/cred.h
80787+++ b/include/linux/cred.h
80788@@ -35,7 +35,7 @@ struct group_info {
80789 int nblocks;
80790 kgid_t small_block[NGROUPS_SMALL];
80791 kgid_t *blocks[0];
80792-};
80793+} __randomize_layout;
80794
80795 /**
80796 * get_group_info - Get a reference to a group info structure
80797@@ -137,7 +137,7 @@ struct cred {
80798 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
80799 struct group_info *group_info; /* supplementary groups for euid/fsgid */
80800 struct rcu_head rcu; /* RCU deletion hook */
80801-};
80802+} __randomize_layout;
80803
80804 extern void __put_cred(struct cred *);
80805 extern void exit_creds(struct task_struct *);
80806@@ -195,6 +195,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
80807 static inline void validate_process_creds(void)
80808 {
80809 }
80810+static inline void validate_task_creds(struct task_struct *task)
80811+{
80812+}
80813 #endif
80814
80815 /**
80816@@ -332,6 +335,7 @@ static inline void put_cred(const struct cred *_cred)
80817
80818 #define task_uid(task) (task_cred_xxx((task), uid))
80819 #define task_euid(task) (task_cred_xxx((task), euid))
80820+#define task_securebits(task) (task_cred_xxx((task), securebits))
80821
80822 #define current_cred_xxx(xxx) \
80823 ({ \
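
__randomize_layout hands the annotated struct to the randstruct GCC plugin, which shuffles member order at compile time so exploits cannot rely on fixed field offsets in security-sensitive structures like struct cred. A compile-anywhere sketch; the plugin itself is not modeled, and cred_like is an invented stand-in:

/* Sketch only: without the randomize_layout plugin the annotation compiles
 * away and offsets stay in declaration order; with it, offsetof() results
 * differ per build/seed. */
#include <stdio.h>
#include <stddef.h>

#define __randomize_layout   /* expands to a plugin attribute in-tree */

struct cred_like {
        unsigned int uid;
        unsigned int gid;
        void *security;
} __randomize_layout;

int main(void)
{
        /* an exploit hardcoding these offsets breaks on a kernel built
         * with a different randomization seed */
        printf("uid at %zu, security at %zu\n",
               offsetof(struct cred_like, uid),
               offsetof(struct cred_like, security));
        return 0;
}
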
80824diff --git a/include/linux/crypto.h b/include/linux/crypto.h
80825index 9c8776d..8c526c2 100644
80826--- a/include/linux/crypto.h
80827+++ b/include/linux/crypto.h
80828@@ -626,7 +626,7 @@ struct cipher_tfm {
80829 const u8 *key, unsigned int keylen);
80830 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
80831 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
80832-};
80833+} __no_const;
80834
80835 struct hash_tfm {
80836 int (*init)(struct hash_desc *desc);
80837@@ -647,13 +647,13 @@ struct compress_tfm {
80838 int (*cot_decompress)(struct crypto_tfm *tfm,
80839 const u8 *src, unsigned int slen,
80840 u8 *dst, unsigned int *dlen);
80841-};
80842+} __no_const;
80843
80844 struct rng_tfm {
80845 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
80846 unsigned int dlen);
80847 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
80848-};
80849+} __no_const;
80850
80851 #define crt_ablkcipher crt_u.ablkcipher
80852 #define crt_aead crt_u.aead
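
__no_const is the opt-out from grsecurity's constify plugin: structs made of function pointers are forced const by default, and the tfm ops above are exempted because they are filled in at runtime during transform setup. A hedged illustration with both annotations stubbed to nothing so it builds without the plugin:

/* ops_fixed/ops_runtime are invented demo types; the macros are stand-ins
 * for what the constify plugin would enforce. */
#include <stdio.h>

#define __do_const   /* plugin would force instances read-only */
#define __no_const   /* plugin leaves instances writable       */

struct ops_fixed {                 /* set once at compile time */
        void (*run)(void);
} __do_const;

struct ops_runtime {               /* filled in per-instance at init */
        void (*run)(void);
} __no_const;

static void hello(void) { puts("hello"); }

int main(void)
{
        static const struct ops_fixed fixed = { .run = hello };
        struct ops_runtime rt;

        rt.run = hello;            /* legal only because of __no_const */
        fixed.run();
        rt.run();
        return 0;
}
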
80853diff --git a/include/linux/ctype.h b/include/linux/ctype.h
80854index 653589e..4ef254a 100644
80855--- a/include/linux/ctype.h
80856+++ b/include/linux/ctype.h
80857@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
80858 * Fast implementation of tolower() for internal usage. Do not use in your
80859 * code.
80860 */
80861-static inline char _tolower(const char c)
80862+static inline unsigned char _tolower(const unsigned char c)
80863 {
80864 return c | 0x20;
80865 }
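
The _tolower() change above swaps plain char for unsigned char, avoiding sign extension when a high-bit byte is promoted to int. A small demo of the pitfall; _tolower_fixed is a local copy, and the negative value shown assumes a target where plain char is signed:

#include <stdio.h>

static unsigned char _tolower_fixed(const unsigned char c)
{
        return c | 0x20;
}

int main(void)
{
        char raw = (char)0xC4;     /* e.g. a Latin-1 byte */

        /* on signed-char targets this promotes to a negative int, which
         * is unsafe as an array index or ctype-style argument */
        printf("promoted signed char: %d\n", (int)raw);
        printf("fixed version returns: %u\n",
               _tolower_fixed((unsigned char)raw));
        return 0;
}
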
80866diff --git a/include/linux/dcache.h b/include/linux/dcache.h
80867index 5a81398..6bbee30 100644
80868--- a/include/linux/dcache.h
80869+++ b/include/linux/dcache.h
80870@@ -123,6 +123,9 @@ struct dentry {
80871 unsigned long d_time; /* used by d_revalidate */
80872 void *d_fsdata; /* fs-specific data */
80873
80874+#ifdef CONFIG_GRKERNSEC_CHROOT_RENAME
80875+ atomic_t chroot_refcnt; /* tracks use of directory in chroot */
80876+#endif
80877 struct list_head d_lru; /* LRU list */
80878 struct list_head d_child; /* child of parent list */
80879 struct list_head d_subdirs; /* our children */
80880@@ -133,7 +136,7 @@ struct dentry {
80881 struct hlist_node d_alias; /* inode alias list */
80882 struct rcu_head d_rcu;
80883 } d_u;
80884-};
80885+} __randomize_layout;
80886
80887 /*
80888 * dentry->d_lock spinlock nesting subclasses:
80889diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
80890index 7925bf0..d5143d2 100644
80891--- a/include/linux/decompress/mm.h
80892+++ b/include/linux/decompress/mm.h
80893@@ -77,7 +77,7 @@ static void free(void *where)
80894 * warnings when not needed (indeed large_malloc / large_free are not
80895 * needed by inflate */
80896
80897-#define malloc(a) kmalloc(a, GFP_KERNEL)
80898+#define malloc(a) kmalloc((a), GFP_KERNEL)
80899 #define free(a) kfree(a)
80900
80901 #define large_malloc(a) vmalloc(a)
80902diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
80903index ce447f0..83c66bd 100644
80904--- a/include/linux/devfreq.h
80905+++ b/include/linux/devfreq.h
80906@@ -114,7 +114,7 @@ struct devfreq_governor {
80907 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
80908 int (*event_handler)(struct devfreq *devfreq,
80909 unsigned int event, void *data);
80910-};
80911+} __do_const;
80912
80913 /**
80914 * struct devfreq - Device devfreq structure
80915diff --git a/include/linux/device.h b/include/linux/device.h
80916index fb50673..ec0b35b 100644
80917--- a/include/linux/device.h
80918+++ b/include/linux/device.h
80919@@ -311,7 +311,7 @@ struct subsys_interface {
80920 struct list_head node;
80921 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
80922 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
80923-};
80924+} __do_const;
80925
80926 int subsys_interface_register(struct subsys_interface *sif);
80927 void subsys_interface_unregister(struct subsys_interface *sif);
80928@@ -507,7 +507,7 @@ struct device_type {
80929 void (*release)(struct device *dev);
80930
80931 const struct dev_pm_ops *pm;
80932-};
80933+} __do_const;
80934
80935 /* interface for exporting device attributes */
80936 struct device_attribute {
80937@@ -517,11 +517,12 @@ struct device_attribute {
80938 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
80939 const char *buf, size_t count);
80940 };
80941+typedef struct device_attribute __no_const device_attribute_no_const;
80942
80943 struct dev_ext_attribute {
80944 struct device_attribute attr;
80945 void *var;
80946-};
80947+} __do_const;
80948
80949 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
80950 char *buf);
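
The device_attribute_no_const typedef introduced above is the escape hatch for attribute instances that must be constructed at runtime; the extcon hunk further down switches d_attrs_muex to it for exactly that reason. A sketch of the idea, with __no_const stubbed out so it compiles without the constify plugin and the struct abridged to one field:

#include <stdlib.h>

#define __no_const

struct device_attribute {
        const char *name;
        /* show/store callbacks omitted for brevity */
};
typedef struct device_attribute __no_const device_attribute_no_const;

int main(void)
{
        /* runtime-constructed attribute array, one slot per cable type */
        device_attribute_no_const *attrs = calloc(4, sizeof(*attrs));

        if (!attrs)
                return 1;
        attrs[0].name = "mutually_exclusive";   /* written at runtime */
        free(attrs);
        return 0;
}
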
80951diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
80952index c3007cb..43efc8c 100644
80953--- a/include/linux/dma-mapping.h
80954+++ b/include/linux/dma-mapping.h
80955@@ -60,7 +60,7 @@ struct dma_map_ops {
80956 u64 (*get_required_mask)(struct device *dev);
80957 #endif
80958 int is_phys;
80959-};
80960+} __do_const;
80961
80962 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
80963
80964diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
80965index 40cd75e..38572a9 100644
80966--- a/include/linux/dmaengine.h
80967+++ b/include/linux/dmaengine.h
80968@@ -1137,9 +1137,9 @@ struct dma_pinned_list {
80969 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
80970 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
80971
80972-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
80973+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
80974 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
80975-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
80976+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
80977 struct dma_pinned_list *pinned_list, struct page *page,
80978 unsigned int offset, size_t len);
80979
80980diff --git a/include/linux/efi.h b/include/linux/efi.h
80981index 0238d61..34a758f 100644
80982--- a/include/linux/efi.h
80983+++ b/include/linux/efi.h
80984@@ -1054,6 +1054,7 @@ struct efivar_operations {
80985 efi_set_variable_nonblocking_t *set_variable_nonblocking;
80986 efi_query_variable_store_t *query_variable_store;
80987 };
80988+typedef struct efivar_operations __no_const efivar_operations_no_const;
80989
80990 struct efivars {
80991 /*
80992diff --git a/include/linux/elf.h b/include/linux/elf.h
80993index 20fa8d8..3d0dd18 100644
80994--- a/include/linux/elf.h
80995+++ b/include/linux/elf.h
80996@@ -29,6 +29,7 @@ extern Elf32_Dyn _DYNAMIC [];
80997 #define elf_note elf32_note
80998 #define elf_addr_t Elf32_Off
80999 #define Elf_Half Elf32_Half
81000+#define elf_dyn Elf32_Dyn
81001
81002 #else
81003
81004@@ -39,6 +40,7 @@ extern Elf64_Dyn _DYNAMIC [];
81005 #define elf_note elf64_note
81006 #define elf_addr_t Elf64_Off
81007 #define Elf_Half Elf64_Half
81008+#define elf_dyn Elf64_Dyn
81009
81010 #endif
81011
81012diff --git a/include/linux/err.h b/include/linux/err.h
81013index a729120..6ede2c9 100644
81014--- a/include/linux/err.h
81015+++ b/include/linux/err.h
81016@@ -20,12 +20,12 @@
81017
81018 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
81019
81020-static inline void * __must_check ERR_PTR(long error)
81021+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
81022 {
81023 return (void *) error;
81024 }
81025
81026-static inline long __must_check PTR_ERR(__force const void *ptr)
81027+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
81028 {
81029 return (long) ptr;
81030 }
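
ERR_PTR()/PTR_ERR() encode a small negative errno in the top page of the address space, a range no valid pointer occupies, so one return value can carry either a pointer or an error; the casts wrap by design, which is what __intentional_overflow(-1) waives. A userspace re-creation of the round trip (LP64 assumed):

#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }

int main(void)
{
        void *p = ERR_PTR(-12);                 /* -ENOMEM */

        if (IS_ERR_VALUE((unsigned long)p))
                printf("error code: %ld\n", PTR_ERR(p));   /* -12 */
        return 0;
}
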
81031diff --git a/include/linux/extcon.h b/include/linux/extcon.h
81032index 36f49c4..a2a1f4c 100644
81033--- a/include/linux/extcon.h
81034+++ b/include/linux/extcon.h
81035@@ -135,7 +135,7 @@ struct extcon_dev {
81036 /* /sys/class/extcon/.../mutually_exclusive/... */
81037 struct attribute_group attr_g_muex;
81038 struct attribute **attrs_muex;
81039- struct device_attribute *d_attrs_muex;
81040+ device_attribute_no_const *d_attrs_muex;
81041 };
81042
81043 /**
81044diff --git a/include/linux/fb.h b/include/linux/fb.h
81045index 09bb7a1..d98870a 100644
81046--- a/include/linux/fb.h
81047+++ b/include/linux/fb.h
81048@@ -305,7 +305,7 @@ struct fb_ops {
81049 /* called at KDB enter and leave time to prepare the console */
81050 int (*fb_debug_enter)(struct fb_info *info);
81051 int (*fb_debug_leave)(struct fb_info *info);
81052-};
81053+} __do_const;
81054
81055 #ifdef CONFIG_FB_TILEBLITTING
81056 #define FB_TILE_CURSOR_NONE 0
81057diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
81058index 230f87b..1fd0485 100644
81059--- a/include/linux/fdtable.h
81060+++ b/include/linux/fdtable.h
81061@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
81062 void put_files_struct(struct files_struct *fs);
81063 void reset_files_struct(struct files_struct *);
81064 int unshare_files(struct files_struct **);
81065-struct files_struct *dup_fd(struct files_struct *, int *);
81066+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
81067 void do_close_on_exec(struct files_struct *);
81068 int iterate_fd(struct files_struct *, unsigned,
81069 int (*)(const void *, struct file *, unsigned),
81070diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
81071index 8293262..2b3b8bd 100644
81072--- a/include/linux/frontswap.h
81073+++ b/include/linux/frontswap.h
81074@@ -11,7 +11,7 @@ struct frontswap_ops {
81075 int (*load)(unsigned, pgoff_t, struct page *);
81076 void (*invalidate_page)(unsigned, pgoff_t);
81077 void (*invalidate_area)(unsigned);
81078-};
81079+} __no_const;
81080
81081 extern bool frontswap_enabled;
81082 extern struct frontswap_ops *
81083diff --git a/include/linux/fs.h b/include/linux/fs.h
81084index 42efe13..72d42ee 100644
81085--- a/include/linux/fs.h
81086+++ b/include/linux/fs.h
81087@@ -413,7 +413,7 @@ struct address_space {
81088 spinlock_t private_lock; /* for use by the address_space */
81089 struct list_head private_list; /* ditto */
81090 void *private_data; /* ditto */
81091-} __attribute__((aligned(sizeof(long))));
81092+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
81093 /*
81094 * On most architectures that alignment is already the case; but
81095 * must be enforced here for CRIS, to let the least significant bit
81096@@ -456,7 +456,7 @@ struct block_device {
81097 int bd_fsfreeze_count;
81098 /* Mutex for freeze */
81099 struct mutex bd_fsfreeze_mutex;
81100-};
81101+} __randomize_layout;
81102
81103 /*
81104 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
81105@@ -642,7 +642,7 @@ struct inode {
81106 #endif
81107
81108 void *i_private; /* fs or device private pointer */
81109-};
81110+} __randomize_layout;
81111
81112 static inline int inode_unhashed(struct inode *inode)
81113 {
81114@@ -837,7 +837,7 @@ struct file {
81115 struct list_head f_tfile_llink;
81116 #endif /* #ifdef CONFIG_EPOLL */
81117 struct address_space *f_mapping;
81118-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
81119+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
81120
81121 struct file_handle {
81122 __u32 handle_bytes;
81123@@ -962,7 +962,7 @@ struct file_lock {
81124 int state; /* state of grant or error if -ve */
81125 } afs;
81126 } fl_u;
81127-};
81128+} __randomize_layout;
81129
81130 /* The following constant reflects the upper bound of the file/locking space */
81131 #ifndef OFFSET_MAX
81132@@ -1305,7 +1305,7 @@ struct super_block {
81133 * Indicates how deep in a filesystem stack this SB is
81134 */
81135 int s_stack_depth;
81136-};
81137+} __randomize_layout;
81138
81139 extern struct timespec current_fs_time(struct super_block *sb);
81140
81141@@ -1536,7 +1536,8 @@ struct file_operations {
81142 long (*fallocate)(struct file *file, int mode, loff_t offset,
81143 loff_t len);
81144 void (*show_fdinfo)(struct seq_file *m, struct file *f);
81145-};
81146+} __do_const __randomize_layout;
81147+typedef struct file_operations __no_const file_operations_no_const;
81148
81149 struct inode_operations {
81150 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
81151@@ -2854,4 +2855,14 @@ static inline bool dir_relax(struct inode *inode)
81152 return !IS_DEADDIR(inode);
81153 }
81154
81155+static inline bool is_sidechannel_device(const struct inode *inode)
81156+{
81157+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
81158+ umode_t mode = inode->i_mode;
81159+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
81160+#else
81161+ return false;
81162+#endif
81163+}
81164+
81165 #endif /* _LINUX_FS_H */
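
The is_sidechannel_device() helper added above flags char/block device nodes readable or writable by "other"; the fsnotify.h hunk below uses it to suppress access/modify events on such devices and close a timing side channel. A compile-anywhere sketch of the same mode test using the <sys/stat.h> macros; the function name and the sample path are illustrative:

#include <stdio.h>
#include <stdbool.h>
#include <sys/stat.h>

static bool is_sidechannel_device_mode(mode_t mode)
{
        return (S_ISCHR(mode) || S_ISBLK(mode)) &&
               (mode & (S_IROTH | S_IWOTH));
}

int main(void)
{
        struct stat st;

        if (stat("/dev/ptmx", &st) == 0)
                printf("/dev/ptmx sidechannel: %d\n",
                       is_sidechannel_device_mode(st.st_mode));
        return 0;
}
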
81166diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
81167index 0efc3e6..fd23610 100644
81168--- a/include/linux/fs_struct.h
81169+++ b/include/linux/fs_struct.h
81170@@ -6,13 +6,13 @@
81171 #include <linux/seqlock.h>
81172
81173 struct fs_struct {
81174- int users;
81175+ atomic_t users;
81176 spinlock_t lock;
81177 seqcount_t seq;
81178 int umask;
81179 int in_exec;
81180 struct path root, pwd;
81181-};
81182+} __randomize_layout;
81183
81184 extern struct kmem_cache *fs_cachep;
81185
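
Turning fs_struct.users into an atomic_t lets references be taken and dropped without holding fs_struct.lock. A sketch of the refcount discipline, using C11 atomics as a stand-in for the kernel's atomic_inc()/atomic_dec_and_test() and an invented fs_struct_like type:

#include <stdatomic.h>
#include <stdio.h>

struct fs_struct_like {
        atomic_int users;
        int umask;
};

int main(void)
{
        struct fs_struct_like fs = { .users = 1, .umask = 022 };

        atomic_fetch_add(&fs.users, 1);            /* take a reference  */
        if (atomic_fetch_sub(&fs.users, 1) == 1)   /* drop: last ref?   */
                puts("would free fs_struct");
        else
                printf("refs remaining: %d\n", atomic_load(&fs.users));
        return 0;
}
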
81186diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
81187index 7714849..a4a5c7a 100644
81188--- a/include/linux/fscache-cache.h
81189+++ b/include/linux/fscache-cache.h
81190@@ -113,7 +113,7 @@ struct fscache_operation {
81191 fscache_operation_release_t release;
81192 };
81193
81194-extern atomic_t fscache_op_debug_id;
81195+extern atomic_unchecked_t fscache_op_debug_id;
81196 extern void fscache_op_work_func(struct work_struct *work);
81197
81198 extern void fscache_enqueue_operation(struct fscache_operation *);
81199@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
81200 INIT_WORK(&op->work, fscache_op_work_func);
81201 atomic_set(&op->usage, 1);
81202 op->state = FSCACHE_OP_ST_INITIALISED;
81203- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
81204+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
81205 op->processor = processor;
81206 op->release = release;
81207 INIT_LIST_HEAD(&op->pend_link);
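
atomic_unchecked_t exists because PAX_REFCOUNT makes ordinary atomic_t trap on overflow; counters that may legitimately wrap, like the debug ID converted above, switch to the unchecked variant so they skip that saturation check. A hedged model with plain C11 atomics standing in for atomic_inc_return_unchecked():

#include <stdatomic.h>
#include <stdio.h>
#include <limits.h>

static atomic_uint debug_id = UINT_MAX;   /* about to wrap: fine for an ID */

static unsigned int next_debug_id(void)
{
        /* kernel: atomic_inc_return_unchecked(&fscache_op_debug_id) */
        return atomic_fetch_add(&debug_id, 1) + 1;
}

int main(void)
{
        printf("id: %u\n", next_debug_id());   /* wraps to 0, by design */
        return 0;
}
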
81208diff --git a/include/linux/fscache.h b/include/linux/fscache.h
81209index 115bb81..e7b812b 100644
81210--- a/include/linux/fscache.h
81211+++ b/include/linux/fscache.h
81212@@ -152,7 +152,7 @@ struct fscache_cookie_def {
81213 * - this is mandatory for any object that may have data
81214 */
81215 void (*now_uncached)(void *cookie_netfs_data);
81216-};
81217+} __do_const;
81218
81219 /*
81220 * fscache cached network filesystem type
81221diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
81222index 7ee1774..72505b8 100644
81223--- a/include/linux/fsnotify.h
81224+++ b/include/linux/fsnotify.h
81225@@ -197,6 +197,9 @@ static inline void fsnotify_access(struct file *file)
81226 struct inode *inode = file_inode(file);
81227 __u32 mask = FS_ACCESS;
81228
81229+ if (is_sidechannel_device(inode))
81230+ return;
81231+
81232 if (S_ISDIR(inode->i_mode))
81233 mask |= FS_ISDIR;
81234
81235@@ -215,6 +218,9 @@ static inline void fsnotify_modify(struct file *file)
81236 struct inode *inode = file_inode(file);
81237 __u32 mask = FS_MODIFY;
81238
81239+ if (is_sidechannel_device(inode))
81240+ return;
81241+
81242 if (S_ISDIR(inode->i_mode))
81243 mask |= FS_ISDIR;
81244
81245@@ -317,7 +323,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
81246 */
81247 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
81248 {
81249- return kstrdup(name, GFP_KERNEL);
81250+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
81251 }
81252
81253 /*
81254diff --git a/include/linux/genhd.h b/include/linux/genhd.h
81255index ec274e0..e678159 100644
81256--- a/include/linux/genhd.h
81257+++ b/include/linux/genhd.h
81258@@ -194,7 +194,7 @@ struct gendisk {
81259 struct kobject *slave_dir;
81260
81261 struct timer_rand_state *random;
81262- atomic_t sync_io; /* RAID */
81263+ atomic_unchecked_t sync_io; /* RAID */
81264 struct disk_events *ev;
81265 #ifdef CONFIG_BLK_DEV_INTEGRITY
81266 struct blk_integrity *integrity;
81267@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
81268 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
81269
81270 /* drivers/char/random.c */
81271-extern void add_disk_randomness(struct gendisk *disk);
81272+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
81273 extern void rand_initialize_disk(struct gendisk *disk);
81274
81275 static inline sector_t get_start_sect(struct block_device *bdev)
81276diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
81277index 667c311..abac2a7 100644
81278--- a/include/linux/genl_magic_func.h
81279+++ b/include/linux/genl_magic_func.h
81280@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
81281 },
81282
81283 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
81284-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
81285+static struct genl_ops ZZZ_genl_ops[] = {
81286 #include GENL_MAGIC_INCLUDE_FILE
81287 };
81288
81289diff --git a/include/linux/gfp.h b/include/linux/gfp.h
81290index b840e3b..aeaeef9 100644
81291--- a/include/linux/gfp.h
81292+++ b/include/linux/gfp.h
81293@@ -34,6 +34,13 @@ struct vm_area_struct;
81294 #define ___GFP_NO_KSWAPD 0x400000u
81295 #define ___GFP_OTHER_NODE 0x800000u
81296 #define ___GFP_WRITE 0x1000000u
81297+
81298+#ifdef CONFIG_PAX_USERCOPY_SLABS
81299+#define ___GFP_USERCOPY 0x2000000u
81300+#else
81301+#define ___GFP_USERCOPY 0
81302+#endif
81303+
81304 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
81305
81306 /*
81307@@ -90,6 +97,7 @@ struct vm_area_struct;
81308 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
81309 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
81310 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
81311+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
81312
81313 /*
81314 * This may seem redundant, but it's a way of annotating false positives vs.
81315@@ -97,7 +105,7 @@ struct vm_area_struct;
81316 */
81317 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
81318
81319-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
81320+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
81321 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
81322
81323 /* This equals 0, but use constants in case they ever change */
81324@@ -152,6 +160,8 @@ struct vm_area_struct;
81325 /* 4GB DMA on some platforms */
81326 #define GFP_DMA32 __GFP_DMA32
81327
81328+#define GFP_USERCOPY __GFP_USERCOPY
81329+
81330 /* Convert GFP flags to their corresponding migrate type */
81331 static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
81332 {
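
This gfp.h hunk claims the next free flag bit (0x2000000 is 1 << 25) and bumps __GFP_BITS_SHIFT from 25 to 26 so __GFP_BITS_MASK still covers every defined bit. A quick check of the arithmetic, using the GCC __builtin_ctz intrinsic:

#include <stdio.h>

#define ___GFP_USERCOPY   0x2000000u
#define __GFP_BITS_SHIFT  26
#define __GFP_BITS_MASK   ((1u << __GFP_BITS_SHIFT) - 1)

int main(void)
{
        printf("USERCOPY is bit %d\n",
               __builtin_ctz(___GFP_USERCOPY));           /* 25 */
        printf("mask covers it: %s\n",
               (___GFP_USERCOPY & __GFP_BITS_MASK) ? "yes" : "no");
        return 0;
}
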
81333diff --git a/include/linux/gracl.h b/include/linux/gracl.h
81334new file mode 100644
81335index 0000000..91858e4
81336--- /dev/null
81337+++ b/include/linux/gracl.h
81338@@ -0,0 +1,342 @@
81339+#ifndef GR_ACL_H
81340+#define GR_ACL_H
81341+
81342+#include <linux/grdefs.h>
81343+#include <linux/resource.h>
81344+#include <linux/capability.h>
81345+#include <linux/dcache.h>
81346+#include <asm/resource.h>
81347+
81348+/* Major status information */
81349+
81350+#define GR_VERSION "grsecurity 3.1"
81351+#define GRSECURITY_VERSION 0x3100
81352+
81353+enum {
81354+ GR_SHUTDOWN = 0,
81355+ GR_ENABLE = 1,
81356+ GR_SPROLE = 2,
81357+ GR_OLDRELOAD = 3,
81358+ GR_SEGVMOD = 4,
81359+ GR_STATUS = 5,
81360+ GR_UNSPROLE = 6,
81361+ GR_PASSSET = 7,
81362+ GR_SPROLEPAM = 8,
81363+ GR_RELOAD = 9,
81364+};
81365+
81366+/* Password setup definitions
81367+ * kernel/grhash.c */
81368+enum {
81369+ GR_PW_LEN = 128,
81370+ GR_SALT_LEN = 16,
81371+ GR_SHA_LEN = 32,
81372+};
81373+
81374+enum {
81375+ GR_SPROLE_LEN = 64,
81376+};
81377+
81378+enum {
81379+ GR_NO_GLOB = 0,
81380+ GR_REG_GLOB,
81381+ GR_CREATE_GLOB
81382+};
81383+
81384+#define GR_NLIMITS 32
81385+
81386+/* Begin Data Structures */
81387+
81388+struct sprole_pw {
81389+ unsigned char *rolename;
81390+ unsigned char salt[GR_SALT_LEN];
81391+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
81392+};
81393+
81394+struct name_entry {
81395+ __u32 key;
81396+ u64 inode;
81397+ dev_t device;
81398+ char *name;
81399+ __u16 len;
81400+ __u8 deleted;
81401+ struct name_entry *prev;
81402+ struct name_entry *next;
81403+};
81404+
81405+struct inodev_entry {
81406+ struct name_entry *nentry;
81407+ struct inodev_entry *prev;
81408+ struct inodev_entry *next;
81409+};
81410+
81411+struct acl_role_db {
81412+ struct acl_role_label **r_hash;
81413+ __u32 r_size;
81414+};
81415+
81416+struct inodev_db {
81417+ struct inodev_entry **i_hash;
81418+ __u32 i_size;
81419+};
81420+
81421+struct name_db {
81422+ struct name_entry **n_hash;
81423+ __u32 n_size;
81424+};
81425+
81426+struct crash_uid {
81427+ uid_t uid;
81428+ unsigned long expires;
81429+};
81430+
81431+struct gr_hash_struct {
81432+ void **table;
81433+ void **nametable;
81434+ void *first;
81435+ __u32 table_size;
81436+ __u32 used_size;
81437+ int type;
81438+};
81439+
81440+/* Userspace Grsecurity ACL data structures */
81441+
81442+struct acl_subject_label {
81443+ char *filename;
81444+ u64 inode;
81445+ dev_t device;
81446+ __u32 mode;
81447+ kernel_cap_t cap_mask;
81448+ kernel_cap_t cap_lower;
81449+ kernel_cap_t cap_invert_audit;
81450+
81451+ struct rlimit res[GR_NLIMITS];
81452+ __u32 resmask;
81453+
81454+ __u8 user_trans_type;
81455+ __u8 group_trans_type;
81456+ uid_t *user_transitions;
81457+ gid_t *group_transitions;
81458+ __u16 user_trans_num;
81459+ __u16 group_trans_num;
81460+
81461+ __u32 sock_families[2];
81462+ __u32 ip_proto[8];
81463+ __u32 ip_type;
81464+ struct acl_ip_label **ips;
81465+ __u32 ip_num;
81466+ __u32 inaddr_any_override;
81467+
81468+ __u32 crashes;
81469+ unsigned long expires;
81470+
81471+ struct acl_subject_label *parent_subject;
81472+ struct gr_hash_struct *hash;
81473+ struct acl_subject_label *prev;
81474+ struct acl_subject_label *next;
81475+
81476+ struct acl_object_label **obj_hash;
81477+ __u32 obj_hash_size;
81478+ __u16 pax_flags;
81479+};
81480+
81481+struct role_allowed_ip {
81482+ __u32 addr;
81483+ __u32 netmask;
81484+
81485+ struct role_allowed_ip *prev;
81486+ struct role_allowed_ip *next;
81487+};
81488+
81489+struct role_transition {
81490+ char *rolename;
81491+
81492+ struct role_transition *prev;
81493+ struct role_transition *next;
81494+};
81495+
81496+struct acl_role_label {
81497+ char *rolename;
81498+ uid_t uidgid;
81499+ __u16 roletype;
81500+
81501+ __u16 auth_attempts;
81502+ unsigned long expires;
81503+
81504+ struct acl_subject_label *root_label;
81505+ struct gr_hash_struct *hash;
81506+
81507+ struct acl_role_label *prev;
81508+ struct acl_role_label *next;
81509+
81510+ struct role_transition *transitions;
81511+ struct role_allowed_ip *allowed_ips;
81512+ uid_t *domain_children;
81513+ __u16 domain_child_num;
81514+
81515+ umode_t umask;
81516+
81517+ struct acl_subject_label **subj_hash;
81518+ __u32 subj_hash_size;
81519+};
81520+
81521+struct user_acl_role_db {
81522+ struct acl_role_label **r_table;
81523+ __u32 num_pointers; /* Number of allocations to track */
81524+ __u32 num_roles; /* Number of roles */
81525+ __u32 num_domain_children; /* Number of domain children */
81526+ __u32 num_subjects; /* Number of subjects */
81527+ __u32 num_objects; /* Number of objects */
81528+};
81529+
81530+struct acl_object_label {
81531+ char *filename;
81532+ u64 inode;
81533+ dev_t device;
81534+ __u32 mode;
81535+
81536+ struct acl_subject_label *nested;
81537+ struct acl_object_label *globbed;
81538+
81539+ /* next two pointers (prev/next) not used */
81540+
81541+ struct acl_object_label *prev;
81542+ struct acl_object_label *next;
81543+};
81544+
81545+struct acl_ip_label {
81546+ char *iface;
81547+ __u32 addr;
81548+ __u32 netmask;
81549+ __u16 low, high;
81550+ __u8 mode;
81551+ __u32 type;
81552+ __u32 proto[8];
81553+
81554+ /* next two pointers (prev/next) not used */
81555+
81556+ struct acl_ip_label *prev;
81557+ struct acl_ip_label *next;
81558+};
81559+
81560+struct gr_arg {
81561+ struct user_acl_role_db role_db;
81562+ unsigned char pw[GR_PW_LEN];
81563+ unsigned char salt[GR_SALT_LEN];
81564+ unsigned char sum[GR_SHA_LEN];
81565+ unsigned char sp_role[GR_SPROLE_LEN];
81566+ struct sprole_pw *sprole_pws;
81567+ dev_t segv_device;
81568+ u64 segv_inode;
81569+ uid_t segv_uid;
81570+ __u16 num_sprole_pws;
81571+ __u16 mode;
81572+};
81573+
81574+struct gr_arg_wrapper {
81575+ struct gr_arg *arg;
81576+ __u32 version;
81577+ __u32 size;
81578+};
81579+
81580+struct subject_map {
81581+ struct acl_subject_label *user;
81582+ struct acl_subject_label *kernel;
81583+ struct subject_map *prev;
81584+ struct subject_map *next;
81585+};
81586+
81587+struct acl_subj_map_db {
81588+ struct subject_map **s_hash;
81589+ __u32 s_size;
81590+};
81591+
81592+struct gr_policy_state {
81593+ struct sprole_pw **acl_special_roles;
81594+ __u16 num_sprole_pws;
81595+ struct acl_role_label *kernel_role;
81596+ struct acl_role_label *role_list;
81597+ struct acl_role_label *default_role;
81598+ struct acl_role_db acl_role_set;
81599+ struct acl_subj_map_db subj_map_set;
81600+ struct name_db name_set;
81601+ struct inodev_db inodev_set;
81602+};
81603+
81604+struct gr_alloc_state {
81605+ unsigned long alloc_stack_next;
81606+ unsigned long alloc_stack_size;
81607+ void **alloc_stack;
81608+};
81609+
81610+struct gr_reload_state {
81611+ struct gr_policy_state oldpolicy;
81612+ struct gr_alloc_state oldalloc;
81613+ struct gr_policy_state newpolicy;
81614+ struct gr_alloc_state newalloc;
81615+ struct gr_policy_state *oldpolicy_ptr;
81616+ struct gr_alloc_state *oldalloc_ptr;
81617+ unsigned char oldmode;
81618+};
81619+
81620+/* End Data Structures Section */
81621+
81622+/* Hash functions generated by empirical testing by Brad Spengler.
81623+   Makes good use of the low bits of the inode. Typically 0-1 loop
81624+   iterations are needed for a successful match, 0-3 for an
81625+   unsuccessful match. Shift/add algorithm with modulus of table size and an XOR. */
81626+
81627+static __inline__ unsigned int
81628+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
81629+{
81630+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
81631+}
81632+
81633+static __inline__ unsigned int
81634+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
81635+{
81636+ return ((const unsigned long)userp % sz);
81637+}
81638+
81639+static __inline__ unsigned int
81640+gr_fhash(const u64 ino, const dev_t dev, const unsigned int sz)
81641+{
81642+ unsigned int rem;
81643+ div_u64_rem((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9)), sz, &rem);
81644+ return rem;
81645+}
81646+
81647+static __inline__ unsigned int
81648+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
81649+{
81650+ return full_name_hash((const unsigned char *)name, len) % sz;
81651+}
81652+
81653+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
81654+ subj = NULL; \
81655+ iter = 0; \
81656+ while (iter < role->subj_hash_size) { \
81657+ if (subj == NULL) \
81658+ subj = role->subj_hash[iter]; \
81659+ if (subj == NULL) { \
81660+ iter++; \
81661+ continue; \
81662+ }
81663+
81664+#define FOR_EACH_SUBJECT_END(subj,iter) \
81665+ subj = subj->next; \
81666+ if (subj == NULL) \
81667+ iter++; \
81668+ }
81669+
81670+
81671+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
81672+ subj = role->hash->first; \
81673+ while (subj != NULL) {
81674+
81675+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
81676+ subj = subj->next; \
81677+ }
81678+
81679+#endif
81680+
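
gr_fhash() above mixes the inode and device numbers through shifts and an XOR before reducing modulo the hash table size; div_u64_rem() is only needed because 64-bit division is awkward inside the kernel. A userspace rendering for experimenting with the bucket distribution; the sample inode/device values are invented:

#include <stdio.h>
#include <stdint.h>

static unsigned int gr_fhash(uint64_t ino, uint32_t dev, unsigned int sz)
{
        /* plain % replaces div_u64_rem() outside the kernel */
        return (unsigned int)(((ino + dev) ^
                ((ino << 13) + (ino << 23) + ((uint64_t)dev << 9))) % sz);
}

int main(void)
{
        /* a few neighbouring inodes on one device should spread out */
        for (uint64_t ino = 100; ino < 105; ino++)
                printf("inode %llu -> bucket %u\n",
                       (unsigned long long)ino, gr_fhash(ino, 0x801, 256));
        return 0;
}
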
81681diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
81682new file mode 100644
81683index 0000000..af64092
81684--- /dev/null
81685+++ b/include/linux/gracl_compat.h
81686@@ -0,0 +1,156 @@
81687+#ifndef GR_ACL_COMPAT_H
81688+#define GR_ACL_COMPAT_H
81689+
81690+#include <linux/resource.h>
81691+#include <asm/resource.h>
81692+
81693+struct sprole_pw_compat {
81694+ compat_uptr_t rolename;
81695+ unsigned char salt[GR_SALT_LEN];
81696+ unsigned char sum[GR_SHA_LEN];
81697+};
81698+
81699+struct gr_hash_struct_compat {
81700+ compat_uptr_t table;
81701+ compat_uptr_t nametable;
81702+ compat_uptr_t first;
81703+ __u32 table_size;
81704+ __u32 used_size;
81705+ int type;
81706+};
81707+
81708+struct acl_subject_label_compat {
81709+ compat_uptr_t filename;
81710+ compat_u64 inode;
81711+ __u32 device;
81712+ __u32 mode;
81713+ kernel_cap_t cap_mask;
81714+ kernel_cap_t cap_lower;
81715+ kernel_cap_t cap_invert_audit;
81716+
81717+ struct compat_rlimit res[GR_NLIMITS];
81718+ __u32 resmask;
81719+
81720+ __u8 user_trans_type;
81721+ __u8 group_trans_type;
81722+ compat_uptr_t user_transitions;
81723+ compat_uptr_t group_transitions;
81724+ __u16 user_trans_num;
81725+ __u16 group_trans_num;
81726+
81727+ __u32 sock_families[2];
81728+ __u32 ip_proto[8];
81729+ __u32 ip_type;
81730+ compat_uptr_t ips;
81731+ __u32 ip_num;
81732+ __u32 inaddr_any_override;
81733+
81734+ __u32 crashes;
81735+ compat_ulong_t expires;
81736+
81737+ compat_uptr_t parent_subject;
81738+ compat_uptr_t hash;
81739+ compat_uptr_t prev;
81740+ compat_uptr_t next;
81741+
81742+ compat_uptr_t obj_hash;
81743+ __u32 obj_hash_size;
81744+ __u16 pax_flags;
81745+};
81746+
81747+struct role_allowed_ip_compat {
81748+ __u32 addr;
81749+ __u32 netmask;
81750+
81751+ compat_uptr_t prev;
81752+ compat_uptr_t next;
81753+};
81754+
81755+struct role_transition_compat {
81756+ compat_uptr_t rolename;
81757+
81758+ compat_uptr_t prev;
81759+ compat_uptr_t next;
81760+};
81761+
81762+struct acl_role_label_compat {
81763+ compat_uptr_t rolename;
81764+ uid_t uidgid;
81765+ __u16 roletype;
81766+
81767+ __u16 auth_attempts;
81768+ compat_ulong_t expires;
81769+
81770+ compat_uptr_t root_label;
81771+ compat_uptr_t hash;
81772+
81773+ compat_uptr_t prev;
81774+ compat_uptr_t next;
81775+
81776+ compat_uptr_t transitions;
81777+ compat_uptr_t allowed_ips;
81778+ compat_uptr_t domain_children;
81779+ __u16 domain_child_num;
81780+
81781+ umode_t umask;
81782+
81783+ compat_uptr_t subj_hash;
81784+ __u32 subj_hash_size;
81785+};
81786+
81787+struct user_acl_role_db_compat {
81788+ compat_uptr_t r_table;
81789+ __u32 num_pointers;
81790+ __u32 num_roles;
81791+ __u32 num_domain_children;
81792+ __u32 num_subjects;
81793+ __u32 num_objects;
81794+};
81795+
81796+struct acl_object_label_compat {
81797+ compat_uptr_t filename;
81798+ compat_u64 inode;
81799+ __u32 device;
81800+ __u32 mode;
81801+
81802+ compat_uptr_t nested;
81803+ compat_uptr_t globbed;
81804+
81805+ compat_uptr_t prev;
81806+ compat_uptr_t next;
81807+};
81808+
81809+struct acl_ip_label_compat {
81810+ compat_uptr_t iface;
81811+ __u32 addr;
81812+ __u32 netmask;
81813+ __u16 low, high;
81814+ __u8 mode;
81815+ __u32 type;
81816+ __u32 proto[8];
81817+
81818+ compat_uptr_t prev;
81819+ compat_uptr_t next;
81820+};
81821+
81822+struct gr_arg_compat {
81823+ struct user_acl_role_db_compat role_db;
81824+ unsigned char pw[GR_PW_LEN];
81825+ unsigned char salt[GR_SALT_LEN];
81826+ unsigned char sum[GR_SHA_LEN];
81827+ unsigned char sp_role[GR_SPROLE_LEN];
81828+ compat_uptr_t sprole_pws;
81829+ __u32 segv_device;
81830+ compat_u64 segv_inode;
81831+ uid_t segv_uid;
81832+ __u16 num_sprole_pws;
81833+ __u16 mode;
81834+};
81835+
81836+struct gr_arg_wrapper_compat {
81837+ compat_uptr_t arg;
81838+ __u32 version;
81839+ __u32 size;
81840+};
81841+
81842+#endif
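
Every *_compat struct in this header mirrors its native counterpart with pointers narrowed to the 32-bit compat_uptr_t, so a 64-bit kernel can parse policy blobs submitted by a 32-bit userland; widening back to a native pointer goes through compat_ptr(). A sketch of that convention with stand-in types:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t compat_uptr_t;

static void *compat_ptr(compat_uptr_t uptr)
{
        return (void *)(uintptr_t)uptr;   /* zero-extend to native width */
}

struct gr_arg_wrapper_compat_like {
        compat_uptr_t arg;    /* was: struct gr_arg *arg */
        uint32_t version;
        uint32_t size;
};

int main(void)
{
        struct gr_arg_wrapper_compat_like w = { .arg = 0x1000, .version = 1 };

        printf("native pointer: %p (version %u)\n",
               compat_ptr(w.arg), w.version);
        return 0;
}
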
81843diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
81844new file mode 100644
81845index 0000000..323ecf2
81846--- /dev/null
81847+++ b/include/linux/gralloc.h
81848@@ -0,0 +1,9 @@
81849+#ifndef __GRALLOC_H
81850+#define __GRALLOC_H
81851+
81852+void acl_free_all(void);
81853+int acl_alloc_stack_init(unsigned long size);
81854+void *acl_alloc(unsigned long len);
81855+void *acl_alloc_num(unsigned long num, unsigned long len);
81856+
81857+#endif
81858diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
81859new file mode 100644
81860index 0000000..be66033
81861--- /dev/null
81862+++ b/include/linux/grdefs.h
81863@@ -0,0 +1,140 @@
81864+#ifndef GRDEFS_H
81865+#define GRDEFS_H
81866+
81867+/* Begin grsecurity status declarations */
81868+
81869+enum {
81870+ GR_READY = 0x01,
81871+ GR_STATUS_INIT = 0x00 /* disabled state */
81872+};
81873+
81874+/* Begin ACL declarations */
81875+
81876+/* Role flags */
81877+
81878+enum {
81879+ GR_ROLE_USER = 0x0001,
81880+ GR_ROLE_GROUP = 0x0002,
81881+ GR_ROLE_DEFAULT = 0x0004,
81882+ GR_ROLE_SPECIAL = 0x0008,
81883+ GR_ROLE_AUTH = 0x0010,
81884+ GR_ROLE_NOPW = 0x0020,
81885+ GR_ROLE_GOD = 0x0040,
81886+ GR_ROLE_LEARN = 0x0080,
81887+ GR_ROLE_TPE = 0x0100,
81888+ GR_ROLE_DOMAIN = 0x0200,
81889+ GR_ROLE_PAM = 0x0400,
81890+ GR_ROLE_PERSIST = 0x0800
81891+};
81892+
81893+/* ACL Subject and Object mode flags */
81894+enum {
81895+ GR_DELETED = 0x80000000
81896+};
81897+
81898+/* ACL Object-only mode flags */
81899+enum {
81900+ GR_READ = 0x00000001,
81901+ GR_APPEND = 0x00000002,
81902+ GR_WRITE = 0x00000004,
81903+ GR_EXEC = 0x00000008,
81904+ GR_FIND = 0x00000010,
81905+ GR_INHERIT = 0x00000020,
81906+ GR_SETID = 0x00000040,
81907+ GR_CREATE = 0x00000080,
81908+ GR_DELETE = 0x00000100,
81909+ GR_LINK = 0x00000200,
81910+ GR_AUDIT_READ = 0x00000400,
81911+ GR_AUDIT_APPEND = 0x00000800,
81912+ GR_AUDIT_WRITE = 0x00001000,
81913+ GR_AUDIT_EXEC = 0x00002000,
81914+ GR_AUDIT_FIND = 0x00004000,
81915+ GR_AUDIT_INHERIT= 0x00008000,
81916+ GR_AUDIT_SETID = 0x00010000,
81917+ GR_AUDIT_CREATE = 0x00020000,
81918+ GR_AUDIT_DELETE = 0x00040000,
81919+ GR_AUDIT_LINK = 0x00080000,
81920+ GR_PTRACERD = 0x00100000,
81921+ GR_NOPTRACE = 0x00200000,
81922+ GR_SUPPRESS = 0x00400000,
81923+ GR_NOLEARN = 0x00800000,
81924+ GR_INIT_TRANSFER= 0x01000000
81925+};
81926+
81927+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
81928+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
81929+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
81930+
81931+/* ACL subject-only mode flags */
81932+enum {
81933+ GR_KILL = 0x00000001,
81934+ GR_VIEW = 0x00000002,
81935+ GR_PROTECTED = 0x00000004,
81936+ GR_LEARN = 0x00000008,
81937+ GR_OVERRIDE = 0x00000010,
81938+ /* just a placeholder, this mode is only used in userspace */
81939+ GR_DUMMY = 0x00000020,
81940+ GR_PROTSHM = 0x00000040,
81941+ GR_KILLPROC = 0x00000080,
81942+ GR_KILLIPPROC = 0x00000100,
81943+ /* just a placeholder, this mode is only used in userspace */
81944+ GR_NOTROJAN = 0x00000200,
81945+ GR_PROTPROCFD = 0x00000400,
81946+ GR_PROCACCT = 0x00000800,
81947+ GR_RELAXPTRACE = 0x00001000,
81948+ //GR_NESTED = 0x00002000,
81949+ GR_INHERITLEARN = 0x00004000,
81950+ GR_PROCFIND = 0x00008000,
81951+ GR_POVERRIDE = 0x00010000,
81952+ GR_KERNELAUTH = 0x00020000,
81953+ GR_ATSECURE = 0x00040000,
81954+ GR_SHMEXEC = 0x00080000
81955+};
81956+
81957+enum {
81958+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
81959+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
81960+ GR_PAX_ENABLE_MPROTECT = 0x0004,
81961+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
81962+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
81963+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
81964+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
81965+ GR_PAX_DISABLE_MPROTECT = 0x0400,
81966+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
81967+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
81968+};
81969+
81970+enum {
81971+ GR_ID_USER = 0x01,
81972+ GR_ID_GROUP = 0x02,
81973+};
81974+
81975+enum {
81976+ GR_ID_ALLOW = 0x01,
81977+ GR_ID_DENY = 0x02,
81978+};
81979+
81980+#define GR_CRASH_RES 31
81981+#define GR_UIDTABLE_MAX 500
81982+
81983+/* begin resource learning section */
81984+enum {
81985+ GR_RLIM_CPU_BUMP = 60,
81986+ GR_RLIM_FSIZE_BUMP = 50000,
81987+ GR_RLIM_DATA_BUMP = 10000,
81988+ GR_RLIM_STACK_BUMP = 1000,
81989+ GR_RLIM_CORE_BUMP = 10000,
81990+ GR_RLIM_RSS_BUMP = 500000,
81991+ GR_RLIM_NPROC_BUMP = 1,
81992+ GR_RLIM_NOFILE_BUMP = 5,
81993+ GR_RLIM_MEMLOCK_BUMP = 50000,
81994+ GR_RLIM_AS_BUMP = 500000,
81995+ GR_RLIM_LOCKS_BUMP = 2,
81996+ GR_RLIM_SIGPENDING_BUMP = 5,
81997+ GR_RLIM_MSGQUEUE_BUMP = 10000,
81998+ GR_RLIM_NICE_BUMP = 1,
81999+ GR_RLIM_RTPRIO_BUMP = 1,
82000+ GR_RLIM_RTTIME_BUMP = 1000000
82001+};
82002+
82003+#endif
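
The object-mode enum above pairs each permission bit with a matching audit bit, and GR_AUDITS ORs the audit bits together so "is any auditing requested" becomes a single mask test. A tiny demonstration with values copied from this hunk; GR_AUDITS is abridged to two bits here:

#include <stdio.h>

#define GR_READ        0x00000001u
#define GR_WRITE       0x00000004u
#define GR_AUDIT_READ  0x00000400u
#define GR_AUDIT_WRITE 0x00001000u
#define GR_AUDITS      (GR_AUDIT_READ | GR_AUDIT_WRITE)   /* demo subset */

int main(void)
{
        unsigned int mode = GR_READ | GR_WRITE | GR_AUDIT_WRITE;

        printf("readable: %d\n", !!(mode & GR_READ));
        printf("audited:  %d\n", !!(mode & GR_AUDITS));
        return 0;
}
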
82004diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
82005new file mode 100644
82006index 0000000..fb1de5d
82007--- /dev/null
82008+++ b/include/linux/grinternal.h
82009@@ -0,0 +1,230 @@
82010+#ifndef __GRINTERNAL_H
82011+#define __GRINTERNAL_H
82012+
82013+#ifdef CONFIG_GRKERNSEC
82014+
82015+#include <linux/fs.h>
82016+#include <linux/mnt_namespace.h>
82017+#include <linux/nsproxy.h>
82018+#include <linux/gracl.h>
82019+#include <linux/grdefs.h>
82020+#include <linux/grmsg.h>
82021+
82022+void gr_add_learn_entry(const char *fmt, ...)
82023+ __attribute__ ((format (printf, 1, 2)));
82024+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
82025+ const struct vfsmount *mnt);
82026+__u32 gr_check_create(const struct dentry *new_dentry,
82027+ const struct dentry *parent,
82028+ const struct vfsmount *mnt, const __u32 mode);
82029+int gr_check_protected_task(const struct task_struct *task);
82030+__u32 to_gr_audit(const __u32 reqmode);
82031+int gr_set_acls(const int type);
82032+int gr_acl_is_enabled(void);
82033+char gr_roletype_to_char(void);
82034+
82035+void gr_handle_alertkill(struct task_struct *task);
82036+char *gr_to_filename(const struct dentry *dentry,
82037+ const struct vfsmount *mnt);
82038+char *gr_to_filename1(const struct dentry *dentry,
82039+ const struct vfsmount *mnt);
82040+char *gr_to_filename2(const struct dentry *dentry,
82041+ const struct vfsmount *mnt);
82042+char *gr_to_filename3(const struct dentry *dentry,
82043+ const struct vfsmount *mnt);
82044+
82045+extern int grsec_enable_ptrace_readexec;
82046+extern int grsec_enable_harden_ptrace;
82047+extern int grsec_enable_link;
82048+extern int grsec_enable_fifo;
82049+extern int grsec_enable_execve;
82050+extern int grsec_enable_shm;
82051+extern int grsec_enable_execlog;
82052+extern int grsec_enable_signal;
82053+extern int grsec_enable_audit_ptrace;
82054+extern int grsec_enable_forkfail;
82055+extern int grsec_enable_time;
82056+extern int grsec_enable_rofs;
82057+extern int grsec_deny_new_usb;
82058+extern int grsec_enable_chroot_shmat;
82059+extern int grsec_enable_chroot_mount;
82060+extern int grsec_enable_chroot_double;
82061+extern int grsec_enable_chroot_pivot;
82062+extern int grsec_enable_chroot_chdir;
82063+extern int grsec_enable_chroot_chmod;
82064+extern int grsec_enable_chroot_mknod;
82065+extern int grsec_enable_chroot_fchdir;
82066+extern int grsec_enable_chroot_nice;
82067+extern int grsec_enable_chroot_execlog;
82068+extern int grsec_enable_chroot_caps;
82069+extern int grsec_enable_chroot_rename;
82070+extern int grsec_enable_chroot_sysctl;
82071+extern int grsec_enable_chroot_unix;
82072+extern int grsec_enable_symlinkown;
82073+extern kgid_t grsec_symlinkown_gid;
82074+extern int grsec_enable_tpe;
82075+extern kgid_t grsec_tpe_gid;
82076+extern int grsec_enable_tpe_all;
82077+extern int grsec_enable_tpe_invert;
82078+extern int grsec_enable_socket_all;
82079+extern kgid_t grsec_socket_all_gid;
82080+extern int grsec_enable_socket_client;
82081+extern kgid_t grsec_socket_client_gid;
82082+extern int grsec_enable_socket_server;
82083+extern kgid_t grsec_socket_server_gid;
82084+extern kgid_t grsec_audit_gid;
82085+extern int grsec_enable_group;
82086+extern int grsec_enable_log_rwxmaps;
82087+extern int grsec_enable_mount;
82088+extern int grsec_enable_chdir;
82089+extern int grsec_resource_logging;
82090+extern int grsec_enable_blackhole;
82091+extern int grsec_lastack_retries;
82092+extern int grsec_enable_brute;
82093+extern int grsec_enable_harden_ipc;
82094+extern int grsec_lock;
82095+
82096+extern spinlock_t grsec_alert_lock;
82097+extern unsigned long grsec_alert_wtime;
82098+extern unsigned long grsec_alert_fyet;
82099+
82100+extern spinlock_t grsec_audit_lock;
82101+
82102+extern rwlock_t grsec_exec_file_lock;
82103+
82104+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
82105+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
82106+ (tsk)->exec_file->f_path.mnt) : "/")
82107+
82108+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
82109+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
82110+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
82111+
82112+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
82113+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
82114+ (tsk)->exec_file->f_path.mnt) : "/")
82115+
82116+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
82117+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
82118+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
82119+
82120+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
82121+
82122+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
82123+
82124+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
82125+{
82126+ if (file1 && file2) {
82127+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
82128+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
82129+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
82130+ return true;
82131+ }
82132+
82133+ return false;
82134+}
82135+
82136+#define GR_CHROOT_CAPS {{ \
82137+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
82138+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
82139+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
82140+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
82141+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
82142+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
82143+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
82144+
82145+#define security_learn(normal_msg,args...) \
82146+({ \
82147+ read_lock(&grsec_exec_file_lock); \
82148+ gr_add_learn_entry(normal_msg "\n", ## args); \
82149+ read_unlock(&grsec_exec_file_lock); \
82150+})
82151+
82152+enum {
82153+ GR_DO_AUDIT,
82154+ GR_DONT_AUDIT,
82155+ /* used for non-audit messages that we shouldn't kill the task on */
82156+ GR_DONT_AUDIT_GOOD
82157+};
82158+
82159+enum {
82160+ GR_TTYSNIFF,
82161+ GR_RBAC,
82162+ GR_RBAC_STR,
82163+ GR_STR_RBAC,
82164+ GR_RBAC_MODE2,
82165+ GR_RBAC_MODE3,
82166+ GR_FILENAME,
82167+ GR_SYSCTL_HIDDEN,
82168+ GR_NOARGS,
82169+ GR_ONE_INT,
82170+ GR_ONE_INT_TWO_STR,
82171+ GR_ONE_STR,
82172+ GR_STR_INT,
82173+ GR_TWO_STR_INT,
82174+ GR_TWO_INT,
82175+ GR_TWO_U64,
82176+ GR_THREE_INT,
82177+ GR_FIVE_INT_TWO_STR,
82178+ GR_TWO_STR,
82179+ GR_THREE_STR,
82180+ GR_FOUR_STR,
82181+ GR_STR_FILENAME,
82182+ GR_FILENAME_STR,
82183+ GR_FILENAME_TWO_INT,
82184+ GR_FILENAME_TWO_INT_STR,
82185+ GR_TEXTREL,
82186+ GR_PTRACE,
82187+ GR_RESOURCE,
82188+ GR_CAP,
82189+ GR_SIG,
82190+ GR_SIG2,
82191+ GR_CRASH1,
82192+ GR_CRASH2,
82193+ GR_PSACCT,
82194+ GR_RWXMAP,
82195+ GR_RWXMAPVMA
82196+};
82197+
82198+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
82199+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
82200+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
82201+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
82202+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
82203+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
82204+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
82205+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
82206+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
82207+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
82208+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
82209+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
82210+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
82211+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
82212+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
82213+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
82214+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
82215+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
82216+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
82217+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
82218+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
82219+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
82220+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
82221+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
82222+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
82223+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
82224+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
82225+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
82226+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
82227+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
82228+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
82229+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
82230+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
82231+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
82232+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
82233+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
82234+
82235+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
82236+
82237+#endif
82238+
82239+#endif
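
The gr_log_* macro family above funnels everything into gr_log_varargs(), whose argtypes token tells the sink how to pull arguments off the va_list. A miniature model of that dispatch; the audit parameter and all but two cases are omitted:

#include <stdarg.h>
#include <stdio.h>

enum { GR_ONE_INT, GR_TWO_STR };

static void gr_log_varargs(const char *msg, int argtypes, ...)
{
        va_list ap;

        va_start(ap, argtypes);
        switch (argtypes) {
        case GR_ONE_INT:
                printf("grsec: %s %d\n", msg, va_arg(ap, int));
                break;
        case GR_TWO_STR: {
                const char *a = va_arg(ap, const char *);
                const char *b = va_arg(ap, const char *);

                printf("grsec: %s %s %s\n", msg, a, b);
                break;
        }
        }
        va_end(ap);
}

#define gr_log_int(msg, num)        gr_log_varargs(msg, GR_ONE_INT, num)
#define gr_log_str_str(msg, s1, s2) gr_log_varargs(msg, GR_TWO_STR, s1, s2)

int main(void)
{
        gr_log_int("invalid mode", 7);
        gr_log_str_str("mount of", "/dev/sda1", "/mnt");
        return 0;
}
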
82240diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
82241new file mode 100644
82242index 0000000..26ef560
82243--- /dev/null
82244+++ b/include/linux/grmsg.h
82245@@ -0,0 +1,118 @@
82246+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
82247+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
82248+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
82249+#define GR_STOPMOD_MSG "denied modification of module state by "
82250+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
82251+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
82252+#define GR_IOPERM_MSG "denied use of ioperm() by "
82253+#define GR_IOPL_MSG "denied use of iopl() by "
82254+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
82255+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
82256+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
82257+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
82258+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
82259+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
82260+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
82261+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
82262+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
82263+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
82264+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
82265+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
82266+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
82267+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
82268+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
82269+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
82270+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
82271+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
82272+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
82273+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
82274+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
82275+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
82276+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
82277+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
82278+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
82279+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
82280+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
82281+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
82282+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
82283+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
82284+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
82285+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
82286+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
82287+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
82288+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
82289+#define GR_CHROOT_RENAME_MSG "denied bad rename of %.950s out of a chroot by "
82290+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
82291+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
82292+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
82293+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
82294+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
82295+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
82296+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
82297+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
82298+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
82299+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
82300+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
82301+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
82302+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
82303+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
82304+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
82305+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
82306+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
82307+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
82308+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
82309+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
82310+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
82311+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
82312+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
82313+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
82314+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
82315+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
82316+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
82317+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
82318+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
82319+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
82320+#define GR_FAILFORK_MSG "failed fork with errno %s by "
82321+#define GR_NICE_CHROOT_MSG "denied priority change by "
82322+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
82323+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
82324+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
82325+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
82326+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
82327+#define GR_TIME_MSG "time set by "
82328+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
82329+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
82330+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
82331+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
82332+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
82333+#define GR_BIND_MSG "denied bind() by "
82334+#define GR_CONNECT_MSG "denied connect() by "
82335+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
82336+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
82337+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
82338+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
82339+#define GR_CAP_ACL_MSG "use of %s denied for "
82340+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
82341+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
82342+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
82343+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
82344+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
82345+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
82346+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
82347+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
82348+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
82349+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
82350+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
82351+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
82352+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
82353+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
82354+#define GR_VM86_MSG "denied use of vm86 by "
82355+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
82356+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
82357+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
82358+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
82359+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
82360+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
82361+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
82362+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
82363+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
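Nearly every message above ends in "by " (or embeds DEFAULTSECMSG) because the logger appends a shared task-identity block at the call site via adjacent string-literal concatenation. A minimal standalone sketch of that idiom follows; DEFAULTSECMSG here is a hypothetical stand-in for the much longer identity format defined elsewhere in this patch.

#include <stdio.h>

/* hypothetical stand-in; the real DEFAULTSECMSG is defined elsewhere
 * in this patch and carries uids, gids, and parent identity too */
#define DEFAULTSECMSG "%.256s[%.16s:%d]"

#define GR_TIME_MSG_SKETCH "time set by "

int main(void)
{
	/* adjacent literals concatenate, so the prefix and the shared
	 * suffix become one format string at compile time */
	printf(GR_TIME_MSG_SKETCH DEFAULTSECMSG "\n",
	       "/usr/bin/date", "date", 1234);
	return 0;
}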
82364diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
82365new file mode 100644
82366index 0000000..63c1850
82367--- /dev/null
82368+++ b/include/linux/grsecurity.h
82369@@ -0,0 +1,250 @@
82370+#ifndef GR_SECURITY_H
82371+#define GR_SECURITY_H
82372+#include <linux/fs.h>
82373+#include <linux/fs_struct.h>
82374+#include <linux/binfmts.h>
82375+#include <linux/gracl.h>
82376+
82377+/* notify of brain-dead configs */
82378+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82379+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
82380+#endif
82381+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82382+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
82383+#endif
82384+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
82385+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
82386+#endif
82387+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
82388+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
82389+#endif
82390+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
82391+#error "CONFIG_PAX enabled, but no PaX options are enabled."
82392+#endif
82393+
82394+int gr_handle_new_usb(void);
82395+
82396+void gr_handle_brute_attach(int dumpable);
82397+void gr_handle_brute_check(void);
82398+void gr_handle_kernel_exploit(void);
82399+
82400+char gr_roletype_to_char(void);
82401+
82402+int gr_proc_is_restricted(void);
82403+
82404+int gr_acl_enable_at_secure(void);
82405+
82406+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
82407+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
82408+
82409+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
82410+
82411+void gr_del_task_from_ip_table(struct task_struct *p);
82412+
82413+int gr_pid_is_chrooted(struct task_struct *p);
82414+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
82415+int gr_handle_chroot_nice(void);
82416+int gr_handle_chroot_sysctl(const int op);
82417+int gr_handle_chroot_setpriority(struct task_struct *p,
82418+ const int niceval);
82419+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
82420+int gr_chroot_fhandle(void);
82421+int gr_handle_chroot_chroot(const struct dentry *dentry,
82422+ const struct vfsmount *mnt);
82423+void gr_handle_chroot_chdir(const struct path *path);
82424+int gr_handle_chroot_chmod(const struct dentry *dentry,
82425+ const struct vfsmount *mnt, const int mode);
82426+int gr_handle_chroot_mknod(const struct dentry *dentry,
82427+ const struct vfsmount *mnt, const int mode);
82428+int gr_handle_chroot_mount(const struct dentry *dentry,
82429+ const struct vfsmount *mnt,
82430+ const char *dev_name);
82431+int gr_handle_chroot_pivot(void);
82432+int gr_handle_chroot_unix(const pid_t pid);
82433+
82434+int gr_handle_rawio(const struct inode *inode);
82435+
82436+void gr_handle_ioperm(void);
82437+void gr_handle_iopl(void);
82438+void gr_handle_msr_write(void);
82439+
82440+umode_t gr_acl_umask(void);
82441+
82442+int gr_tpe_allow(const struct file *file);
82443+
82444+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
82445+void gr_clear_chroot_entries(struct task_struct *task);
82446+
82447+void gr_log_forkfail(const int retval);
82448+void gr_log_timechange(void);
82449+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
82450+void gr_log_chdir(const struct dentry *dentry,
82451+ const struct vfsmount *mnt);
82452+void gr_log_chroot_exec(const struct dentry *dentry,
82453+ const struct vfsmount *mnt);
82454+void gr_log_remount(const char *devname, const int retval);
82455+void gr_log_unmount(const char *devname, const int retval);
82456+void gr_log_mount(const char *from, struct path *to, const int retval);
82457+void gr_log_textrel(struct vm_area_struct *vma);
82458+void gr_log_ptgnustack(struct file *file);
82459+void gr_log_rwxmmap(struct file *file);
82460+void gr_log_rwxmprotect(struct vm_area_struct *vma);
82461+
82462+int gr_handle_follow_link(const struct inode *parent,
82463+ const struct inode *inode,
82464+ const struct dentry *dentry,
82465+ const struct vfsmount *mnt);
82466+int gr_handle_fifo(const struct dentry *dentry,
82467+ const struct vfsmount *mnt,
82468+ const struct dentry *dir, const int flag,
82469+ const int acc_mode);
82470+int gr_handle_hardlink(const struct dentry *dentry,
82471+ const struct vfsmount *mnt,
82472+ struct inode *inode,
82473+ const int mode, const struct filename *to);
82474+
82475+int gr_is_capable(const int cap);
82476+int gr_is_capable_nolog(const int cap);
82477+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
82478+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
82479+
82480+void gr_copy_label(struct task_struct *tsk);
82481+void gr_handle_crash(struct task_struct *task, const int sig);
82482+int gr_handle_signal(const struct task_struct *p, const int sig);
82483+int gr_check_crash_uid(const kuid_t uid);
82484+int gr_check_protected_task(const struct task_struct *task);
82485+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
82486+int gr_acl_handle_mmap(const struct file *file,
82487+ const unsigned long prot);
82488+int gr_acl_handle_mprotect(const struct file *file,
82489+ const unsigned long prot);
82490+int gr_check_hidden_task(const struct task_struct *tsk);
82491+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
82492+ const struct vfsmount *mnt);
82493+__u32 gr_acl_handle_utime(const struct dentry *dentry,
82494+ const struct vfsmount *mnt);
82495+__u32 gr_acl_handle_access(const struct dentry *dentry,
82496+ const struct vfsmount *mnt, const int fmode);
82497+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
82498+ const struct vfsmount *mnt, umode_t *mode);
82499+__u32 gr_acl_handle_chown(const struct dentry *dentry,
82500+ const struct vfsmount *mnt);
82501+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
82502+ const struct vfsmount *mnt);
82503+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
82504+ const struct vfsmount *mnt);
82505+int gr_handle_ptrace(struct task_struct *task, const long request);
82506+int gr_handle_proc_ptrace(struct task_struct *task);
82507+__u32 gr_acl_handle_execve(const struct dentry *dentry,
82508+ const struct vfsmount *mnt);
82509+int gr_check_crash_exec(const struct file *filp);
82510+int gr_acl_is_enabled(void);
82511+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
82512+ const kgid_t gid);
82513+int gr_set_proc_label(const struct dentry *dentry,
82514+ const struct vfsmount *mnt,
82515+ const int unsafe_flags);
82516+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
82517+ const struct vfsmount *mnt);
82518+__u32 gr_acl_handle_open(const struct dentry *dentry,
82519+ const struct vfsmount *mnt, int acc_mode);
82520+__u32 gr_acl_handle_creat(const struct dentry *dentry,
82521+ const struct dentry *p_dentry,
82522+ const struct vfsmount *p_mnt,
82523+ int open_flags, int acc_mode, const int imode);
82524+void gr_handle_create(const struct dentry *dentry,
82525+ const struct vfsmount *mnt);
82526+void gr_handle_proc_create(const struct dentry *dentry,
82527+ const struct inode *inode);
82528+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
82529+ const struct dentry *parent_dentry,
82530+ const struct vfsmount *parent_mnt,
82531+ const int mode);
82532+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
82533+ const struct dentry *parent_dentry,
82534+ const struct vfsmount *parent_mnt);
82535+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
82536+ const struct vfsmount *mnt);
82537+void gr_handle_delete(const u64 ino, const dev_t dev);
82538+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
82539+ const struct vfsmount *mnt);
82540+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
82541+ const struct dentry *parent_dentry,
82542+ const struct vfsmount *parent_mnt,
82543+ const struct filename *from);
82544+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
82545+ const struct dentry *parent_dentry,
82546+ const struct vfsmount *parent_mnt,
82547+ const struct dentry *old_dentry,
82548+ const struct vfsmount *old_mnt, const struct filename *to);
82549+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
82550+int gr_acl_handle_rename(struct dentry *new_dentry,
82551+ struct dentry *parent_dentry,
82552+ const struct vfsmount *parent_mnt,
82553+ struct dentry *old_dentry,
82554+ struct inode *old_parent_inode,
82555+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
82556+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
82557+ struct dentry *old_dentry,
82558+ struct dentry *new_dentry,
82559+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
82560+__u32 gr_check_link(const struct dentry *new_dentry,
82561+ const struct dentry *parent_dentry,
82562+ const struct vfsmount *parent_mnt,
82563+ const struct dentry *old_dentry,
82564+ const struct vfsmount *old_mnt);
82565+int gr_acl_handle_filldir(const struct file *file, const char *name,
82566+ const unsigned int namelen, const u64 ino);
82567+
82568+__u32 gr_acl_handle_unix(const struct dentry *dentry,
82569+ const struct vfsmount *mnt);
82570+void gr_acl_handle_exit(void);
82571+void gr_acl_handle_psacct(struct task_struct *task, const long code);
82572+int gr_acl_handle_procpidmem(const struct task_struct *task);
82573+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
82574+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
82575+void gr_audit_ptrace(struct task_struct *task);
82576+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
82577+u64 gr_get_ino_from_dentry(struct dentry *dentry);
82578+void gr_put_exec_file(struct task_struct *task);
82579+
82580+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
82581+
82582+void gr_inc_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
82583+void gr_dec_chroot_refcnts(struct dentry *dentry, struct vfsmount *mnt);
82584+int gr_bad_chroot_rename(struct dentry *olddentry, struct vfsmount *oldmnt,
82585+ struct dentry *newdentry, struct vfsmount *newmnt);
82586+
82587+#ifdef CONFIG_GRKERNSEC_RESLOG
82588+extern void gr_log_resource(const struct task_struct *task, const int res,
82589+ const unsigned long wanted, const int gt);
82590+#else
82591+static inline void gr_log_resource(const struct task_struct *task, const int res,
82592+ const unsigned long wanted, const int gt)
82593+{
82594+}
82595+#endif
82596+
82597+#ifdef CONFIG_GRKERNSEC
82598+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
82599+void gr_handle_vm86(void);
82600+void gr_handle_mem_readwrite(u64 from, u64 to);
82601+
82602+void gr_log_badprocpid(const char *entry);
82603+
82604+extern int grsec_enable_dmesg;
82605+extern int grsec_disable_privio;
82606+
82607+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
82608+extern kgid_t grsec_proc_gid;
82609+#endif
82610+
82611+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
82612+extern int grsec_enable_chroot_findtask;
82613+#endif
82614+#ifdef CONFIG_GRKERNSEC_SETXID
82615+extern int grsec_enable_setxid;
82616+#endif
82617+#endif
82618+
82619+#endif
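The block of #error directives at the top of this header turns inconsistent option combinations into build failures instead of silently misconfigured kernels. A standalone sketch of the guard pattern, with hypothetical option names standing in for the Kconfig symbols:

/* hypothetical stand-ins for Kconfig-generated symbols */
#define SKETCH_FEATURE 1
#define SKETCH_BACKEND_A 1
/* #define SKETCH_BACKEND_B 1 */

/* mutually exclusive backends: enabling both is a build error */
#if defined(SKETCH_BACKEND_A) && defined(SKETCH_BACKEND_B)
#error "SKETCH_BACKEND_A and SKETCH_BACKEND_B cannot both be enabled."
#endif

/* a feature that is useless without at least one backend */
#if defined(SKETCH_FEATURE) && !defined(SKETCH_BACKEND_A) && !defined(SKETCH_BACKEND_B)
#error "SKETCH_FEATURE enabled, but no backend is enabled."
#endif

int main(void) { return 0; }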
82620diff --git a/include/linux/grsock.h b/include/linux/grsock.h
82621new file mode 100644
82622index 0000000..e7ffaaf
82623--- /dev/null
82624+++ b/include/linux/grsock.h
82625@@ -0,0 +1,19 @@
82626+#ifndef __GRSOCK_H
82627+#define __GRSOCK_H
82628+
82629+extern void gr_attach_curr_ip(const struct sock *sk);
82630+extern int gr_handle_sock_all(const int family, const int type,
82631+ const int protocol);
82632+extern int gr_handle_sock_server(const struct sockaddr *sck);
82633+extern int gr_handle_sock_server_other(const struct sock *sck);
82634+extern int gr_handle_sock_client(const struct sockaddr *sck);
82635+extern int gr_search_connect(struct socket * sock,
82636+ struct sockaddr_in * addr);
82637+extern int gr_search_bind(struct socket * sock,
82638+ struct sockaddr_in * addr);
82639+extern int gr_search_listen(struct socket * sock);
82640+extern int gr_search_accept(struct socket * sock);
82641+extern int gr_search_socket(const int domain, const int type,
82642+ const int protocol);
82643+
82644+#endif
82645diff --git a/include/linux/highmem.h b/include/linux/highmem.h
82646index 9286a46..373f27f 100644
82647--- a/include/linux/highmem.h
82648+++ b/include/linux/highmem.h
82649@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
82650 kunmap_atomic(kaddr);
82651 }
82652
82653+static inline void sanitize_highpage(struct page *page)
82654+{
82655+ void *kaddr;
82656+ unsigned long flags;
82657+
82658+ local_irq_save(flags);
82659+ kaddr = kmap_atomic(page);
82660+ clear_page(kaddr);
82661+ kunmap_atomic(kaddr);
82662+ local_irq_restore(flags);
82663+}
82664+
82665 static inline void zero_user_segments(struct page *page,
82666 unsigned start1, unsigned end1,
82667 unsigned start2, unsigned end2)
82668diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
82669index 1c7b89a..7dda400 100644
82670--- a/include/linux/hwmon-sysfs.h
82671+++ b/include/linux/hwmon-sysfs.h
82672@@ -25,7 +25,8 @@
82673 struct sensor_device_attribute{
82674 struct device_attribute dev_attr;
82675 int index;
82676-};
82677+} __do_const;
82678+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
82679 #define to_sensor_dev_attr(_dev_attr) \
82680 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
82681
82682@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
82683 struct device_attribute dev_attr;
82684 u8 index;
82685 u8 nr;
82686-};
82687+} __do_const;
82688+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
82689 #define to_sensor_dev_attr_2(_dev_attr) \
82690 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
82691
82692diff --git a/include/linux/i2c.h b/include/linux/i2c.h
82693index 7c76959..153e597 100644
82694--- a/include/linux/i2c.h
82695+++ b/include/linux/i2c.h
82696@@ -413,6 +413,7 @@ struct i2c_algorithm {
82697 int (*unreg_slave)(struct i2c_client *client);
82698 #endif
82699 };
82700+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
82701
82702 /**
82703 * struct i2c_bus_recovery_info - I2C bus recovery information
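hwmon-sysfs.h and i2c.h show the recurring constify pattern in this patch: ops and attribute structures gain __do_const so the PaX constify plugin can place their instances in read-only memory, while a matching __no_const typedef opts out the few instances that are legitimately built up at runtime. A standalone sketch with the plugin attributes reduced to no-ops:

#include <stdio.h>

/* plugin-provided attributes; outside a constified build they expand
 * to nothing, which is what this sketch models */
#define __do_const
#define __no_const

struct sketch_ops {
	int (*xfer)(int);
} __do_const;

/* opt-out type for instances that must stay writable at runtime */
typedef struct sketch_ops __no_const sketch_ops_no_const;

static int xfer_impl(int x) { return x + 1; }

int main(void)
{
	static const struct sketch_ops fixed = { .xfer = xfer_impl };
	sketch_ops_no_const patched = { .xfer = NULL };

	patched.xfer = xfer_impl;	/* runtime fixup is the whole point */
	printf("%d %d\n", fixed.xfer(1), patched.xfer(2));
	return 0;
}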
82704diff --git a/include/linux/i2o.h b/include/linux/i2o.h
82705index d23c3c2..eb63c81 100644
82706--- a/include/linux/i2o.h
82707+++ b/include/linux/i2o.h
82708@@ -565,7 +565,7 @@ struct i2o_controller {
82709 struct i2o_device *exec; /* Executive */
82710 #if BITS_PER_LONG == 64
82711 spinlock_t context_list_lock; /* lock for context_list */
82712- atomic_t context_list_counter; /* needed for unique contexts */
82713+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
82714 struct list_head context_list; /* list of context id's
82715 and pointers */
82716 #endif
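context_list_counter only has to be unique, not overflow-free, so it becomes atomic_unchecked_t: under PAX_REFCOUNT, plain atomic_t increments trap on overflow, and the unchecked variant opts harmless wrap-around counters out of that check. A userspace model of the distinction, with sketch types standing in for the kernel's (the kernel performs the checked increment atomically in asm, not racily as below):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef struct { volatile int counter; } atomic_sketch_t;
typedef struct { volatile int counter; } atomic_unchecked_sketch_t;

static void atomic_inc_checked(atomic_sketch_t *v)
{
	/* PAX_REFCOUNT semantics: overflow is an error, not a wrap */
	if (v->counter == INT_MAX)
		abort();
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static void atomic_inc_unchecked(atomic_unchecked_sketch_t *v)
{
	/* wrap-around is harmless for unique-id style counters */
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

int main(void)
{
	atomic_sketch_t refs = { 0 };
	atomic_unchecked_sketch_t ctx_id = { INT_MAX };

	atomic_inc_checked(&refs);
	atomic_inc_unchecked(&ctx_id);	/* wraps to INT_MIN, by design */
	printf("%d %d\n", refs.counter, ctx_id.counter);
	return 0;
}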
82717diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
82718index aff7ad8..3942bbd 100644
82719--- a/include/linux/if_pppox.h
82720+++ b/include/linux/if_pppox.h
82721@@ -76,7 +76,7 @@ struct pppox_proto {
82722 int (*ioctl)(struct socket *sock, unsigned int cmd,
82723 unsigned long arg);
82724 struct module *owner;
82725-};
82726+} __do_const;
82727
82728 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
82729 extern void unregister_pppox_proto(int proto_num);
82730diff --git a/include/linux/init.h b/include/linux/init.h
82731index 2df8e8d..3e1280d 100644
82732--- a/include/linux/init.h
82733+++ b/include/linux/init.h
82734@@ -37,9 +37,17 @@
82735 * section.
82736 */
82737
82738+#define add_init_latent_entropy __latent_entropy
82739+
82740+#ifdef CONFIG_MEMORY_HOTPLUG
82741+#define add_meminit_latent_entropy
82742+#else
82743+#define add_meminit_latent_entropy __latent_entropy
82744+#endif
82745+
82746 /* These are for everybody (although not all archs will actually
82747 discard it in modules) */
82748-#define __init __section(.init.text) __cold notrace
82749+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
82750 #define __initdata __section(.init.data)
82751 #define __initconst __constsection(.init.rodata)
82752 #define __exitdata __section(.exit.data)
82753@@ -100,7 +108,7 @@
82754 #define __cpuexitconst
82755
82756 /* Used for MEMORY_HOTPLUG */
82757-#define __meminit __section(.meminit.text) __cold notrace
82758+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
82759 #define __meminitdata __section(.meminit.data)
82760 #define __meminitconst __constsection(.meminit.rodata)
82761 #define __memexit __section(.memexit.text) __exitused __cold notrace
82762diff --git a/include/linux/init_task.h b/include/linux/init_task.h
82763index 3037fc0..c6527ce 100644
82764--- a/include/linux/init_task.h
82765+++ b/include/linux/init_task.h
82766@@ -158,6 +158,12 @@ extern struct task_group root_task_group;
82767
82768 #define INIT_TASK_COMM "swapper"
82769
82770+#ifdef CONFIG_X86
82771+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
82772+#else
82773+#define INIT_TASK_THREAD_INFO
82774+#endif
82775+
82776 #ifdef CONFIG_RT_MUTEXES
82777 # define INIT_RT_MUTEXES(tsk) \
82778 .pi_waiters = RB_ROOT, \
82779@@ -214,6 +220,7 @@ extern struct task_group root_task_group;
82780 RCU_POINTER_INITIALIZER(cred, &init_cred), \
82781 .comm = INIT_TASK_COMM, \
82782 .thread = INIT_THREAD, \
82783+ INIT_TASK_THREAD_INFO \
82784 .fs = &init_fs, \
82785 .files = &init_files, \
82786 .signal = &init_signals, \
82787diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
82788index d9b05b5..e5f5b7b 100644
82789--- a/include/linux/interrupt.h
82790+++ b/include/linux/interrupt.h
82791@@ -413,8 +413,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
82792
82793 struct softirq_action
82794 {
82795- void (*action)(struct softirq_action *);
82796-};
82797+ void (*action)(void);
82798+} __no_const;
82799
82800 asmlinkage void do_softirq(void);
82801 asmlinkage void __do_softirq(void);
82802@@ -428,7 +428,7 @@ static inline void do_softirq_own_stack(void)
82803 }
82804 #endif
82805
82806-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
82807+extern void open_softirq(int nr, void (*action)(void));
82808 extern void softirq_init(void);
82809 extern void __raise_softirq_irqoff(unsigned int nr);
82810
82811diff --git a/include/linux/iommu.h b/include/linux/iommu.h
82812index 38daa45..4de4317 100644
82813--- a/include/linux/iommu.h
82814+++ b/include/linux/iommu.h
82815@@ -147,7 +147,7 @@ struct iommu_ops {
82816
82817 unsigned long pgsize_bitmap;
82818 void *priv;
82819-};
82820+} __do_const;
82821
82822 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
82823 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
82824diff --git a/include/linux/ioport.h b/include/linux/ioport.h
82825index 2c525022..345b106 100644
82826--- a/include/linux/ioport.h
82827+++ b/include/linux/ioport.h
82828@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
82829 int adjust_resource(struct resource *res, resource_size_t start,
82830 resource_size_t size);
82831 resource_size_t resource_alignment(struct resource *res);
82832-static inline resource_size_t resource_size(const struct resource *res)
82833+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
82834 {
82835 return res->end - res->start + 1;
82836 }
82837diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
82838index 1eee6bc..9cf4912 100644
82839--- a/include/linux/ipc_namespace.h
82840+++ b/include/linux/ipc_namespace.h
82841@@ -60,7 +60,7 @@ struct ipc_namespace {
82842 struct user_namespace *user_ns;
82843
82844 struct ns_common ns;
82845-};
82846+} __randomize_layout;
82847
82848 extern struct ipc_namespace init_ipc_ns;
82849 extern atomic_t nr_ipc_ns;
82850diff --git a/include/linux/irq.h b/include/linux/irq.h
82851index d09ec7a..f373eb5 100644
82852--- a/include/linux/irq.h
82853+++ b/include/linux/irq.h
82854@@ -364,7 +364,8 @@ struct irq_chip {
82855 void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
82856
82857 unsigned long flags;
82858-};
82859+} __do_const;
82860+typedef struct irq_chip __no_const irq_chip_no_const;
82861
82862 /*
82863 * irq_chip specific flags
82864diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
82865index 71d706d..817cdec 100644
82866--- a/include/linux/irqchip/arm-gic.h
82867+++ b/include/linux/irqchip/arm-gic.h
82868@@ -95,7 +95,7 @@
82869
82870 struct device_node;
82871
82872-extern struct irq_chip gic_arch_extn;
82873+extern irq_chip_no_const gic_arch_extn;
82874
82875 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
82876 u32 offset, struct device_node *);
82877diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
82878index faf433a..7dcb186 100644
82879--- a/include/linux/irqdesc.h
82880+++ b/include/linux/irqdesc.h
82881@@ -61,7 +61,7 @@ struct irq_desc {
82882 unsigned int irq_count; /* For detecting broken IRQs */
82883 unsigned long last_unhandled; /* Aging timer for unhandled count */
82884 unsigned int irqs_unhandled;
82885- atomic_t threads_handled;
82886+ atomic_unchecked_t threads_handled;
82887 int threads_handled_last;
82888 raw_spinlock_t lock;
82889 struct cpumask *percpu_enabled;
82890diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
82891index c367cbd..c9b79e6 100644
82892--- a/include/linux/jiffies.h
82893+++ b/include/linux/jiffies.h
82894@@ -280,20 +280,20 @@ extern unsigned long preset_lpj;
82895 /*
82896 * Convert various time units to each other:
82897 */
82898-extern unsigned int jiffies_to_msecs(const unsigned long j);
82899-extern unsigned int jiffies_to_usecs(const unsigned long j);
82900+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
82901+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
82902
82903-static inline u64 jiffies_to_nsecs(const unsigned long j)
82904+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
82905 {
82906 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
82907 }
82908
82909-extern unsigned long msecs_to_jiffies(const unsigned int m);
82910-extern unsigned long usecs_to_jiffies(const unsigned int u);
82911+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
82912+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
82913 extern unsigned long timespec_to_jiffies(const struct timespec *value);
82914 extern void jiffies_to_timespec(const unsigned long jiffies,
82915- struct timespec *value);
82916-extern unsigned long timeval_to_jiffies(const struct timeval *value);
82917+ struct timespec *value) __intentional_overflow(-1);
82918+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
82919 extern void jiffies_to_timeval(const unsigned long jiffies,
82920 struct timeval *value);
82921
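Several headers in this section (ioport.h, kernel.h, math64.h, and jiffies.h here) annotate conversion and division helpers with __intentional_overflow(-1): their arithmetic may wrap by design, and the marker keeps the size_overflow plugin from instrumenting them. A sketch with the attribute reduced to a no-op and a hypothetical conversion helper:

#include <stdio.h>

/* plugin attribute; expands to nothing outside an instrumented build */
#define __intentional_overflow(x)

#define HZ_SKETCH 1000

/* hypothetical helper: the multiplication can wrap on a 32-bit
 * unsigned long, and that is accepted behaviour here */
static unsigned long __intentional_overflow(-1)
msecs_to_jiffies_sketch(unsigned int m)
{
	return (unsigned long)m * HZ_SKETCH / 1000;
}

int main(void)
{
	printf("%lu\n", msecs_to_jiffies_sketch(2500));
	return 0;
}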
82922diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
82923index 6883e19..e854fcb 100644
82924--- a/include/linux/kallsyms.h
82925+++ b/include/linux/kallsyms.h
82926@@ -15,7 +15,8 @@
82927
82928 struct module;
82929
82930-#ifdef CONFIG_KALLSYMS
82931+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
82932+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
82933 /* Lookup the address for a symbol. Returns 0 if not found. */
82934 unsigned long kallsyms_lookup_name(const char *name);
82935
82936@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
82937 /* Stupid that this does nothing, but I didn't create this mess. */
82938 #define __print_symbol(fmt, addr)
82939 #endif /*CONFIG_KALLSYMS*/
82940+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
82941+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
82942+extern unsigned long kallsyms_lookup_name(const char *name);
82943+extern void __print_symbol(const char *fmt, unsigned long address);
82944+extern int sprint_backtrace(char *buffer, unsigned long address);
82945+extern int sprint_symbol(char *buffer, unsigned long address);
82946+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
82947+const char *kallsyms_lookup(unsigned long addr,
82948+ unsigned long *symbolsize,
82949+ unsigned long *offset,
82950+ char **modname, char *namebuf);
82951+extern int kallsyms_lookup_size_offset(unsigned long addr,
82952+ unsigned long *symbolsize,
82953+ unsigned long *offset);
82954+#endif
82955
82956 /* This macro allows us to keep printk typechecking */
82957 static __printf(1, 2)
82958diff --git a/include/linux/kernel.h b/include/linux/kernel.h
82959index 64ce58b..6bcdbfa 100644
82960--- a/include/linux/kernel.h
82961+++ b/include/linux/kernel.h
82962@@ -378,7 +378,7 @@ static inline int __must_check kstrtos32_from_user(const char __user *s, size_t
82963 /* Obsolete, do not use. Use kstrto<foo> instead */
82964
82965 extern unsigned long simple_strtoul(const char *,char **,unsigned int);
82966-extern long simple_strtol(const char *,char **,unsigned int);
82967+extern long simple_strtol(const char *,char **,unsigned int) __intentional_overflow(-1);
82968 extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
82969 extern long long simple_strtoll(const char *,char **,unsigned int);
82970
82971diff --git a/include/linux/key-type.h b/include/linux/key-type.h
82972index ff9f1d3..6712be5 100644
82973--- a/include/linux/key-type.h
82974+++ b/include/linux/key-type.h
82975@@ -152,7 +152,7 @@ struct key_type {
82976 /* internal fields */
82977 struct list_head link; /* link in types list */
82978 struct lock_class_key lock_class; /* key->sem lock class */
82979-};
82980+} __do_const;
82981
82982 extern struct key_type key_type_keyring;
82983
82984diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
82985index e465bb1..19f605fd 100644
82986--- a/include/linux/kgdb.h
82987+++ b/include/linux/kgdb.h
82988@@ -52,7 +52,7 @@ extern int kgdb_connected;
82989 extern int kgdb_io_module_registered;
82990
82991 extern atomic_t kgdb_setting_breakpoint;
82992-extern atomic_t kgdb_cpu_doing_single_step;
82993+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
82994
82995 extern struct task_struct *kgdb_usethread;
82996 extern struct task_struct *kgdb_contthread;
82997@@ -254,7 +254,7 @@ struct kgdb_arch {
82998 void (*correct_hw_break)(void);
82999
83000 void (*enable_nmi)(bool on);
83001-};
83002+} __do_const;
83003
83004 /**
83005 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
83006@@ -279,7 +279,7 @@ struct kgdb_io {
83007 void (*pre_exception) (void);
83008 void (*post_exception) (void);
83009 int is_console;
83010-};
83011+} __do_const;
83012
83013 extern struct kgdb_arch arch_kgdb_ops;
83014
83015diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
83016index e705467..a92471d 100644
83017--- a/include/linux/kmemleak.h
83018+++ b/include/linux/kmemleak.h
83019@@ -27,7 +27,7 @@
83020
83021 extern void kmemleak_init(void) __ref;
83022 extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
83023- gfp_t gfp) __ref;
83024+ gfp_t gfp) __ref __size_overflow(2);
83025 extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
83026 extern void kmemleak_free(const void *ptr) __ref;
83027 extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
83028@@ -62,7 +62,7 @@ static inline void kmemleak_erase(void **ptr)
83029 static inline void kmemleak_init(void)
83030 {
83031 }
83032-static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
83033+static inline void __size_overflow(2) kmemleak_alloc(const void *ptr, size_t size, int min_count,
83034 gfp_t gfp)
83035 {
83036 }
83037diff --git a/include/linux/kmod.h b/include/linux/kmod.h
83038index 0555cc6..40116ce 100644
83039--- a/include/linux/kmod.h
83040+++ b/include/linux/kmod.h
83041@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
83042 * usually useless though. */
83043 extern __printf(2, 3)
83044 int __request_module(bool wait, const char *name, ...);
83045+extern __printf(3, 4)
83046+int ___request_module(bool wait, char *param_name, const char *name, ...);
83047 #define request_module(mod...) __request_module(true, mod)
83048 #define request_module_nowait(mod...) __request_module(false, mod)
83049 #define try_then_request_module(x, mod...) \
83050@@ -57,6 +59,9 @@ struct subprocess_info {
83051 struct work_struct work;
83052 struct completion *complete;
83053 char *path;
83054+#ifdef CONFIG_GRKERNSEC
83055+ char *origpath;
83056+#endif
83057 char **argv;
83058 char **envp;
83059 int wait;
83060diff --git a/include/linux/kobject.h b/include/linux/kobject.h
83061index 2d61b90..a1d0a13 100644
83062--- a/include/linux/kobject.h
83063+++ b/include/linux/kobject.h
83064@@ -118,7 +118,7 @@ struct kobj_type {
83065 struct attribute **default_attrs;
83066 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
83067 const void *(*namespace)(struct kobject *kobj);
83068-};
83069+} __do_const;
83070
83071 struct kobj_uevent_env {
83072 char *argv[3];
83073@@ -142,6 +142,7 @@ struct kobj_attribute {
83074 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
83075 const char *buf, size_t count);
83076 };
83077+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
83078
83079 extern const struct sysfs_ops kobj_sysfs_ops;
83080
83081@@ -169,7 +170,7 @@ struct kset {
83082 spinlock_t list_lock;
83083 struct kobject kobj;
83084 const struct kset_uevent_ops *uevent_ops;
83085-};
83086+} __randomize_layout;
83087
83088 extern void kset_init(struct kset *kset);
83089 extern int __must_check kset_register(struct kset *kset);
83090diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
83091index df32d25..fb52e27 100644
83092--- a/include/linux/kobject_ns.h
83093+++ b/include/linux/kobject_ns.h
83094@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
83095 const void *(*netlink_ns)(struct sock *sk);
83096 const void *(*initial_ns)(void);
83097 void (*drop_ns)(void *);
83098-};
83099+} __do_const;
83100
83101 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
83102 int kobj_ns_type_registered(enum kobj_ns_type type);
83103diff --git a/include/linux/kref.h b/include/linux/kref.h
83104index 484604d..0f6c5b6 100644
83105--- a/include/linux/kref.h
83106+++ b/include/linux/kref.h
83107@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
83108 static inline int kref_sub(struct kref *kref, unsigned int count,
83109 void (*release)(struct kref *kref))
83110 {
83111- WARN_ON(release == NULL);
83112+ BUG_ON(release == NULL);
83113
83114 if (atomic_sub_and_test((int) count, &kref->refcount)) {
83115 release(kref);
83116diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
83117index 26f1060..bafc04a 100644
83118--- a/include/linux/kvm_host.h
83119+++ b/include/linux/kvm_host.h
83120@@ -470,7 +470,7 @@ static inline void kvm_irqfd_exit(void)
83121 {
83122 }
83123 #endif
83124-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
83125+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
83126 struct module *module);
83127 void kvm_exit(void);
83128
83129@@ -639,7 +639,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
83130 struct kvm_guest_debug *dbg);
83131 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
83132
83133-int kvm_arch_init(void *opaque);
83134+int kvm_arch_init(const void *opaque);
83135 void kvm_arch_exit(void);
83136
83137 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
83138diff --git a/include/linux/libata.h b/include/linux/libata.h
83139index 91f705d..24be831 100644
83140--- a/include/linux/libata.h
83141+++ b/include/linux/libata.h
83142@@ -979,7 +979,7 @@ struct ata_port_operations {
83143 * fields must be pointers.
83144 */
83145 const struct ata_port_operations *inherits;
83146-};
83147+} __do_const;
83148
83149 struct ata_port_info {
83150 unsigned long flags;
83151diff --git a/include/linux/linkage.h b/include/linux/linkage.h
83152index a6a42dd..6c5ebce 100644
83153--- a/include/linux/linkage.h
83154+++ b/include/linux/linkage.h
83155@@ -36,6 +36,7 @@
83156 #endif
83157
83158 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
83159+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
83160 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
83161
83162 /*
83163diff --git a/include/linux/list.h b/include/linux/list.h
83164index feb773c..98f3075 100644
83165--- a/include/linux/list.h
83166+++ b/include/linux/list.h
83167@@ -113,6 +113,19 @@ extern void __list_del_entry(struct list_head *entry);
83168 extern void list_del(struct list_head *entry);
83169 #endif
83170
83171+extern void __pax_list_add(struct list_head *new,
83172+ struct list_head *prev,
83173+ struct list_head *next);
83174+static inline void pax_list_add(struct list_head *new, struct list_head *head)
83175+{
83176+ __pax_list_add(new, head, head->next);
83177+}
83178+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
83179+{
83180+ __pax_list_add(new, head->prev, head);
83181+}
83182+extern void pax_list_del(struct list_head *entry);
83183+
83184 /**
83185 * list_replace - replace old entry by new one
83186 * @old : the element to be replaced
83187@@ -146,6 +159,8 @@ static inline void list_del_init(struct list_head *entry)
83188 INIT_LIST_HEAD(entry);
83189 }
83190
83191+extern void pax_list_del_init(struct list_head *entry);
83192+
83193 /**
83194 * list_move - delete from one list and add as another's head
83195 * @list: the entry to move
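The pax_list_* entry points exist because KERNEXEC can place certain list heads in read-only memory; the real implementations briefly lift write protection (pax_open_kernel/pax_close_kernel) around the pointer updates. What they update is ordinary doubly-linked insertion, sketched standalone below with the protection toggling omitted:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* the pointer surgery __pax_list_add performs once the write window
 * is open */
static void __pax_list_add_sketch(struct list_head *new,
				  struct list_head *prev,
				  struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

int main(void)
{
	struct list_head head = LIST_HEAD_INIT(head);
	struct list_head a, b;

	__pax_list_add_sketch(&a, &head, head.next);	/* pax_list_add */
	__pax_list_add_sketch(&b, head.prev, &head);	/* pax_list_add_tail */
	printf("%d %d\n", head.next == &a, head.prev == &b);
	return 0;
}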
83196diff --git a/include/linux/lockref.h b/include/linux/lockref.h
83197index 4bfde0e..d6e2e09 100644
83198--- a/include/linux/lockref.h
83199+++ b/include/linux/lockref.h
83200@@ -47,4 +47,36 @@ static inline int __lockref_is_dead(const struct lockref *l)
83201 return ((int)l->count < 0);
83202 }
83203
83204+static inline unsigned int __lockref_read(struct lockref *lockref)
83205+{
83206+ return lockref->count;
83207+}
83208+
83209+static inline void __lockref_set(struct lockref *lockref, unsigned int count)
83210+{
83211+ lockref->count = count;
83212+}
83213+
83214+static inline void __lockref_inc(struct lockref *lockref)
83215+{
83216+
83217+#ifdef CONFIG_PAX_REFCOUNT
83218+ atomic_inc((atomic_t *)&lockref->count);
83219+#else
83220+ lockref->count++;
83221+#endif
83222+
83223+}
83224+
83225+static inline void __lockref_dec(struct lockref *lockref)
83226+{
83227+
83228+#ifdef CONFIG_PAX_REFCOUNT
83229+ atomic_dec((atomic_t *)&lockref->count);
83230+#else
83231+ lockref->count--;
83232+#endif
83233+
83234+}
83235+
83236 #endif /* __LINUX_LOCKREF_H */
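Under PAX_REFCOUNT the new __lockref_inc/__lockref_dec helpers funnel the count through atomic operations by casting &lockref->count to atomic_t *, which quietly assumes the unsigned int count and atomic_t share size and layout. A standalone sketch that makes the assumption explicit:

#include <stdio.h>

typedef struct { int counter; } atomic_sketch_t;
struct lockref_sketch { unsigned int count; };

/* the cast in __lockref_inc is only sound if these layouts agree */
_Static_assert(sizeof(unsigned int) == sizeof(atomic_sketch_t),
	       "lockref count must alias atomic_t");

static void __lockref_inc_sketch(struct lockref_sketch *l)
{
	/* checked-atomic path; plain l->count++ without PAX_REFCOUNT */
	__atomic_fetch_add((int *)&l->count, 1, __ATOMIC_RELAXED);
}

int main(void)
{
	struct lockref_sketch l = { .count = 1 };

	__lockref_inc_sketch(&l);
	printf("%u\n", l.count);
	return 0;
}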
83237diff --git a/include/linux/math64.h b/include/linux/math64.h
83238index c45c089..298841c 100644
83239--- a/include/linux/math64.h
83240+++ b/include/linux/math64.h
83241@@ -15,7 +15,7 @@
83242 * This is commonly provided by 32bit archs to provide an optimized 64bit
83243 * divide.
83244 */
83245-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83246+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83247 {
83248 *remainder = dividend % divisor;
83249 return dividend / divisor;
83250@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
83251 /**
83252 * div64_u64 - unsigned 64bit divide with 64bit divisor
83253 */
83254-static inline u64 div64_u64(u64 dividend, u64 divisor)
83255+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
83256 {
83257 return dividend / divisor;
83258 }
83259@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
83260 #define div64_ul(x, y) div_u64((x), (y))
83261
83262 #ifndef div_u64_rem
83263-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83264+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83265 {
83266 *remainder = do_div(dividend, divisor);
83267 return dividend;
83268@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
83269 #endif
83270
83271 #ifndef div64_u64
83272-extern u64 div64_u64(u64 dividend, u64 divisor);
83273+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
83274 #endif
83275
83276 #ifndef div64_s64
83277@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
83278 * divide.
83279 */
83280 #ifndef div_u64
83281-static inline u64 div_u64(u64 dividend, u32 divisor)
83282+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
83283 {
83284 u32 remainder;
83285 return div_u64_rem(dividend, divisor, &remainder);
83286diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
83287index 3d385c8..deacb6a 100644
83288--- a/include/linux/mempolicy.h
83289+++ b/include/linux/mempolicy.h
83290@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
83291 }
83292
83293 #define vma_policy(vma) ((vma)->vm_policy)
83294+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83295+{
83296+ vma->vm_policy = pol;
83297+}
83298
83299 static inline void mpol_get(struct mempolicy *pol)
83300 {
83301@@ -229,6 +233,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
83302 }
83303
83304 #define vma_policy(vma) NULL
83305+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83306+{
83307+}
83308
83309 static inline int
83310 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
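set_vma_policy is added in both halves of the CONFIG_NUMA split, real on one side and an empty stub on the other, so callers introduced elsewhere in the patch compile unchanged under either configuration. A standalone sketch of the stub idiom, with a hypothetical config gate:

#include <stdio.h>

#define CONFIG_NUMA_SKETCH 1	/* hypothetical gate; drop it to exercise the stub */

struct mempolicy_sketch { int mode; };
struct vma_sketch { struct mempolicy_sketch *policy; };

#ifdef CONFIG_NUMA_SKETCH
static inline void set_vma_policy_sketch(struct vma_sketch *vma,
					 struct mempolicy_sketch *pol)
{
	vma->policy = pol;
}
#else
/* same signature, no body: call sites never need their own #ifdef */
static inline void set_vma_policy_sketch(struct vma_sketch *vma,
					 struct mempolicy_sketch *pol)
{
}
#endif

int main(void)
{
	struct vma_sketch vma = { 0 };
	static struct mempolicy_sketch pol = { .mode = 1 };

	set_vma_policy_sketch(&vma, &pol);
	printf("%p\n", (void *)vma.policy);
	return 0;
}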
83311diff --git a/include/linux/mm.h b/include/linux/mm.h
83312index dd5ea30..cf81cd1 100644
83313--- a/include/linux/mm.h
83314+++ b/include/linux/mm.h
83315@@ -135,6 +135,11 @@ extern unsigned int kobjsize(const void *objp);
83316
83317 #define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
83318 #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
83319+
83320+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
83321+#define VM_PAGEEXEC 0x00080000 /* vma->vm_page_prot needs special handling */
83322+#endif
83323+
83324 #define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
83325 #define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
83326 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
83327@@ -256,8 +261,8 @@ struct vm_operations_struct {
83328 /* called by access_process_vm when get_user_pages() fails, typically
83329 * for use by special VMAs that can switch between memory and hardware
83330 */
83331- int (*access)(struct vm_area_struct *vma, unsigned long addr,
83332- void *buf, int len, int write);
83333+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
83334+ void *buf, size_t len, int write);
83335
83336 /* Called by the /proc/PID/maps code to ask the vma whether it
83337 * has a special name. Returning non-NULL will also cause this
83338@@ -291,6 +296,7 @@ struct vm_operations_struct {
83339 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
83340 unsigned long size, pgoff_t pgoff);
83341 };
83342+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
83343
83344 struct mmu_gather;
83345 struct inode;
83346@@ -1183,8 +1189,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
83347 unsigned long *pfn);
83348 int follow_phys(struct vm_area_struct *vma, unsigned long address,
83349 unsigned int flags, unsigned long *prot, resource_size_t *phys);
83350-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83351- void *buf, int len, int write);
83352+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83353+ void *buf, size_t len, int write);
83354
83355 static inline void unmap_shared_mapping_range(struct address_space *mapping,
83356 loff_t const holebegin, loff_t const holelen)
83357@@ -1224,9 +1230,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
83358 }
83359 #endif
83360
83361-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
83362-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
83363- void *buf, int len, int write);
83364+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
83365+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
83366+ void *buf, size_t len, int write);
83367
83368 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
83369 unsigned long start, unsigned long nr_pages,
83370@@ -1258,34 +1264,6 @@ int set_page_dirty_lock(struct page *page);
83371 int clear_page_dirty_for_io(struct page *page);
83372 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
83373
83374-/* Is the vma a continuation of the stack vma above it? */
83375-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
83376-{
83377- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
83378-}
83379-
83380-static inline int stack_guard_page_start(struct vm_area_struct *vma,
83381- unsigned long addr)
83382-{
83383- return (vma->vm_flags & VM_GROWSDOWN) &&
83384- (vma->vm_start == addr) &&
83385- !vma_growsdown(vma->vm_prev, addr);
83386-}
83387-
83388-/* Is the vma a continuation of the stack vma below it? */
83389-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
83390-{
83391- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
83392-}
83393-
83394-static inline int stack_guard_page_end(struct vm_area_struct *vma,
83395- unsigned long addr)
83396-{
83397- return (vma->vm_flags & VM_GROWSUP) &&
83398- (vma->vm_end == addr) &&
83399- !vma_growsup(vma->vm_next, addr);
83400-}
83401-
83402 extern struct task_struct *task_of_stack(struct task_struct *task,
83403 struct vm_area_struct *vma, bool in_group);
83404
83405@@ -1403,8 +1381,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
83406 {
83407 return 0;
83408 }
83409+
83410+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
83411+ unsigned long address)
83412+{
83413+ return 0;
83414+}
83415 #else
83416 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83417+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83418 #endif
83419
83420 #ifdef __PAGETABLE_PMD_FOLDED
83421@@ -1413,8 +1398,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
83422 {
83423 return 0;
83424 }
83425+
83426+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
83427+ unsigned long address)
83428+{
83429+ return 0;
83430+}
83431 #else
83432 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
83433+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
83434 #endif
83435
83436 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
83437@@ -1432,11 +1424,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
83438 NULL: pud_offset(pgd, address);
83439 }
83440
83441+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
83442+{
83443+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
83444+ NULL: pud_offset(pgd, address);
83445+}
83446+
83447 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
83448 {
83449 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
83450 NULL: pmd_offset(pud, address);
83451 }
83452+
83453+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
83454+{
83455+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
83456+ NULL: pmd_offset(pud, address);
83457+}
83458 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
83459
83460 #if USE_SPLIT_PTE_PTLOCKS
83461@@ -1819,12 +1823,23 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
83462 bool *need_rmap_locks);
83463 extern void exit_mmap(struct mm_struct *);
83464
83465+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
83466+extern void gr_learn_resource(const struct task_struct *task, const int res,
83467+ const unsigned long wanted, const int gt);
83468+#else
83469+static inline void gr_learn_resource(const struct task_struct *task, const int res,
83470+ const unsigned long wanted, const int gt)
83471+{
83472+}
83473+#endif
83474+
83475 static inline int check_data_rlimit(unsigned long rlim,
83476 unsigned long new,
83477 unsigned long start,
83478 unsigned long end_data,
83479 unsigned long start_data)
83480 {
83481+ gr_learn_resource(current, RLIMIT_DATA, (new - start) + (end_data - start_data), 1);
83482 if (rlim < RLIM_INFINITY) {
83483 if (((new - start) + (end_data - start_data)) > rlim)
83484 return -ENOSPC;
83485@@ -1849,7 +1864,7 @@ extern int install_special_mapping(struct mm_struct *mm,
83486 unsigned long addr, unsigned long len,
83487 unsigned long flags, struct page **pages);
83488
83489-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
83490+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
83491
83492 extern unsigned long mmap_region(struct file *file, unsigned long addr,
83493 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
83494@@ -1857,6 +1872,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
83495 unsigned long len, unsigned long prot, unsigned long flags,
83496 unsigned long pgoff, unsigned long *populate);
83497 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
83498+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
83499
83500 #ifdef CONFIG_MMU
83501 extern int __mm_populate(unsigned long addr, unsigned long len,
83502@@ -1885,10 +1901,11 @@ struct vm_unmapped_area_info {
83503 unsigned long high_limit;
83504 unsigned long align_mask;
83505 unsigned long align_offset;
83506+ unsigned long threadstack_offset;
83507 };
83508
83509-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
83510-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83511+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
83512+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
83513
83514 /*
83515 * Search for an unmapped address range.
83516@@ -1900,7 +1917,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83517 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
83518 */
83519 static inline unsigned long
83520-vm_unmapped_area(struct vm_unmapped_area_info *info)
83521+vm_unmapped_area(const struct vm_unmapped_area_info *info)
83522 {
83523 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
83524 return unmapped_area(info);
83525@@ -1962,6 +1979,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
83526 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
83527 struct vm_area_struct **pprev);
83528
83529+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
83530+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
83531+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
83532+
83533 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
83534 NULL if none. Assume start_addr < end_addr. */
83535 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
83536@@ -1991,10 +2012,10 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
83537 }
83538
83539 #ifdef CONFIG_MMU
83540-pgprot_t vm_get_page_prot(unsigned long vm_flags);
83541+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
83542 void vma_set_page_prot(struct vm_area_struct *vma);
83543 #else
83544-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
83545+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
83546 {
83547 return __pgprot(0);
83548 }
83549@@ -2056,6 +2077,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
83550 static inline void vm_stat_account(struct mm_struct *mm,
83551 unsigned long flags, struct file *file, long pages)
83552 {
83553+
83554+#ifdef CONFIG_PAX_RANDMMAP
83555+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
83556+#endif
83557+
83558 mm->total_vm += pages;
83559 }
83560 #endif /* CONFIG_PROC_FS */
83561@@ -2159,7 +2185,7 @@ extern int unpoison_memory(unsigned long pfn);
83562 extern int sysctl_memory_failure_early_kill;
83563 extern int sysctl_memory_failure_recovery;
83564 extern void shake_page(struct page *p, int access);
83565-extern atomic_long_t num_poisoned_pages;
83566+extern atomic_long_unchecked_t num_poisoned_pages;
83567 extern int soft_offline_page(struct page *page, int flags);
83568
83569 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
83570@@ -2210,5 +2236,11 @@ void __init setup_nr_node_ids(void);
83571 static inline void setup_nr_node_ids(void) {}
83572 #endif
83573
83574+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
83575+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
83576+#else
83577+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
83578+#endif
83579+
83580 #endif /* __KERNEL__ */
83581 #endif /* _LINUX_MM_H */
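gr_learn_resource runs before the rlimit comparison in check_data_rlimit, so RBAC learning mode records the requested high-water mark even for requests that succeed; the value it is handed is the same (new - start) + (end_data - start_data) the check itself evaluates. A standalone sketch of the hook-before-enforcement ordering, with a stand-in hook:

#include <stdio.h>

#define RLIM_INFINITY_SKETCH (~0UL)

/* stand-in learning hook; the real one feeds the RBAC learning log */
static void learn_resource_sketch(unsigned long wanted)
{
	printf("learn: RLIMIT_DATA wants %lu\n", wanted);
}

static int check_data_rlimit_sketch(unsigned long rlim, unsigned long new,
				    unsigned long start, unsigned long end_data,
				    unsigned long start_data)
{
	/* hook first, so the request is recorded even when it passes */
	learn_resource_sketch((new - start) + (end_data - start_data));
	if (rlim < RLIM_INFINITY_SKETCH &&
	    (new - start) + (end_data - start_data) > rlim)
		return -1;	/* -ENOSPC in the kernel */
	return 0;
}

int main(void)
{
	/* brk growing the data segment 4 KiB with 8 KiB of static data */
	return check_data_rlimit_sketch(1UL << 20, 0x604000, 0x603000,
					0x602000, 0x600000);
}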
83582diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
83583index 6d34aa2..d73d848 100644
83584--- a/include/linux/mm_types.h
83585+++ b/include/linux/mm_types.h
83586@@ -309,7 +309,9 @@ struct vm_area_struct {
83587 #ifdef CONFIG_NUMA
83588 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
83589 #endif
83590-};
83591+
83592+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
83593+} __randomize_layout;
83594
83595 struct core_thread {
83596 struct task_struct *task;
83597@@ -459,7 +461,25 @@ struct mm_struct {
83598 /* address of the bounds directory */
83599 void __user *bd_addr;
83600 #endif
83601-};
83602+
83603+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
83604+ unsigned long pax_flags;
83605+#endif
83606+
83607+#ifdef CONFIG_PAX_DLRESOLVE
83608+ unsigned long call_dl_resolve;
83609+#endif
83610+
83611+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
83612+ unsigned long call_syscall;
83613+#endif
83614+
83615+#ifdef CONFIG_PAX_ASLR
83616+ unsigned long delta_mmap; /* randomized offset */
83617+ unsigned long delta_stack; /* randomized offset */
83618+#endif
83619+
83620+} __randomize_layout;
83621
83622 static inline void mm_init_cpumask(struct mm_struct *mm)
83623 {
83624diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
83625index c5d5278..f0b68c8 100644
83626--- a/include/linux/mmiotrace.h
83627+++ b/include/linux/mmiotrace.h
83628@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
83629 /* Called from ioremap.c */
83630 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
83631 void __iomem *addr);
83632-extern void mmiotrace_iounmap(volatile void __iomem *addr);
83633+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
83634
83635 /* For anyone to insert markers. Remember trailing newline. */
83636 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
83637@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
83638 {
83639 }
83640
83641-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
83642+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
83643 {
83644 }
83645
83646diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
83647index 2f0856d..5a4bc1e 100644
83648--- a/include/linux/mmzone.h
83649+++ b/include/linux/mmzone.h
83650@@ -527,7 +527,7 @@ struct zone {
83651
83652 ZONE_PADDING(_pad3_)
83653 /* Zone statistics */
83654- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
83655+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
83656 } ____cacheline_internodealigned_in_smp;
83657
83658 enum zone_flags {
83659diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
83660index 745def8..08a820b 100644
83661--- a/include/linux/mod_devicetable.h
83662+++ b/include/linux/mod_devicetable.h
83663@@ -139,7 +139,7 @@ struct usb_device_id {
83664 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
83665 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
83666
83667-#define HID_ANY_ID (~0)
83668+#define HID_ANY_ID (~0U)
83669 #define HID_BUS_ANY 0xffff
83670 #define HID_GROUP_ANY 0x0000
83671
83672@@ -475,7 +475,7 @@ struct dmi_system_id {
83673 const char *ident;
83674 struct dmi_strmatch matches[4];
83675 void *driver_data;
83676-};
83677+} __do_const;
83678 /*
83679 * struct dmi_device_id appears during expansion of
83680 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
83681diff --git a/include/linux/module.h b/include/linux/module.h
83682index b653d7c..22a238f 100644
83683--- a/include/linux/module.h
83684+++ b/include/linux/module.h
83685@@ -17,9 +17,11 @@
83686 #include <linux/moduleparam.h>
83687 #include <linux/jump_label.h>
83688 #include <linux/export.h>
83689+#include <linux/fs.h>
83690
83691 #include <linux/percpu.h>
83692 #include <asm/module.h>
83693+#include <asm/pgtable.h>
83694
83695 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
83696 #define MODULE_SIG_STRING "~Module signature appended~\n"
83697@@ -42,7 +44,7 @@ struct module_kobject {
83698 struct kobject *drivers_dir;
83699 struct module_param_attrs *mp;
83700 struct completion *kobj_completion;
83701-};
83702+} __randomize_layout;
83703
83704 struct module_attribute {
83705 struct attribute attr;
83706@@ -54,12 +56,13 @@ struct module_attribute {
83707 int (*test)(struct module *);
83708 void (*free)(struct module *);
83709 };
83710+typedef struct module_attribute __no_const module_attribute_no_const;
83711
83712 struct module_version_attribute {
83713 struct module_attribute mattr;
83714 const char *module_name;
83715 const char *version;
83716-} __attribute__ ((__aligned__(sizeof(void *))));
83717+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
83718
83719 extern ssize_t __modver_version_show(struct module_attribute *,
83720 struct module_kobject *, char *);
83721@@ -221,7 +224,7 @@ struct module {
83722
83723 /* Sysfs stuff. */
83724 struct module_kobject mkobj;
83725- struct module_attribute *modinfo_attrs;
83726+ module_attribute_no_const *modinfo_attrs;
83727 const char *version;
83728 const char *srcversion;
83729 struct kobject *holders_dir;
83730@@ -270,19 +273,16 @@ struct module {
83731 int (*init)(void);
83732
83733 /* If this is non-NULL, vfree after init() returns */
83734- void *module_init;
83735+ void *module_init_rx, *module_init_rw;
83736
83737 /* Here is the actual code + data, vfree'd on unload. */
83738- void *module_core;
83739+ void *module_core_rx, *module_core_rw;
83740
83741 /* Here are the sizes of the init and core sections */
83742- unsigned int init_size, core_size;
83743+ unsigned int init_size_rw, core_size_rw;
83744
83745 /* The size of the executable code in each section. */
83746- unsigned int init_text_size, core_text_size;
83747-
83748- /* Size of RO sections of the module (text+rodata) */
83749- unsigned int init_ro_size, core_ro_size;
83750+ unsigned int init_size_rx, core_size_rx;
83751
83752 /* Arch-specific module values */
83753 struct mod_arch_specific arch;
83754@@ -338,6 +338,10 @@ struct module {
83755 #ifdef CONFIG_EVENT_TRACING
83756 struct ftrace_event_call **trace_events;
83757 unsigned int num_trace_events;
83758+ struct file_operations trace_id;
83759+ struct file_operations trace_enable;
83760+ struct file_operations trace_format;
83761+ struct file_operations trace_filter;
83762 #endif
83763 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
83764 unsigned int num_ftrace_callsites;
83765@@ -361,7 +365,7 @@ struct module {
83766 ctor_fn_t *ctors;
83767 unsigned int num_ctors;
83768 #endif
83769-};
83770+} __randomize_layout;
83771 #ifndef MODULE_ARCH_INIT
83772 #define MODULE_ARCH_INIT {}
83773 #endif
83774@@ -382,18 +386,48 @@ bool is_module_address(unsigned long addr);
83775 bool is_module_percpu_address(unsigned long addr);
83776 bool is_module_text_address(unsigned long addr);
83777
83778+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
83779+{
83780+
83781+#ifdef CONFIG_PAX_KERNEXEC
83782+ if (ktla_ktva(addr) >= (unsigned long)start &&
83783+ ktla_ktva(addr) < (unsigned long)start + size)
83784+ return 1;
83785+#endif
83786+
83787+ return ((void *)addr >= start && (void *)addr < start + size);
83788+}
83789+
83790+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
83791+{
83792+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
83793+}
83794+
83795+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
83796+{
83797+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
83798+}
83799+
83800+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
83801+{
83802+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
83803+}
83804+
83805+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
83806+{
83807+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
83808+}
83809+
83810 static inline bool within_module_core(unsigned long addr,
83811 const struct module *mod)
83812 {
83813- return (unsigned long)mod->module_core <= addr &&
83814- addr < (unsigned long)mod->module_core + mod->core_size;
83815+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
83816 }
83817
83818 static inline bool within_module_init(unsigned long addr,
83819 const struct module *mod)
83820 {
83821- return (unsigned long)mod->module_init <= addr &&
83822- addr < (unsigned long)mod->module_init + mod->init_size;
83823+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
83824 }
83825
83826 static inline bool within_module(unsigned long addr, const struct module *mod)
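
Under CONFIG_PAX_KERNEXEC the module.h hunk splits each module into separate read-execute and read-write allocations, so the old single-range membership tests become the union of two interval checks (plus a ktla_ktva() translation for the KERNEXEC alternate mapping, omitted below). A minimal userspace model of the range logic; struct module_layout and its fields are hypothetical stand-ins for the real module members:

    #include <stdio.h>

    struct module_layout {                     /* hypothetical stand-in   */
        void *core_rx; unsigned long size_rx;  /* code + rodata           */
        void *core_rw; unsigned long size_rw;  /* writable data           */
    };

    static int within_range(unsigned long addr, void *start, unsigned long size)
    {
        return addr >= (unsigned long)start &&
               addr <  (unsigned long)start + size;
    }

    static int within_module_core(unsigned long addr,
                                  const struct module_layout *m)
    {
        return within_range(addr, m->core_rx, m->size_rx) ||
               within_range(addr, m->core_rw, m->size_rw);
    }

    int main(void)
    {
        static char rx[64], rw[64];
        struct module_layout m = { rx, sizeof rx, rw, sizeof rw };

        printf("%d\n", within_module_core((unsigned long)(rx + 10), &m)); /* 1 */
        printf("%d\n", within_module_core((unsigned long)(rw + 10), &m)); /* 1 */
        printf("%d\n", within_module_core((unsigned long)&m, &m));        /* 0 */
        return 0;
    }
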
83827diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
83828index f755626..641f822 100644
83829--- a/include/linux/moduleloader.h
83830+++ b/include/linux/moduleloader.h
83831@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
83832 sections. Returns NULL on failure. */
83833 void *module_alloc(unsigned long size);
83834
83835+#ifdef CONFIG_PAX_KERNEXEC
83836+void *module_alloc_exec(unsigned long size);
83837+#else
83838+#define module_alloc_exec(x) module_alloc(x)
83839+#endif
83840+
83841 /* Free memory returned from module_alloc. */
83842 void module_memfree(void *module_region);
83843
83844+#ifdef CONFIG_PAX_KERNEXEC
83845+void module_memfree_exec(void *module_region);
83846+#else
83847+#define module_memfree_exec(x) module_memfree((x))
83848+#endif
83849+
83850 /*
83851 * Apply the given relocation to the (simplified) ELF. Return -error
83852 * or 0.
83853@@ -45,8 +57,10 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
83854 unsigned int relsec,
83855 struct module *me)
83856 {
83857+#ifdef CONFIG_MODULES
83858 printk(KERN_ERR "module %s: REL relocation unsupported\n",
83859 module_name(me));
83860+#endif
83861 return -ENOEXEC;
83862 }
83863 #endif
83864@@ -68,8 +82,10 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
83865 unsigned int relsec,
83866 struct module *me)
83867 {
83868+#ifdef CONFIG_MODULES
83869 printk(KERN_ERR "module %s: REL relocation unsupported\n",
83870 module_name(me));
83871+#endif
83872 return -ENOEXEC;
83873 }
83874 #endif
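
The moduleloader.h hunk uses a config-gated alias pattern: call sites always use module_alloc_exec()/module_memfree_exec(), and only KERNEXEC builds provide distinct implementations backed by a separate executable pool; otherwise the names collapse to the ordinary allocator. A compilable sketch of the pattern, with malloc/free as illustrative stand-ins for the real allocators:

    #include <stdlib.h>

    static void *module_alloc(unsigned long size) { return malloc(size); }
    static void module_memfree(void *region)      { free(region); }

    #ifdef CONFIG_PAX_KERNEXEC
    void *module_alloc_exec(unsigned long size);   /* separate RX pool */
    void module_memfree_exec(void *region);
    #else
    #define module_alloc_exec(x)   module_alloc(x)
    #define module_memfree_exec(x) module_memfree((x))
    #endif

    int main(void)
    {
        void *code = module_alloc_exec(128);  /* identical call either way */
        module_memfree_exec(code);
        return 0;
    }
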
83875diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
83876index 1c9effa..1160bdd 100644
83877--- a/include/linux/moduleparam.h
83878+++ b/include/linux/moduleparam.h
83879@@ -323,7 +323,7 @@ static inline void __kernel_param_unlock(void)
83880 * @len is usually just sizeof(string).
83881 */
83882 #define module_param_string(name, string, len, perm) \
83883- static const struct kparam_string __param_string_##name \
83884+ static const struct kparam_string __param_string_##name __used \
83885 = { len, string }; \
83886 __module_param_call(MODULE_PARAM_PREFIX, name, \
83887 &param_ops_string, \
83888@@ -467,7 +467,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
83889 */
83890 #define module_param_array_named(name, array, type, nump, perm) \
83891 param_check_##type(name, &(array)[0]); \
83892- static const struct kparam_array __param_arr_##name \
83893+ static const struct kparam_array __param_arr_##name __used \
83894 = { .max = ARRAY_SIZE(array), .num = nump, \
83895 .ops = &param_ops_##type, \
83896 .elemsize = sizeof(array[0]), .elem = array }; \
83897diff --git a/include/linux/mount.h b/include/linux/mount.h
83898index c2c561d..a5f2a8c 100644
83899--- a/include/linux/mount.h
83900+++ b/include/linux/mount.h
83901@@ -66,7 +66,7 @@ struct vfsmount {
83902 struct dentry *mnt_root; /* root of the mounted tree */
83903 struct super_block *mnt_sb; /* pointer to superblock */
83904 int mnt_flags;
83905-};
83906+} __randomize_layout;
83907
83908 struct file; /* forward dec */
83909 struct path;
83910diff --git a/include/linux/namei.h b/include/linux/namei.h
83911index c899077..b9a2010 100644
83912--- a/include/linux/namei.h
83913+++ b/include/linux/namei.h
83914@@ -71,8 +71,8 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
83915 extern void unlock_rename(struct dentry *, struct dentry *);
83916
83917 extern void nd_jump_link(struct nameidata *nd, struct path *path);
83918-extern void nd_set_link(struct nameidata *nd, char *path);
83919-extern char *nd_get_link(struct nameidata *nd);
83920+extern void nd_set_link(struct nameidata *nd, const char *path);
83921+extern const char *nd_get_link(const struct nameidata *nd);
83922
83923 static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
83924 {
83925diff --git a/include/linux/net.h b/include/linux/net.h
83926index 17d8339..81656c0 100644
83927--- a/include/linux/net.h
83928+++ b/include/linux/net.h
83929@@ -192,7 +192,7 @@ struct net_proto_family {
83930 int (*create)(struct net *net, struct socket *sock,
83931 int protocol, int kern);
83932 struct module *owner;
83933-};
83934+} __do_const;
83935
83936 struct iovec;
83937 struct kvec;
83938diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
83939index 52fd8e8..19430a1 100644
83940--- a/include/linux/netdevice.h
83941+++ b/include/linux/netdevice.h
83942@@ -1191,6 +1191,7 @@ struct net_device_ops {
83943 u8 state);
83944 #endif
83945 };
83946+typedef struct net_device_ops __no_const net_device_ops_no_const;
83947
83948 /**
83949 * enum net_device_priv_flags - &struct net_device priv_flags
83950@@ -1537,10 +1538,10 @@ struct net_device {
83951
83952 struct net_device_stats stats;
83953
83954- atomic_long_t rx_dropped;
83955- atomic_long_t tx_dropped;
83956+ atomic_long_unchecked_t rx_dropped;
83957+ atomic_long_unchecked_t tx_dropped;
83958
83959- atomic_t carrier_changes;
83960+ atomic_unchecked_t carrier_changes;
83961
83962 #ifdef CONFIG_WIRELESS_EXT
83963 const struct iw_handler_def * wireless_handlers;
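
The net_device_ops_no_const typedef above is one instance of the constification pattern that recurs throughout this patch: __do_const (a gcc-plugin attribute) forces ops-structure instances into read-only memory, and the __no_const typedef is the explicit escape hatch for the few objects that genuinely must be written at runtime. A miniature version, with plain const standing in for the plugin:

    struct net_ops {                    /* illustrative ops table */
        int (*open)(void);
        int (*close)(void);
    };

    typedef struct net_ops net_ops_no_const;   /* writable variant */

    static int my_open(void)  { return 0; }
    static int my_close(void) { return 0; }

    /* Normal case: fully known at build time, lives in .rodata. */
    static const struct net_ops fixed_ops = { my_open, my_close };

    /* Exceptional case: filled in during probe, hence the typedef. */
    static net_ops_no_const runtime_ops;

    int main(void)
    {
        runtime_ops = fixed_ops;      /* allowed on the no_const variant  */
        /* fixed_ops.open = 0; */     /* would not compile: object is RO  */
        return runtime_ops.open();
    }
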
83964diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
83965index 2517ece..0bbfcfb 100644
83966--- a/include/linux/netfilter.h
83967+++ b/include/linux/netfilter.h
83968@@ -85,7 +85,7 @@ struct nf_sockopt_ops {
83969 #endif
83970 /* Use the module struct to lock set/get code in place */
83971 struct module *owner;
83972-};
83973+} __do_const;
83974
83975 /* Function to register/unregister hook points. */
83976 int nf_register_hook(struct nf_hook_ops *reg);
83977diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
83978index e955d47..04a5338 100644
83979--- a/include/linux/netfilter/nfnetlink.h
83980+++ b/include/linux/netfilter/nfnetlink.h
83981@@ -19,7 +19,7 @@ struct nfnl_callback {
83982 const struct nlattr * const cda[]);
83983 const struct nla_policy *policy; /* netlink attribute policy */
83984 const u_int16_t attr_count; /* number of nlattr's */
83985-};
83986+} __do_const;
83987
83988 struct nfnetlink_subsystem {
83989 const char *name;
83990diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
83991new file mode 100644
83992index 0000000..33f4af8
83993--- /dev/null
83994+++ b/include/linux/netfilter/xt_gradm.h
83995@@ -0,0 +1,9 @@
83996+#ifndef _LINUX_NETFILTER_XT_GRADM_H
83997+#define _LINUX_NETFILTER_XT_GRADM_H 1
83998+
83999+struct xt_gradm_mtinfo {
84000+ __u16 flags;
84001+ __u16 invflags;
84002+};
84003+
84004+#endif
84005diff --git a/include/linux/nls.h b/include/linux/nls.h
84006index 520681b..2b7fabb 100644
84007--- a/include/linux/nls.h
84008+++ b/include/linux/nls.h
84009@@ -31,7 +31,7 @@ struct nls_table {
84010 const unsigned char *charset2upper;
84011 struct module *owner;
84012 struct nls_table *next;
84013-};
84014+} __do_const;
84015
84016 /* this value hold the maximum octet of charset */
84017 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
84018@@ -46,7 +46,7 @@ enum utf16_endian {
84019 /* nls_base.c */
84020 extern int __register_nls(struct nls_table *, struct module *);
84021 extern int unregister_nls(struct nls_table *);
84022-extern struct nls_table *load_nls(char *);
84023+extern struct nls_table *load_nls(const char *);
84024 extern void unload_nls(struct nls_table *);
84025 extern struct nls_table *load_nls_default(void);
84026 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
84027diff --git a/include/linux/notifier.h b/include/linux/notifier.h
84028index d14a4c3..a078786 100644
84029--- a/include/linux/notifier.h
84030+++ b/include/linux/notifier.h
84031@@ -54,7 +54,8 @@ struct notifier_block {
84032 notifier_fn_t notifier_call;
84033 struct notifier_block __rcu *next;
84034 int priority;
84035-};
84036+} __do_const;
84037+typedef struct notifier_block __no_const notifier_block_no_const;
84038
84039 struct atomic_notifier_head {
84040 spinlock_t lock;
84041diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
84042index b2a0f15..4d7da32 100644
84043--- a/include/linux/oprofile.h
84044+++ b/include/linux/oprofile.h
84045@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
84046 int oprofilefs_create_ro_ulong(struct dentry * root,
84047 char const * name, ulong * val);
84048
84049-/** Create a file for read-only access to an atomic_t. */
84050+/** Create a file for read-only access to an atomic_unchecked_t. */
84051 int oprofilefs_create_ro_atomic(struct dentry * root,
84052- char const * name, atomic_t * val);
84053+ char const * name, atomic_unchecked_t * val);
84054
84055 /** create a directory */
84056 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
84057diff --git a/include/linux/padata.h b/include/linux/padata.h
84058index 4386946..f50c615 100644
84059--- a/include/linux/padata.h
84060+++ b/include/linux/padata.h
84061@@ -129,7 +129,7 @@ struct parallel_data {
84062 struct padata_serial_queue __percpu *squeue;
84063 atomic_t reorder_objects;
84064 atomic_t refcnt;
84065- atomic_t seq_nr;
84066+ atomic_unchecked_t seq_nr;
84067 struct padata_cpumask cpumask;
84068 spinlock_t lock ____cacheline_aligned;
84069 unsigned int processed;
84070diff --git a/include/linux/path.h b/include/linux/path.h
84071index d137218..be0c176 100644
84072--- a/include/linux/path.h
84073+++ b/include/linux/path.h
84074@@ -1,13 +1,15 @@
84075 #ifndef _LINUX_PATH_H
84076 #define _LINUX_PATH_H
84077
84078+#include <linux/compiler.h>
84079+
84080 struct dentry;
84081 struct vfsmount;
84082
84083 struct path {
84084 struct vfsmount *mnt;
84085 struct dentry *dentry;
84086-};
84087+} __randomize_layout;
84088
84089 extern void path_get(const struct path *);
84090 extern void path_put(const struct path *);
84091diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
84092index 8c78950..0d74ed9 100644
84093--- a/include/linux/pci_hotplug.h
84094+++ b/include/linux/pci_hotplug.h
84095@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
84096 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
84097 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
84098 int (*reset_slot) (struct hotplug_slot *slot, int probe);
84099-};
84100+} __do_const;
84101+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
84102
84103 /**
84104 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
84105diff --git a/include/linux/percpu.h b/include/linux/percpu.h
84106index caebf2a..4c3ae9d 100644
84107--- a/include/linux/percpu.h
84108+++ b/include/linux/percpu.h
84109@@ -34,7 +34,7 @@
84110 * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
84111 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
84112 */
84113-#define PERCPU_DYNAMIC_EARLY_SLOTS 128
84114+#define PERCPU_DYNAMIC_EARLY_SLOTS 256
84115 #define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
84116
84117 /*
84118diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
84119index 664de5a..b3e1bf4 100644
84120--- a/include/linux/perf_event.h
84121+++ b/include/linux/perf_event.h
84122@@ -336,8 +336,8 @@ struct perf_event {
84123
84124 enum perf_event_active_state state;
84125 unsigned int attach_state;
84126- local64_t count;
84127- atomic64_t child_count;
84128+ local64_t count; /* PaX: fix it one day */
84129+ atomic64_unchecked_t child_count;
84130
84131 /*
84132 * These are the total time in nanoseconds that the event
84133@@ -388,8 +388,8 @@ struct perf_event {
84134 * These accumulate total time (in nanoseconds) that children
84135 * events have been enabled and running, respectively.
84136 */
84137- atomic64_t child_total_time_enabled;
84138- atomic64_t child_total_time_running;
84139+ atomic64_unchecked_t child_total_time_enabled;
84140+ atomic64_unchecked_t child_total_time_running;
84141
84142 /*
84143 * Protect attach/detach and child_list:
84144@@ -733,7 +733,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
84145 entry->ip[entry->nr++] = ip;
84146 }
84147
84148-extern int sysctl_perf_event_paranoid;
84149+extern int sysctl_perf_event_legitimately_concerned;
84150 extern int sysctl_perf_event_mlock;
84151 extern int sysctl_perf_event_sample_rate;
84152 extern int sysctl_perf_cpu_time_max_percent;
84153@@ -748,19 +748,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
84154 loff_t *ppos);
84155
84156
84157+static inline bool perf_paranoid_any(void)
84158+{
84159+ return sysctl_perf_event_legitimately_concerned > 2;
84160+}
84161+
84162 static inline bool perf_paranoid_tracepoint_raw(void)
84163 {
84164- return sysctl_perf_event_paranoid > -1;
84165+ return sysctl_perf_event_legitimately_concerned > -1;
84166 }
84167
84168 static inline bool perf_paranoid_cpu(void)
84169 {
84170- return sysctl_perf_event_paranoid > 0;
84171+ return sysctl_perf_event_legitimately_concerned > 0;
84172 }
84173
84174 static inline bool perf_paranoid_kernel(void)
84175 {
84176- return sysctl_perf_event_paranoid > 1;
84177+ return sysctl_perf_event_legitimately_concerned > 1;
84178 }
84179
84180 extern void perf_event_init(void);
84181@@ -891,7 +896,7 @@ struct perf_pmu_events_attr {
84182 struct device_attribute attr;
84183 u64 id;
84184 const char *event_str;
84185-};
84186+} __do_const;
84187
84188 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
84189 static struct perf_pmu_events_attr _var = { \
84190diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
84191index b9cf6c5..5462472 100644
84192--- a/include/linux/pid_namespace.h
84193+++ b/include/linux/pid_namespace.h
84194@@ -45,7 +45,7 @@ struct pid_namespace {
84195 int hide_pid;
84196 int reboot; /* group exit code if this pidns was rebooted */
84197 struct ns_common ns;
84198-};
84199+} __randomize_layout;
84200
84201 extern struct pid_namespace init_pid_ns;
84202
84203diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
84204index eb8b8ac..62649e1 100644
84205--- a/include/linux/pipe_fs_i.h
84206+++ b/include/linux/pipe_fs_i.h
84207@@ -47,10 +47,10 @@ struct pipe_inode_info {
84208 struct mutex mutex;
84209 wait_queue_head_t wait;
84210 unsigned int nrbufs, curbuf, buffers;
84211- unsigned int readers;
84212- unsigned int writers;
84213- unsigned int files;
84214- unsigned int waiting_writers;
84215+ atomic_t readers;
84216+ atomic_t writers;
84217+ atomic_t files;
84218+ atomic_t waiting_writers;
84219 unsigned int r_counter;
84220 unsigned int w_counter;
84221 struct page *tmp_page;
84222diff --git a/include/linux/pm.h b/include/linux/pm.h
84223index 8b59763..8a05939 100644
84224--- a/include/linux/pm.h
84225+++ b/include/linux/pm.h
84226@@ -608,6 +608,7 @@ struct dev_pm_domain {
84227 struct dev_pm_ops ops;
84228 void (*detach)(struct device *dev, bool power_off);
84229 };
84230+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
84231
84232 /*
84233 * The PM_EVENT_ messages are also used by drivers implementing the legacy
84234diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
84235index a9edab2..8bada56 100644
84236--- a/include/linux/pm_domain.h
84237+++ b/include/linux/pm_domain.h
84238@@ -39,11 +39,11 @@ struct gpd_dev_ops {
84239 int (*save_state)(struct device *dev);
84240 int (*restore_state)(struct device *dev);
84241 bool (*active_wakeup)(struct device *dev);
84242-};
84243+} __no_const;
84244
84245 struct gpd_cpuidle_data {
84246 unsigned int saved_exit_latency;
84247- struct cpuidle_state *idle_state;
84248+ cpuidle_state_no_const *idle_state;
84249 };
84250
84251 struct generic_pm_domain {
84252diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
84253index 30e84d4..22278b4 100644
84254--- a/include/linux/pm_runtime.h
84255+++ b/include/linux/pm_runtime.h
84256@@ -115,7 +115,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
84257
84258 static inline void pm_runtime_mark_last_busy(struct device *dev)
84259 {
84260- ACCESS_ONCE(dev->power.last_busy) = jiffies;
84261+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
84262 }
84263
84264 static inline bool pm_runtime_is_irq_safe(struct device *dev)
84265diff --git a/include/linux/pnp.h b/include/linux/pnp.h
84266index 195aafc..49a7bc2 100644
84267--- a/include/linux/pnp.h
84268+++ b/include/linux/pnp.h
84269@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
84270 struct pnp_fixup {
84271 char id[7];
84272 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
84273-};
84274+} __do_const;
84275
84276 /* config parameters */
84277 #define PNP_CONFIG_NORMAL 0x0001
84278diff --git a/include/linux/poison.h b/include/linux/poison.h
84279index 2110a81..13a11bb 100644
84280--- a/include/linux/poison.h
84281+++ b/include/linux/poison.h
84282@@ -19,8 +19,8 @@
84283 * under normal circumstances, used to verify that nobody uses
84284 * non-initialized list entries.
84285 */
84286-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
84287-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
84288+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
84289+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
84290
84291 /********** include/linux/timer.h **********/
84292 /*
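
The poison.h change above abandons the low, offset-based poison addresses: 0x00100100 sits in a region a local attacker can mmap(), so a use-after-free that dereferences a poisoned list pointer could be steered at attacker-controlled data. The replacement constants land (on 32-bit, after the implementation-defined narrowing of the unsigned constant) in the reserved last page of the address space, which userland can never map, so such dereferences fault deterministically. A small userspace look at the two values, for illustration only:

    #include <stdio.h>

    int main(void)
    {
        void *old_poison1 = (void *)(long)0x00100100; /* low, user-mappable     */
        void *new_poison1 = (void *)(long)0xFFFFFF01; /* top page on 32-bit     */

        printf("old: %p\n", old_poison1);
        printf("new: %p\n", new_poison1);
        return 0;
    }
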
84293diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
84294index d8b187c3..9a9257a 100644
84295--- a/include/linux/power/smartreflex.h
84296+++ b/include/linux/power/smartreflex.h
84297@@ -238,7 +238,7 @@ struct omap_sr_class_data {
84298 int (*notify)(struct omap_sr *sr, u32 status);
84299 u8 notify_flags;
84300 u8 class_type;
84301-};
84302+} __do_const;
84303
84304 /**
84305 * struct omap_sr_nvalue_table - Smartreflex n-target value info
84306diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
84307index 4ea1d37..80f4b33 100644
84308--- a/include/linux/ppp-comp.h
84309+++ b/include/linux/ppp-comp.h
84310@@ -84,7 +84,7 @@ struct compressor {
84311 struct module *owner;
84312 /* Extra skb space needed by the compressor algorithm */
84313 unsigned int comp_extra;
84314-};
84315+} __do_const;
84316
84317 /*
84318 * The return value from decompress routine is the length of the
84319diff --git a/include/linux/preempt.h b/include/linux/preempt.h
84320index de83b4e..c4b997d 100644
84321--- a/include/linux/preempt.h
84322+++ b/include/linux/preempt.h
84323@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
84324 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
84325 #endif
84326
84327+#define raw_preempt_count_add(val) __preempt_count_add(val)
84328+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
84329+
84330 #define __preempt_count_inc() __preempt_count_add(1)
84331 #define __preempt_count_dec() __preempt_count_sub(1)
84332
84333 #define preempt_count_inc() preempt_count_add(1)
84334+#define raw_preempt_count_inc() raw_preempt_count_add(1)
84335 #define preempt_count_dec() preempt_count_sub(1)
84336+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
84337
84338 #ifdef CONFIG_PREEMPT_COUNT
84339
84340@@ -41,6 +46,12 @@ do { \
84341 barrier(); \
84342 } while (0)
84343
84344+#define raw_preempt_disable() \
84345+do { \
84346+ raw_preempt_count_inc(); \
84347+ barrier(); \
84348+} while (0)
84349+
84350 #define sched_preempt_enable_no_resched() \
84351 do { \
84352 barrier(); \
84353@@ -49,6 +60,12 @@ do { \
84354
84355 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
84356
84357+#define raw_preempt_enable_no_resched() \
84358+do { \
84359+ barrier(); \
84360+ raw_preempt_count_dec(); \
84361+} while (0)
84362+
84363 #ifdef CONFIG_PREEMPT
84364 #define preempt_enable() \
84365 do { \
84366@@ -113,8 +130,10 @@ do { \
84367 * region.
84368 */
84369 #define preempt_disable() barrier()
84370+#define raw_preempt_disable() barrier()
84371 #define sched_preempt_enable_no_resched() barrier()
84372 #define preempt_enable_no_resched() barrier()
84373+#define raw_preempt_enable_no_resched() barrier()
84374 #define preempt_enable() barrier()
84375 #define preempt_check_resched() do { } while (0)
84376
84377@@ -128,11 +147,13 @@ do { \
84378 /*
84379 * Modules have no business playing preemption tricks.
84380 */
84381+#ifndef CONFIG_PAX_KERNEXEC
84382 #undef sched_preempt_enable_no_resched
84383 #undef preempt_enable_no_resched
84384 #undef preempt_enable_no_resched_notrace
84385 #undef preempt_check_resched
84386 #endif
84387+#endif
84388
84389 #define preempt_set_need_resched() \
84390 do { \
84391diff --git a/include/linux/printk.h b/include/linux/printk.h
84392index 4d5bf57..d94eccf 100644
84393--- a/include/linux/printk.h
84394+++ b/include/linux/printk.h
84395@@ -121,6 +121,7 @@ void early_printk(const char *s, ...) { }
84396 #endif
84397
84398 typedef int(*printk_func_t)(const char *fmt, va_list args);
84399+extern int kptr_restrict;
84400
84401 #ifdef CONFIG_PRINTK
84402 asmlinkage __printf(5, 0)
84403@@ -156,7 +157,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
84404
84405 extern int printk_delay_msec;
84406 extern int dmesg_restrict;
84407-extern int kptr_restrict;
84408
84409 extern void wake_up_klogd(void);
84410
84411diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
84412index b97bf2e..f14c92d4 100644
84413--- a/include/linux/proc_fs.h
84414+++ b/include/linux/proc_fs.h
84415@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
84416 extern struct proc_dir_entry *proc_symlink(const char *,
84417 struct proc_dir_entry *, const char *);
84418 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
84419+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
84420 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
84421 struct proc_dir_entry *, void *);
84422+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
84423+ struct proc_dir_entry *, void *);
84424 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
84425 struct proc_dir_entry *);
84426
84427@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
84428 return proc_create_data(name, mode, parent, proc_fops, NULL);
84429 }
84430
84431+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
84432+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
84433+{
84434+#ifdef CONFIG_GRKERNSEC_PROC_USER
84435+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
84436+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
84437+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
84438+#else
84439+ return proc_create_data(name, mode, parent, proc_fops, NULL);
84440+#endif
84441+}
84442+
84443+
84444 extern void proc_set_size(struct proc_dir_entry *, loff_t);
84445 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
84446 extern void *PDE_DATA(const struct inode *);
84447@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
84448 struct proc_dir_entry *parent,const char *dest) { return NULL;}
84449 static inline struct proc_dir_entry *proc_mkdir(const char *name,
84450 struct proc_dir_entry *parent) {return NULL;}
84451+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
84452+ struct proc_dir_entry *parent) { return NULL; }
84453 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
84454 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84455+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
84456+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84457 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
84458 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
84459 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
84460@@ -79,7 +99,7 @@ struct net;
84461 static inline struct proc_dir_entry *proc_net_mkdir(
84462 struct net *net, const char *name, struct proc_dir_entry *parent)
84463 {
84464- return proc_mkdir_data(name, 0, parent, net);
84465+ return proc_mkdir_data_restrict(name, 0, parent, net);
84466 }
84467
84468 #endif /* _LINUX_PROC_FS_H */
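
proc_create_grsec(), added above, narrows the requested /proc entry mode according to the GRKERNSEC_PROC_* configuration: owner-only under PROC_USER, owner-plus-group under PROC_USERGROUP, and unchanged otherwise. The mode-selection logic in isolation, as a userspace sketch with octal literals standing in for the S_I* macros:

    #include <stdio.h>

    #define S_IRUSR 0400
    #define S_IRGRP 0040

    static unsigned int grsec_proc_mode(unsigned int requested)
    {
    #if defined(CONFIG_GRKERNSEC_PROC_USER)
        return S_IRUSR;                 /* root-readable only        */
    #elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
        return S_IRUSR | S_IRGRP;       /* root + a configured group */
    #else
        return requested;               /* hardening off: unchanged  */
    #endif
    }

    int main(void)
    {
        printf("mode: %04o\n", grsec_proc_mode(0444));
        return 0;
    }
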
84469diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
84470index 42dfc61..8113a99 100644
84471--- a/include/linux/proc_ns.h
84472+++ b/include/linux/proc_ns.h
84473@@ -16,7 +16,7 @@ struct proc_ns_operations {
84474 struct ns_common *(*get)(struct task_struct *task);
84475 void (*put)(struct ns_common *ns);
84476 int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
84477-};
84478+} __do_const __randomize_layout;
84479
84480 extern const struct proc_ns_operations netns_operations;
84481 extern const struct proc_ns_operations utsns_operations;
84482diff --git a/include/linux/quota.h b/include/linux/quota.h
84483index b86df49..8002997 100644
84484--- a/include/linux/quota.h
84485+++ b/include/linux/quota.h
84486@@ -75,7 +75,7 @@ struct kqid { /* Type in which we store the quota identifier */
84487
84488 extern bool qid_eq(struct kqid left, struct kqid right);
84489 extern bool qid_lt(struct kqid left, struct kqid right);
84490-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
84491+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
84492 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
84493 extern bool qid_valid(struct kqid qid);
84494
84495diff --git a/include/linux/random.h b/include/linux/random.h
84496index b05856e..0a9f14e 100644
84497--- a/include/linux/random.h
84498+++ b/include/linux/random.h
84499@@ -9,9 +9,19 @@
84500 #include <uapi/linux/random.h>
84501
84502 extern void add_device_randomness(const void *, unsigned int);
84503+
84504+static inline void add_latent_entropy(void)
84505+{
84506+
84507+#ifdef LATENT_ENTROPY_PLUGIN
84508+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
84509+#endif
84510+
84511+}
84512+
84513 extern void add_input_randomness(unsigned int type, unsigned int code,
84514- unsigned int value);
84515-extern void add_interrupt_randomness(int irq, int irq_flags);
84516+ unsigned int value) __latent_entropy;
84517+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
84518
84519 extern void get_random_bytes(void *buf, int nbytes);
84520 extern void get_random_bytes_arch(void *buf, int nbytes);
84521@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
84522 extern const struct file_operations random_fops, urandom_fops;
84523 #endif
84524
84525-unsigned int get_random_int(void);
84526+unsigned int __intentional_overflow(-1) get_random_int(void);
84527 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
84528
84529-u32 prandom_u32(void);
84530+u32 prandom_u32(void) __intentional_overflow(-1);
84531 void prandom_bytes(void *buf, size_t nbytes);
84532 void prandom_seed(u32 seed);
84533 void prandom_reseed_late(void);
84534@@ -37,6 +47,11 @@ struct rnd_state {
84535 u32 prandom_u32_state(struct rnd_state *state);
84536 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
84537
84538+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
84539+{
84540+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
84541+}
84542+
84543 /**
84544 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
84545 * @ep_ro: right open interval endpoint
84546@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
84547 *
84548 * Returns: pseudo-random number in interval [0, ep_ro)
84549 */
84550-static inline u32 prandom_u32_max(u32 ep_ro)
84551+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
84552 {
84553 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
84554 }
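
pax_get_random_long(), added in the random.h hunk, composes a full-width random long from two 32-bit draws; the ternary on sizeof(long) makes the second draw and the shift vanish at compile time on 32-bit targets. The same construction as a standalone sketch, with rand32() as a hypothetical stand-in for prandom_u32():

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static uint32_t rand32(void) { return (uint32_t)rand(); } /* stand-in */

    static unsigned long get_random_long(void)
    {
        /* Mirrors the patch: low half always, high half only when
         * long is wider than 32 bits. */
        return rand32() +
               (sizeof(long) > 4 ? (unsigned long)rand32() << 32 : 0);
    }

    int main(void)
    {
        srand(42);
        printf("%lx\n", get_random_long());
        return 0;
    }
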
84555diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
84556index 378c5ee..aa84a47 100644
84557--- a/include/linux/rbtree_augmented.h
84558+++ b/include/linux/rbtree_augmented.h
84559@@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
84560 old->rbaugmented = rbcompute(old); \
84561 } \
84562 rbstatic const struct rb_augment_callbacks rbname = { \
84563- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
84564+ .propagate = rbname ## _propagate, \
84565+ .copy = rbname ## _copy, \
84566+ .rotate = rbname ## _rotate \
84567 };
84568
84569
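
The rbtree_augmented.h hunk replaces positional initializers with designated ones, so each function pointer names the member it fills; the binding then survives any reordering of the structure (relevant once __randomize_layout and the constify plugin are in play). The two styles side by side, in an illustrative struct:

    struct callbacks {                  /* illustrative */
        void (*propagate)(int);
        void (*copy)(int);
        void (*rotate)(int);
    };

    static void my_propagate(int n) { (void)n; }
    static void my_copy(int n)      { (void)n; }
    static void my_rotate(int n)    { (void)n; }

    /* Fragile: meaning depends on member order. */
    static const struct callbacks positional = {
        my_propagate, my_copy, my_rotate
    };

    /* Robust: each pointer names its slot. */
    static const struct callbacks designated = {
        .propagate = my_propagate,
        .copy      = my_copy,
        .rotate    = my_rotate,
    };

    int main(void)
    {
        return positional.propagate == designated.propagate ? 0 : 1;
    }
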
84570diff --git a/include/linux/rculist.h b/include/linux/rculist.h
84571index 529bc94..82ce778 100644
84572--- a/include/linux/rculist.h
84573+++ b/include/linux/rculist.h
84574@@ -29,8 +29,8 @@
84575 */
84576 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
84577 {
84578- ACCESS_ONCE(list->next) = list;
84579- ACCESS_ONCE(list->prev) = list;
84580+ ACCESS_ONCE_RW(list->next) = list;
84581+ ACCESS_ONCE_RW(list->prev) = list;
84582 }
84583
84584 /*
84585@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
84586 struct list_head *prev, struct list_head *next);
84587 #endif
84588
84589+void __pax_list_add_rcu(struct list_head *new,
84590+ struct list_head *prev, struct list_head *next);
84591+
84592 /**
84593 * list_add_rcu - add a new entry to rcu-protected list
84594 * @new: new entry to be added
84595@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
84596 __list_add_rcu(new, head, head->next);
84597 }
84598
84599+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
84600+{
84601+ __pax_list_add_rcu(new, head, head->next);
84602+}
84603+
84604 /**
84605 * list_add_tail_rcu - add a new entry to rcu-protected list
84606 * @new: new entry to be added
84607@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
84608 __list_add_rcu(new, head->prev, head);
84609 }
84610
84611+static inline void pax_list_add_tail_rcu(struct list_head *new,
84612+ struct list_head *head)
84613+{
84614+ __pax_list_add_rcu(new, head->prev, head);
84615+}
84616+
84617 /**
84618 * list_del_rcu - deletes entry from list without re-initialization
84619 * @entry: the element to delete from the list.
84620@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
84621 entry->prev = LIST_POISON2;
84622 }
84623
84624+extern void pax_list_del_rcu(struct list_head *entry);
84625+
84626 /**
84627 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
84628 * @n: the element to delete from the hash list.
84629diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
84630index ed4f593..8a51501 100644
84631--- a/include/linux/rcupdate.h
84632+++ b/include/linux/rcupdate.h
84633@@ -332,7 +332,7 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
84634 #define rcu_note_voluntary_context_switch(t) \
84635 do { \
84636 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
84637- ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
84638+ ACCESS_ONCE_RW((t)->rcu_tasks_holdout) = false; \
84639 } while (0)
84640 #else /* #ifdef CONFIG_TASKS_RCU */
84641 #define TASKS_RCU(x) do { } while (0)
84642diff --git a/include/linux/reboot.h b/include/linux/reboot.h
84643index 67fc8fc..a90f7d8 100644
84644--- a/include/linux/reboot.h
84645+++ b/include/linux/reboot.h
84646@@ -47,9 +47,9 @@ extern void do_kernel_restart(char *cmd);
84647 */
84648
84649 extern void migrate_to_reboot_cpu(void);
84650-extern void machine_restart(char *cmd);
84651-extern void machine_halt(void);
84652-extern void machine_power_off(void);
84653+extern void machine_restart(char *cmd) __noreturn;
84654+extern void machine_halt(void) __noreturn;
84655+extern void machine_power_off(void) __noreturn;
84656
84657 extern void machine_shutdown(void);
84658 struct pt_regs;
84659@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
84660 */
84661
84662 extern void kernel_restart_prepare(char *cmd);
84663-extern void kernel_restart(char *cmd);
84664-extern void kernel_halt(void);
84665-extern void kernel_power_off(void);
84666+extern void kernel_restart(char *cmd) __noreturn;
84667+extern void kernel_halt(void) __noreturn;
84668+extern void kernel_power_off(void) __noreturn;
84669
84670 extern int C_A_D; /* for sysctl */
84671 void ctrl_alt_del(void);
84672@@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
84673 * Emergency restart, callable from an interrupt handler.
84674 */
84675
84676-extern void emergency_restart(void);
84677+extern void emergency_restart(void) __noreturn;
84678 #include <asm/emergency-restart.h>
84679
84680 #endif /* _LINUX_REBOOT_H */
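
Marking the halt/restart entry points __noreturn, as the reboot.h hunk does, documents that they never come back, lets the compiler discard unreachable follow-on code, and turns an accidental return path into a diagnosable warning. A self-contained illustration with a hypothetical stand-in for machine_halt():

    #include <stdio.h>
    #include <stdlib.h>

    #define __noreturn __attribute__((noreturn))

    static __noreturn void machine_halt_stub(void)   /* stand-in */
    {
        puts("halting");
        exit(0);            /* never returns, satisfying the attribute */
    }

    int main(void)
    {
        machine_halt_stub();
        /* Everything past this point is provably unreachable; the
         * compiler may drop it and elide main's epilogue. */
    }
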
84681diff --git a/include/linux/regset.h b/include/linux/regset.h
84682index 8e0c9fe..ac4d221 100644
84683--- a/include/linux/regset.h
84684+++ b/include/linux/regset.h
84685@@ -161,7 +161,8 @@ struct user_regset {
84686 unsigned int align;
84687 unsigned int bias;
84688 unsigned int core_note_type;
84689-};
84690+} __do_const;
84691+typedef struct user_regset __no_const user_regset_no_const;
84692
84693 /**
84694 * struct user_regset_view - available regsets
84695diff --git a/include/linux/relay.h b/include/linux/relay.h
84696index d7c8359..818daf5 100644
84697--- a/include/linux/relay.h
84698+++ b/include/linux/relay.h
84699@@ -157,7 +157,7 @@ struct rchan_callbacks
84700 * The callback should return 0 if successful, negative if not.
84701 */
84702 int (*remove_buf_file)(struct dentry *dentry);
84703-};
84704+} __no_const;
84705
84706 /*
84707 * CONFIG_RELAY kernel API, kernel/relay.c
84708diff --git a/include/linux/rio.h b/include/linux/rio.h
84709index 6bda06f..bf39a9b 100644
84710--- a/include/linux/rio.h
84711+++ b/include/linux/rio.h
84712@@ -358,7 +358,7 @@ struct rio_ops {
84713 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
84714 u64 rstart, u32 size, u32 flags);
84715 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
84716-};
84717+} __no_const;
84718
84719 #define RIO_RESOURCE_MEM 0x00000100
84720 #define RIO_RESOURCE_DOORBELL 0x00000200
84721diff --git a/include/linux/rmap.h b/include/linux/rmap.h
84722index d9d7e7e..86f47ac 100644
84723--- a/include/linux/rmap.h
84724+++ b/include/linux/rmap.h
84725@@ -154,8 +154,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
84726 void anon_vma_init(void); /* create anon_vma_cachep */
84727 int anon_vma_prepare(struct vm_area_struct *);
84728 void unlink_anon_vmas(struct vm_area_struct *);
84729-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
84730-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
84731+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
84732+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
84733
84734 static inline void anon_vma_merge(struct vm_area_struct *vma,
84735 struct vm_area_struct *next)
84736diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
84737index ed8f9e7..999bc96 100644
84738--- a/include/linux/scatterlist.h
84739+++ b/include/linux/scatterlist.h
84740@@ -1,6 +1,7 @@
84741 #ifndef _LINUX_SCATTERLIST_H
84742 #define _LINUX_SCATTERLIST_H
84743
84744+#include <linux/sched.h>
84745 #include <linux/string.h>
84746 #include <linux/bug.h>
84747 #include <linux/mm.h>
84748@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
84749 #ifdef CONFIG_DEBUG_SG
84750 BUG_ON(!virt_addr_valid(buf));
84751 #endif
84752+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84753+ if (object_starts_on_stack(buf)) {
84754+ void *adjbuf = buf - current->stack + current->lowmem_stack;
84755+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
84756+ } else
84757+#endif
84758 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
84759 }
84760
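
The sg_set_buf() hunk above handles GRKERNSEC_KSTACKOVERFLOW's relocated task stacks: a stack buffer lives in vmalloc space there, where virt_to_page() is invalid, so the address is first rebased into the parallel lowmem copy at the same offset. The pointer translation in isolation, as a userspace model with two ordinary buffers standing in for the two stack mappings:

    #include <assert.h>

    enum { STACK_SIZE = 256 };          /* illustrative */

    int main(void)
    {
        char vstack[STACK_SIZE] = {0};  /* stands in for current->stack        */
        char lowmem[STACK_SIZE] = {0};  /* stands in for current->lowmem_stack */
        char *buf = vstack + 100;       /* an object that lives on the stack   */

        /* The translation from the hunk:
         *   adjbuf = buf - current->stack + current->lowmem_stack; */
        char *adjbuf = buf - vstack + lowmem;

        assert(adjbuf - lowmem == buf - vstack); /* same offset, other mapping */
        return 0;
    }
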
84761diff --git a/include/linux/sched.h b/include/linux/sched.h
84762index 8db31ef..0af1f81 100644
84763--- a/include/linux/sched.h
84764+++ b/include/linux/sched.h
84765@@ -133,6 +133,7 @@ struct fs_struct;
84766 struct perf_event_context;
84767 struct blk_plug;
84768 struct filename;
84769+struct linux_binprm;
84770
84771 #define VMACACHE_BITS 2
84772 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
84773@@ -415,7 +416,7 @@ extern char __sched_text_start[], __sched_text_end[];
84774 extern int in_sched_functions(unsigned long addr);
84775
84776 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
84777-extern signed long schedule_timeout(signed long timeout);
84778+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
84779 extern signed long schedule_timeout_interruptible(signed long timeout);
84780 extern signed long schedule_timeout_killable(signed long timeout);
84781 extern signed long schedule_timeout_uninterruptible(signed long timeout);
84782@@ -426,6 +427,19 @@ struct nsproxy;
84783 struct user_namespace;
84784
84785 #ifdef CONFIG_MMU
84786+
84787+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
84788+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
84789+#else
84790+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
84791+{
84792+ return 0;
84793+}
84794+#endif
84795+
84796+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
84797+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
84798+
84799 extern void arch_pick_mmap_layout(struct mm_struct *mm);
84800 extern unsigned long
84801 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
84802@@ -724,6 +738,17 @@ struct signal_struct {
84803 #ifdef CONFIG_TASKSTATS
84804 struct taskstats *stats;
84805 #endif
84806+
84807+#ifdef CONFIG_GRKERNSEC
84808+ u32 curr_ip;
84809+ u32 saved_ip;
84810+ u32 gr_saddr;
84811+ u32 gr_daddr;
84812+ u16 gr_sport;
84813+ u16 gr_dport;
84814+ u8 used_accept:1;
84815+#endif
84816+
84817 #ifdef CONFIG_AUDIT
84818 unsigned audit_tty;
84819 unsigned audit_tty_log_passwd;
84820@@ -750,7 +775,7 @@ struct signal_struct {
84821 struct mutex cred_guard_mutex; /* guard against foreign influences on
84822 * credential calculations
84823 * (notably. ptrace) */
84824-};
84825+} __randomize_layout;
84826
84827 /*
84828 * Bits in flags field of signal_struct.
84829@@ -803,6 +828,14 @@ struct user_struct {
84830 struct key *session_keyring; /* UID's default session keyring */
84831 #endif
84832
84833+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
84834+ unsigned char kernel_banned;
84835+#endif
84836+#ifdef CONFIG_GRKERNSEC_BRUTE
84837+ unsigned char suid_banned;
84838+ unsigned long suid_ban_expires;
84839+#endif
84840+
84841 /* Hash table maintenance information */
84842 struct hlist_node uidhash_node;
84843 kuid_t uid;
84844@@ -810,7 +843,7 @@ struct user_struct {
84845 #ifdef CONFIG_PERF_EVENTS
84846 atomic_long_t locked_vm;
84847 #endif
84848-};
84849+} __randomize_layout;
84850
84851 extern int uids_sysfs_init(void);
84852
84853@@ -1274,6 +1307,9 @@ enum perf_event_task_context {
84854 struct task_struct {
84855 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
84856 void *stack;
84857+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84858+ void *lowmem_stack;
84859+#endif
84860 atomic_t usage;
84861 unsigned int flags; /* per process flags, defined below */
84862 unsigned int ptrace;
84863@@ -1405,8 +1441,8 @@ struct task_struct {
84864 struct list_head thread_node;
84865
84866 struct completion *vfork_done; /* for vfork() */
84867- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
84868- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
84869+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
84870+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
84871
84872 cputime_t utime, stime, utimescaled, stimescaled;
84873 cputime_t gtime;
84874@@ -1431,11 +1467,6 @@ struct task_struct {
84875 struct task_cputime cputime_expires;
84876 struct list_head cpu_timers[3];
84877
84878-/* process credentials */
84879- const struct cred __rcu *real_cred; /* objective and real subjective task
84880- * credentials (COW) */
84881- const struct cred __rcu *cred; /* effective (overridable) subjective task
84882- * credentials (COW) */
84883 char comm[TASK_COMM_LEN]; /* executable name excluding path
84884 - access with [gs]et_task_comm (which lock
84885 it with task_lock())
84886@@ -1453,6 +1484,10 @@ struct task_struct {
84887 #endif
84888 /* CPU-specific state of this task */
84889 struct thread_struct thread;
84890+/* thread_info moved to task_struct */
84891+#ifdef CONFIG_X86
84892+ struct thread_info tinfo;
84893+#endif
84894 /* filesystem information */
84895 struct fs_struct *fs;
84896 /* open file information */
84897@@ -1527,6 +1562,10 @@ struct task_struct {
84898 gfp_t lockdep_reclaim_gfp;
84899 #endif
84900
84901+/* process credentials */
84902+ const struct cred __rcu *real_cred; /* objective and real subjective task
84903+ * credentials (COW) */
84904+
84905 /* journalling filesystem info */
84906 void *journal_info;
84907
84908@@ -1565,6 +1604,10 @@ struct task_struct {
84909 /* cg_list protected by css_set_lock and tsk->alloc_lock */
84910 struct list_head cg_list;
84911 #endif
84912+
84913+ const struct cred __rcu *cred; /* effective (overridable) subjective task
84914+ * credentials (COW) */
84915+
84916 #ifdef CONFIG_FUTEX
84917 struct robust_list_head __user *robust_list;
84918 #ifdef CONFIG_COMPAT
84919@@ -1673,7 +1716,7 @@ struct task_struct {
84920 * Number of functions that haven't been traced
84921 * because of depth overrun.
84922 */
84923- atomic_t trace_overrun;
84924+ atomic_unchecked_t trace_overrun;
84925 /* Pause for the tracing */
84926 atomic_t tracing_graph_pause;
84927 #endif
84928@@ -1701,7 +1744,78 @@ struct task_struct {
84929 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
84930 unsigned long task_state_change;
84931 #endif
84932-};
84933+
84934+#ifdef CONFIG_GRKERNSEC
84935+ /* grsecurity */
84936+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
84937+ u64 exec_id;
84938+#endif
84939+#ifdef CONFIG_GRKERNSEC_SETXID
84940+ const struct cred *delayed_cred;
84941+#endif
84942+ struct dentry *gr_chroot_dentry;
84943+ struct acl_subject_label *acl;
84944+ struct acl_subject_label *tmpacl;
84945+ struct acl_role_label *role;
84946+ struct file *exec_file;
84947+ unsigned long brute_expires;
84948+ u16 acl_role_id;
84949+ u8 inherited;
84950+ /* is this the task that authenticated to the special role */
84951+ u8 acl_sp_role;
84952+ u8 is_writable;
84953+ u8 brute;
84954+ u8 gr_is_chrooted;
84955+#endif
84956+
84957+} __randomize_layout;
84958+
84959+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
84960+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
84961+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
84962+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
84963+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
84964+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
84965+
84966+#ifdef CONFIG_PAX_SOFTMODE
84967+extern int pax_softmode;
84968+#endif
84969+
84970+extern int pax_check_flags(unsigned long *);
84971+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
84972+
84973+/* if tsk != current then task_lock must be held on it */
84974+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
84975+static inline unsigned long pax_get_flags(struct task_struct *tsk)
84976+{
84977+ if (likely(tsk->mm))
84978+ return tsk->mm->pax_flags;
84979+ else
84980+ return 0UL;
84981+}
84982+
84983+/* if tsk != current then task_lock must be held on it */
84984+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
84985+{
84986+ if (likely(tsk->mm)) {
84987+ tsk->mm->pax_flags = flags;
84988+ return 0;
84989+ }
84990+ return -EINVAL;
84991+}
84992+#endif
84993+
84994+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
84995+extern void pax_set_initial_flags(struct linux_binprm *bprm);
84996+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
84997+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
84998+#endif
84999+
85000+struct path;
85001+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
85002+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
85003+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
85004+extern void pax_report_refcount_overflow(struct pt_regs *regs);
85005
85006 /* Future-safe accessor for struct task_struct's cpus_allowed. */
85007 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
85008@@ -1783,7 +1897,7 @@ struct pid_namespace;
85009 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
85010 struct pid_namespace *ns);
85011
85012-static inline pid_t task_pid_nr(struct task_struct *tsk)
85013+static inline pid_t task_pid_nr(const struct task_struct *tsk)
85014 {
85015 return tsk->pid;
85016 }
85017@@ -2150,6 +2264,25 @@ extern u64 sched_clock_cpu(int cpu);
85018
85019 extern void sched_clock_init(void);
85020
85021+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85022+static inline void populate_stack(void)
85023+{
85024+ struct task_struct *curtask = current;
85025+ int c;
85026+ int *ptr = curtask->stack;
85027+ int *end = curtask->stack + THREAD_SIZE;
85028+
85029+ while (ptr < end) {
85030+ c = *(volatile int *)ptr;
85031+ ptr += PAGE_SIZE/sizeof(int);
85032+ }
85033+}
85034+#else
85035+static inline void populate_stack(void)
85036+{
85037+}
85038+#endif
85039+
85040 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
85041 static inline void sched_clock_tick(void)
85042 {
85043@@ -2283,7 +2416,9 @@ void yield(void);
85044 extern struct exec_domain default_exec_domain;
85045
85046 union thread_union {
85047+#ifndef CONFIG_X86
85048 struct thread_info thread_info;
85049+#endif
85050 unsigned long stack[THREAD_SIZE/sizeof(long)];
85051 };
85052
85053@@ -2316,6 +2451,7 @@ extern struct pid_namespace init_pid_ns;
85054 */
85055
85056 extern struct task_struct *find_task_by_vpid(pid_t nr);
85057+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
85058 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
85059 struct pid_namespace *ns);
85060
85061@@ -2480,7 +2616,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
85062 extern void exit_itimers(struct signal_struct *);
85063 extern void flush_itimer_signals(void);
85064
85065-extern void do_group_exit(int);
85066+extern __noreturn void do_group_exit(int);
85067
85068 extern int do_execve(struct filename *,
85069 const char __user * const __user *,
85070@@ -2701,9 +2837,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
85071 #define task_stack_end_corrupted(task) \
85072 (*(end_of_stack(task)) != STACK_END_MAGIC)
85073
85074-static inline int object_is_on_stack(void *obj)
85075+static inline int object_starts_on_stack(const void *obj)
85076 {
85077- void *stack = task_stack_page(current);
85078+ const void *stack = task_stack_page(current);
85079
85080 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
85081 }
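
populate_stack(), added in the sched.h hunk, pre-faults the task stack by reading one word per page through a volatile pointer (the volatile access keeps the compiler from deleting the loop), so every page is present before code that cannot tolerate a lazy fault runs on it. The same loop as a standalone sketch with illustrative sizes:

    #include <stdlib.h>

    enum { PAGE_SIZE = 4096, THREAD_SIZE = 4 * PAGE_SIZE };  /* illustrative */

    static void populate_stack(void *stack)
    {
        int *ptr = stack;
        int *end = (int *)((char *)stack + THREAD_SIZE);

        while (ptr < end) {
            (void)*(volatile int *)ptr;      /* touch one word per page */
            ptr += PAGE_SIZE / sizeof(int);
        }
    }

    int main(void)
    {
        void *stack = calloc(1, THREAD_SIZE);
        if (stack)
            populate_stack(stack);
        free(stack);
        return 0;
    }
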
85082diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
85083index 596a0e0..bea77ec 100644
85084--- a/include/linux/sched/sysctl.h
85085+++ b/include/linux/sched/sysctl.h
85086@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
85087 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
85088
85089 extern int sysctl_max_map_count;
85090+extern unsigned long sysctl_heap_stack_gap;
85091
85092 extern unsigned int sysctl_sched_latency;
85093 extern unsigned int sysctl_sched_min_granularity;
85094diff --git a/include/linux/security.h b/include/linux/security.h
85095index ba96471..74fb3f6 100644
85096--- a/include/linux/security.h
85097+++ b/include/linux/security.h
85098@@ -27,6 +27,7 @@
85099 #include <linux/slab.h>
85100 #include <linux/err.h>
85101 #include <linux/string.h>
85102+#include <linux/grsecurity.h>
85103
85104 struct linux_binprm;
85105 struct cred;
85106@@ -116,8 +117,6 @@ struct seq_file;
85107
85108 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
85109
85110-void reset_security_ops(void);
85111-
85112 #ifdef CONFIG_MMU
85113 extern unsigned long mmap_min_addr;
85114 extern unsigned long dac_mmap_min_addr;
85115@@ -1729,7 +1728,7 @@ struct security_operations {
85116 struct audit_context *actx);
85117 void (*audit_rule_free) (void *lsmrule);
85118 #endif /* CONFIG_AUDIT */
85119-};
85120+} __randomize_layout;
85121
85122 /* prototypes */
85123 extern int security_init(void);
85124diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
85125index dc368b8..e895209 100644
85126--- a/include/linux/semaphore.h
85127+++ b/include/linux/semaphore.h
85128@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
85129 }
85130
85131 extern void down(struct semaphore *sem);
85132-extern int __must_check down_interruptible(struct semaphore *sem);
85133+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
85134 extern int __must_check down_killable(struct semaphore *sem);
85135 extern int __must_check down_trylock(struct semaphore *sem);
85136 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
85137diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
85138index cf6a9da..bd86b1f 100644
85139--- a/include/linux/seq_file.h
85140+++ b/include/linux/seq_file.h
85141@@ -27,6 +27,9 @@ struct seq_file {
85142 struct mutex lock;
85143 const struct seq_operations *op;
85144 int poll_event;
85145+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
85146+ u64 exec_id;
85147+#endif
85148 #ifdef CONFIG_USER_NS
85149 struct user_namespace *user_ns;
85150 #endif
85151@@ -39,6 +42,7 @@ struct seq_operations {
85152 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
85153 int (*show) (struct seq_file *m, void *v);
85154 };
85155+typedef struct seq_operations __no_const seq_operations_no_const;
85156
85157 #define SEQ_SKIP 1
85158
85159@@ -111,6 +115,7 @@ void seq_pad(struct seq_file *m, char c);
85160
85161 char *mangle_path(char *s, const char *p, const char *esc);
85162 int seq_open(struct file *, const struct seq_operations *);
85163+int seq_open_restrict(struct file *, const struct seq_operations *);
85164 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
85165 loff_t seq_lseek(struct file *, loff_t, int);
85166 int seq_release(struct inode *, struct file *);
85167@@ -153,6 +158,7 @@ static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
85168 }
85169
85170 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
85171+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
85172 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
85173 int single_release(struct inode *, struct file *);
85174 void *__seq_open_private(struct file *, const struct seq_operations *, int);
85175diff --git a/include/linux/shm.h b/include/linux/shm.h
85176index 6fb8016..ab4465e 100644
85177--- a/include/linux/shm.h
85178+++ b/include/linux/shm.h
85179@@ -22,6 +22,10 @@ struct shmid_kernel /* private to the kernel */
85180 /* The task created the shm object. NULL if the task is dead. */
85181 struct task_struct *shm_creator;
85182 struct list_head shm_clist; /* list by creator */
85183+#ifdef CONFIG_GRKERNSEC
85184+ u64 shm_createtime;
85185+ pid_t shm_lapid;
85186+#endif
85187 };
85188
85189 /* shm_mode upper byte flags */
85190diff --git a/include/linux/signal.h b/include/linux/signal.h
85191index ab1e039..ad4229e 100644
85192--- a/include/linux/signal.h
85193+++ b/include/linux/signal.h
85194@@ -289,7 +289,7 @@ static inline void allow_signal(int sig)
85195 * know it'll be handled, so that they don't get converted to
85196 * SIGKILL or just silently dropped.
85197 */
85198- kernel_sigaction(sig, (__force __sighandler_t)2);
85199+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
85200 }
85201
85202 static inline void disallow_signal(int sig)
85203diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
85204index 85ab7d7..eb1585a 100644
85205--- a/include/linux/skbuff.h
85206+++ b/include/linux/skbuff.h
85207@@ -763,7 +763,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
85208 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
85209 int node);
85210 struct sk_buff *build_skb(void *data, unsigned int frag_size);
85211-static inline struct sk_buff *alloc_skb(unsigned int size,
85212+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
85213 gfp_t priority)
85214 {
85215 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
85216@@ -1952,7 +1952,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
85217 return skb->inner_transport_header - skb->inner_network_header;
85218 }
85219
85220-static inline int skb_network_offset(const struct sk_buff *skb)
85221+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
85222 {
85223 return skb_network_header(skb) - skb->data;
85224 }
85225@@ -2012,7 +2012,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
85226 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
85227 */
85228 #ifndef NET_SKB_PAD
85229-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
85230+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
85231 #endif
85232
85233 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
85234@@ -2655,9 +2655,9 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
85235 int *err);
85236 unsigned int datagram_poll(struct file *file, struct socket *sock,
85237 struct poll_table_struct *wait);
85238-int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
85239+int __intentional_overflow(0) skb_copy_datagram_iter(const struct sk_buff *from, int offset,
85240 struct iov_iter *to, int size);
85241-static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
85242+static inline int __intentional_overflow(2,4) skb_copy_datagram_msg(const struct sk_buff *from, int offset,
85243 struct msghdr *msg, int size)
85244 {
85245 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
85246@@ -3131,6 +3131,9 @@ static inline void nf_reset(struct sk_buff *skb)
85247 nf_bridge_put(skb->nf_bridge);
85248 skb->nf_bridge = NULL;
85249 #endif
85250+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
85251+ skb->nf_trace = 0;
85252+#endif
85253 }
85254
85255 static inline void nf_reset_trace(struct sk_buff *skb)
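[Note on the skbuff.h hunks above] __intentional_overflow(), sprinkled through these headers, is an annotation consumed by the size_overflow gcc plugin, so builds without the plugin need it to expand to nothing. A stub in that spirit; the guard name is hypothetical, and the argument convention (as read from this patch: -1 exempts the whole function, 0 the return value, a positive N the Nth parameter) is an inference, not plugin documentation:

#ifndef SIZE_OVERFLOW_PLUGIN
#define __intentional_overflow(...)	/* no-op without the plugin */
#endif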
85256diff --git a/include/linux/slab.h b/include/linux/slab.h
85257index 9a139b6..aab37b4 100644
85258--- a/include/linux/slab.h
85259+++ b/include/linux/slab.h
85260@@ -14,15 +14,29 @@
85261 #include <linux/gfp.h>
85262 #include <linux/types.h>
85263 #include <linux/workqueue.h>
85264-
85265+#include <linux/err.h>
85266
85267 /*
85268 * Flags to pass to kmem_cache_create().
85269 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
85270 */
85271 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
85272+
85273+#ifdef CONFIG_PAX_USERCOPY_SLABS
85274+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
85275+#else
85276+#define SLAB_USERCOPY 0x00000000UL
85277+#endif
85278+
85279 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
85280 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
85281+
85282+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85283+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
85284+#else
85285+#define SLAB_NO_SANITIZE 0x00000000UL
85286+#endif
85287+
85288 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
85289 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
85290 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
85291@@ -98,10 +112,13 @@
85292 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
85293 * Both make kfree a no-op.
85294 */
85295-#define ZERO_SIZE_PTR ((void *)16)
85296+#define ZERO_SIZE_PTR \
85297+({ \
85298+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
85299+ (void *)(-MAX_ERRNO-1L); \
85300+})
85301
85302-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
85303- (unsigned long)ZERO_SIZE_PTR)
85304+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
85305
85306 #include <linux/kmemleak.h>
85307
85308@@ -144,6 +161,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
85309 void kfree(const void *);
85310 void kzfree(const void *);
85311 size_t ksize(const void *);
85312+const char *check_heap_object(const void *ptr, unsigned long n);
85313+bool is_usercopy_object(const void *ptr);
85314
85315 /*
85316 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
85317@@ -236,6 +255,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
85318 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85319 #endif
85320
85321+#ifdef CONFIG_PAX_USERCOPY_SLABS
85322+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
85323+#endif
85324+
85325 /*
85326 * Figure out which kmalloc slab an allocation of a certain size
85327 * belongs to.
85328@@ -244,7 +267,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85329 * 2 = 120 .. 192 bytes
85330 * n = 2^(n-1) .. 2^n -1
85331 */
85332-static __always_inline int kmalloc_index(size_t size)
85333+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
85334 {
85335 if (!size)
85336 return 0;
85337@@ -287,14 +310,14 @@ static __always_inline int kmalloc_index(size_t size)
85338 }
85339 #endif /* !CONFIG_SLOB */
85340
85341-void *__kmalloc(size_t size, gfp_t flags);
85342+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
85343 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
85344
85345 #ifdef CONFIG_NUMA
85346-void *__kmalloc_node(size_t size, gfp_t flags, int node);
85347+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1) __size_overflow(1);
85348 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
85349 #else
85350-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
85351+static __always_inline void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
85352 {
85353 return __kmalloc(size, flags);
85354 }
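[Note on the slab.h hunks above] The relocated ZERO_SIZE_PTR and rewritten ZERO_OR_NULL_PTR() repay a worked example: moving the sentinel to just below the ERR_PTR range means a single unsigned comparison catches NULL (whose minus one wraps to ULONG_MAX), the sentinel itself, and every error pointer above it. A standalone demonstration of the arithmetic (the in-kernel macro additionally carries a BUILD_BUG_ON, dropped here):

#include <stdio.h>

#define MAX_ERRNO	4095UL
#define ZERO_SIZE_PTR	((void *)(-MAX_ERRNO - 1L))
#define ZERO_OR_NULL_PTR(x) \
	((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

int main(void)
{
	int local;

	printf("NULL          -> %d\n", ZERO_OR_NULL_PTR(NULL));          /* 1 */
	printf("ZERO_SIZE_PTR -> %d\n", ZERO_OR_NULL_PTR(ZERO_SIZE_PTR)); /* 1 */
	printf("ERR_PTR(-12)  -> %d\n", ZERO_OR_NULL_PTR((void *)-12L));  /* 1 */
	printf("real pointer  -> %d\n", ZERO_OR_NULL_PTR(&local));        /* 0 */
	return 0;
}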
85355diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
85356index b869d16..1453c73 100644
85357--- a/include/linux/slab_def.h
85358+++ b/include/linux/slab_def.h
85359@@ -40,7 +40,7 @@ struct kmem_cache {
85360 /* 4) cache creation/removal */
85361 const char *name;
85362 struct list_head list;
85363- int refcount;
85364+ atomic_t refcount;
85365 int object_size;
85366 int align;
85367
85368@@ -56,10 +56,14 @@ struct kmem_cache {
85369 unsigned long node_allocs;
85370 unsigned long node_frees;
85371 unsigned long node_overflow;
85372- atomic_t allochit;
85373- atomic_t allocmiss;
85374- atomic_t freehit;
85375- atomic_t freemiss;
85376+ atomic_unchecked_t allochit;
85377+ atomic_unchecked_t allocmiss;
85378+ atomic_unchecked_t freehit;
85379+ atomic_unchecked_t freemiss;
85380+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85381+ atomic_unchecked_t sanitized;
85382+ atomic_unchecked_t not_sanitized;
85383+#endif
85384
85385 /*
85386 * If debugging is enabled, then the allocator can add additional
85387diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
85388index d82abd4..408c3a0 100644
85389--- a/include/linux/slub_def.h
85390+++ b/include/linux/slub_def.h
85391@@ -74,7 +74,7 @@ struct kmem_cache {
85392 struct kmem_cache_order_objects max;
85393 struct kmem_cache_order_objects min;
85394 gfp_t allocflags; /* gfp flags to use on each alloc */
85395- int refcount; /* Refcount for slab cache destroy */
85396+ atomic_t refcount; /* Refcount for slab cache destroy */
85397 void (*ctor)(void *);
85398 int inuse; /* Offset to metadata */
85399 int align; /* Alignment */
85400diff --git a/include/linux/smp.h b/include/linux/smp.h
85401index 93dff5f..933c561 100644
85402--- a/include/linux/smp.h
85403+++ b/include/linux/smp.h
85404@@ -176,7 +176,9 @@ static inline void wake_up_all_idle_cpus(void) { }
85405 #endif
85406
85407 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
85408+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
85409 #define put_cpu() preempt_enable()
85410+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
85411
85412 /*
85413 * Callback to arch code if there's nosmp or maxcpus=0 on the
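[Note on the smp.h hunk above] The raw_ variants appear to mirror get_cpu()/put_cpu() on top of the raw_ preempt primitives (themselves additions from this patch), presumably for paths where the instrumented preempt accounting must not fire and no reschedule point is wanted on release. A hypothetical caller, for shape only:

static unsigned int sample_cpu_once(void)
{
	unsigned int cpu = raw_get_cpu();	/* disable preemption, no debug hooks */

	raw_put_cpu_no_resched();		/* re-enable without a resched check */
	return cpu;
}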
85414diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
85415index 46cca4c..3323536 100644
85416--- a/include/linux/sock_diag.h
85417+++ b/include/linux/sock_diag.h
85418@@ -11,7 +11,7 @@ struct sock;
85419 struct sock_diag_handler {
85420 __u8 family;
85421 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
85422-};
85423+} __do_const;
85424
85425 int sock_diag_register(const struct sock_diag_handler *h);
85426 void sock_diag_unregister(const struct sock_diag_handler *h);
85427diff --git a/include/linux/sonet.h b/include/linux/sonet.h
85428index 680f9a3..f13aeb0 100644
85429--- a/include/linux/sonet.h
85430+++ b/include/linux/sonet.h
85431@@ -7,7 +7,7 @@
85432 #include <uapi/linux/sonet.h>
85433
85434 struct k_sonet_stats {
85435-#define __HANDLE_ITEM(i) atomic_t i
85436+#define __HANDLE_ITEM(i) atomic_unchecked_t i
85437 __SONET_ITEMS
85438 #undef __HANDLE_ITEM
85439 };
85440diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
85441index 07d8e53..dc934c9 100644
85442--- a/include/linux/sunrpc/addr.h
85443+++ b/include/linux/sunrpc/addr.h
85444@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
85445 {
85446 switch (sap->sa_family) {
85447 case AF_INET:
85448- return ntohs(((struct sockaddr_in *)sap)->sin_port);
85449+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
85450 case AF_INET6:
85451- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
85452+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
85453 }
85454 return 0;
85455 }
85456@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
85457 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
85458 const struct sockaddr *src)
85459 {
85460- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
85461+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
85462 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
85463
85464 dsin->sin_family = ssin->sin_family;
85465@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
85466 if (sa->sa_family != AF_INET6)
85467 return 0;
85468
85469- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
85470+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
85471 }
85472
85473 #endif /* _LINUX_SUNRPC_ADDR_H */
85474diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
85475index 598ba80..d90cba6 100644
85476--- a/include/linux/sunrpc/clnt.h
85477+++ b/include/linux/sunrpc/clnt.h
85478@@ -100,7 +100,7 @@ struct rpc_procinfo {
85479 unsigned int p_timer; /* Which RTT timer to use */
85480 u32 p_statidx; /* Which procedure to account */
85481 const char * p_name; /* name of procedure */
85482-};
85483+} __do_const;
85484
85485 #ifdef __KERNEL__
85486
85487diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
85488index 6f22cfe..9fd0909 100644
85489--- a/include/linux/sunrpc/svc.h
85490+++ b/include/linux/sunrpc/svc.h
85491@@ -420,7 +420,7 @@ struct svc_procedure {
85492 unsigned int pc_count; /* call count */
85493 unsigned int pc_cachetype; /* cache info (NFS) */
85494 unsigned int pc_xdrressize; /* maximum size of XDR reply */
85495-};
85496+} __do_const;
85497
85498 /*
85499 * Function prototypes.
85500diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
85501index 975da75..318c083 100644
85502--- a/include/linux/sunrpc/svc_rdma.h
85503+++ b/include/linux/sunrpc/svc_rdma.h
85504@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
85505 extern unsigned int svcrdma_max_requests;
85506 extern unsigned int svcrdma_max_req_size;
85507
85508-extern atomic_t rdma_stat_recv;
85509-extern atomic_t rdma_stat_read;
85510-extern atomic_t rdma_stat_write;
85511-extern atomic_t rdma_stat_sq_starve;
85512-extern atomic_t rdma_stat_rq_starve;
85513-extern atomic_t rdma_stat_rq_poll;
85514-extern atomic_t rdma_stat_rq_prod;
85515-extern atomic_t rdma_stat_sq_poll;
85516-extern atomic_t rdma_stat_sq_prod;
85517+extern atomic_unchecked_t rdma_stat_recv;
85518+extern atomic_unchecked_t rdma_stat_read;
85519+extern atomic_unchecked_t rdma_stat_write;
85520+extern atomic_unchecked_t rdma_stat_sq_starve;
85521+extern atomic_unchecked_t rdma_stat_rq_starve;
85522+extern atomic_unchecked_t rdma_stat_rq_poll;
85523+extern atomic_unchecked_t rdma_stat_rq_prod;
85524+extern atomic_unchecked_t rdma_stat_sq_poll;
85525+extern atomic_unchecked_t rdma_stat_sq_prod;
85526
85527 #define RPCRDMA_VERSION 1
85528
85529diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
85530index 8d71d65..f79586e 100644
85531--- a/include/linux/sunrpc/svcauth.h
85532+++ b/include/linux/sunrpc/svcauth.h
85533@@ -120,7 +120,7 @@ struct auth_ops {
85534 int (*release)(struct svc_rqst *rq);
85535 void (*domain_release)(struct auth_domain *);
85536 int (*set_client)(struct svc_rqst *rq);
85537-};
85538+} __do_const;
85539
85540 #define SVC_GARBAGE 1
85541 #define SVC_SYSERR 2
85542diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
85543index e7a018e..49f8b17 100644
85544--- a/include/linux/swiotlb.h
85545+++ b/include/linux/swiotlb.h
85546@@ -60,7 +60,8 @@ extern void
85547
85548 extern void
85549 swiotlb_free_coherent(struct device *hwdev, size_t size,
85550- void *vaddr, dma_addr_t dma_handle);
85551+ void *vaddr, dma_addr_t dma_handle,
85552+ struct dma_attrs *attrs);
85553
85554 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
85555 unsigned long offset, size_t size,
85556diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
85557index 85893d7..e78c660 100644
85558--- a/include/linux/syscalls.h
85559+++ b/include/linux/syscalls.h
85560@@ -102,7 +102,12 @@ union bpf_attr;
85561 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
85562 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
85563 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
85564-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
85565+#define __SC_LONG(t, a) __typeof__( \
85566+ __builtin_choose_expr( \
85567+ sizeof(t) > sizeof(int), \
85568+ (t) 0, \
85569+ __builtin_choose_expr(__type_is_unsigned(t), 0UL, 0L) \
85570+ )) a
85571 #define __SC_CAST(t, a) (t) a
85572 #define __SC_ARGS(t, a) a
85573 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
85574@@ -384,11 +389,11 @@ asmlinkage long sys_sync(void);
85575 asmlinkage long sys_fsync(unsigned int fd);
85576 asmlinkage long sys_fdatasync(unsigned int fd);
85577 asmlinkage long sys_bdflush(int func, long data);
85578-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
85579- char __user *type, unsigned long flags,
85580+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
85581+ const char __user *type, unsigned long flags,
85582 void __user *data);
85583-asmlinkage long sys_umount(char __user *name, int flags);
85584-asmlinkage long sys_oldumount(char __user *name);
85585+asmlinkage long sys_umount(const char __user *name, int flags);
85586+asmlinkage long sys_oldumount(const char __user *name);
85587 asmlinkage long sys_truncate(const char __user *path, long length);
85588 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
85589 asmlinkage long sys_stat(const char __user *filename,
85590@@ -600,7 +605,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
85591 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
85592 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
85593 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
85594- struct sockaddr __user *, int);
85595+ struct sockaddr __user *, int) __intentional_overflow(0);
85596 asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
85597 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
85598 unsigned int vlen, unsigned flags);
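[Note on the syscalls.h hunks above] The __SC_LONG() rewrite changes how syscall arguments widen to register width: a type wider than int keeps its own width, while int-sized and narrower arguments widen to long or unsigned long according to their signedness, so sign extension happens only where the C type calls for it (the old definition widened everything non-64-bit to a signed long). __type_is_unsigned() is assumed to be supplied elsewhere in the patch; one plausible shape for integer arguments, shown purely for illustration:

/* illustrative only: nonzero when type t is unsigned, judged by
 * how (t)-1 compares against zero */
#define __type_is_unsigned(t)	((t)-1 > 0)

With that in hand, __SC_LONG(unsigned int, a) declares a as unsigned long while __SC_LONG(int, a) declares it as long.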
85599diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
85600index 27b3b0b..e093dd9 100644
85601--- a/include/linux/syscore_ops.h
85602+++ b/include/linux/syscore_ops.h
85603@@ -16,7 +16,7 @@ struct syscore_ops {
85604 int (*suspend)(void);
85605 void (*resume)(void);
85606 void (*shutdown)(void);
85607-};
85608+} __do_const;
85609
85610 extern void register_syscore_ops(struct syscore_ops *ops);
85611 extern void unregister_syscore_ops(struct syscore_ops *ops);
85612diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
85613index b7361f8..341a15a 100644
85614--- a/include/linux/sysctl.h
85615+++ b/include/linux/sysctl.h
85616@@ -39,6 +39,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
85617
85618 extern int proc_dostring(struct ctl_table *, int,
85619 void __user *, size_t *, loff_t *);
85620+extern int proc_dostring_modpriv(struct ctl_table *, int,
85621+ void __user *, size_t *, loff_t *);
85622 extern int proc_dointvec(struct ctl_table *, int,
85623 void __user *, size_t *, loff_t *);
85624 extern int proc_dointvec_minmax(struct ctl_table *, int,
85625@@ -113,7 +115,8 @@ struct ctl_table
85626 struct ctl_table_poll *poll;
85627 void *extra1;
85628 void *extra2;
85629-};
85630+} __do_const __randomize_layout;
85631+typedef struct ctl_table __no_const ctl_table_no_const;
85632
85633 struct ctl_node {
85634 struct rb_node node;
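[Note on the sysctl.h hunk above] The __do_const plus _no_const typedef pairing seen here (and again on the sysfs, netfilter and ipvs structures below) is the constify-plugin pattern: instances of mostly-function-pointer structs are forced into read-only data, and the __no_const typedef opts out the few tables that really are built or rewritten at runtime. The spellings below show how such a plugin is typically wired up and are an assumption, since the real definitions sit outside this excerpt:

#ifdef CONSTIFY_PLUGIN
#define __do_const	__attribute__((do_const))
#define __no_const	__attribute__((no_const))
#else
#define __do_const
#define __no_const
#endif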
85635diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
85636index ddad161..a3efd26 100644
85637--- a/include/linux/sysfs.h
85638+++ b/include/linux/sysfs.h
85639@@ -34,7 +34,8 @@ struct attribute {
85640 struct lock_class_key *key;
85641 struct lock_class_key skey;
85642 #endif
85643-};
85644+} __do_const;
85645+typedef struct attribute __no_const attribute_no_const;
85646
85647 /**
85648 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
85649@@ -63,7 +64,8 @@ struct attribute_group {
85650 struct attribute *, int);
85651 struct attribute **attrs;
85652 struct bin_attribute **bin_attrs;
85653-};
85654+} __do_const;
85655+typedef struct attribute_group __no_const attribute_group_no_const;
85656
85657 /**
85658 * Use these macros to make defining attributes easier. See include/linux/device.h
85659@@ -137,7 +139,8 @@ struct bin_attribute {
85660 char *, loff_t, size_t);
85661 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
85662 struct vm_area_struct *vma);
85663-};
85664+} __do_const;
85665+typedef struct bin_attribute __no_const bin_attribute_no_const;
85666
85667 /**
85668 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
85669diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
85670index 387fa7d..3fcde6b 100644
85671--- a/include/linux/sysrq.h
85672+++ b/include/linux/sysrq.h
85673@@ -16,6 +16,7 @@
85674
85675 #include <linux/errno.h>
85676 #include <linux/types.h>
85677+#include <linux/compiler.h>
85678
85679 /* Possible values of bitmask for enabling sysrq functions */
85680 /* 0x0001 is reserved for enable everything */
85681@@ -33,7 +34,7 @@ struct sysrq_key_op {
85682 char *help_msg;
85683 char *action_msg;
85684 int enable_mask;
85685-};
85686+} __do_const;
85687
85688 #ifdef CONFIG_MAGIC_SYSRQ
85689
85690diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
85691index ff307b5..f1a4468 100644
85692--- a/include/linux/thread_info.h
85693+++ b/include/linux/thread_info.h
85694@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
85695 #error "no set_restore_sigmask() provided and default one won't work"
85696 #endif
85697
85698+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
85699+
85700+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
85701+{
85702+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
85703+}
85704+
85705 #endif /* __KERNEL__ */
85706
85707 #endif /* _LINUX_THREAD_INFO_H */
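[Note on the thread_info.h hunk above] check_object_size() is the entry point for the usercopy object-size validation the patch adds: __builtin_constant_p() lets the constant-size case be told apart from the variable-size one, and __check_object_size(), defined elsewhere in the patch, does the actual heap and stack bounds walking. A hypothetical caller showing where the hook sits in a usercopy path; the wrapper name is invented:

static inline unsigned long
copy_from_user_checked(void *to, const void __user *from, unsigned long n)
{
	check_object_size(to, n, false);	/* false: destination is kernel memory */
	return copy_from_user(to, from, n);
}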
85708diff --git a/include/linux/tty.h b/include/linux/tty.h
85709index 7d66ae5..0327149 100644
85710--- a/include/linux/tty.h
85711+++ b/include/linux/tty.h
85712@@ -202,7 +202,7 @@ struct tty_port {
85713 const struct tty_port_operations *ops; /* Port operations */
85714 spinlock_t lock; /* Lock protecting tty field */
85715 int blocked_open; /* Waiting to open */
85716- int count; /* Usage count */
85717+ atomic_t count; /* Usage count */
85718 wait_queue_head_t open_wait; /* Open waiters */
85719 wait_queue_head_t close_wait; /* Close waiters */
85720 wait_queue_head_t delta_msr_wait; /* Modem status change */
85721@@ -290,7 +290,7 @@ struct tty_struct {
85722 /* If the tty has a pending do_SAK, queue it here - akpm */
85723 struct work_struct SAK_work;
85724 struct tty_port *port;
85725-};
85726+} __randomize_layout;
85727
85728 /* Each of a tty's open files has private_data pointing to tty_file_private */
85729 struct tty_file_private {
85730@@ -549,7 +549,7 @@ extern int tty_port_open(struct tty_port *port,
85731 struct tty_struct *tty, struct file *filp);
85732 static inline int tty_port_users(struct tty_port *port)
85733 {
85734- return port->count + port->blocked_open;
85735+ return atomic_read(&port->count) + port->blocked_open;
85736 }
85737
85738 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
85739diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
85740index 92e337c..f46757b 100644
85741--- a/include/linux/tty_driver.h
85742+++ b/include/linux/tty_driver.h
85743@@ -291,7 +291,7 @@ struct tty_operations {
85744 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
85745 #endif
85746 const struct file_operations *proc_fops;
85747-};
85748+} __do_const __randomize_layout;
85749
85750 struct tty_driver {
85751 int magic; /* magic number for this structure */
85752@@ -325,7 +325,7 @@ struct tty_driver {
85753
85754 const struct tty_operations *ops;
85755 struct list_head tty_drivers;
85756-};
85757+} __randomize_layout;
85758
85759 extern struct list_head tty_drivers;
85760
85761diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
85762index 00c9d68..bc0188b 100644
85763--- a/include/linux/tty_ldisc.h
85764+++ b/include/linux/tty_ldisc.h
85765@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
85766
85767 struct module *owner;
85768
85769- int refcount;
85770+ atomic_t refcount;
85771 };
85772
85773 struct tty_ldisc {
85774diff --git a/include/linux/types.h b/include/linux/types.h
85775index a0bb704..f511c77 100644
85776--- a/include/linux/types.h
85777+++ b/include/linux/types.h
85778@@ -177,10 +177,26 @@ typedef struct {
85779 int counter;
85780 } atomic_t;
85781
85782+#ifdef CONFIG_PAX_REFCOUNT
85783+typedef struct {
85784+ int counter;
85785+} atomic_unchecked_t;
85786+#else
85787+typedef atomic_t atomic_unchecked_t;
85788+#endif
85789+
85790 #ifdef CONFIG_64BIT
85791 typedef struct {
85792 long counter;
85793 } atomic64_t;
85794+
85795+#ifdef CONFIG_PAX_REFCOUNT
85796+typedef struct {
85797+ long counter;
85798+} atomic64_unchecked_t;
85799+#else
85800+typedef atomic64_t atomic64_unchecked_t;
85801+#endif
85802 #endif
85803
85804 struct list_head {
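[Note on the types.h hunk above] This hunk is the root of the atomic_t / atomic_unchecked_t split used across the whole patch: under PAX_REFCOUNT the ordinary atomic ops gain overflow detection, so counters that are allowed to wrap (statistics, generation ids, sequence numbers) migrate to the _unchecked type, whose layout is identical and whose operations stay plain. A minimal sketch of the unchecked side, with a compiler builtin standing in for the real per-arch implementation:

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* deliberately no overflow trap: wrapping is expected here */
	__sync_fetch_and_add(&v->counter, 1);
}

Without CONFIG_PAX_REFCOUNT the typedef collapses back onto atomic_t, so the many atomic_*_unchecked() call sites this patch introduces cost nothing on an unhardened build.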
85805diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
85806index ecd3319..8a36ded 100644
85807--- a/include/linux/uaccess.h
85808+++ b/include/linux/uaccess.h
85809@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
85810 long ret; \
85811 mm_segment_t old_fs = get_fs(); \
85812 \
85813- set_fs(KERNEL_DS); \
85814 pagefault_disable(); \
85815- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
85816- pagefault_enable(); \
85817+ set_fs(KERNEL_DS); \
85818+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
85819 set_fs(old_fs); \
85820+ pagefault_enable(); \
85821 ret; \
85822 })
85823
85824diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
85825index 2d1f9b6..d7a9fce 100644
85826--- a/include/linux/uidgid.h
85827+++ b/include/linux/uidgid.h
85828@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
85829
85830 #endif /* CONFIG_USER_NS */
85831
85832+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
85833+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
85834+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
85835+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
85836+
85837 #endif /* _LINUX_UIDGID_H */
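[Note on the uidgid.h hunk above] gr_is_global_root() compares a kuid_t against GLOBAL_ROOT_UID directly, while GR_GLOBAL_UID()/GR_GLOBAL_GID() munge the id into the initial namespace, which is what log and audit output wants. A small illustrative check; the function name and the 1000 cutoff are invented:

static bool example_audit_uid(const struct cred *cred)
{
	if (gr_is_global_root(cred->uid))
		return true;
	/* GR_GLOBAL_UID() yields the init-namespace uid_t, handy for logs */
	return GR_GLOBAL_UID(cred->uid) < 1000;
}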
85838diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
85839index 32c0e83..671eb35 100644
85840--- a/include/linux/uio_driver.h
85841+++ b/include/linux/uio_driver.h
85842@@ -67,7 +67,7 @@ struct uio_device {
85843 struct module *owner;
85844 struct device *dev;
85845 int minor;
85846- atomic_t event;
85847+ atomic_unchecked_t event;
85848 struct fasync_struct *async_queue;
85849 wait_queue_head_t wait;
85850 struct uio_info *info;
85851diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
85852index 99c1b4d..562e6f3 100644
85853--- a/include/linux/unaligned/access_ok.h
85854+++ b/include/linux/unaligned/access_ok.h
85855@@ -4,34 +4,34 @@
85856 #include <linux/kernel.h>
85857 #include <asm/byteorder.h>
85858
85859-static inline u16 get_unaligned_le16(const void *p)
85860+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
85861 {
85862- return le16_to_cpup((__le16 *)p);
85863+ return le16_to_cpup((const __le16 *)p);
85864 }
85865
85866-static inline u32 get_unaligned_le32(const void *p)
85867+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
85868 {
85869- return le32_to_cpup((__le32 *)p);
85870+ return le32_to_cpup((const __le32 *)p);
85871 }
85872
85873-static inline u64 get_unaligned_le64(const void *p)
85874+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
85875 {
85876- return le64_to_cpup((__le64 *)p);
85877+ return le64_to_cpup((const __le64 *)p);
85878 }
85879
85880-static inline u16 get_unaligned_be16(const void *p)
85881+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
85882 {
85883- return be16_to_cpup((__be16 *)p);
85884+ return be16_to_cpup((const __be16 *)p);
85885 }
85886
85887-static inline u32 get_unaligned_be32(const void *p)
85888+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
85889 {
85890- return be32_to_cpup((__be32 *)p);
85891+ return be32_to_cpup((const __be32 *)p);
85892 }
85893
85894-static inline u64 get_unaligned_be64(const void *p)
85895+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
85896 {
85897- return be64_to_cpup((__be64 *)p);
85898+ return be64_to_cpup((const __be64 *)p);
85899 }
85900
85901 static inline void put_unaligned_le16(u16 val, void *p)
85902diff --git a/include/linux/usb.h b/include/linux/usb.h
85903index 058a769..c17a1c2c 100644
85904--- a/include/linux/usb.h
85905+++ b/include/linux/usb.h
85906@@ -566,7 +566,7 @@ struct usb_device {
85907 int maxchild;
85908
85909 u32 quirks;
85910- atomic_t urbnum;
85911+ atomic_unchecked_t urbnum;
85912
85913 unsigned long active_duration;
85914
85915@@ -1650,7 +1650,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
85916
85917 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
85918 __u8 request, __u8 requesttype, __u16 value, __u16 index,
85919- void *data, __u16 size, int timeout);
85920+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
85921 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
85922 void *data, int len, int *actual_length, int timeout);
85923 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
85924diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
85925index 9fd9e48..e2c5f35 100644
85926--- a/include/linux/usb/renesas_usbhs.h
85927+++ b/include/linux/usb/renesas_usbhs.h
85928@@ -39,7 +39,7 @@ enum {
85929 */
85930 struct renesas_usbhs_driver_callback {
85931 int (*notify_hotplug)(struct platform_device *pdev);
85932-};
85933+} __no_const;
85934
85935 /*
85936 * callback functions for platform
85937diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
85938index 8297e5b..0dfae27 100644
85939--- a/include/linux/user_namespace.h
85940+++ b/include/linux/user_namespace.h
85941@@ -39,7 +39,7 @@ struct user_namespace {
85942 struct key *persistent_keyring_register;
85943 struct rw_semaphore persistent_keyring_register_sem;
85944 #endif
85945-};
85946+} __randomize_layout;
85947
85948 extern struct user_namespace init_user_ns;
85949
85950diff --git a/include/linux/utsname.h b/include/linux/utsname.h
85951index 5093f58..c103e58 100644
85952--- a/include/linux/utsname.h
85953+++ b/include/linux/utsname.h
85954@@ -25,7 +25,7 @@ struct uts_namespace {
85955 struct new_utsname name;
85956 struct user_namespace *user_ns;
85957 struct ns_common ns;
85958-};
85959+} __randomize_layout;
85960 extern struct uts_namespace init_uts_ns;
85961
85962 #ifdef CONFIG_UTS_NS
85963diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
85964index 6f8fbcf..4efc177 100644
85965--- a/include/linux/vermagic.h
85966+++ b/include/linux/vermagic.h
85967@@ -25,9 +25,42 @@
85968 #define MODULE_ARCH_VERMAGIC ""
85969 #endif
85970
85971+#ifdef CONFIG_PAX_REFCOUNT
85972+#define MODULE_PAX_REFCOUNT "REFCOUNT "
85973+#else
85974+#define MODULE_PAX_REFCOUNT ""
85975+#endif
85976+
85977+#ifdef CONSTIFY_PLUGIN
85978+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
85979+#else
85980+#define MODULE_CONSTIFY_PLUGIN ""
85981+#endif
85982+
85983+#ifdef STACKLEAK_PLUGIN
85984+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
85985+#else
85986+#define MODULE_STACKLEAK_PLUGIN ""
85987+#endif
85988+
85989+#ifdef RANDSTRUCT_PLUGIN
85990+#include <generated/randomize_layout_hash.h>
85991+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
85992+#else
85993+#define MODULE_RANDSTRUCT_PLUGIN
85994+#endif
85995+
85996+#ifdef CONFIG_GRKERNSEC
85997+#define MODULE_GRSEC "GRSEC "
85998+#else
85999+#define MODULE_GRSEC ""
86000+#endif
86001+
86002 #define VERMAGIC_STRING \
86003 UTS_RELEASE " " \
86004 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
86005 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
86006- MODULE_ARCH_VERMAGIC
86007+ MODULE_ARCH_VERMAGIC \
86008+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
86009+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
86010
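[Note on the vermagic.h hunk above] The effect is that each hardening option becomes part of every module's vermagic, so a module built without the same configuration is refused at load time. For a kernel with REFCOUNT, the constify plugin and GRSEC enabled, the expanded string would look roughly like this (illustrative, not from a real build):

3.19.5-grsec SMP mod_unload REFCOUNT CONSTIFY_PLUGIN GRSEC RANDSTRUCT_PLUGIN_<hashed-seed>

Note that MODULE_RANDSTRUCT_PLUGIN's #else branch deliberately expands to nothing rather than to an empty string; that still concatenates cleanly because it is the last token in VERMAGIC_STRING.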
86011diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
86012index b483abd..af305ad 100644
86013--- a/include/linux/vga_switcheroo.h
86014+++ b/include/linux/vga_switcheroo.h
86015@@ -63,9 +63,9 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
86016
86017 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
86018
86019-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
86020+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
86021 void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
86022-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
86023+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
86024 #else
86025
86026 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
86027@@ -82,9 +82,9 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
86028
86029 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
86030
86031-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
86032+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
86033 static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
86034-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
86035+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
86036
86037 #endif
86038 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
86039diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
86040index b87696f..1d11de7 100644
86041--- a/include/linux/vmalloc.h
86042+++ b/include/linux/vmalloc.h
86043@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
86044 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
86045 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
86046 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
86047+
86048+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
86049+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
86050+#endif
86051+
86052 /* bits [20..32] reserved for arch specific ioremap internals */
86053
86054 /*
86055@@ -82,6 +87,10 @@ extern void *vmap(struct page **pages, unsigned int count,
86056 unsigned long flags, pgprot_t prot);
86057 extern void vunmap(const void *addr);
86058
86059+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86060+extern void unmap_process_stacks(struct task_struct *task);
86061+#endif
86062+
86063 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
86064 unsigned long uaddr, void *kaddr,
86065 unsigned long size);
86066@@ -142,7 +151,7 @@ extern void free_vm_area(struct vm_struct *area);
86067
86068 /* for /dev/kmem */
86069 extern long vread(char *buf, char *addr, unsigned long count);
86070-extern long vwrite(char *buf, char *addr, unsigned long count);
86071+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
86072
86073 /*
86074 * Internals. Dont't use..
86075diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
86076index 82e7db7..f8ce3d0 100644
86077--- a/include/linux/vmstat.h
86078+++ b/include/linux/vmstat.h
86079@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
86080 /*
86081 * Zone based page accounting with per cpu differentials.
86082 */
86083-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86084+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86085
86086 static inline void zone_page_state_add(long x, struct zone *zone,
86087 enum zone_stat_item item)
86088 {
86089- atomic_long_add(x, &zone->vm_stat[item]);
86090- atomic_long_add(x, &vm_stat[item]);
86091+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
86092+ atomic_long_add_unchecked(x, &vm_stat[item]);
86093 }
86094
86095-static inline unsigned long global_page_state(enum zone_stat_item item)
86096+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
86097 {
86098- long x = atomic_long_read(&vm_stat[item]);
86099+ long x = atomic_long_read_unchecked(&vm_stat[item]);
86100 #ifdef CONFIG_SMP
86101 if (x < 0)
86102 x = 0;
86103@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
86104 return x;
86105 }
86106
86107-static inline unsigned long zone_page_state(struct zone *zone,
86108+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
86109 enum zone_stat_item item)
86110 {
86111- long x = atomic_long_read(&zone->vm_stat[item]);
86112+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
86113 #ifdef CONFIG_SMP
86114 if (x < 0)
86115 x = 0;
86116@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
86117 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
86118 enum zone_stat_item item)
86119 {
86120- long x = atomic_long_read(&zone->vm_stat[item]);
86121+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
86122
86123 #ifdef CONFIG_SMP
86124 int cpu;
86125@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
86126
86127 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
86128 {
86129- atomic_long_inc(&zone->vm_stat[item]);
86130- atomic_long_inc(&vm_stat[item]);
86131+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
86132+ atomic_long_inc_unchecked(&vm_stat[item]);
86133 }
86134
86135 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
86136 {
86137- atomic_long_dec(&zone->vm_stat[item]);
86138- atomic_long_dec(&vm_stat[item]);
86139+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
86140+ atomic_long_dec_unchecked(&vm_stat[item]);
86141 }
86142
86143 static inline void __inc_zone_page_state(struct page *page,
86144diff --git a/include/linux/xattr.h b/include/linux/xattr.h
86145index 91b0a68..0e9adf6 100644
86146--- a/include/linux/xattr.h
86147+++ b/include/linux/xattr.h
86148@@ -28,7 +28,7 @@ struct xattr_handler {
86149 size_t size, int handler_flags);
86150 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
86151 size_t size, int flags, int handler_flags);
86152-};
86153+} __do_const;
86154
86155 struct xattr {
86156 const char *name;
86157@@ -37,6 +37,9 @@ struct xattr {
86158 };
86159
86160 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
86161+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
86162+ssize_t pax_getxattr(struct dentry *, void *, size_t);
86163+#endif
86164 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
86165 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
86166 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
86167diff --git a/include/linux/zlib.h b/include/linux/zlib.h
86168index 92dbbd3..13ab0b3 100644
86169--- a/include/linux/zlib.h
86170+++ b/include/linux/zlib.h
86171@@ -31,6 +31,7 @@
86172 #define _ZLIB_H
86173
86174 #include <linux/zconf.h>
86175+#include <linux/compiler.h>
86176
86177 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
86178 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
86179@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
86180
86181 /* basic functions */
86182
86183-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
86184+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
86185 /*
86186 Returns the number of bytes that needs to be allocated for a per-
86187 stream workspace with the specified parameters. A pointer to this
86188diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
86189index eb76cfd..9fd0e7c 100644
86190--- a/include/media/v4l2-dev.h
86191+++ b/include/media/v4l2-dev.h
86192@@ -75,7 +75,7 @@ struct v4l2_file_operations {
86193 int (*mmap) (struct file *, struct vm_area_struct *);
86194 int (*open) (struct file *);
86195 int (*release) (struct file *);
86196-};
86197+} __do_const;
86198
86199 /*
86200 * Newer version of video_device, handled by videodev2.c
86201diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
86202index ffb69da..040393e 100644
86203--- a/include/media/v4l2-device.h
86204+++ b/include/media/v4l2-device.h
86205@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
86206 this function returns 0. If the name ends with a digit (e.g. cx18),
86207 then the name will be set to cx18-0 since cx180 looks really odd. */
86208 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
86209- atomic_t *instance);
86210+ atomic_unchecked_t *instance);
86211
86212 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
86213 Since the parent disappears this ensures that v4l2_dev doesn't have an
86214diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
86215index 2a25dec..bf6dd8a 100644
86216--- a/include/net/9p/transport.h
86217+++ b/include/net/9p/transport.h
86218@@ -62,7 +62,7 @@ struct p9_trans_module {
86219 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
86220 int (*zc_request)(struct p9_client *, struct p9_req_t *,
86221 char *, char *, int , int, int, int);
86222-};
86223+} __do_const;
86224
86225 void v9fs_register_trans(struct p9_trans_module *m);
86226 void v9fs_unregister_trans(struct p9_trans_module *m);
86227diff --git a/include/net/af_unix.h b/include/net/af_unix.h
86228index a175ba4..196eb8242 100644
86229--- a/include/net/af_unix.h
86230+++ b/include/net/af_unix.h
86231@@ -36,7 +36,7 @@ struct unix_skb_parms {
86232 u32 secid; /* Security ID */
86233 #endif
86234 u32 consumed;
86235-};
86236+} __randomize_layout;
86237
86238 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
86239 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
86240diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
86241index d1bb342..e12f7d2 100644
86242--- a/include/net/bluetooth/l2cap.h
86243+++ b/include/net/bluetooth/l2cap.h
86244@@ -608,7 +608,7 @@ struct l2cap_ops {
86245 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
86246 unsigned long hdr_len,
86247 unsigned long len, int nb);
86248-};
86249+} __do_const;
86250
86251 struct l2cap_conn {
86252 struct hci_conn *hcon;
86253diff --git a/include/net/bonding.h b/include/net/bonding.h
86254index 983a94b..7aa9b16 100644
86255--- a/include/net/bonding.h
86256+++ b/include/net/bonding.h
86257@@ -647,7 +647,7 @@ extern struct rtnl_link_ops bond_link_ops;
86258
86259 static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
86260 {
86261- atomic_long_inc(&dev->tx_dropped);
86262+ atomic_long_inc_unchecked(&dev->tx_dropped);
86263 dev_kfree_skb_any(skb);
86264 }
86265
86266diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
86267index f2ae33d..c457cf0 100644
86268--- a/include/net/caif/cfctrl.h
86269+++ b/include/net/caif/cfctrl.h
86270@@ -52,7 +52,7 @@ struct cfctrl_rsp {
86271 void (*radioset_rsp)(void);
86272 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
86273 struct cflayer *client_layer);
86274-};
86275+} __no_const;
86276
86277 /* Link Setup Parameters for CAIF-Links. */
86278 struct cfctrl_link_param {
86279@@ -101,8 +101,8 @@ struct cfctrl_request_info {
86280 struct cfctrl {
86281 struct cfsrvl serv;
86282 struct cfctrl_rsp res;
86283- atomic_t req_seq_no;
86284- atomic_t rsp_seq_no;
86285+ atomic_unchecked_t req_seq_no;
86286+ atomic_unchecked_t rsp_seq_no;
86287 struct list_head list;
86288 /* Protects from simultaneous access to first_req list */
86289 spinlock_t info_list_lock;
86290diff --git a/include/net/flow.h b/include/net/flow.h
86291index 8109a15..504466d 100644
86292--- a/include/net/flow.h
86293+++ b/include/net/flow.h
86294@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
86295
86296 void flow_cache_flush(struct net *net);
86297 void flow_cache_flush_deferred(struct net *net);
86298-extern atomic_t flow_cache_genid;
86299+extern atomic_unchecked_t flow_cache_genid;
86300
86301 #endif
86302diff --git a/include/net/genetlink.h b/include/net/genetlink.h
86303index 6c92415..3a352d8 100644
86304--- a/include/net/genetlink.h
86305+++ b/include/net/genetlink.h
86306@@ -130,7 +130,7 @@ struct genl_ops {
86307 u8 cmd;
86308 u8 internal_flags;
86309 u8 flags;
86310-};
86311+} __do_const;
86312
86313 int __genl_register_family(struct genl_family *family);
86314
86315diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
86316index 734d9b5..48a9a4b 100644
86317--- a/include/net/gro_cells.h
86318+++ b/include/net/gro_cells.h
86319@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
86320 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
86321
86322 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
86323- atomic_long_inc(&dev->rx_dropped);
86324+ atomic_long_inc_unchecked(&dev->rx_dropped);
86325 kfree_skb(skb);
86326 return;
86327 }
86328diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
86329index 848e85c..051c7de 100644
86330--- a/include/net/inet_connection_sock.h
86331+++ b/include/net/inet_connection_sock.h
86332@@ -63,7 +63,7 @@ struct inet_connection_sock_af_ops {
86333 int (*bind_conflict)(const struct sock *sk,
86334 const struct inet_bind_bucket *tb, bool relax);
86335 void (*mtu_reduced)(struct sock *sk);
86336-};
86337+} __do_const;
86338
86339 /** inet_connection_sock - INET connection oriented sock
86340 *
86341diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
86342index 80479ab..0c3f647 100644
86343--- a/include/net/inetpeer.h
86344+++ b/include/net/inetpeer.h
86345@@ -47,7 +47,7 @@ struct inet_peer {
86346 */
86347 union {
86348 struct {
86349- atomic_t rid; /* Frag reception counter */
86350+ atomic_unchecked_t rid; /* Frag reception counter */
86351 };
86352 struct rcu_head rcu;
86353 struct inet_peer *gc_next;
86354diff --git a/include/net/ip.h b/include/net/ip.h
86355index 09cf5ae..ab62fcf 100644
86356--- a/include/net/ip.h
86357+++ b/include/net/ip.h
86358@@ -317,7 +317,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
86359 }
86360 }
86361
86362-u32 ip_idents_reserve(u32 hash, int segs);
86363+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
86364 void __ip_select_ident(struct iphdr *iph, int segs);
86365
86366 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
86367diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
86368index 09a819e..3ab9e14 100644
86369--- a/include/net/ip_fib.h
86370+++ b/include/net/ip_fib.h
86371@@ -170,7 +170,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
86372
86373 #define FIB_RES_SADDR(net, res) \
86374 ((FIB_RES_NH(res).nh_saddr_genid == \
86375- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
86376+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
86377 FIB_RES_NH(res).nh_saddr : \
86378 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
86379 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
86380diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
86381index 615b20b..fd4cbd8 100644
86382--- a/include/net/ip_vs.h
86383+++ b/include/net/ip_vs.h
86384@@ -534,7 +534,7 @@ struct ip_vs_conn {
86385 struct ip_vs_conn *control; /* Master control connection */
86386 atomic_t n_control; /* Number of controlled ones */
86387 struct ip_vs_dest *dest; /* real server */
86388- atomic_t in_pkts; /* incoming packet counter */
86389+ atomic_unchecked_t in_pkts; /* incoming packet counter */
86390
86391 /* Packet transmitter for different forwarding methods. If it
86392 * mangles the packet, it must return NF_DROP or better NF_STOLEN,
86393@@ -682,7 +682,7 @@ struct ip_vs_dest {
86394 __be16 port; /* port number of the server */
86395 union nf_inet_addr addr; /* IP address of the server */
86396 volatile unsigned int flags; /* dest status flags */
86397- atomic_t conn_flags; /* flags to copy to conn */
86398+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
86399 atomic_t weight; /* server weight */
86400
86401 atomic_t refcnt; /* reference counter */
86402@@ -928,11 +928,11 @@ struct netns_ipvs {
86403 /* ip_vs_lblc */
86404 int sysctl_lblc_expiration;
86405 struct ctl_table_header *lblc_ctl_header;
86406- struct ctl_table *lblc_ctl_table;
86407+ ctl_table_no_const *lblc_ctl_table;
86408 /* ip_vs_lblcr */
86409 int sysctl_lblcr_expiration;
86410 struct ctl_table_header *lblcr_ctl_header;
86411- struct ctl_table *lblcr_ctl_table;
86412+ ctl_table_no_const *lblcr_ctl_table;
86413 /* ip_vs_est */
86414 struct list_head est_list; /* estimator list */
86415 spinlock_t est_lock;
86416diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
86417index 8d4f588..2e37ad2 100644
86418--- a/include/net/irda/ircomm_tty.h
86419+++ b/include/net/irda/ircomm_tty.h
86420@@ -33,6 +33,7 @@
86421 #include <linux/termios.h>
86422 #include <linux/timer.h>
86423 #include <linux/tty.h> /* struct tty_struct */
86424+#include <asm/local.h>
86425
86426 #include <net/irda/irias_object.h>
86427 #include <net/irda/ircomm_core.h>
86428diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
86429index 714cc9a..ea05f3e 100644
86430--- a/include/net/iucv/af_iucv.h
86431+++ b/include/net/iucv/af_iucv.h
86432@@ -149,7 +149,7 @@ struct iucv_skb_cb {
86433 struct iucv_sock_list {
86434 struct hlist_head head;
86435 rwlock_t lock;
86436- atomic_t autobind_name;
86437+ atomic_unchecked_t autobind_name;
86438 };
86439
86440 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
86441diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
86442index f3be818..bf46196 100644
86443--- a/include/net/llc_c_ac.h
86444+++ b/include/net/llc_c_ac.h
86445@@ -87,7 +87,7 @@
86446 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
86447 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
86448
86449-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86450+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86451
86452 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
86453 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
86454diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
86455index 3948cf1..83b28c4 100644
86456--- a/include/net/llc_c_ev.h
86457+++ b/include/net/llc_c_ev.h
86458@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
86459 return (struct llc_conn_state_ev *)skb->cb;
86460 }
86461
86462-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86463-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86464+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86465+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86466
86467 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
86468 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
86469diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
86470index 48f3f89..0e92c50 100644
86471--- a/include/net/llc_c_st.h
86472+++ b/include/net/llc_c_st.h
86473@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
86474 u8 next_state;
86475 const llc_conn_ev_qfyr_t *ev_qualifiers;
86476 const llc_conn_action_t *ev_actions;
86477-};
86478+} __do_const;
86479
86480 struct llc_conn_state {
86481 u8 current_state;
86482diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
86483index a61b98c..aade1eb 100644
86484--- a/include/net/llc_s_ac.h
86485+++ b/include/net/llc_s_ac.h
86486@@ -23,7 +23,7 @@
86487 #define SAP_ACT_TEST_IND 9
86488
86489 /* All action functions must look like this */
86490-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86491+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86492
86493 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
86494 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
86495diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
86496index c4359e2..76dbc4a 100644
86497--- a/include/net/llc_s_st.h
86498+++ b/include/net/llc_s_st.h
86499@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
86500 llc_sap_ev_t ev;
86501 u8 next_state;
86502 const llc_sap_action_t *ev_actions;
86503-};
86504+} __do_const;
86505
86506 struct llc_sap_state {
86507 u8 curr_state;
86508diff --git a/include/net/mac80211.h b/include/net/mac80211.h
86509index 29c7be8..746bd73 100644
86510--- a/include/net/mac80211.h
86511+++ b/include/net/mac80211.h
86512@@ -4869,7 +4869,7 @@ struct rate_control_ops {
86513 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
86514
86515 u32 (*get_expected_throughput)(void *priv_sta);
86516-};
86517+} __do_const;
86518
86519 static inline int rate_supported(struct ieee80211_sta *sta,
86520 enum ieee80211_band band,
86521diff --git a/include/net/neighbour.h b/include/net/neighbour.h
86522index 76f7084..8f36e39 100644
86523--- a/include/net/neighbour.h
86524+++ b/include/net/neighbour.h
86525@@ -163,7 +163,7 @@ struct neigh_ops {
86526 void (*error_report)(struct neighbour *, struct sk_buff *);
86527 int (*output)(struct neighbour *, struct sk_buff *);
86528 int (*connected_output)(struct neighbour *, struct sk_buff *);
86529-};
86530+} __do_const;
86531
86532 struct pneigh_entry {
86533 struct pneigh_entry *next;
86534@@ -217,7 +217,7 @@ struct neigh_table {
86535 struct neigh_statistics __percpu *stats;
86536 struct neigh_hash_table __rcu *nht;
86537 struct pneigh_entry **phash_buckets;
86538-};
86539+} __randomize_layout;
86540
86541 enum {
86542 NEIGH_ARP_TABLE = 0,
86543diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
86544index 2e8756b8..0bd0083 100644
86545--- a/include/net/net_namespace.h
86546+++ b/include/net/net_namespace.h
86547@@ -130,8 +130,8 @@ struct net {
86548 struct netns_ipvs *ipvs;
86549 #endif
86550 struct sock *diag_nlsk;
86551- atomic_t fnhe_genid;
86552-};
86553+ atomic_unchecked_t fnhe_genid;
86554+} __randomize_layout;
86555
86556 #include <linux/seq_file_net.h>
86557
86558@@ -287,7 +287,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
86559 #define __net_init __init
86560 #define __net_exit __exit_refok
86561 #define __net_initdata __initdata
86562+#ifdef CONSTIFY_PLUGIN
86563 #define __net_initconst __initconst
86564+#else
86565+#define __net_initconst __initdata
86566+#endif
86567 #endif
86568
86569 struct pernet_operations {
86570@@ -297,7 +301,7 @@ struct pernet_operations {
86571 void (*exit_batch)(struct list_head *net_exit_list);
86572 int *id;
86573 size_t size;
86574-};
86575+} __do_const;
86576
86577 /*
86578 * Use these carefully. If you implement a network device and it
86579@@ -345,12 +349,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
86580
86581 static inline int rt_genid_ipv4(struct net *net)
86582 {
86583- return atomic_read(&net->ipv4.rt_genid);
86584+ return atomic_read_unchecked(&net->ipv4.rt_genid);
86585 }
86586
86587 static inline void rt_genid_bump_ipv4(struct net *net)
86588 {
86589- atomic_inc(&net->ipv4.rt_genid);
86590+ atomic_inc_unchecked(&net->ipv4.rt_genid);
86591 }
86592
86593 extern void (*__fib6_flush_trees)(struct net *net);
86594@@ -377,12 +381,12 @@ static inline void rt_genid_bump_all(struct net *net)
86595
86596 static inline int fnhe_genid(struct net *net)
86597 {
86598- return atomic_read(&net->fnhe_genid);
86599+ return atomic_read_unchecked(&net->fnhe_genid);
86600 }
86601
86602 static inline void fnhe_genid_bump(struct net *net)
86603 {
86604- atomic_inc(&net->fnhe_genid);
86605+ atomic_inc_unchecked(&net->fnhe_genid);
86606 }
86607
86608 #endif /* __NET_NET_NAMESPACE_H */
86609diff --git a/include/net/netlink.h b/include/net/netlink.h
86610index 6415835..ab96d87 100644
86611--- a/include/net/netlink.h
86612+++ b/include/net/netlink.h
86613@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
86614 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
86615 {
86616 if (mark)
86617- skb_trim(skb, (unsigned char *) mark - skb->data);
86618+ skb_trim(skb, (const unsigned char *) mark - skb->data);
86619 }
86620
86621 /**
86622diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
86623index 29d6a94..235d3d84 100644
86624--- a/include/net/netns/conntrack.h
86625+++ b/include/net/netns/conntrack.h
86626@@ -14,10 +14,10 @@ struct nf_conntrack_ecache;
86627 struct nf_proto_net {
86628 #ifdef CONFIG_SYSCTL
86629 struct ctl_table_header *ctl_table_header;
86630- struct ctl_table *ctl_table;
86631+ ctl_table_no_const *ctl_table;
86632 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
86633 struct ctl_table_header *ctl_compat_header;
86634- struct ctl_table *ctl_compat_table;
86635+ ctl_table_no_const *ctl_compat_table;
86636 #endif
86637 #endif
86638 unsigned int users;
86639@@ -60,7 +60,7 @@ struct nf_ip_net {
86640 struct nf_icmp_net icmpv6;
86641 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
86642 struct ctl_table_header *ctl_table_header;
86643- struct ctl_table *ctl_table;
86644+ ctl_table_no_const *ctl_table;
86645 #endif
86646 };
86647
86648diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
86649index 0ffef1a..2ce1ceb 100644
86650--- a/include/net/netns/ipv4.h
86651+++ b/include/net/netns/ipv4.h
86652@@ -84,7 +84,7 @@ struct netns_ipv4 {
86653
86654 struct ping_group_range ping_group_range;
86655
86656- atomic_t dev_addr_genid;
86657+ atomic_unchecked_t dev_addr_genid;
86658
86659 #ifdef CONFIG_SYSCTL
86660 unsigned long *sysctl_local_reserved_ports;
86661@@ -98,6 +98,6 @@ struct netns_ipv4 {
86662 struct fib_rules_ops *mr_rules_ops;
86663 #endif
86664 #endif
86665- atomic_t rt_genid;
86666+ atomic_unchecked_t rt_genid;
86667 };
86668 #endif
86669diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
86670index 69ae41f..4f94868 100644
86671--- a/include/net/netns/ipv6.h
86672+++ b/include/net/netns/ipv6.h
86673@@ -75,8 +75,8 @@ struct netns_ipv6 {
86674 struct fib_rules_ops *mr6_rules_ops;
86675 #endif
86676 #endif
86677- atomic_t dev_addr_genid;
86678- atomic_t fib6_sernum;
86679+ atomic_unchecked_t dev_addr_genid;
86680+ atomic_unchecked_t fib6_sernum;
86681 };
86682
86683 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
86684diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
86685index 730d82a..045f2c4 100644
86686--- a/include/net/netns/xfrm.h
86687+++ b/include/net/netns/xfrm.h
86688@@ -78,7 +78,7 @@ struct netns_xfrm {
86689
86690 /* flow cache part */
86691 struct flow_cache flow_cache_global;
86692- atomic_t flow_cache_genid;
86693+ atomic_unchecked_t flow_cache_genid;
86694 struct list_head flow_cache_gc_list;
86695 spinlock_t flow_cache_gc_lock;
86696 struct work_struct flow_cache_gc_work;
86697diff --git a/include/net/ping.h b/include/net/ping.h
86698index f074060..830fba0 100644
86699--- a/include/net/ping.h
86700+++ b/include/net/ping.h
86701@@ -54,7 +54,7 @@ struct ping_iter_state {
86702
86703 extern struct proto ping_prot;
86704 #if IS_ENABLED(CONFIG_IPV6)
86705-extern struct pingv6_ops pingv6_ops;
86706+extern struct pingv6_ops *pingv6_ops;
86707 #endif
86708
86709 struct pingfakehdr {
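Turning the extern struct pingv6_ops into a pointer follows from constification: the ipv6 module can no longer fill in a writable global ops struct at load time, so the core keeps only a pointer and the module installs its own const implementation. A sketch of that registration shape, with a simplified member standing in for the real ops:

    #include <stdio.h>

    struct pingv6_ops_model {
        void (*ipv6_recv_error)(void);
    };

    static void real_recv_error(void) { puts("ipv6 recv_error"); }

    /* the implementation itself can stay const / in .rodata */
    static const struct pingv6_ops_model ipv6_impl = {
        .ipv6_recv_error = real_recv_error,
    };

    /* core code holds only the pointer; the module sets it on init */
    static const struct pingv6_ops_model *pingv6_ops;

    int main(void)
    {
        pingv6_ops = &ipv6_impl;        /* module_init() would do this */
        pingv6_ops->ipv6_recv_error();
        return 0;
    }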
86710diff --git a/include/net/protocol.h b/include/net/protocol.h
86711index d6fcc1f..ca277058 100644
86712--- a/include/net/protocol.h
86713+++ b/include/net/protocol.h
86714@@ -49,7 +49,7 @@ struct net_protocol {
86715 * socket lookup?
86716 */
86717 icmp_strict_tag_validation:1;
86718-};
86719+} __do_const;
86720
86721 #if IS_ENABLED(CONFIG_IPV6)
86722 struct inet6_protocol {
86723@@ -62,7 +62,7 @@ struct inet6_protocol {
86724 u8 type, u8 code, int offset,
86725 __be32 info);
86726 unsigned int flags; /* INET6_PROTO_xxx */
86727-};
86728+} __do_const;
86729
86730 #define INET6_PROTO_NOPOLICY 0x1
86731 #define INET6_PROTO_FINAL 0x2
86732diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
86733index e21b9f9..0191ef0 100644
86734--- a/include/net/rtnetlink.h
86735+++ b/include/net/rtnetlink.h
86736@@ -93,7 +93,7 @@ struct rtnl_link_ops {
86737 int (*fill_slave_info)(struct sk_buff *skb,
86738 const struct net_device *dev,
86739 const struct net_device *slave_dev);
86740-};
86741+} __do_const;
86742
86743 int __rtnl_link_register(struct rtnl_link_ops *ops);
86744 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
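__do_const, seen on net_protocol, inet6_protocol and rtnl_link_ops above, tells the constify plugin to treat every instance of the struct as const, moving these function-pointer tables into read-only memory. The payoff: an attacker with a kernel write primitive cannot redirect an ops pointer. A userspace analogue showing the same effect with an explicit const qualifier:

    #include <stdio.h>

    struct proto_ops {
        int (*handler)(int);
    };

    static int real_handler(int x) { return x + 1; }

    /* lives in .rodata; assigning ops.handler at runtime is a compile
     * error, and forcing it through a cast faults at the store */
    static const struct proto_ops ops = { .handler = real_handler };

    int main(void)
    {
        printf("%d\n", ops.handler(41));
        return 0;
    }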
86745diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
86746index 4a5b9a3..ca27d73 100644
86747--- a/include/net/sctp/checksum.h
86748+++ b/include/net/sctp/checksum.h
86749@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
86750 unsigned int offset)
86751 {
86752 struct sctphdr *sh = sctp_hdr(skb);
86753- __le32 ret, old = sh->checksum;
86754- const struct skb_checksum_ops ops = {
86755+ __le32 ret, old = sh->checksum;
86756+ static const struct skb_checksum_ops ops = {
86757 .update = sctp_csum_update,
86758 .combine = sctp_csum_combine,
86759 };
86760diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
86761index 487ef34..d457f98 100644
86762--- a/include/net/sctp/sm.h
86763+++ b/include/net/sctp/sm.h
86764@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
86765 typedef struct {
86766 sctp_state_fn_t *fn;
86767 const char *name;
86768-} sctp_sm_table_entry_t;
86769+} __do_const sctp_sm_table_entry_t;
86770
86771 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
86772 * currently in use.
86773@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
86774 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
86775
86776 /* Extern declarations for major data structures. */
86777-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
86778+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
86779
86780
86781 /* Get the size of a DATA chunk payload. */
86782diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
86783index 2bb2fcf..d17c291 100644
86784--- a/include/net/sctp/structs.h
86785+++ b/include/net/sctp/structs.h
86786@@ -509,7 +509,7 @@ struct sctp_pf {
86787 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
86788 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
86789 struct sctp_af *af;
86790-};
86791+} __do_const;
86792
86793
86794 /* Structure to track chunk fragments that have been acked, but peer
86795diff --git a/include/net/sock.h b/include/net/sock.h
86796index 2210fec..2249ad0 100644
86797--- a/include/net/sock.h
86798+++ b/include/net/sock.h
86799@@ -362,7 +362,7 @@ struct sock {
86800 unsigned int sk_napi_id;
86801 unsigned int sk_ll_usec;
86802 #endif
86803- atomic_t sk_drops;
86804+ atomic_unchecked_t sk_drops;
86805 int sk_rcvbuf;
86806
86807 struct sk_filter __rcu *sk_filter;
86808@@ -1061,7 +1061,7 @@ struct proto {
86809 void (*destroy_cgroup)(struct mem_cgroup *memcg);
86810 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
86811 #endif
86812-};
86813+} __randomize_layout;
86814
86815 /*
86816 * Bits in struct cg_proto.flags
86817@@ -1239,7 +1239,7 @@ static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
86818 page_counter_uncharge(&prot->memory_allocated, amt);
86819 }
86820
86821-static inline long
86822+static inline long __intentional_overflow(-1)
86823 sk_memory_allocated(const struct sock *sk)
86824 {
86825 struct proto *prot = sk->sk_prot;
86826@@ -1385,7 +1385,7 @@ struct sock_iocb {
86827 struct scm_cookie *scm;
86828 struct msghdr *msg, async_msg;
86829 struct kiocb *kiocb;
86830-};
86831+} __randomize_layout;
86832
86833 static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
86834 {
86835@@ -1826,7 +1826,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
86836 }
86837
86838 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
86839- char __user *from, char *to,
86840+ char __user *from, unsigned char *to,
86841 int copy, int offset)
86842 {
86843 if (skb->ip_summed == CHECKSUM_NONE) {
86844@@ -2075,7 +2075,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
86845 }
86846 }
86847
86848-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
86849+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
86850
86851 /**
86852 * sk_page_frag - return an appropriate page_frag
86853diff --git a/include/net/tcp.h b/include/net/tcp.h
86854index 9d9111e..349c847 100644
86855--- a/include/net/tcp.h
86856+++ b/include/net/tcp.h
86857@@ -516,7 +516,7 @@ void tcp_retransmit_timer(struct sock *sk);
86858 void tcp_xmit_retransmit_queue(struct sock *);
86859 void tcp_simple_retransmit(struct sock *);
86860 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
86861-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
86862+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
86863
86864 void tcp_send_probe0(struct sock *);
86865 void tcp_send_partial(struct sock *);
86866@@ -689,8 +689,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
86867 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
86868 */
86869 struct tcp_skb_cb {
86870- __u32 seq; /* Starting sequence number */
86871- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
86872+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
86873+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
86874 union {
86875 /* Note : tcp_tw_isn is used in input path only
86876 * (isn chosen by tcp_timewait_state_process())
86877@@ -715,7 +715,7 @@ struct tcp_skb_cb {
86878
86879 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
86880 /* 1 byte hole */
86881- __u32 ack_seq; /* Sequence number ACK'd */
86882+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
86883 union {
86884 struct inet_skb_parm h4;
86885 #if IS_ENABLED(CONFIG_IPV6)
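The __intentional_overflow(0) annotations on seq, end_seq and ack_seq exist because TCP sequence space is modulo 2^32 by design: wraparound in this arithmetic is correct behavior, and without the marking the size_overflow plugin would instrument or flag it. The kernel's own before() comparison shows why the wrap is load-bearing; a self-contained version:

    #include <stdint.h>
    #include <stdio.h>

    /* before(a, b): is a earlier than b in 2^32 sequence space?
     * The subtraction is *supposed* to wrap; the sign of the result
     * encodes the ordering (serial number arithmetic). */
    static int before(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) < 0;
    }

    int main(void)
    {
        uint32_t seq = 0xfffffff0u;     /* just below the wrap point */
        uint32_t end_seq = seq + 0x20;  /* wraps past zero to 0x10 */
        printf("end_seq=0x%x before? %d\n", end_seq, before(seq, end_seq));
        return 0;
    }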
86886diff --git a/include/net/xfrm.h b/include/net/xfrm.h
86887index dc4865e..152ee4c 100644
86888--- a/include/net/xfrm.h
86889+++ b/include/net/xfrm.h
86890@@ -285,7 +285,6 @@ struct xfrm_dst;
86891 struct xfrm_policy_afinfo {
86892 unsigned short family;
86893 struct dst_ops *dst_ops;
86894- void (*garbage_collect)(struct net *net);
86895 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
86896 const xfrm_address_t *saddr,
86897 const xfrm_address_t *daddr);
86898@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
86899 struct net_device *dev,
86900 const struct flowi *fl);
86901 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
86902-};
86903+} __do_const;
86904
86905 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
86906 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
86907@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
86908 int (*transport_finish)(struct sk_buff *skb,
86909 int async);
86910 void (*local_error)(struct sk_buff *skb, u32 mtu);
86911-};
86912+} __do_const;
86913
86914 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
86915 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
86916@@ -437,7 +436,7 @@ struct xfrm_mode {
86917 struct module *owner;
86918 unsigned int encap;
86919 int flags;
86920-};
86921+} __do_const;
86922
86923 /* Flags for xfrm_mode. */
86924 enum {
86925@@ -534,7 +533,7 @@ struct xfrm_policy {
86926 struct timer_list timer;
86927
86928 struct flow_cache_object flo;
86929- atomic_t genid;
86930+ atomic_unchecked_t genid;
86931 u32 priority;
86932 u32 index;
86933 struct xfrm_mark mark;
86934@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
86935 }
86936
86937 void xfrm_garbage_collect(struct net *net);
86938+void xfrm_garbage_collect_deferred(struct net *net);
86939
86940 #else
86941
86942@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
86943 static inline void xfrm_garbage_collect(struct net *net)
86944 {
86945 }
86946+static inline void xfrm_garbage_collect_deferred(struct net *net)
86947+{
86948+}
86949 #endif
86950
86951 static __inline__
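xfrm_garbage_collect_deferred() is added in both branches of the CONFIG_XFRM conditional: the real declaration and, for kernels without XFRM, an empty static inline. That is the standard idiom that keeps call sites free of #ifdefs. A runnable model (names mirror the patch; toggle the define to see the stub variant compile away):

    #include <stdio.h>

    struct net { int id; };

    #define CONFIG_XFRM 1   /* comment out to build the stub variant */

    #ifdef CONFIG_XFRM
    static void xfrm_garbage_collect_deferred(struct net *net)
    {
        printf("deferred flow-cache GC for netns %d\n", net->id);
    }
    #else
    static inline void xfrm_garbage_collect_deferred(struct net *net)
    {
        (void)net;   /* compiled out: nothing to reap without XFRM */
    }
    #endif

    int main(void)
    {
        struct net init_net = { 0 };
        xfrm_garbage_collect_deferred(&init_net);  /* caller is #ifdef-free */
        return 0;
    }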
86952diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
86953index 1017e0b..227aa4d 100644
86954--- a/include/rdma/iw_cm.h
86955+++ b/include/rdma/iw_cm.h
86956@@ -122,7 +122,7 @@ struct iw_cm_verbs {
86957 int backlog);
86958
86959 int (*destroy_listen)(struct iw_cm_id *cm_id);
86960-};
86961+} __no_const;
86962
86963 /**
86964 * iw_create_cm_id - Create an IW CM identifier.
86965diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
86966index 93d14da..734b3d8 100644
86967--- a/include/scsi/libfc.h
86968+++ b/include/scsi/libfc.h
86969@@ -771,6 +771,7 @@ struct libfc_function_template {
86970 */
86971 void (*disc_stop_final) (struct fc_lport *);
86972 };
86973+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
86974
86975 /**
86976 * struct fc_disc - Discovery context
86977@@ -875,7 +876,7 @@ struct fc_lport {
86978 struct fc_vport *vport;
86979
86980 /* Operational Information */
86981- struct libfc_function_template tt;
86982+ libfc_function_template_no_const tt;
86983 u8 link_up;
86984 u8 qfull;
86985 enum fc_lport_state state;
86986diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
86987index 3a4edd1..feb2e3e 100644
86988--- a/include/scsi/scsi_device.h
86989+++ b/include/scsi/scsi_device.h
86990@@ -185,9 +185,9 @@ struct scsi_device {
86991 unsigned int max_device_blocked; /* what device_blocked counts down from */
86992 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
86993
86994- atomic_t iorequest_cnt;
86995- atomic_t iodone_cnt;
86996- atomic_t ioerr_cnt;
86997+ atomic_unchecked_t iorequest_cnt;
86998+ atomic_unchecked_t iodone_cnt;
86999+ atomic_unchecked_t ioerr_cnt;
87000
87001 struct device sdev_gendev,
87002 sdev_dev;
87003diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
87004index 007a0bc..7188db8 100644
87005--- a/include/scsi/scsi_transport_fc.h
87006+++ b/include/scsi/scsi_transport_fc.h
87007@@ -756,7 +756,8 @@ struct fc_function_template {
87008 unsigned long show_host_system_hostname:1;
87009
87010 unsigned long disable_target_scan:1;
87011-};
87012+} __do_const;
87013+typedef struct fc_function_template __no_const fc_function_template_no_const;
87014
87015
87016 /**
87017diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
87018index 396e8f7..b037e89 100644
87019--- a/include/sound/compress_driver.h
87020+++ b/include/sound/compress_driver.h
87021@@ -129,7 +129,7 @@ struct snd_compr_ops {
87022 struct snd_compr_caps *caps);
87023 int (*get_codec_caps) (struct snd_compr_stream *stream,
87024 struct snd_compr_codec_caps *codec);
87025-};
87026+} __no_const;
87027
87028 /**
87029 * struct snd_compr: Compressed device
87030diff --git a/include/sound/soc.h b/include/sound/soc.h
87031index ac8b333..59c3692 100644
87032--- a/include/sound/soc.h
87033+++ b/include/sound/soc.h
87034@@ -853,7 +853,7 @@ struct snd_soc_codec_driver {
87035 enum snd_soc_dapm_type, int);
87036
87037 bool ignore_pmdown_time; /* Doesn't benefit from pmdown delay */
87038-};
87039+} __do_const;
87040
87041 /* SoC platform interface */
87042 struct snd_soc_platform_driver {
87043@@ -880,7 +880,7 @@ struct snd_soc_platform_driver {
87044 const struct snd_compr_ops *compr_ops;
87045
87046 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
87047-};
87048+} __do_const;
87049
87050 struct snd_soc_dai_link_component {
87051 const char *name;
87052diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
87053index 672150b..9d4bec4 100644
87054--- a/include/target/target_core_base.h
87055+++ b/include/target/target_core_base.h
87056@@ -767,7 +767,7 @@ struct se_device {
87057 atomic_long_t write_bytes;
87058 /* Active commands on this virtual SE device */
87059 atomic_t simple_cmds;
87060- atomic_t dev_ordered_id;
87061+ atomic_unchecked_t dev_ordered_id;
87062 atomic_t dev_ordered_sync;
87063 atomic_t dev_qf_count;
87064 int export_count;
87065diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
87066new file mode 100644
87067index 0000000..fb634b7
87068--- /dev/null
87069+++ b/include/trace/events/fs.h
87070@@ -0,0 +1,53 @@
87071+#undef TRACE_SYSTEM
87072+#define TRACE_SYSTEM fs
87073+
87074+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
87075+#define _TRACE_FS_H
87076+
87077+#include <linux/fs.h>
87078+#include <linux/tracepoint.h>
87079+
87080+TRACE_EVENT(do_sys_open,
87081+
87082+ TP_PROTO(const char *filename, int flags, int mode),
87083+
87084+ TP_ARGS(filename, flags, mode),
87085+
87086+ TP_STRUCT__entry(
87087+ __string( filename, filename )
87088+ __field( int, flags )
87089+ __field( int, mode )
87090+ ),
87091+
87092+ TP_fast_assign(
87093+ __assign_str(filename, filename);
87094+ __entry->flags = flags;
87095+ __entry->mode = mode;
87096+ ),
87097+
87098+ TP_printk("\"%s\" %x %o",
87099+ __get_str(filename), __entry->flags, __entry->mode)
87100+);
87101+
87102+TRACE_EVENT(open_exec,
87103+
87104+ TP_PROTO(const char *filename),
87105+
87106+ TP_ARGS(filename),
87107+
87108+ TP_STRUCT__entry(
87109+ __string( filename, filename )
87110+ ),
87111+
87112+ TP_fast_assign(
87113+ __assign_str(filename, filename);
87114+ ),
87115+
87116+ TP_printk("\"%s\"",
87117+ __get_str(filename))
87118+);
87119+
87120+#endif /* _TRACE_FS_H */
87121+
87122+/* This part must be outside protection */
87123+#include <trace/define_trace.h>
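This new header defines two tracepoints, do_sys_open and open_exec, which the rest of the grsecurity patch fires from the open and exec paths. TRACE_EVENT() expands, among other things, into a cheap trace_<name>() hook that call sites invoke. A heavily simplified userspace model of what a consumer sees (the real expansion involves tracepoint registration and per-event ring buffers, all elided here):

    #include <stdio.h>

    static int do_sys_open_enabled = 1;  /* stands in for the ftrace switch */

    /* models the trace_do_sys_open() inline that TRACE_EVENT generates;
     * the printf format mirrors the TP_printk() template above */
    static inline void trace_do_sys_open(const char *filename, int flags,
                                         int mode)
    {
        if (do_sys_open_enabled)
            printf("\"%s\" %x %o\n", filename, flags, mode);
    }

    int main(void)
    {
        trace_do_sys_open("/etc/passwd", 0x0 /* O_RDONLY */, 0);
        return 0;
    }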
87124diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
87125index 3608beb..df39d8a 100644
87126--- a/include/trace/events/irq.h
87127+++ b/include/trace/events/irq.h
87128@@ -36,7 +36,7 @@ struct softirq_action;
87129 */
87130 TRACE_EVENT(irq_handler_entry,
87131
87132- TP_PROTO(int irq, struct irqaction *action),
87133+ TP_PROTO(int irq, const struct irqaction *action),
87134
87135 TP_ARGS(irq, action),
87136
87137@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
87138 */
87139 TRACE_EVENT(irq_handler_exit,
87140
87141- TP_PROTO(int irq, struct irqaction *action, int ret),
87142+ TP_PROTO(int irq, const struct irqaction *action, int ret),
87143
87144 TP_ARGS(irq, action, ret),
87145
87146diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
87147index 7caf44c..23c6f27 100644
87148--- a/include/uapi/linux/a.out.h
87149+++ b/include/uapi/linux/a.out.h
87150@@ -39,6 +39,14 @@ enum machine_type {
87151 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
87152 };
87153
87154+/* Constants for the N_FLAGS field */
87155+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
87156+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
87157+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
87158+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
87159+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
87160+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
87161+
87162 #if !defined (N_MAGIC)
87163 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
87164 #endif
87165diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
87166index 22b6ad3..aeba37e 100644
87167--- a/include/uapi/linux/bcache.h
87168+++ b/include/uapi/linux/bcache.h
87169@@ -5,6 +5,7 @@
87170 * Bcache on disk data structures
87171 */
87172
87173+#include <linux/compiler.h>
87174 #include <asm/types.h>
87175
87176 #define BITMASK(name, type, field, offset, size) \
87177@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
87178 /* Btree keys - all units are in sectors */
87179
87180 struct bkey {
87181- __u64 high;
87182- __u64 low;
87183+ __u64 high __intentional_overflow(-1);
87184+ __u64 low __intentional_overflow(-1);
87185 __u64 ptr[];
87186 };
87187
87188diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
87189index d876736..ccce5c0 100644
87190--- a/include/uapi/linux/byteorder/little_endian.h
87191+++ b/include/uapi/linux/byteorder/little_endian.h
87192@@ -42,51 +42,51 @@
87193
87194 static inline __le64 __cpu_to_le64p(const __u64 *p)
87195 {
87196- return (__force __le64)*p;
87197+ return (__force const __le64)*p;
87198 }
87199-static inline __u64 __le64_to_cpup(const __le64 *p)
87200+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
87201 {
87202- return (__force __u64)*p;
87203+ return (__force const __u64)*p;
87204 }
87205 static inline __le32 __cpu_to_le32p(const __u32 *p)
87206 {
87207- return (__force __le32)*p;
87208+ return (__force const __le32)*p;
87209 }
87210 static inline __u32 __le32_to_cpup(const __le32 *p)
87211 {
87212- return (__force __u32)*p;
87213+ return (__force const __u32)*p;
87214 }
87215 static inline __le16 __cpu_to_le16p(const __u16 *p)
87216 {
87217- return (__force __le16)*p;
87218+ return (__force const __le16)*p;
87219 }
87220 static inline __u16 __le16_to_cpup(const __le16 *p)
87221 {
87222- return (__force __u16)*p;
87223+ return (__force const __u16)*p;
87224 }
87225 static inline __be64 __cpu_to_be64p(const __u64 *p)
87226 {
87227- return (__force __be64)__swab64p(p);
87228+ return (__force const __be64)__swab64p(p);
87229 }
87230 static inline __u64 __be64_to_cpup(const __be64 *p)
87231 {
87232- return __swab64p((__u64 *)p);
87233+ return __swab64p((const __u64 *)p);
87234 }
87235 static inline __be32 __cpu_to_be32p(const __u32 *p)
87236 {
87237- return (__force __be32)__swab32p(p);
87238+ return (__force const __be32)__swab32p(p);
87239 }
87240-static inline __u32 __be32_to_cpup(const __be32 *p)
87241+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
87242 {
87243- return __swab32p((__u32 *)p);
87244+ return __swab32p((const __u32 *)p);
87245 }
87246 static inline __be16 __cpu_to_be16p(const __u16 *p)
87247 {
87248- return (__force __be16)__swab16p(p);
87249+ return (__force const __be16)__swab16p(p);
87250 }
87251 static inline __u16 __be16_to_cpup(const __be16 *p)
87252 {
87253- return __swab16p((__u16 *)p);
87254+ return __swab16p((const __u16 *)p);
87255 }
87256 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
87257 #define __le64_to_cpus(x) do { (void)(x); } while (0)
87258diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
87259index 71e1d0e..6cc9caf 100644
87260--- a/include/uapi/linux/elf.h
87261+++ b/include/uapi/linux/elf.h
87262@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
87263 #define PT_GNU_EH_FRAME 0x6474e550
87264
87265 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
87266+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
87267+
87268+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
87269+
87270+/* Constants for the e_flags field */
87271+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
87272+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
87273+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
87274+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
87275+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
87276+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
87277
87278 /*
87279 * Extended Numbering
87280@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
87281 #define DT_DEBUG 21
87282 #define DT_TEXTREL 22
87283 #define DT_JMPREL 23
87284+#define DT_FLAGS 30
87285+ #define DF_TEXTREL 0x00000004
87286 #define DT_ENCODING 32
87287 #define OLD_DT_LOOS 0x60000000
87288 #define DT_LOOS 0x6000000d
87289@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
87290 #define PF_W 0x2
87291 #define PF_X 0x1
87292
87293+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
87294+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
87295+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
87296+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
87297+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
87298+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
87299+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
87300+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
87301+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
87302+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
87303+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
87304+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
87305+
87306 typedef struct elf32_phdr{
87307 Elf32_Word p_type;
87308 Elf32_Off p_offset;
87309@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
87310 #define EI_OSABI 7
87311 #define EI_PAD 8
87312
87313+#define EI_PAX 14
87314+
87315 #define ELFMAG0 0x7f /* EI_MAG */
87316 #define ELFMAG1 'E'
87317 #define ELFMAG2 'L'
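The new PF_* constants come in enable/disable pairs carried in the PT_PAX_FLAGS program header, which lets a binary be marked per-feature without relying on filesystem support. A plausible reading of how a loader resolves a pair is sketched below; the precedence shown (explicit disable wins, then explicit enable, then the kernel default) is an assumption for illustration, and the authoritative logic is the patch's own PT_PAX_FLAGS parsing elsewhere:

    #include <stdio.h>

    #define PF_PAGEEXEC   (1U << 4)
    #define PF_NOPAGEEXEC (1U << 5)

    /* assumed precedence: off-bit beats on-bit beats the default */
    static int feature_enabled(unsigned p_flags, unsigned on, unsigned off,
                               int def)
    {
        if (p_flags & off)
            return 0;
        if (p_flags & on)
            return 1;
        return def;
    }

    int main(void)
    {
        unsigned p_flags = PF_PAGEEXEC;   /* as read from the phdr */
        printf("PAGEEXEC: %d\n",
               feature_enabled(p_flags, PF_PAGEEXEC, PF_NOPAGEEXEC, 1));
        return 0;
    }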
87318diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
87319index aa169c4..6a2771d 100644
87320--- a/include/uapi/linux/personality.h
87321+++ b/include/uapi/linux/personality.h
87322@@ -30,6 +30,7 @@ enum {
87323 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
87324 ADDR_NO_RANDOMIZE | \
87325 ADDR_COMPAT_LAYOUT | \
87326+ ADDR_LIMIT_3GB | \
87327 MMAP_PAGE_ZERO)
87328
87329 /*
87330diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
87331index 7530e74..e714828 100644
87332--- a/include/uapi/linux/screen_info.h
87333+++ b/include/uapi/linux/screen_info.h
87334@@ -43,7 +43,8 @@ struct screen_info {
87335 __u16 pages; /* 0x32 */
87336 __u16 vesa_attributes; /* 0x34 */
87337 __u32 capabilities; /* 0x36 */
87338- __u8 _reserved[6]; /* 0x3a */
87339+ __u16 vesapm_size; /* 0x3a */
87340+ __u8 _reserved[4]; /* 0x3c */
87341 } __attribute__((packed));
87342
87343 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
87344diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
87345index 0e011eb..82681b1 100644
87346--- a/include/uapi/linux/swab.h
87347+++ b/include/uapi/linux/swab.h
87348@@ -43,7 +43,7 @@
87349 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
87350 */
87351
87352-static inline __attribute_const__ __u16 __fswab16(__u16 val)
87353+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
87354 {
87355 #ifdef __HAVE_BUILTIN_BSWAP16__
87356 return __builtin_bswap16(val);
87357@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
87358 #endif
87359 }
87360
87361-static inline __attribute_const__ __u32 __fswab32(__u32 val)
87362+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
87363 {
87364 #ifdef __HAVE_BUILTIN_BSWAP32__
87365 return __builtin_bswap32(val);
87366@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
87367 #endif
87368 }
87369
87370-static inline __attribute_const__ __u64 __fswab64(__u64 val)
87371+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
87372 {
87373 #ifdef __HAVE_BUILTIN_BSWAP64__
87374 return __builtin_bswap64(val);
87375diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
87376index 1590c49..5eab462 100644
87377--- a/include/uapi/linux/xattr.h
87378+++ b/include/uapi/linux/xattr.h
87379@@ -73,5 +73,9 @@
87380 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
87381 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
87382
87383+/* User namespace */
87384+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
87385+#define XATTR_PAX_FLAGS_SUFFIX "flags"
87386+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
87387
87388 #endif /* _UAPI_LINUX_XATTR_H */
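The user.pax.flags extended attribute gives PaX markings a home on any xattr-capable filesystem, independent of the ELF header. Reading it from userspace is a plain getxattr(2) call; the single-letter flag values (e.g. "m" for MPROTECT disabled) follow PaX userland convention and are an assumption here, not defined by this header:

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/xattr.h>

    #define XATTR_NAME_PAX_FLAGS "user.pax.flags"

    int main(int argc, char **argv)
    {
        char buf[32];
        if (argc < 2) {
            fprintf(stderr, "usage: %s <file>\n", argv[0]);
            return 1;
        }
        ssize_t n = getxattr(argv[1], XATTR_NAME_PAX_FLAGS, buf,
                             sizeof(buf) - 1);
        if (n < 0) {
            perror("getxattr");
            return 1;
        }
        buf[n] = '\0';
        printf("%s: pax flags \"%s\"\n", argv[1], buf);
        return 0;
    }

Userland tools typically write the attribute with something along the lines of setfattr -n user.pax.flags -v m <binary>.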
87389diff --git a/include/video/udlfb.h b/include/video/udlfb.h
87390index f9466fa..f4e2b81 100644
87391--- a/include/video/udlfb.h
87392+++ b/include/video/udlfb.h
87393@@ -53,10 +53,10 @@ struct dlfb_data {
87394 u32 pseudo_palette[256];
87395 int blank_mode; /*one of FB_BLANK_ */
87396 /* blit-only rendering path metrics, exposed through sysfs */
87397- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87398- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
87399- atomic_t bytes_sent; /* to usb, after compression including overhead */
87400- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
87401+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87402+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
87403+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
87404+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
87405 };
87406
87407 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
87408diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
87409index 30f5362..8ed8ac9 100644
87410--- a/include/video/uvesafb.h
87411+++ b/include/video/uvesafb.h
87412@@ -122,6 +122,7 @@ struct uvesafb_par {
87413 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
87414 u8 pmi_setpal; /* PMI for palette changes */
87415 u16 *pmi_base; /* protected mode interface location */
87416+ u8 *pmi_code; /* protected mode code location */
87417 void *pmi_start;
87418 void *pmi_pal;
87419 u8 *vbe_state_orig; /*
87420diff --git a/init/Kconfig b/init/Kconfig
87421index 9afb971..27d6fca 100644
87422--- a/init/Kconfig
87423+++ b/init/Kconfig
87424@@ -1129,6 +1129,7 @@ endif # CGROUPS
87425
87426 config CHECKPOINT_RESTORE
87427 bool "Checkpoint/restore support" if EXPERT
87428+ depends on !GRKERNSEC
87429 default n
87430 help
87431 Enables additional kernel features in a sake of checkpoint/restore.
87432@@ -1654,7 +1655,7 @@ config SLUB_DEBUG
87433
87434 config COMPAT_BRK
87435 bool "Disable heap randomization"
87436- default y
87437+ default n
87438 help
87439 Randomizing heap placement makes heap exploits harder, but it
87440 also breaks ancient binaries (including anything libc5 based).
87441@@ -1985,7 +1986,7 @@ config INIT_ALL_POSSIBLE
87442 config STOP_MACHINE
87443 bool
87444 default y
87445- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
87446+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
87447 help
87448 Need stop_machine() primitive.
87449
87450diff --git a/init/Makefile b/init/Makefile
87451index 7bc47ee..6da2dc7 100644
87452--- a/init/Makefile
87453+++ b/init/Makefile
87454@@ -2,6 +2,9 @@
87455 # Makefile for the linux kernel.
87456 #
87457
87458+ccflags-y := $(GCC_PLUGINS_CFLAGS)
87459+asflags-y := $(GCC_PLUGINS_AFLAGS)
87460+
87461 obj-y := main.o version.o mounts.o
87462 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
87463 obj-y += noinitramfs.o
87464diff --git a/init/do_mounts.c b/init/do_mounts.c
87465index eb41008..f5dbbf9 100644
87466--- a/init/do_mounts.c
87467+++ b/init/do_mounts.c
87468@@ -360,11 +360,11 @@ static void __init get_fs_names(char *page)
87469 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
87470 {
87471 struct super_block *s;
87472- int err = sys_mount(name, "/root", fs, flags, data);
87473+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
87474 if (err)
87475 return err;
87476
87477- sys_chdir("/root");
87478+ sys_chdir((const char __force_user *)"/root");
87479 s = current->fs->pwd.dentry->d_sb;
87480 ROOT_DEV = s->s_dev;
87481 printk(KERN_INFO
87482@@ -487,18 +487,18 @@ void __init change_floppy(char *fmt, ...)
87483 va_start(args, fmt);
87484 vsprintf(buf, fmt, args);
87485 va_end(args);
87486- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
87487+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
87488 if (fd >= 0) {
87489 sys_ioctl(fd, FDEJECT, 0);
87490 sys_close(fd);
87491 }
87492 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
87493- fd = sys_open("/dev/console", O_RDWR, 0);
87494+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
87495 if (fd >= 0) {
87496 sys_ioctl(fd, TCGETS, (long)&termios);
87497 termios.c_lflag &= ~ICANON;
87498 sys_ioctl(fd, TCSETSF, (long)&termios);
87499- sys_read(fd, &c, 1);
87500+ sys_read(fd, (char __user *)&c, 1);
87501 termios.c_lflag |= ICANON;
87502 sys_ioctl(fd, TCSETSF, (long)&termios);
87503 sys_close(fd);
87504@@ -592,8 +592,8 @@ void __init prepare_namespace(void)
87505 mount_root();
87506 out:
87507 devtmpfs_mount("dev");
87508- sys_mount(".", "/", NULL, MS_MOVE, NULL);
87509- sys_chroot(".");
87510+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
87511+ sys_chroot((const char __force_user *)".");
87512 }
87513
87514 static bool is_tmpfs;
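The (char __force_user *) casts blanketing init/ are type plumbing, not behavior changes: under the patch, __user denotes a genuinely distinct address space (checked by sparse and, on x86 with UDEREF, enforced at runtime), and early-boot code legitimately hands kernel-resident strings to sys_* entry points. __force marks each audited crossing. A compilable model using sparse's attributes (run sparse over it to see the warning the cast suppresses; under a plain compiler the attributes vanish and it just runs):

    #include <stdio.h>

    #ifdef __CHECKER__            /* defined when sparse is the checker */
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif
    #define __force_user __force __user

    /* stand-in for a syscall entry point that expects __user pointers */
    static long sys_mount_model(const char __user *dev,
                                const char __user *dir)
    {
        (void)dev; (void)dir;
        return 0;
    }

    int main(void)
    {
        /* kernel-resident literals handed to a __user-typed API: without
         * the __force_user cast, sparse flags an address-space mismatch */
        long err = sys_mount_model((const char __force_user *)".",
                                   (const char __force_user *)"/");
        printf("mount model: %ld\n", err);
        return 0;
    }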
87515diff --git a/init/do_mounts.h b/init/do_mounts.h
87516index f5b978a..69dbfe8 100644
87517--- a/init/do_mounts.h
87518+++ b/init/do_mounts.h
87519@@ -15,15 +15,15 @@ extern int root_mountflags;
87520
87521 static inline int create_dev(char *name, dev_t dev)
87522 {
87523- sys_unlink(name);
87524- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
87525+ sys_unlink((char __force_user *)name);
87526+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
87527 }
87528
87529 #if BITS_PER_LONG == 32
87530 static inline u32 bstat(char *name)
87531 {
87532 struct stat64 stat;
87533- if (sys_stat64(name, &stat) != 0)
87534+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
87535 return 0;
87536 if (!S_ISBLK(stat.st_mode))
87537 return 0;
87538@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
87539 static inline u32 bstat(char *name)
87540 {
87541 struct stat stat;
87542- if (sys_newstat(name, &stat) != 0)
87543+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
87544 return 0;
87545 if (!S_ISBLK(stat.st_mode))
87546 return 0;
87547diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
87548index 3e0878e..8a9d7a0 100644
87549--- a/init/do_mounts_initrd.c
87550+++ b/init/do_mounts_initrd.c
87551@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
87552 {
87553 sys_unshare(CLONE_FS | CLONE_FILES);
87554 /* stdin/stdout/stderr for /linuxrc */
87555- sys_open("/dev/console", O_RDWR, 0);
87556+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
87557 sys_dup(0);
87558 sys_dup(0);
87559 /* move initrd over / and chdir/chroot in initrd root */
87560- sys_chdir("/root");
87561- sys_mount(".", "/", NULL, MS_MOVE, NULL);
87562- sys_chroot(".");
87563+ sys_chdir((const char __force_user *)"/root");
87564+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
87565+ sys_chroot((const char __force_user *)".");
87566 sys_setsid();
87567 return 0;
87568 }
87569@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
87570 create_dev("/dev/root.old", Root_RAM0);
87571 /* mount initrd on rootfs' /root */
87572 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
87573- sys_mkdir("/old", 0700);
87574- sys_chdir("/old");
87575+ sys_mkdir((const char __force_user *)"/old", 0700);
87576+ sys_chdir((const char __force_user *)"/old");
87577
87578 /* try loading default modules from initrd */
87579 load_default_modules();
87580@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
87581 current->flags &= ~PF_FREEZER_SKIP;
87582
87583 /* move initrd to rootfs' /old */
87584- sys_mount("..", ".", NULL, MS_MOVE, NULL);
87585+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
87586 /* switch root and cwd back to / of rootfs */
87587- sys_chroot("..");
87588+ sys_chroot((const char __force_user *)"..");
87589
87590 if (new_decode_dev(real_root_dev) == Root_RAM0) {
87591- sys_chdir("/old");
87592+ sys_chdir((const char __force_user *)"/old");
87593 return;
87594 }
87595
87596- sys_chdir("/");
87597+ sys_chdir((const char __force_user *)"/");
87598 ROOT_DEV = new_decode_dev(real_root_dev);
87599 mount_root();
87600
87601 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
87602- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
87603+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
87604 if (!error)
87605 printk("okay\n");
87606 else {
87607- int fd = sys_open("/dev/root.old", O_RDWR, 0);
87608+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
87609 if (error == -ENOENT)
87610 printk("/initrd does not exist. Ignored.\n");
87611 else
87612 printk("failed\n");
87613 printk(KERN_NOTICE "Unmounting old root\n");
87614- sys_umount("/old", MNT_DETACH);
87615+ sys_umount((char __force_user *)"/old", MNT_DETACH);
87616 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
87617 if (fd < 0) {
87618 error = fd;
87619@@ -127,11 +127,11 @@ int __init initrd_load(void)
87620 * mounted in the normal path.
87621 */
87622 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
87623- sys_unlink("/initrd.image");
87624+ sys_unlink((const char __force_user *)"/initrd.image");
87625 handle_initrd();
87626 return 1;
87627 }
87628 }
87629- sys_unlink("/initrd.image");
87630+ sys_unlink((const char __force_user *)"/initrd.image");
87631 return 0;
87632 }
87633diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
87634index 8cb6db5..d729f50 100644
87635--- a/init/do_mounts_md.c
87636+++ b/init/do_mounts_md.c
87637@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
87638 partitioned ? "_d" : "", minor,
87639 md_setup_args[ent].device_names);
87640
87641- fd = sys_open(name, 0, 0);
87642+ fd = sys_open((char __force_user *)name, 0, 0);
87643 if (fd < 0) {
87644 printk(KERN_ERR "md: open failed - cannot start "
87645 "array %s\n", name);
87646@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
87647 * array without it
87648 */
87649 sys_close(fd);
87650- fd = sys_open(name, 0, 0);
87651+ fd = sys_open((char __force_user *)name, 0, 0);
87652 sys_ioctl(fd, BLKRRPART, 0);
87653 }
87654 sys_close(fd);
87655@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
87656
87657 wait_for_device_probe();
87658
87659- fd = sys_open("/dev/md0", 0, 0);
87660+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
87661 if (fd >= 0) {
87662 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
87663 sys_close(fd);
87664diff --git a/init/init_task.c b/init/init_task.c
87665index ba0a7f36..2bcf1d5 100644
87666--- a/init/init_task.c
87667+++ b/init/init_task.c
87668@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
87669 * Initial thread structure. Alignment of this is handled by a special
87670 * linker map entry.
87671 */
87672+#ifdef CONFIG_X86
87673+union thread_union init_thread_union __init_task_data;
87674+#else
87675 union thread_union init_thread_union __init_task_data =
87676 { INIT_THREAD_INFO(init_task) };
87677+#endif
87678diff --git a/init/initramfs.c b/init/initramfs.c
87679index ad1bd77..dca2c1b 100644
87680--- a/init/initramfs.c
87681+++ b/init/initramfs.c
87682@@ -25,7 +25,7 @@ static ssize_t __init xwrite(int fd, const char *p, size_t count)
87683
87684 /* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
87685 while (count) {
87686- ssize_t rv = sys_write(fd, p, count);
87687+ ssize_t rv = sys_write(fd, (char __force_user *)p, count);
87688
87689 if (rv < 0) {
87690 if (rv == -EINTR || rv == -EAGAIN)
87691@@ -107,7 +107,7 @@ static void __init free_hash(void)
87692 }
87693 }
87694
87695-static long __init do_utime(char *filename, time_t mtime)
87696+static long __init do_utime(char __force_user *filename, time_t mtime)
87697 {
87698 struct timespec t[2];
87699
87700@@ -142,7 +142,7 @@ static void __init dir_utime(void)
87701 struct dir_entry *de, *tmp;
87702 list_for_each_entry_safe(de, tmp, &dir_list, list) {
87703 list_del(&de->list);
87704- do_utime(de->name, de->mtime);
87705+ do_utime((char __force_user *)de->name, de->mtime);
87706 kfree(de->name);
87707 kfree(de);
87708 }
87709@@ -304,7 +304,7 @@ static int __init maybe_link(void)
87710 if (nlink >= 2) {
87711 char *old = find_link(major, minor, ino, mode, collected);
87712 if (old)
87713- return (sys_link(old, collected) < 0) ? -1 : 1;
87714+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
87715 }
87716 return 0;
87717 }
87718@@ -313,11 +313,11 @@ static void __init clean_path(char *path, umode_t fmode)
87719 {
87720 struct stat st;
87721
87722- if (!sys_newlstat(path, &st) && (st.st_mode ^ fmode) & S_IFMT) {
87723+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode ^ fmode) & S_IFMT) {
87724 if (S_ISDIR(st.st_mode))
87725- sys_rmdir(path);
87726+ sys_rmdir((char __force_user *)path);
87727 else
87728- sys_unlink(path);
87729+ sys_unlink((char __force_user *)path);
87730 }
87731 }
87732
87733@@ -338,7 +338,7 @@ static int __init do_name(void)
87734 int openflags = O_WRONLY|O_CREAT;
87735 if (ml != 1)
87736 openflags |= O_TRUNC;
87737- wfd = sys_open(collected, openflags, mode);
87738+ wfd = sys_open((char __force_user *)collected, openflags, mode);
87739
87740 if (wfd >= 0) {
87741 sys_fchown(wfd, uid, gid);
87742@@ -350,17 +350,17 @@ static int __init do_name(void)
87743 }
87744 }
87745 } else if (S_ISDIR(mode)) {
87746- sys_mkdir(collected, mode);
87747- sys_chown(collected, uid, gid);
87748- sys_chmod(collected, mode);
87749+ sys_mkdir((char __force_user *)collected, mode);
87750+ sys_chown((char __force_user *)collected, uid, gid);
87751+ sys_chmod((char __force_user *)collected, mode);
87752 dir_add(collected, mtime);
87753 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
87754 S_ISFIFO(mode) || S_ISSOCK(mode)) {
87755 if (maybe_link() == 0) {
87756- sys_mknod(collected, mode, rdev);
87757- sys_chown(collected, uid, gid);
87758- sys_chmod(collected, mode);
87759- do_utime(collected, mtime);
87760+ sys_mknod((char __force_user *)collected, mode, rdev);
87761+ sys_chown((char __force_user *)collected, uid, gid);
87762+ sys_chmod((char __force_user *)collected, mode);
87763+ do_utime((char __force_user *)collected, mtime);
87764 }
87765 }
87766 return 0;
87767@@ -372,7 +372,7 @@ static int __init do_copy(void)
87768 if (xwrite(wfd, victim, body_len) != body_len)
87769 error("write error");
87770 sys_close(wfd);
87771- do_utime(vcollected, mtime);
87772+ do_utime((char __force_user *)vcollected, mtime);
87773 kfree(vcollected);
87774 eat(body_len);
87775 state = SkipIt;
87776@@ -390,9 +390,9 @@ static int __init do_symlink(void)
87777 {
87778 collected[N_ALIGN(name_len) + body_len] = '\0';
87779 clean_path(collected, 0);
87780- sys_symlink(collected + N_ALIGN(name_len), collected);
87781- sys_lchown(collected, uid, gid);
87782- do_utime(collected, mtime);
87783+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
87784+ sys_lchown((char __force_user *)collected, uid, gid);
87785+ do_utime((char __force_user *)collected, mtime);
87786 state = SkipIt;
87787 next_state = Reset;
87788 return 0;
87789diff --git a/init/main.c b/init/main.c
87790index 61b99376..1e346cb 100644
87791--- a/init/main.c
87792+++ b/init/main.c
87793@@ -100,6 +100,8 @@ extern void radix_tree_init(void);
87794 static inline void mark_rodata_ro(void) { }
87795 #endif
87796
87797+extern void grsecurity_init(void);
87798+
87799 /*
87800 * Debug helper: via this flag we know that we are in 'early bootup code'
87801 * where only the boot processor is running with IRQ disabled. This means
87802@@ -161,6 +163,85 @@ static int __init set_reset_devices(char *str)
87803
87804 __setup("reset_devices", set_reset_devices);
87805
87806+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
87807+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
87808+static int __init setup_grsec_proc_gid(char *str)
87809+{
87810+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
87811+ return 1;
87812+}
87813+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
87814+#endif
87815+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
87816+int grsec_enable_sysfs_restrict = 1;
87817+static int __init setup_grsec_sysfs_restrict(char *str)
87818+{
87819+ if (!simple_strtol(str, NULL, 0))
87820+ grsec_enable_sysfs_restrict = 0;
87821+ return 1;
87822+}
87823+__setup("grsec_sysfs_restrict", setup_grsec_sysfs_restrict);
87824+#endif
87825+
87826+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
87827+unsigned long pax_user_shadow_base __read_only;
87828+EXPORT_SYMBOL(pax_user_shadow_base);
87829+extern char pax_enter_kernel_user[];
87830+extern char pax_exit_kernel_user[];
87831+#endif
87832+
87833+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
87834+static int __init setup_pax_nouderef(char *str)
87835+{
87836+#ifdef CONFIG_X86_32
87837+ unsigned int cpu;
87838+ struct desc_struct *gdt;
87839+
87840+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
87841+ gdt = get_cpu_gdt_table(cpu);
87842+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
87843+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
87844+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
87845+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
87846+ }
87847+ loadsegment(ds, __KERNEL_DS);
87848+ loadsegment(es, __KERNEL_DS);
87849+ loadsegment(ss, __KERNEL_DS);
87850+#else
87851+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
87852+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
87853+ clone_pgd_mask = ~(pgdval_t)0UL;
87854+ pax_user_shadow_base = 0UL;
87855+ setup_clear_cpu_cap(X86_FEATURE_PCID);
87856+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
87857+#endif
87858+
87859+ return 0;
87860+}
87861+early_param("pax_nouderef", setup_pax_nouderef);
87862+
87863+#ifdef CONFIG_X86_64
87864+static int __init setup_pax_weakuderef(char *str)
87865+{
87866+ if (clone_pgd_mask != ~(pgdval_t)0UL)
87867+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
87868+ return 1;
87869+}
87870+__setup("pax_weakuderef", setup_pax_weakuderef);
87871+#endif
87872+#endif
87873+
87874+#ifdef CONFIG_PAX_SOFTMODE
87875+int pax_softmode;
87876+
87877+static int __init setup_pax_softmode(char *str)
87878+{
87879+ get_option(&str, &pax_softmode);
87880+ return 1;
87881+}
87882+__setup("pax_softmode=", setup_pax_softmode);
87883+#endif
87884+
87885 static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
87886 const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
87887 static const char *panic_later, *panic_param;
87888@@ -735,7 +816,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
87889 struct blacklist_entry *entry;
87890 char *fn_name;
87891
87892- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
87893+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
87894 if (!fn_name)
87895 return false;
87896
87897@@ -787,7 +868,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
87898 {
87899 int count = preempt_count();
87900 int ret;
87901- char msgbuf[64];
87902+ const char *msg1 = "", *msg2 = "";
87903
87904 if (initcall_blacklisted(fn))
87905 return -EPERM;
87906@@ -797,18 +878,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
87907 else
87908 ret = fn();
87909
87910- msgbuf[0] = 0;
87911-
87912 if (preempt_count() != count) {
87913- sprintf(msgbuf, "preemption imbalance ");
87914+ msg1 = " preemption imbalance";
87915 preempt_count_set(count);
87916 }
87917 if (irqs_disabled()) {
87918- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
87919+ msg2 = " disabled interrupts";
87920 local_irq_enable();
87921 }
87922- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
87923+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
87924
87925+ add_latent_entropy();
87926 return ret;
87927 }
87928
87929@@ -914,8 +994,8 @@ static int run_init_process(const char *init_filename)
87930 {
87931 argv_init[0] = init_filename;
87932 return do_execve(getname_kernel(init_filename),
87933- (const char __user *const __user *)argv_init,
87934- (const char __user *const __user *)envp_init);
87935+ (const char __user *const __force_user *)argv_init,
87936+ (const char __user *const __force_user *)envp_init);
87937 }
87938
87939 static int try_to_run_init_process(const char *init_filename)
87940@@ -932,6 +1012,10 @@ static int try_to_run_init_process(const char *init_filename)
87941 return ret;
87942 }
87943
87944+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
87945+extern int gr_init_ran;
87946+#endif
87947+
87948 static noinline void __init kernel_init_freeable(void);
87949
87950 static int __ref kernel_init(void *unused)
87951@@ -956,6 +1040,11 @@ static int __ref kernel_init(void *unused)
87952 ramdisk_execute_command, ret);
87953 }
87954
87955+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
87956+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
87957+ gr_init_ran = 1;
87958+#endif
87959+
87960 /*
87961 * We try each of these until one succeeds.
87962 *
87963@@ -1016,7 +1105,7 @@ static noinline void __init kernel_init_freeable(void)
87964 do_basic_setup();
87965
87966 /* Open the /dev/console on the rootfs, this should never fail */
87967- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
87968+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
87969 pr_err("Warning: unable to open an initial console.\n");
87970
87971 (void) sys_dup(0);
87972@@ -1029,11 +1118,13 @@ static noinline void __init kernel_init_freeable(void)
87973 if (!ramdisk_execute_command)
87974 ramdisk_execute_command = "/init";
87975
87976- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
87977+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
87978 ramdisk_execute_command = NULL;
87979 prepare_namespace();
87980 }
87981
87982+ grsecurity_init();
87983+
87984 /*
87985 * Ok, we have completed the initial bootup, and
87986 * we're essentially up and running. Get rid of the
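The grsec_proc_gid=, grsec_sysfs_restrict, pax_weakuderef and pax_softmode= handlers added above all follow the kernel's __setup()/early_param() contract: the handler receives the text after '=', parses it, and returns 1 to mark the option consumed (early_param handlers such as setup_pax_nouderef return 0 on success instead). A userspace model of the parse step:

    #include <stdio.h>
    #include <stdlib.h>

    static int pax_softmode;

    /* models a __setup("pax_softmode=", ...) handler: parse the value
     * after '=', store it, return 1 to say "consumed" */
    static int setup_pax_softmode(const char *str)
    {
        pax_softmode = (int)strtol(str, NULL, 0);  /* like get_option() */
        return 1;
    }

    int main(void)
    {
        setup_pax_softmode("1");   /* as if booted with pax_softmode=1 */
        printf("pax_softmode=%d\n", pax_softmode);
        return 0;
    }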
87987diff --git a/ipc/compat.c b/ipc/compat.c
87988index 9b3c85f..1c4d897 100644
87989--- a/ipc/compat.c
87990+++ b/ipc/compat.c
87991@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
87992 COMPAT_SHMLBA);
87993 if (err < 0)
87994 return err;
87995- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
87996+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
87997 }
87998 case SHMDT:
87999 return sys_shmdt(compat_ptr(ptr));
88000diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
88001index 8ad93c2..efd80f8 100644
88002--- a/ipc/ipc_sysctl.c
88003+++ b/ipc/ipc_sysctl.c
88004@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
88005 static int proc_ipc_dointvec(struct ctl_table *table, int write,
88006 void __user *buffer, size_t *lenp, loff_t *ppos)
88007 {
88008- struct ctl_table ipc_table;
88009+ ctl_table_no_const ipc_table;
88010
88011 memcpy(&ipc_table, table, sizeof(ipc_table));
88012 ipc_table.data = get_ipc(table);
88013@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
88014 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
88015 void __user *buffer, size_t *lenp, loff_t *ppos)
88016 {
88017- struct ctl_table ipc_table;
88018+ ctl_table_no_const ipc_table;
88019
88020 memcpy(&ipc_table, table, sizeof(ipc_table));
88021 ipc_table.data = get_ipc(table);
88022@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
88023 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
88024 void __user *buffer, size_t *lenp, loff_t *ppos)
88025 {
88026- struct ctl_table ipc_table;
88027+ ctl_table_no_const ipc_table;
88028 memcpy(&ipc_table, table, sizeof(ipc_table));
88029 ipc_table.data = get_ipc(table);
88030
88031@@ -76,7 +76,7 @@ static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
88032 static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
88033 void __user *buffer, size_t *lenp, loff_t *ppos)
88034 {
88035- struct ctl_table ipc_table;
88036+ ctl_table_no_const ipc_table;
88037 int dummy = 0;
88038
88039 memcpy(&ipc_table, table, sizeof(ipc_table));
88040diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
88041index 68d4e95..1477ded 100644
88042--- a/ipc/mq_sysctl.c
88043+++ b/ipc/mq_sysctl.c
88044@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
88045 static int proc_mq_dointvec(struct ctl_table *table, int write,
88046 void __user *buffer, size_t *lenp, loff_t *ppos)
88047 {
88048- struct ctl_table mq_table;
88049+ ctl_table_no_const mq_table;
88050 memcpy(&mq_table, table, sizeof(mq_table));
88051 mq_table.data = get_mq(table);
88052
88053@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
88054 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
88055 void __user *buffer, size_t *lenp, loff_t *ppos)
88056 {
88057- struct ctl_table mq_table;
88058+ ctl_table_no_const mq_table;
88059 memcpy(&mq_table, table, sizeof(mq_table));
88060 mq_table.data = get_mq(table);
88061
88062diff --git a/ipc/mqueue.c b/ipc/mqueue.c
88063index 7635a1c..7432cb6 100644
88064--- a/ipc/mqueue.c
88065+++ b/ipc/mqueue.c
88066@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
88067 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
88068 info->attr.mq_msgsize);
88069
88070+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
88071 spin_lock(&mq_lock);
88072 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
88073 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
88074diff --git a/ipc/shm.c b/ipc/shm.c
88075index 19633b4..d454904 100644
88076--- a/ipc/shm.c
88077+++ b/ipc/shm.c
88078@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
88079 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
88080 #endif
88081
88082+#ifdef CONFIG_GRKERNSEC
88083+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
88084+ const u64 shm_createtime, const kuid_t cuid,
88085+ const int shmid);
88086+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
88087+ const u64 shm_createtime);
88088+#endif
88089+
88090 void shm_init_ns(struct ipc_namespace *ns)
88091 {
88092 ns->shm_ctlmax = SHMMAX;
88093@@ -560,6 +568,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
88094 shp->shm_lprid = 0;
88095 shp->shm_atim = shp->shm_dtim = 0;
88096 shp->shm_ctim = get_seconds();
88097+#ifdef CONFIG_GRKERNSEC
88098+ shp->shm_createtime = ktime_get_ns();
88099+#endif
88100 shp->shm_segsz = size;
88101 shp->shm_nattch = 0;
88102 shp->shm_file = file;
88103@@ -1096,6 +1107,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88104 f_mode = FMODE_READ | FMODE_WRITE;
88105 }
88106 if (shmflg & SHM_EXEC) {
88107+
88108+#ifdef CONFIG_PAX_MPROTECT
88109+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
88110+ goto out;
88111+#endif
88112+
88113 prot |= PROT_EXEC;
88114 acc_mode |= S_IXUGO;
88115 }
88116@@ -1120,6 +1137,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88117 if (err)
88118 goto out_unlock;
88119
88120+#ifdef CONFIG_GRKERNSEC
88121+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
88122+ shp->shm_perm.cuid, shmid) ||
88123+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
88124+ err = -EACCES;
88125+ goto out_unlock;
88126+ }
88127+#endif
88128+
88129 ipc_lock_object(&shp->shm_perm);
88130
88131 /* check if shm_destroy() is tearing down shp */
88132@@ -1132,6 +1158,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88133 path = shp->shm_file->f_path;
88134 path_get(&path);
88135 shp->shm_nattch++;
88136+#ifdef CONFIG_GRKERNSEC
88137+ shp->shm_lapid = current->pid;
88138+#endif
88139 size = i_size_read(path.dentry->d_inode);
88140 ipc_unlock_object(&shp->shm_perm);
88141 rcu_read_unlock();
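The shm changes pair a record step with a veto step: newseg() stamps shm_createtime, do_shmat() records the last attach pid, and the gr_handle_shmat()/gr_chroot_shmat() hooks let policy refuse attaches, for instance to segments whose creator has exited or that originate outside the caller's chroot. The exact rules live in grsecurity's RBAC code, not shown here; a stand-in sketch of the hook shape, with an invented rule for illustration only:

    #include <stdio.h>
    #include <time.h>

    struct shm_seg {
        int  shm_cprid;        /* creator pid, recorded at newseg()   */
        long shm_createtime;   /* creation timestamp, ditto           */
    };

    /* policy hook: nonzero means "allow" (mirrors the kernel convention);
     * the rule body here is a placeholder, not grsecurity's real check */
    static int gr_handle_shmat_model(const struct shm_seg *s,
                                     int attacher_pid)
    {
        (void)attacher_pid;
        return s->shm_cprid > 0;
    }

    int main(void)
    {
        struct shm_seg seg = { .shm_cprid = 1234,
                               .shm_createtime = (long)time(NULL) };
        if (!gr_handle_shmat_model(&seg, 4321)) {
            fprintf(stderr, "shmat: -EACCES\n");
            return 13;
        }
        puts("shmat: allowed");
        return 0;
    }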
88142diff --git a/ipc/util.c b/ipc/util.c
88143index 106bed0..f851429 100644
88144--- a/ipc/util.c
88145+++ b/ipc/util.c
88146@@ -71,6 +71,8 @@ struct ipc_proc_iface {
88147 int (*show)(struct seq_file *, void *);
88148 };
88149
88150+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
88151+
88152 /**
88153 * ipc_init - initialise ipc subsystem
88154 *
88155@@ -497,6 +499,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
88156 granted_mode >>= 6;
88157 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
88158 granted_mode >>= 3;
88159+
88160+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
88161+ return -1;
88162+
88163 /* is there some bit set in requested_mode but not in granted_mode? */
88164 if ((requested_mode & ~granted_mode & 0007) &&
88165 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
88166diff --git a/kernel/audit.c b/kernel/audit.c
88167index 72ab759..757deba 100644
88168--- a/kernel/audit.c
88169+++ b/kernel/audit.c
88170@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
88171 3) suppressed due to audit_rate_limit
88172 4) suppressed due to audit_backlog_limit
88173 */
88174-static atomic_t audit_lost = ATOMIC_INIT(0);
88175+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
88176
88177 /* The netlink socket. */
88178 static struct sock *audit_sock;
88179@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
88180 unsigned long now;
88181 int print;
88182
88183- atomic_inc(&audit_lost);
88184+ atomic_inc_unchecked(&audit_lost);
88185
88186 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
88187
88188@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
88189 if (print) {
88190 if (printk_ratelimit())
88191 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
88192- atomic_read(&audit_lost),
88193+ atomic_read_unchecked(&audit_lost),
88194 audit_rate_limit,
88195 audit_backlog_limit);
88196 audit_panic(message);
88197@@ -831,7 +831,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
88198 s.pid = audit_pid;
88199 s.rate_limit = audit_rate_limit;
88200 s.backlog_limit = audit_backlog_limit;
88201- s.lost = atomic_read(&audit_lost);
88202+ s.lost = atomic_read_unchecked(&audit_lost);
88203 s.backlog = skb_queue_len(&audit_skb_queue);
88204 s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL;
88205 s.backlog_wait_time = audit_backlog_wait_time;
88206diff --git a/kernel/auditsc.c b/kernel/auditsc.c
88207index 072566d..1190489 100644
88208--- a/kernel/auditsc.c
88209+++ b/kernel/auditsc.c
88210@@ -2056,7 +2056,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
88211 }
88212
88213 /* global counter which is incremented every time something logs in */
88214-static atomic_t session_id = ATOMIC_INIT(0);
88215+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
88216
88217 static int audit_set_loginuid_perm(kuid_t loginuid)
88218 {
88219@@ -2123,7 +2123,7 @@ int audit_set_loginuid(kuid_t loginuid)
88220
88221 /* are we setting or clearing? */
88222 if (uid_valid(loginuid))
88223- sessionid = (unsigned int)atomic_inc_return(&session_id);
88224+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
88225
88226 task->sessionid = sessionid;
88227 task->loginuid = loginuid;
88228diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
88229index a64e7a2..2e69448 100644
88230--- a/kernel/bpf/core.c
88231+++ b/kernel/bpf/core.c
88232@@ -143,14 +143,17 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
88233 * random section of illegal instructions.
88234 */
88235 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
88236- hdr = module_alloc(size);
88237+ hdr = module_alloc_exec(size);
88238 if (hdr == NULL)
88239 return NULL;
88240
88241 /* Fill space with illegal/arch-dep instructions. */
88242 bpf_fill_ill_insns(hdr, size);
88243
88244+ pax_open_kernel();
88245 hdr->pages = size / PAGE_SIZE;
88246+ pax_close_kernel();
88247+
88248 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
88249 PAGE_SIZE - sizeof(*hdr));
88250 start = (prandom_u32() % hole) & ~(alignment - 1);
88251@@ -163,7 +166,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
88252
88253 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
88254 {
88255- module_memfree(hdr);
88256+ module_memfree_exec(hdr);
88257 }
88258 #endif /* CONFIG_BPF_JIT */
88259
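Two grsecurity conventions appear in the BPF JIT hunk: executable JIT buffers come from a dedicated allocator pair (module_alloc_exec()/module_memfree_exec()), and the write to the otherwise read-only header is bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection (conceptually, toggling CR0.WP on x86). A runnable userspace analogue of the open/close write window, using mprotect() in place of the PaX primitives:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Userspace analogue of pax_open_kernel()/pax_close_kernel(): briefly
 * lift write protection around one sanctioned write, then restore it. */
static void *page;
static size_t pagesz;

static void open_window(void)  { mprotect(page, pagesz, PROT_READ | PROT_WRITE); }
static void close_window(void) { mprotect(page, pagesz, PROT_READ); }

int main(void)
{
    pagesz = (size_t)sysconf(_SC_PAGESIZE);
    page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (page == MAP_FAILED)
        return 1;

    strcpy(page, "initial");
    close_window();                 /* page is now read-only */

    open_window();                  /* like pax_open_kernel() */
    strcpy(page, "updated");        /* the one sanctioned write */
    close_window();                 /* like pax_close_kernel() */

    puts((char *)page);             /* reads stay legal: "updated" */
    return 0;
}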
88260diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
88261index 536edc2..d28c85d 100644
88262--- a/kernel/bpf/syscall.c
88263+++ b/kernel/bpf/syscall.c
88264@@ -548,11 +548,15 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
88265 int err;
88266
88267 /* the syscall is limited to root temporarily. This restriction will be
88268- * lifted when security audit is clean. Note that eBPF+tracing must have
88269- * this restriction, since it may pass kernel data to user space
88270+ * lifted by upstream when a half-assed security audit is clean. Note
88271+ * that eBPF+tracing must have this restriction, since it may pass
88272+ * kernel data to user space
88273 */
88274 if (!capable(CAP_SYS_ADMIN))
88275 return -EPERM;
88276+#ifdef CONFIG_GRKERNSEC
88277+ return -EPERM;
88278+#endif
88279
88280 if (!access_ok(VERIFY_READ, uattr, 1))
88281 return -EFAULT;
88282diff --git a/kernel/capability.c b/kernel/capability.c
88283index 989f5bf..d317ca0 100644
88284--- a/kernel/capability.c
88285+++ b/kernel/capability.c
88286@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
88287 * before modification is attempted and the application
88288 * fails.
88289 */
88290+ if (tocopy > ARRAY_SIZE(kdata))
88291+ return -EFAULT;
88292+
88293 if (copy_to_user(dataptr, kdata, tocopy
88294 * sizeof(struct __user_cap_data_struct))) {
88295 return -EFAULT;
88296@@ -297,10 +300,11 @@ bool has_ns_capability(struct task_struct *t,
88297 int ret;
88298
88299 rcu_read_lock();
88300- ret = security_capable(__task_cred(t), ns, cap);
88301+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
88302+ gr_task_is_capable(t, __task_cred(t), cap);
88303 rcu_read_unlock();
88304
88305- return (ret == 0);
88306+ return ret;
88307 }
88308
88309 /**
88310@@ -337,10 +341,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
88311 int ret;
88312
88313 rcu_read_lock();
88314- ret = security_capable_noaudit(__task_cred(t), ns, cap);
88315+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
88316 rcu_read_unlock();
88317
88318- return (ret == 0);
88319+ return ret;
88320 }
88321
88322 /**
88323@@ -378,7 +382,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
88324 BUG();
88325 }
88326
88327- if (security_capable(current_cred(), ns, cap) == 0) {
88328+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
88329 current->flags |= PF_SUPERPRIV;
88330 return true;
88331 }
88332@@ -386,6 +390,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
88333 }
88334 EXPORT_SYMBOL(ns_capable);
88335
88336+bool ns_capable_nolog(struct user_namespace *ns, int cap)
88337+{
88338+ if (unlikely(!cap_valid(cap))) {
88339+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
88340+ BUG();
88341+ }
88342+
88343+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
88344+ current->flags |= PF_SUPERPRIV;
88345+ return true;
88346+ }
88347+ return false;
88348+}
88349+EXPORT_SYMBOL(ns_capable_nolog);
88350+
88351 /**
88352 * file_ns_capable - Determine if the file's opener had a capability in effect
88353 * @file: The file we want to check
88354@@ -427,6 +446,12 @@ bool capable(int cap)
88355 }
88356 EXPORT_SYMBOL(capable);
88357
88358+bool capable_nolog(int cap)
88359+{
88360+ return ns_capable_nolog(&init_user_ns, cap);
88361+}
88362+EXPORT_SYMBOL(capable_nolog);
88363+
88364 /**
88365 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
88366 * @inode: The inode in question
88367@@ -444,3 +469,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
88368 kgid_has_mapping(ns, inode->i_gid);
88369 }
88370 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
88371+
88372+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
88373+{
88374+ struct user_namespace *ns = current_user_ns();
88375+
88376+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
88377+ kgid_has_mapping(ns, inode->i_gid);
88378+}
88379+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
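Besides the gr_* capability gates, the capability.c diff adds a defensive bounds check in capget(): tocopy is derived from the header version, and the patch refuses any value that would overrun the on-stack kdata array before copy_to_user() runs. A minimal userspace version of that guard; the struct and sizes are invented for the sketch:

#include <stdio.h>
#include <string.h>

#define N_SETS 2

struct cap_data { unsigned effective, permitted, inheritable; };

/* Copy out up to 'tocopy' elements, refusing counts larger than the
 * kernel-side buffer, the same guard the patch adds before
 * copy_to_user() in capget(). */
static int copy_caps_out(struct cap_data *dst, const struct cap_data *kdata,
                         size_t tocopy)
{
    if (tocopy > N_SETS)
        return -1;                       /* would read past kdata */
    memcpy(dst, kdata, tocopy * sizeof(*kdata));
    return 0;
}

int main(void)
{
    struct cap_data kdata[N_SETS] = { { 1, 1, 0 }, { 0, 1, 0 } };
    struct cap_data out[N_SETS];

    printf("%d\n", copy_caps_out(out, kdata, 2));  /*  0: in bounds */
    printf("%d\n", copy_caps_out(out, kdata, 3));  /* -1: rejected  */
    return 0;
}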
88380diff --git a/kernel/cgroup.c b/kernel/cgroup.c
88381index 04cfe8a..adadcc0 100644
88382--- a/kernel/cgroup.c
88383+++ b/kernel/cgroup.c
88384@@ -5343,6 +5343,9 @@ static void cgroup_release_agent(struct work_struct *work)
88385 if (!pathbuf || !agentbuf)
88386 goto out;
88387
88388+ if (agentbuf[0] == '\0')
88389+ goto out;
88390+
88391 path = cgroup_path(cgrp, pathbuf, PATH_MAX);
88392 if (!path)
88393 goto out;
88394@@ -5528,7 +5531,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
88395 struct task_struct *task;
88396 int count = 0;
88397
88398- seq_printf(seq, "css_set %p\n", cset);
88399+ seq_printf(seq, "css_set %pK\n", cset);
88400
88401 list_for_each_entry(task, &cset->tasks, cg_list) {
88402 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
88403diff --git a/kernel/compat.c b/kernel/compat.c
88404index ebb3c36..1df606e 100644
88405--- a/kernel/compat.c
88406+++ b/kernel/compat.c
88407@@ -13,6 +13,7 @@
88408
88409 #include <linux/linkage.h>
88410 #include <linux/compat.h>
88411+#include <linux/module.h>
88412 #include <linux/errno.h>
88413 #include <linux/time.h>
88414 #include <linux/signal.h>
88415@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
88416 mm_segment_t oldfs;
88417 long ret;
88418
88419- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
88420+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
88421 oldfs = get_fs();
88422 set_fs(KERNEL_DS);
88423 ret = hrtimer_nanosleep_restart(restart);
88424@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
88425 oldfs = get_fs();
88426 set_fs(KERNEL_DS);
88427 ret = hrtimer_nanosleep(&tu,
88428- rmtp ? (struct timespec __user *)&rmt : NULL,
88429+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
88430 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
88431 set_fs(oldfs);
88432
88433@@ -379,7 +380,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
88434 mm_segment_t old_fs = get_fs();
88435
88436 set_fs(KERNEL_DS);
88437- ret = sys_sigpending((old_sigset_t __user *) &s);
88438+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
88439 set_fs(old_fs);
88440 if (ret == 0)
88441 ret = put_user(s, set);
88442@@ -469,7 +470,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
88443 mm_segment_t old_fs = get_fs();
88444
88445 set_fs(KERNEL_DS);
88446- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
88447+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
88448 set_fs(old_fs);
88449
88450 if (!ret) {
88451@@ -551,8 +552,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
88452 set_fs (KERNEL_DS);
88453 ret = sys_wait4(pid,
88454 (stat_addr ?
88455- (unsigned int __user *) &status : NULL),
88456- options, (struct rusage __user *) &r);
88457+ (unsigned int __force_user *) &status : NULL),
88458+ options, (struct rusage __force_user *) &r);
88459 set_fs (old_fs);
88460
88461 if (ret > 0) {
88462@@ -578,8 +579,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
88463 memset(&info, 0, sizeof(info));
88464
88465 set_fs(KERNEL_DS);
88466- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
88467- uru ? (struct rusage __user *)&ru : NULL);
88468+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
88469+ uru ? (struct rusage __force_user *)&ru : NULL);
88470 set_fs(old_fs);
88471
88472 if ((ret < 0) || (info.si_signo == 0))
88473@@ -713,8 +714,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
88474 oldfs = get_fs();
88475 set_fs(KERNEL_DS);
88476 err = sys_timer_settime(timer_id, flags,
88477- (struct itimerspec __user *) &newts,
88478- (struct itimerspec __user *) &oldts);
88479+ (struct itimerspec __force_user *) &newts,
88480+ (struct itimerspec __force_user *) &oldts);
88481 set_fs(oldfs);
88482 if (!err && old && put_compat_itimerspec(old, &oldts))
88483 return -EFAULT;
88484@@ -731,7 +732,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
88485 oldfs = get_fs();
88486 set_fs(KERNEL_DS);
88487 err = sys_timer_gettime(timer_id,
88488- (struct itimerspec __user *) &ts);
88489+ (struct itimerspec __force_user *) &ts);
88490 set_fs(oldfs);
88491 if (!err && put_compat_itimerspec(setting, &ts))
88492 return -EFAULT;
88493@@ -750,7 +751,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
88494 oldfs = get_fs();
88495 set_fs(KERNEL_DS);
88496 err = sys_clock_settime(which_clock,
88497- (struct timespec __user *) &ts);
88498+ (struct timespec __force_user *) &ts);
88499 set_fs(oldfs);
88500 return err;
88501 }
88502@@ -765,7 +766,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
88503 oldfs = get_fs();
88504 set_fs(KERNEL_DS);
88505 err = sys_clock_gettime(which_clock,
88506- (struct timespec __user *) &ts);
88507+ (struct timespec __force_user *) &ts);
88508 set_fs(oldfs);
88509 if (!err && compat_put_timespec(&ts, tp))
88510 return -EFAULT;
88511@@ -785,7 +786,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
88512
88513 oldfs = get_fs();
88514 set_fs(KERNEL_DS);
88515- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
88516+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
88517 set_fs(oldfs);
88518
88519 err = compat_put_timex(utp, &txc);
88520@@ -805,7 +806,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
88521 oldfs = get_fs();
88522 set_fs(KERNEL_DS);
88523 err = sys_clock_getres(which_clock,
88524- (struct timespec __user *) &ts);
88525+ (struct timespec __force_user *) &ts);
88526 set_fs(oldfs);
88527 if (!err && tp && compat_put_timespec(&ts, tp))
88528 return -EFAULT;
88529@@ -819,7 +820,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
88530 struct timespec tu;
88531 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
88532
88533- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
88534+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
88535 oldfs = get_fs();
88536 set_fs(KERNEL_DS);
88537 err = clock_nanosleep_restart(restart);
88538@@ -851,8 +852,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
88539 oldfs = get_fs();
88540 set_fs(KERNEL_DS);
88541 err = sys_clock_nanosleep(which_clock, flags,
88542- (struct timespec __user *) &in,
88543- (struct timespec __user *) &out);
88544+ (struct timespec __force_user *) &in,
88545+ (struct timespec __force_user *) &out);
88546 set_fs(oldfs);
88547
88548 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
88549@@ -1146,7 +1147,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
88550 mm_segment_t old_fs = get_fs();
88551
88552 set_fs(KERNEL_DS);
88553- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
88554+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
88555 set_fs(old_fs);
88556 if (compat_put_timespec(&t, interval))
88557 return -EFAULT;
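Every compat.c change above is the same mechanical substitution: the compat wrappers widen the address limit with set_fs(KERNEL_DS) and then call the native syscall on a kernel-stack buffer, so the __user cast is a deliberate lie, and PaX's stricter sparse checking wants that spelled __force_user (that is, __force __user) to mark the address-space violation as intentional. A small sketch of the annotations involved; under sparse (make C=1) they are real attributes, under plain GCC they compile away:

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user __force __user

#include <stdio.h>
#include <string.h>

/* A syscall-style body whose contract says "userspace pointer". */
static long fake_sys_gettime(long __user *utp)
{
    long t = 12345;
    memcpy((void __force *)utp, &t, sizeof(t)); /* stand-in for copy_to_user() */
    return 0;
}

int main(void)
{
    long kts;
    /* The compat path hands in a *kernel* buffer after set_fs(KERNEL_DS);
     * the __force_user cast tells sparse the mismatch is intentional. */
    fake_sys_gettime((long __force_user *)&kts);
    printf("%ld\n", kts);
    return 0;
}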
88558diff --git a/kernel/configs.c b/kernel/configs.c
88559index c18b1f1..b9a0132 100644
88560--- a/kernel/configs.c
88561+++ b/kernel/configs.c
88562@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
88563 struct proc_dir_entry *entry;
88564
88565 /* create the current config file */
88566+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
88567+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
88568+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
88569+ &ikconfig_file_ops);
88570+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
88571+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
88572+ &ikconfig_file_ops);
88573+#endif
88574+#else
88575 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
88576 &ikconfig_file_ops);
88577+#endif
88578+
88579 if (!entry)
88580 return -ENOMEM;
88581
88582diff --git a/kernel/cred.c b/kernel/cred.c
88583index e0573a4..26c0fd3 100644
88584--- a/kernel/cred.c
88585+++ b/kernel/cred.c
88586@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
88587 validate_creds(cred);
88588 alter_cred_subscribers(cred, -1);
88589 put_cred(cred);
88590+
88591+#ifdef CONFIG_GRKERNSEC_SETXID
88592+ cred = (struct cred *) tsk->delayed_cred;
88593+ if (cred != NULL) {
88594+ tsk->delayed_cred = NULL;
88595+ validate_creds(cred);
88596+ alter_cred_subscribers(cred, -1);
88597+ put_cred(cred);
88598+ }
88599+#endif
88600 }
88601
88602 /**
88603@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
88604 * Always returns 0 thus allowing this function to be tail-called at the end
88605 * of, say, sys_setgid().
88606 */
88607-int commit_creds(struct cred *new)
88608+static int __commit_creds(struct cred *new)
88609 {
88610 struct task_struct *task = current;
88611 const struct cred *old = task->real_cred;
88612@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
88613
88614 get_cred(new); /* we will require a ref for the subj creds too */
88615
88616+ gr_set_role_label(task, new->uid, new->gid);
88617+
88618 /* dumpability changes */
88619 if (!uid_eq(old->euid, new->euid) ||
88620 !gid_eq(old->egid, new->egid) ||
88621@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
88622 put_cred(old);
88623 return 0;
88624 }
88625+#ifdef CONFIG_GRKERNSEC_SETXID
88626+extern int set_user(struct cred *new);
88627+
88628+void gr_delayed_cred_worker(void)
88629+{
88630+ const struct cred *new = current->delayed_cred;
88631+ struct cred *ncred;
88632+
88633+ current->delayed_cred = NULL;
88634+
88635+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
88636+ // from doing get_cred on it when queueing this
88637+ put_cred(new);
88638+ return;
88639+ } else if (new == NULL)
88640+ return;
88641+
88642+ ncred = prepare_creds();
88643+ if (!ncred)
88644+ goto die;
88645+ // uids
88646+ ncred->uid = new->uid;
88647+ ncred->euid = new->euid;
88648+ ncred->suid = new->suid;
88649+ ncred->fsuid = new->fsuid;
88650+ // gids
88651+ ncred->gid = new->gid;
88652+ ncred->egid = new->egid;
88653+ ncred->sgid = new->sgid;
88654+ ncred->fsgid = new->fsgid;
88655+ // groups
88656+ set_groups(ncred, new->group_info);
88657+ // caps
88658+ ncred->securebits = new->securebits;
88659+ ncred->cap_inheritable = new->cap_inheritable;
88660+ ncred->cap_permitted = new->cap_permitted;
88661+ ncred->cap_effective = new->cap_effective;
88662+ ncred->cap_bset = new->cap_bset;
88663+
88664+ if (set_user(ncred)) {
88665+ abort_creds(ncred);
88666+ goto die;
88667+ }
88668+
88669+ // from doing get_cred on it when queueing this
88670+ put_cred(new);
88671+
88672+ __commit_creds(ncred);
88673+ return;
88674+die:
88675+ // from doing get_cred on it when queueing this
88676+ put_cred(new);
88677+ do_group_exit(SIGKILL);
88678+}
88679+#endif
88680+
88681+int commit_creds(struct cred *new)
88682+{
88683+#ifdef CONFIG_GRKERNSEC_SETXID
88684+ int ret;
88685+ int schedule_it = 0;
88686+ struct task_struct *t;
88687+ unsigned oldsecurebits = current_cred()->securebits;
88688+
88689+ /* we won't get called with tasklist_lock held for writing
88690+ and interrupts disabled as the cred struct in that case is
88691+ init_cred
88692+ */
88693+ if (grsec_enable_setxid && !current_is_single_threaded() &&
88694+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
88695+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
88696+ schedule_it = 1;
88697+ }
88698+ ret = __commit_creds(new);
88699+ if (schedule_it) {
88700+ rcu_read_lock();
88701+ read_lock(&tasklist_lock);
88702+ for (t = next_thread(current); t != current;
88703+ t = next_thread(t)) {
88704+ /* we'll check if the thread has uid 0 in
88705+ * the delayed worker routine
88706+ */
88707+ if (task_securebits(t) == oldsecurebits &&
88708+ t->delayed_cred == NULL) {
88709+ t->delayed_cred = get_cred(new);
88710+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
88711+ set_tsk_need_resched(t);
88712+ }
88713+ }
88714+ read_unlock(&tasklist_lock);
88715+ rcu_read_unlock();
88716+ }
88717+
88718+ return ret;
88719+#else
88720+ return __commit_creds(new);
88721+#endif
88722+}
88723+
88724 EXPORT_SYMBOL(commit_creds);
88725
88726 /**
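The cred.c rework is the heart of GRKERNSEC_SETXID: a root thread calling setuid() changes only its own credentials, so sibling threads keep running as root until each one re-enters the kernel; the commit_creds() wrapper therefore queues the new creds on every sibling (delayed_cred plus TIF_GRSEC_SETXID) and each thread adopts them at its next safe point, exiting via SIGKILL if that fails. A compact pthread analogue of "queue on every sibling, adopt at a safe point"; the per-thread arrays and uid values are invented for the sketch (build with cc -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NTHREADS 3

/* Per-thread "delayed credential": <0 means nothing pending. */
static _Atomic int pending_uid[NTHREADS];
static _Atomic int effective_uid[NTHREADS];

/* The sibling adopts the queued value at its next safe point,
 * the analogue of handling TIF_GRSEC_SETXID on kernel exit. */
static void check_pending(int self)
{
    int uid = atomic_exchange(&pending_uid[self], -1);
    if (uid >= 0)
        atomic_store(&effective_uid[self], uid);
}

static void *worker(void *arg)
{
    int self = (int)(long)arg;
    for (int i = 0; i < 1000; i++) {
        check_pending(self);        /* safe point */
        usleep(100);                /* pretend to do work */
    }
    return NULL;
}

/* The thread dropping privilege queues the change on every sibling,
 * as the commit_creds() wrapper above does under tasklist_lock. */
static void drop_uid_everywhere(int new_uid)
{
    for (int t = 0; t < NTHREADS; t++)
        atomic_store(&pending_uid[t], new_uid);
}

int main(void)
{
    pthread_t tid[NTHREADS];
    for (long t = 0; t < NTHREADS; t++) {
        atomic_store(&pending_uid[t], -1);
        atomic_store(&effective_uid[t], 0);   /* start as "root" */
        pthread_create(&tid[t], NULL, worker, (void *)t);
    }
    drop_uid_everywhere(1000);
    for (int t = 0; t < NTHREADS; t++)
        pthread_join(tid[t], NULL);
    for (int t = 0; t < NTHREADS; t++)
        printf("thread %d uid %d\n", t, atomic_load(&effective_uid[t]));
    return 0;
}

Here the safe point is a poll in the worker loop; in the patch it is the TIF-flag check on the return-to-userspace path.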
88727diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
88728index ac5c0f9..4b1c6c2 100644
88729--- a/kernel/debug/debug_core.c
88730+++ b/kernel/debug/debug_core.c
88731@@ -127,7 +127,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
88732 */
88733 static atomic_t masters_in_kgdb;
88734 static atomic_t slaves_in_kgdb;
88735-static atomic_t kgdb_break_tasklet_var;
88736+static atomic_unchecked_t kgdb_break_tasklet_var;
88737 atomic_t kgdb_setting_breakpoint;
88738
88739 struct task_struct *kgdb_usethread;
88740@@ -137,7 +137,7 @@ int kgdb_single_step;
88741 static pid_t kgdb_sstep_pid;
88742
88743 /* to keep track of the CPU which is doing the single stepping*/
88744-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
88745+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
88746
88747 /*
88748 * If you are debugging a problem where roundup (the collection of
88749@@ -552,7 +552,7 @@ return_normal:
88750 * kernel will only try for the value of sstep_tries before
88751 * giving up and continuing on.
88752 */
88753- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
88754+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
88755 (kgdb_info[cpu].task &&
88756 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
88757 atomic_set(&kgdb_active, -1);
88758@@ -654,8 +654,8 @@ cpu_master_loop:
88759 }
88760
88761 kgdb_restore:
88762- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
88763- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
88764+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
88765+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
88766 if (kgdb_info[sstep_cpu].task)
88767 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
88768 else
88769@@ -932,18 +932,18 @@ static void kgdb_unregister_callbacks(void)
88770 static void kgdb_tasklet_bpt(unsigned long ing)
88771 {
88772 kgdb_breakpoint();
88773- atomic_set(&kgdb_break_tasklet_var, 0);
88774+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
88775 }
88776
88777 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
88778
88779 void kgdb_schedule_breakpoint(void)
88780 {
88781- if (atomic_read(&kgdb_break_tasklet_var) ||
88782+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
88783 atomic_read(&kgdb_active) != -1 ||
88784 atomic_read(&kgdb_setting_breakpoint))
88785 return;
88786- atomic_inc(&kgdb_break_tasklet_var);
88787+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
88788 tasklet_schedule(&kgdb_tasklet_breakpoint);
88789 }
88790 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
88791diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
88792index 60f6bb8..104bb07 100644
88793--- a/kernel/debug/kdb/kdb_main.c
88794+++ b/kernel/debug/kdb/kdb_main.c
88795@@ -2021,7 +2021,7 @@ static int kdb_lsmod(int argc, const char **argv)
88796 continue;
88797
88798 kdb_printf("%-20s%8u 0x%p ", mod->name,
88799- mod->core_size, (void *)mod);
88800+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
88801 #ifdef CONFIG_MODULE_UNLOAD
88802 kdb_printf("%4d ", module_refcount(mod));
88803 #endif
88804@@ -2031,7 +2031,7 @@ static int kdb_lsmod(int argc, const char **argv)
88805 kdb_printf(" (Loading)");
88806 else
88807 kdb_printf(" (Live)");
88808- kdb_printf(" 0x%p", mod->module_core);
88809+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
88810
88811 #ifdef CONFIG_MODULE_UNLOAD
88812 {
88813diff --git a/kernel/events/core.c b/kernel/events/core.c
88814index 7959624..c01b886 100644
88815--- a/kernel/events/core.c
88816+++ b/kernel/events/core.c
88817@@ -170,8 +170,15 @@ static struct srcu_struct pmus_srcu;
88818 * 0 - disallow raw tracepoint access for unpriv
88819 * 1 - disallow cpu events for unpriv
88820 * 2 - disallow kernel profiling for unpriv
88821+ * 3 - disallow all unpriv perf event use
88822 */
88823-int sysctl_perf_event_paranoid __read_mostly = 1;
88824+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
88825+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
88826+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
88827+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
88828+#else
88829+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
88830+#endif
88831
88832 /* Minimum for 512 kiB + 1 user control page */
88833 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
88834@@ -197,7 +204,7 @@ void update_perf_cpu_limits(void)
88835
88836 tmp *= sysctl_perf_cpu_time_max_percent;
88837 do_div(tmp, 100);
88838- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
88839+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
88840 }
88841
88842 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
88843@@ -303,7 +310,7 @@ void perf_sample_event_took(u64 sample_len_ns)
88844 }
88845 }
88846
88847-static atomic64_t perf_event_id;
88848+static atomic64_unchecked_t perf_event_id;
88849
88850 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
88851 enum event_type_t event_type);
88852@@ -3102,7 +3109,7 @@ static void __perf_event_read(void *info)
88853
88854 static inline u64 perf_event_count(struct perf_event *event)
88855 {
88856- return local64_read(&event->count) + atomic64_read(&event->child_count);
88857+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
88858 }
88859
88860 static u64 perf_event_read(struct perf_event *event)
88861@@ -3528,9 +3535,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
88862 mutex_lock(&event->child_mutex);
88863 total += perf_event_read(event);
88864 *enabled += event->total_time_enabled +
88865- atomic64_read(&event->child_total_time_enabled);
88866+ atomic64_read_unchecked(&event->child_total_time_enabled);
88867 *running += event->total_time_running +
88868- atomic64_read(&event->child_total_time_running);
88869+ atomic64_read_unchecked(&event->child_total_time_running);
88870
88871 list_for_each_entry(child, &event->child_list, child_list) {
88872 total += perf_event_read(child);
88873@@ -3994,10 +4001,10 @@ void perf_event_update_userpage(struct perf_event *event)
88874 userpg->offset -= local64_read(&event->hw.prev_count);
88875
88876 userpg->time_enabled = enabled +
88877- atomic64_read(&event->child_total_time_enabled);
88878+ atomic64_read_unchecked(&event->child_total_time_enabled);
88879
88880 userpg->time_running = running +
88881- atomic64_read(&event->child_total_time_running);
88882+ atomic64_read_unchecked(&event->child_total_time_running);
88883
88884 arch_perf_update_userpage(userpg, now);
88885
88886@@ -4578,7 +4585,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
88887
88888 /* Data. */
88889 sp = perf_user_stack_pointer(regs);
88890- rem = __output_copy_user(handle, (void *) sp, dump_size);
88891+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
88892 dyn_size = dump_size - rem;
88893
88894 perf_output_skip(handle, rem);
88895@@ -4669,11 +4676,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
88896 values[n++] = perf_event_count(event);
88897 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
88898 values[n++] = enabled +
88899- atomic64_read(&event->child_total_time_enabled);
88900+ atomic64_read_unchecked(&event->child_total_time_enabled);
88901 }
88902 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
88903 values[n++] = running +
88904- atomic64_read(&event->child_total_time_running);
88905+ atomic64_read_unchecked(&event->child_total_time_running);
88906 }
88907 if (read_format & PERF_FORMAT_ID)
88908 values[n++] = primary_event_id(event);
88909@@ -7004,7 +7011,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
88910 event->parent = parent_event;
88911
88912 event->ns = get_pid_ns(task_active_pid_ns(current));
88913- event->id = atomic64_inc_return(&perf_event_id);
88914+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
88915
88916 event->state = PERF_EVENT_STATE_INACTIVE;
88917
88918@@ -7285,6 +7292,11 @@ SYSCALL_DEFINE5(perf_event_open,
88919 if (flags & ~PERF_FLAG_ALL)
88920 return -EINVAL;
88921
88922+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
88923+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
88924+ return -EACCES;
88925+#endif
88926+
88927 err = perf_copy_attr(attr_uptr, &attr);
88928 if (err)
88929 return err;
88930@@ -7652,10 +7664,10 @@ static void sync_child_event(struct perf_event *child_event,
88931 /*
88932 * Add back the child's count to the parent's count:
88933 */
88934- atomic64_add(child_val, &parent_event->child_count);
88935- atomic64_add(child_event->total_time_enabled,
88936+ atomic64_add_unchecked(child_val, &parent_event->child_count);
88937+ atomic64_add_unchecked(child_event->total_time_enabled,
88938 &parent_event->child_total_time_enabled);
88939- atomic64_add(child_event->total_time_running,
88940+ atomic64_add_unchecked(child_event->total_time_running,
88941 &parent_event->child_total_time_running);
88942
88943 /*
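The perf hunks add a fourth paranoia level (3: no unprivileged perf at all) on top of mainline's 0/1/2 and default to it under GRKERNSEC_PERF_HARDEN; despite the renamed variable, the knob is still the usual sysctl. A quick userspace probe of the effective level via /proc/sys/kernel/perf_event_paranoid (meanings follow the comment in the hunk; level 3 is a grsecurity/distro extension, not mainline):

#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
    int level;

    if (!f || fscanf(f, "%d", &level) != 1) {
        perror("perf_event_paranoid");
        return 1;
    }
    fclose(f);

    printf("perf_event_paranoid = %d\n", level);
    if (level >= 3)
        puts("all unprivileged perf use denied");
    else if (level >= 2)
        puts("no unprivileged kernel profiling");
    else if (level >= 1)
        puts("no unprivileged CPU-wide events");
    else if (level >= 0)
        puts("no unprivileged raw tracepoint access");
    else
        puts("unrestricted");
    return 0;
}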
88944diff --git a/kernel/events/internal.h b/kernel/events/internal.h
88945index 569b2187..19940d9 100644
88946--- a/kernel/events/internal.h
88947+++ b/kernel/events/internal.h
88948@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
88949 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
88950 }
88951
88952-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
88953+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
88954 static inline unsigned long \
88955 func_name(struct perf_output_handle *handle, \
88956- const void *buf, unsigned long len) \
88957+ const void user *buf, unsigned long len) \
88958 { \
88959 unsigned long size, written; \
88960 \
88961@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
88962 return 0;
88963 }
88964
88965-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
88966+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
88967
88968 static inline unsigned long
88969 memcpy_skip(void *dst, const void *src, unsigned long n)
88970@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
88971 return 0;
88972 }
88973
88974-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
88975+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
88976
88977 #ifndef arch_perf_out_copy_user
88978 #define arch_perf_out_copy_user arch_perf_out_copy_user
88979@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
88980 }
88981 #endif
88982
88983-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
88984+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
88985
88986 /* Callchain handling */
88987 extern struct perf_callchain_entry *
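The internal.h change threads an address-space qualifier through DEFINE_OUTPUT_COPY so that __output_copy_user() takes a true const void __user * while the kernel-to-kernel variants keep plain pointers, preserving sparse checking instead of laundering user pointers through void *. A stripped-down illustration of a qualifier passed as a macro argument; the names are mine, and it builds with plain GCC, where the annotation is empty:

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

#include <stdio.h>
#include <string.h>

/* One macro, two address spaces: 'asqual' is spliced into the pointer
 * type, just like the 'user' parameter added to DEFINE_OUTPUT_COPY. */
#define DEFINE_COPY(name, asqual)                                   \
static unsigned long name(void *dst, const void asqual *src,        \
                          unsigned long len)                         \
{                                                                    \
    /* a real kernel body would pick memcpy vs copy_from_user */     \
    memcpy(dst, (const void *)src, len);                             \
    return 0;                                                        \
}

DEFINE_COPY(copy_from_kernel, )         /* kernel-space source */
DEFINE_COPY(copy_from_user_sk, __user)  /* user-space source   */

int main(void)
{
    char dst[8];
    copy_from_kernel(dst, "abc", 4);
    puts(dst);
    copy_from_user_sk(dst, (const void __user *)"xyz", 4);
    puts(dst);
    return 0;
}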
88988diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
88989index cb346f2..e4dc317 100644
88990--- a/kernel/events/uprobes.c
88991+++ b/kernel/events/uprobes.c
88992@@ -1670,7 +1670,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
88993 {
88994 struct page *page;
88995 uprobe_opcode_t opcode;
88996- int result;
88997+ long result;
88998
88999 pagefault_disable();
89000 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
89001diff --git a/kernel/exit.c b/kernel/exit.c
89002index 6806c55..a5fb128 100644
89003--- a/kernel/exit.c
89004+++ b/kernel/exit.c
89005@@ -171,6 +171,10 @@ void release_task(struct task_struct *p)
89006 struct task_struct *leader;
89007 int zap_leader;
89008 repeat:
89009+#ifdef CONFIG_NET
89010+ gr_del_task_from_ip_table(p);
89011+#endif
89012+
89013 /* don't need to get the RCU readlock here - the process is dead and
89014 * can't be modifying its own credentials. But shut RCU-lockdep up */
89015 rcu_read_lock();
89016@@ -655,6 +659,8 @@ void do_exit(long code)
89017 int group_dead;
89018 TASKS_RCU(int tasks_rcu_i);
89019
89020+ set_fs(USER_DS);
89021+
89022 profile_task_exit(tsk);
89023
89024 WARN_ON(blk_needs_flush_plug(tsk));
89025@@ -671,7 +677,6 @@ void do_exit(long code)
89026 * mm_release()->clear_child_tid() from writing to a user-controlled
89027 * kernel address.
89028 */
89029- set_fs(USER_DS);
89030
89031 ptrace_event(PTRACE_EVENT_EXIT, code);
89032
89033@@ -729,6 +734,9 @@ void do_exit(long code)
89034 tsk->exit_code = code;
89035 taskstats_exit(tsk, group_dead);
89036
89037+ gr_acl_handle_psacct(tsk, code);
89038+ gr_acl_handle_exit();
89039+
89040 exit_mm(tsk);
89041
89042 if (group_dead)
89043@@ -848,7 +856,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
89044 * Take down every thread in the group. This is called by fatal signals
89045 * as well as by sys_exit_group (below).
89046 */
89047-void
89048+__noreturn void
89049 do_group_exit(int exit_code)
89050 {
89051 struct signal_struct *sig = current->signal;
89052diff --git a/kernel/fork.c b/kernel/fork.c
89053index 4dc2dda..651add0 100644
89054--- a/kernel/fork.c
89055+++ b/kernel/fork.c
89056@@ -177,12 +177,54 @@ static void free_thread_info(struct thread_info *ti)
89057 void thread_info_cache_init(void)
89058 {
89059 thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
89060- THREAD_SIZE, 0, NULL);
89061+ THREAD_SIZE, SLAB_USERCOPY, NULL);
89062 BUG_ON(thread_info_cache == NULL);
89063 }
89064 # endif
89065 #endif
89066
89067+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89068+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
89069+ int node, void **lowmem_stack)
89070+{
89071+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
89072+ void *ret = NULL;
89073+ unsigned int i;
89074+
89075+ *lowmem_stack = alloc_thread_info_node(tsk, node);
89076+ if (*lowmem_stack == NULL)
89077+ goto out;
89078+
89079+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
89080+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
89081+
89082+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
89083+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
89084+ if (ret == NULL) {
89085+ free_thread_info(*lowmem_stack);
89086+ *lowmem_stack = NULL;
89087+ }
89088+
89089+out:
89090+ return ret;
89091+}
89092+
89093+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
89094+{
89095+ unmap_process_stacks(tsk);
89096+}
89097+#else
89098+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
89099+ int node, void **lowmem_stack)
89100+{
89101+ return alloc_thread_info_node(tsk, node);
89102+}
89103+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
89104+{
89105+ free_thread_info(ti);
89106+}
89107+#endif
89108+
89109 /* SLAB cache for signal_struct structures (tsk->signal) */
89110 static struct kmem_cache *signal_cachep;
89111
89112@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
89113 /* SLAB cache for mm_struct structures (tsk->mm) */
89114 static struct kmem_cache *mm_cachep;
89115
89116-static void account_kernel_stack(struct thread_info *ti, int account)
89117+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
89118 {
89119+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89120+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
89121+#else
89122 struct zone *zone = page_zone(virt_to_page(ti));
89123+#endif
89124
89125 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
89126 }
89127
89128 void free_task(struct task_struct *tsk)
89129 {
89130- account_kernel_stack(tsk->stack, -1);
89131+ account_kernel_stack(tsk, tsk->stack, -1);
89132 arch_release_thread_info(tsk->stack);
89133- free_thread_info(tsk->stack);
89134+ gr_free_thread_info(tsk, tsk->stack);
89135 rt_mutex_debug_task_free(tsk);
89136 ftrace_graph_exit_task(tsk);
89137 put_seccomp_filter(tsk);
89138@@ -306,6 +352,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89139 {
89140 struct task_struct *tsk;
89141 struct thread_info *ti;
89142+ void *lowmem_stack;
89143 int node = tsk_fork_get_node(orig);
89144 int err;
89145
89146@@ -313,7 +360,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89147 if (!tsk)
89148 return NULL;
89149
89150- ti = alloc_thread_info_node(tsk, node);
89151+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
89152 if (!ti)
89153 goto free_tsk;
89154
89155@@ -322,6 +369,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89156 goto free_ti;
89157
89158 tsk->stack = ti;
89159+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89160+ tsk->lowmem_stack = lowmem_stack;
89161+#endif
89162 #ifdef CONFIG_SECCOMP
89163 /*
89164 * We must handle setting up seccomp filters once we're under
89165@@ -338,7 +388,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89166 set_task_stack_end_magic(tsk);
89167
89168 #ifdef CONFIG_CC_STACKPROTECTOR
89169- tsk->stack_canary = get_random_int();
89170+ tsk->stack_canary = pax_get_random_long();
89171 #endif
89172
89173 /*
89174@@ -352,24 +402,92 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89175 tsk->splice_pipe = NULL;
89176 tsk->task_frag.page = NULL;
89177
89178- account_kernel_stack(ti, 1);
89179+ account_kernel_stack(tsk, ti, 1);
89180
89181 return tsk;
89182
89183 free_ti:
89184- free_thread_info(ti);
89185+ gr_free_thread_info(tsk, ti);
89186 free_tsk:
89187 free_task_struct(tsk);
89188 return NULL;
89189 }
89190
89191 #ifdef CONFIG_MMU
89192-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89193+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
89194+{
89195+ struct vm_area_struct *tmp;
89196+ unsigned long charge;
89197+ struct file *file;
89198+ int retval;
89199+
89200+ charge = 0;
89201+ if (mpnt->vm_flags & VM_ACCOUNT) {
89202+ unsigned long len = vma_pages(mpnt);
89203+
89204+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89205+ goto fail_nomem;
89206+ charge = len;
89207+ }
89208+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89209+ if (!tmp)
89210+ goto fail_nomem;
89211+ *tmp = *mpnt;
89212+ tmp->vm_mm = mm;
89213+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
89214+ retval = vma_dup_policy(mpnt, tmp);
89215+ if (retval)
89216+ goto fail_nomem_policy;
89217+ if (anon_vma_fork(tmp, mpnt))
89218+ goto fail_nomem_anon_vma_fork;
89219+ tmp->vm_flags &= ~VM_LOCKED;
89220+ tmp->vm_next = tmp->vm_prev = NULL;
89221+ tmp->vm_mirror = NULL;
89222+ file = tmp->vm_file;
89223+ if (file) {
89224+ struct inode *inode = file_inode(file);
89225+ struct address_space *mapping = file->f_mapping;
89226+
89227+ get_file(file);
89228+ if (tmp->vm_flags & VM_DENYWRITE)
89229+ atomic_dec(&inode->i_writecount);
89230+ i_mmap_lock_write(mapping);
89231+ if (tmp->vm_flags & VM_SHARED)
89232+ atomic_inc(&mapping->i_mmap_writable);
89233+ flush_dcache_mmap_lock(mapping);
89234+ /* insert tmp into the share list, just after mpnt */
89235+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
89236+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
89237+ else
89238+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
89239+ flush_dcache_mmap_unlock(mapping);
89240+ i_mmap_unlock_write(mapping);
89241+ }
89242+
89243+ /*
89244+ * Clear hugetlb-related page reserves for children. This only
89245+ * affects MAP_PRIVATE mappings. Faults generated by the child
89246+ * are not guaranteed to succeed, even if read-only
89247+ */
89248+ if (is_vm_hugetlb_page(tmp))
89249+ reset_vma_resv_huge_pages(tmp);
89250+
89251+ return tmp;
89252+
89253+fail_nomem_anon_vma_fork:
89254+ mpol_put(vma_policy(tmp));
89255+fail_nomem_policy:
89256+ kmem_cache_free(vm_area_cachep, tmp);
89257+fail_nomem:
89258+ vm_unacct_memory(charge);
89259+ return NULL;
89260+}
89261+
89262+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89263 {
89264 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
89265 struct rb_node **rb_link, *rb_parent;
89266 int retval;
89267- unsigned long charge;
89268
89269 uprobe_start_dup_mmap();
89270 down_write(&oldmm->mmap_sem);
89271@@ -397,55 +515,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89272
89273 prev = NULL;
89274 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
89275- struct file *file;
89276-
89277 if (mpnt->vm_flags & VM_DONTCOPY) {
89278 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
89279 -vma_pages(mpnt));
89280 continue;
89281 }
89282- charge = 0;
89283- if (mpnt->vm_flags & VM_ACCOUNT) {
89284- unsigned long len = vma_pages(mpnt);
89285-
89286- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89287- goto fail_nomem;
89288- charge = len;
89289- }
89290- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89291- if (!tmp)
89292- goto fail_nomem;
89293- *tmp = *mpnt;
89294- INIT_LIST_HEAD(&tmp->anon_vma_chain);
89295- retval = vma_dup_policy(mpnt, tmp);
89296- if (retval)
89297- goto fail_nomem_policy;
89298- tmp->vm_mm = mm;
89299- if (anon_vma_fork(tmp, mpnt))
89300- goto fail_nomem_anon_vma_fork;
89301- tmp->vm_flags &= ~VM_LOCKED;
89302- tmp->vm_next = tmp->vm_prev = NULL;
89303- file = tmp->vm_file;
89304- if (file) {
89305- struct inode *inode = file_inode(file);
89306- struct address_space *mapping = file->f_mapping;
89307-
89308- get_file(file);
89309- if (tmp->vm_flags & VM_DENYWRITE)
89310- atomic_dec(&inode->i_writecount);
89311- i_mmap_lock_write(mapping);
89312- if (tmp->vm_flags & VM_SHARED)
89313- atomic_inc(&mapping->i_mmap_writable);
89314- flush_dcache_mmap_lock(mapping);
89315- /* insert tmp into the share list, just after mpnt */
89316- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
89317- vma_nonlinear_insert(tmp,
89318- &mapping->i_mmap_nonlinear);
89319- else
89320- vma_interval_tree_insert_after(tmp, mpnt,
89321- &mapping->i_mmap);
89322- flush_dcache_mmap_unlock(mapping);
89323- i_mmap_unlock_write(mapping);
89324+ tmp = dup_vma(mm, oldmm, mpnt);
89325+ if (!tmp) {
89326+ retval = -ENOMEM;
89327+ goto out;
89328 }
89329
89330 /*
89331@@ -477,6 +555,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89332 if (retval)
89333 goto out;
89334 }
89335+
89336+#ifdef CONFIG_PAX_SEGMEXEC
89337+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
89338+ struct vm_area_struct *mpnt_m;
89339+
89340+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
89341+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
89342+
89343+ if (!mpnt->vm_mirror)
89344+ continue;
89345+
89346+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
89347+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
89348+ mpnt->vm_mirror = mpnt_m;
89349+ } else {
89350+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
89351+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
89352+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
89353+ mpnt->vm_mirror->vm_mirror = mpnt;
89354+ }
89355+ }
89356+ BUG_ON(mpnt_m);
89357+ }
89358+#endif
89359+
89360 /* a new mm has just been created */
89361 arch_dup_mmap(oldmm, mm);
89362 retval = 0;
89363@@ -486,14 +589,6 @@ out:
89364 up_write(&oldmm->mmap_sem);
89365 uprobe_end_dup_mmap();
89366 return retval;
89367-fail_nomem_anon_vma_fork:
89368- mpol_put(vma_policy(tmp));
89369-fail_nomem_policy:
89370- kmem_cache_free(vm_area_cachep, tmp);
89371-fail_nomem:
89372- retval = -ENOMEM;
89373- vm_unacct_memory(charge);
89374- goto out;
89375 }
89376
89377 static inline int mm_alloc_pgd(struct mm_struct *mm)
89378@@ -734,8 +829,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
89379 return ERR_PTR(err);
89380
89381 mm = get_task_mm(task);
89382- if (mm && mm != current->mm &&
89383- !ptrace_may_access(task, mode)) {
89384+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
89385+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
89386 mmput(mm);
89387 mm = ERR_PTR(-EACCES);
89388 }
89389@@ -938,13 +1033,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
89390 spin_unlock(&fs->lock);
89391 return -EAGAIN;
89392 }
89393- fs->users++;
89394+ atomic_inc(&fs->users);
89395 spin_unlock(&fs->lock);
89396 return 0;
89397 }
89398 tsk->fs = copy_fs_struct(fs);
89399 if (!tsk->fs)
89400 return -ENOMEM;
89401+ /* Carry through gr_chroot_dentry and is_chrooted instead
89402+ of recomputing it here. Already copied when the task struct
89403+ is duplicated. This allows pivot_root to not be treated as
89404+ a chroot
89405+ */
89406+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
89407+
89408 return 0;
89409 }
89410
89411@@ -1182,7 +1284,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
89412 * parts of the process environment (as per the clone
89413 * flags). The actual kick-off is left to the caller.
89414 */
89415-static struct task_struct *copy_process(unsigned long clone_flags,
89416+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
89417 unsigned long stack_start,
89418 unsigned long stack_size,
89419 int __user *child_tidptr,
89420@@ -1253,6 +1355,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89421 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
89422 #endif
89423 retval = -EAGAIN;
89424+
89425+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
89426+
89427 if (atomic_read(&p->real_cred->user->processes) >=
89428 task_rlimit(p, RLIMIT_NPROC)) {
89429 if (p->real_cred->user != INIT_USER &&
89430@@ -1502,6 +1607,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89431 goto bad_fork_free_pid;
89432 }
89433
89434+ /* synchronizes with gr_set_acls()
89435+ we need to call this past the point of no return for fork()
89436+ */
89437+ gr_copy_label(p);
89438+
89439 if (likely(p->pid)) {
89440 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
89441
89442@@ -1592,6 +1702,8 @@ bad_fork_cleanup_count:
89443 bad_fork_free:
89444 free_task(p);
89445 fork_out:
89446+ gr_log_forkfail(retval);
89447+
89448 return ERR_PTR(retval);
89449 }
89450
89451@@ -1653,6 +1765,7 @@ long do_fork(unsigned long clone_flags,
89452
89453 p = copy_process(clone_flags, stack_start, stack_size,
89454 child_tidptr, NULL, trace);
89455+ add_latent_entropy();
89456 /*
89457 * Do this prior waking up the new thread - the thread pointer
89458 * might get invalid after that point, if the thread exits quickly.
89459@@ -1669,6 +1782,8 @@ long do_fork(unsigned long clone_flags,
89460 if (clone_flags & CLONE_PARENT_SETTID)
89461 put_user(nr, parent_tidptr);
89462
89463+ gr_handle_brute_check();
89464+
89465 if (clone_flags & CLONE_VFORK) {
89466 p->vfork_done = &vfork;
89467 init_completion(&vfork);
89468@@ -1787,7 +1902,7 @@ void __init proc_caches_init(void)
89469 mm_cachep = kmem_cache_create("mm_struct",
89470 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
89471 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
89472- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
89473+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
89474 mmap_init();
89475 nsproxy_cache_init();
89476 }
89477@@ -1827,7 +1942,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
89478 return 0;
89479
89480 /* don't need lock here; in the worst case we'll do useless copy */
89481- if (fs->users == 1)
89482+ if (atomic_read(&fs->users) == 1)
89483 return 0;
89484
89485 *new_fsp = copy_fs_struct(fs);
89486@@ -1939,7 +2054,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
89487 fs = current->fs;
89488 spin_lock(&fs->lock);
89489 current->fs = new_fs;
89490- if (--fs->users)
89491+ gr_set_chroot_entries(current, &current->fs->root);
89492+ if (atomic_dec_return(&fs->users))
89493 new_fs = NULL;
89494 else
89495 new_fs = fs;
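The largest fork.c change, GRKERNSEC_KSTACKOVERFLOW, is the direct ancestor of mainline's later CONFIG_VMAP_STACK: the thread stack is still built from ordinary pages, but the task runs on a vmap() alias of them, so the stack lives in vmalloc space where guard pages separate allocations and an overflow faults instead of silently corrupting a neighbour (lowmem_stack keeps the physical-view allocation for the places that still need it). A runnable userspace analogue that puts an explicit guard page below a stack-sized buffer:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Allocate a "stack" with an inaccessible guard page below it, the
 * effect the vmap()-based allocation achieves for kernel stacks. */
int main(void)
{
    size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
    size_t stack_pages = 4;
    char *region = mmap(NULL, (stack_pages + 1) * pagesz,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (region == MAP_FAILED)
        return 1;

    mprotect(region, pagesz, PROT_NONE);   /* lowest page becomes the guard */

    char *stack_bottom = region + pagesz;
    char *stack_top = region + (stack_pages + 1) * pagesz;

    memset(stack_bottom, 0, stack_top - stack_bottom);  /* fine */
    printf("stack ok, guard at %p\n", (void *)region);

    /* Uncommenting the next line overflows into the guard page and
     * raises SIGSEGV instead of corrupting a neighbouring allocation:
     * stack_bottom[-1] = 0;
     */
    return 0;
}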
89496diff --git a/kernel/futex.c b/kernel/futex.c
89497index 63678b5..512f9af 100644
89498--- a/kernel/futex.c
89499+++ b/kernel/futex.c
89500@@ -201,7 +201,7 @@ struct futex_pi_state {
89501 atomic_t refcount;
89502
89503 union futex_key key;
89504-};
89505+} __randomize_layout;
89506
89507 /**
89508 * struct futex_q - The hashed futex queue entry, one per waiting task
89509@@ -235,7 +235,7 @@ struct futex_q {
89510 struct rt_mutex_waiter *rt_waiter;
89511 union futex_key *requeue_pi_key;
89512 u32 bitset;
89513-};
89514+} __randomize_layout;
89515
89516 static const struct futex_q futex_q_init = {
89517 /* list gets initialized in queue_me()*/
89518@@ -402,6 +402,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
89519 struct page *page, *page_head;
89520 int err, ro = 0;
89521
89522+#ifdef CONFIG_PAX_SEGMEXEC
89523+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
89524+ return -EFAULT;
89525+#endif
89526+
89527 /*
89528 * The futex address must be "naturally" aligned.
89529 */
89530@@ -601,7 +606,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
89531
89532 static int get_futex_value_locked(u32 *dest, u32 __user *from)
89533 {
89534- int ret;
89535+ unsigned long ret;
89536
89537 pagefault_disable();
89538 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
89539@@ -3006,6 +3011,7 @@ static void __init futex_detect_cmpxchg(void)
89540 {
89541 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
89542 u32 curval;
89543+ mm_segment_t oldfs;
89544
89545 /*
89546 * This will fail and we want it. Some arch implementations do
89547@@ -3017,8 +3023,11 @@ static void __init futex_detect_cmpxchg(void)
89548 * implementation, the non-functional ones will return
89549 * -ENOSYS.
89550 */
89551+ oldfs = get_fs();
89552+ set_fs(USER_DS);
89553 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
89554 futex_cmpxchg_enabled = 1;
89555+ set_fs(oldfs);
89556 #endif
89557 }
89558
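Tagging futex_pi_state and futex_q with __randomize_layout hands them to grsecurity's RANDSTRUCT GCC plugin, which permutes member order at compile time so exploits cannot rely on fixed offsets into these structures; without the plugin the marker expands to nothing. A toy sketch of what the annotation protects against (hypothetical struct; a real randomized build changes every offsetof below from build to build):

#include <stdio.h>
#include <stddef.h>

/* Without the RANDSTRUCT plugin the marker is a no-op; with it, member
 * order, and hence every offsetof, differs per build. */
#define __randomize_layout /* consumed by the plugin at build time */

struct futex_q_like {
    void *task;
    void *lock_ptr;
    unsigned bitset;
} __randomize_layout;

int main(void)
{
    /* Exploit code that hard-wires these offsets breaks on a
     * randomized build, which is the point of the annotation. */
    printf("task     @ %zu\n", offsetof(struct futex_q_like, task));
    printf("lock_ptr @ %zu\n", offsetof(struct futex_q_like, lock_ptr));
    printf("bitset   @ %zu\n", offsetof(struct futex_q_like, bitset));
    return 0;
}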
89559diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
89560index 55c8c93..9ba7ad6 100644
89561--- a/kernel/futex_compat.c
89562+++ b/kernel/futex_compat.c
89563@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
89564 return 0;
89565 }
89566
89567-static void __user *futex_uaddr(struct robust_list __user *entry,
89568+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
89569 compat_long_t futex_offset)
89570 {
89571 compat_uptr_t base = ptr_to_compat(entry);
89572diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
89573index b358a80..fc25240 100644
89574--- a/kernel/gcov/base.c
89575+++ b/kernel/gcov/base.c
89576@@ -114,11 +114,6 @@ void gcov_enable_events(void)
89577 }
89578
89579 #ifdef CONFIG_MODULES
89580-static inline int within(void *addr, void *start, unsigned long size)
89581-{
89582- return ((addr >= start) && (addr < start + size));
89583-}
89584-
89585 /* Update list and generate events when modules are unloaded. */
89586 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
89587 void *data)
89588@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
89589
89590 /* Remove entries located in module from linked list. */
89591 while ((info = gcov_info_next(info))) {
89592- if (within(info, mod->module_core, mod->core_size)) {
89593+ if (within_module_core_rw((unsigned long)info, mod)) {
89594 gcov_info_unlink(prev, info);
89595 if (gcov_events_enabled)
89596 gcov_event(GCOV_REMOVE, info);
89597diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
89598index 8069237..fe712d0 100644
89599--- a/kernel/irq/manage.c
89600+++ b/kernel/irq/manage.c
89601@@ -871,7 +871,7 @@ static int irq_thread(void *data)
89602
89603 action_ret = handler_fn(desc, action);
89604 if (action_ret == IRQ_HANDLED)
89605- atomic_inc(&desc->threads_handled);
89606+ atomic_inc_unchecked(&desc->threads_handled);
89607
89608 wake_threads_waitq(desc);
89609 }
89610diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
89611index e2514b0..de3dfe0 100644
89612--- a/kernel/irq/spurious.c
89613+++ b/kernel/irq/spurious.c
89614@@ -337,7 +337,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
89615 * count. We just care about the count being
89616 * different than the one we saw before.
89617 */
89618- handled = atomic_read(&desc->threads_handled);
89619+ handled = atomic_read_unchecked(&desc->threads_handled);
89620 handled |= SPURIOUS_DEFERRED;
89621 if (handled != desc->threads_handled_last) {
89622 action_ret = IRQ_HANDLED;
89623diff --git a/kernel/jump_label.c b/kernel/jump_label.c
89624index 9019f15..9a3c42e 100644
89625--- a/kernel/jump_label.c
89626+++ b/kernel/jump_label.c
89627@@ -14,6 +14,7 @@
89628 #include <linux/err.h>
89629 #include <linux/static_key.h>
89630 #include <linux/jump_label_ratelimit.h>
89631+#include <linux/mm.h>
89632
89633 #ifdef HAVE_JUMP_LABEL
89634
89635@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
89636
89637 size = (((unsigned long)stop - (unsigned long)start)
89638 / sizeof(struct jump_entry));
89639+ pax_open_kernel();
89640 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
89641+ pax_close_kernel();
89642 }
89643
89644 static void jump_label_update(struct static_key *key, int enable);
89645@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
89646 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
89647 struct jump_entry *iter;
89648
89649+ pax_open_kernel();
89650 for (iter = iter_start; iter < iter_stop; iter++) {
89651 if (within_module_init(iter->code, mod))
89652 iter->code = 0;
89653 }
89654+ pax_close_kernel();
89655 }
89656
89657 static int
89658diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
89659index 5c5987f..bc502b0 100644
89660--- a/kernel/kallsyms.c
89661+++ b/kernel/kallsyms.c
89662@@ -11,6 +11,9 @@
89663 * Changed the compression method from stem compression to "table lookup"
89664 * compression (see scripts/kallsyms.c for a more complete description)
89665 */
89666+#ifdef CONFIG_GRKERNSEC_HIDESYM
89667+#define __INCLUDED_BY_HIDESYM 1
89668+#endif
89669 #include <linux/kallsyms.h>
89670 #include <linux/module.h>
89671 #include <linux/init.h>
89672@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
89673
89674 static inline int is_kernel_inittext(unsigned long addr)
89675 {
89676+ if (system_state != SYSTEM_BOOTING)
89677+ return 0;
89678+
89679 if (addr >= (unsigned long)_sinittext
89680 && addr <= (unsigned long)_einittext)
89681 return 1;
89682 return 0;
89683 }
89684
89685+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89686+#ifdef CONFIG_MODULES
89687+static inline int is_module_text(unsigned long addr)
89688+{
89689+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
89690+ return 1;
89691+
89692+ addr = ktla_ktva(addr);
89693+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
89694+}
89695+#else
89696+static inline int is_module_text(unsigned long addr)
89697+{
89698+ return 0;
89699+}
89700+#endif
89701+#endif
89702+
89703 static inline int is_kernel_text(unsigned long addr)
89704 {
89705 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
89706@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
89707
89708 static inline int is_kernel(unsigned long addr)
89709 {
89710+
89711+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89712+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
89713+ return 1;
89714+
89715+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
89716+#else
89717 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
89718+#endif
89719+
89720 return 1;
89721 return in_gate_area_no_mm(addr);
89722 }
89723
89724 static int is_ksym_addr(unsigned long addr)
89725 {
89726+
89727+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89728+ if (is_module_text(addr))
89729+ return 0;
89730+#endif
89731+
89732 if (all_var)
89733 return is_kernel(addr);
89734
89735@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
89736
89737 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
89738 {
89739- iter->name[0] = '\0';
89740 iter->nameoff = get_symbol_offset(new_pos);
89741 iter->pos = new_pos;
89742 }
89743@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
89744 {
89745 struct kallsym_iter *iter = m->private;
89746
89747+#ifdef CONFIG_GRKERNSEC_HIDESYM
89748+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
89749+ return 0;
89750+#endif
89751+
89752 /* Some debugging symbols have no name. Ignore them. */
89753 if (!iter->name[0])
89754 return 0;
89755@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
89756 */
89757 type = iter->exported ? toupper(iter->type) :
89758 tolower(iter->type);
89759+
89760 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
89761 type, iter->name, iter->module_name);
89762 } else
89763diff --git a/kernel/kcmp.c b/kernel/kcmp.c
89764index 0aa69ea..a7fcafb 100644
89765--- a/kernel/kcmp.c
89766+++ b/kernel/kcmp.c
89767@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
89768 struct task_struct *task1, *task2;
89769 int ret;
89770
89771+#ifdef CONFIG_GRKERNSEC
89772+ return -ENOSYS;
89773+#endif
89774+
89775 rcu_read_lock();
89776
89777 /*
89778diff --git a/kernel/kexec.c b/kernel/kexec.c
89779index 9a8a01a..3c35dd6 100644
89780--- a/kernel/kexec.c
89781+++ b/kernel/kexec.c
89782@@ -1349,7 +1349,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
89783 compat_ulong_t, flags)
89784 {
89785 struct compat_kexec_segment in;
89786- struct kexec_segment out, __user *ksegments;
89787+ struct kexec_segment out;
89788+ struct kexec_segment __user *ksegments;
89789 unsigned long i, result;
89790
89791 /* Don't allow clients that don't understand the native
89792diff --git a/kernel/kmod.c b/kernel/kmod.c
89793index 2777f40..a689506 100644
89794--- a/kernel/kmod.c
89795+++ b/kernel/kmod.c
89796@@ -68,7 +68,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
89797 kfree(info->argv);
89798 }
89799
89800-static int call_modprobe(char *module_name, int wait)
89801+static int call_modprobe(char *module_name, char *module_param, int wait)
89802 {
89803 struct subprocess_info *info;
89804 static char *envp[] = {
89805@@ -78,7 +78,7 @@ static int call_modprobe(char *module_name, int wait)
89806 NULL
89807 };
89808
89809- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
89810+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
89811 if (!argv)
89812 goto out;
89813
89814@@ -90,7 +90,8 @@ static int call_modprobe(char *module_name, int wait)
89815 argv[1] = "-q";
89816 argv[2] = "--";
89817 argv[3] = module_name; /* check free_modprobe_argv() */
89818- argv[4] = NULL;
89819+ argv[4] = module_param;
89820+ argv[5] = NULL;
89821
89822 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
89823 NULL, free_modprobe_argv, NULL);
89824@@ -122,9 +123,8 @@ out:
89825 * If module auto-loading support is disabled then this function
89826 * becomes a no-operation.
89827 */
89828-int __request_module(bool wait, const char *fmt, ...)
89829+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
89830 {
89831- va_list args;
89832 char module_name[MODULE_NAME_LEN];
89833 unsigned int max_modprobes;
89834 int ret;
89835@@ -143,9 +143,7 @@ int __request_module(bool wait, const char *fmt, ...)
89836 if (!modprobe_path[0])
89837 return 0;
89838
89839- va_start(args, fmt);
89840- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
89841- va_end(args);
89842+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
89843 if (ret >= MODULE_NAME_LEN)
89844 return -ENAMETOOLONG;
89845
89846@@ -153,6 +151,20 @@ int __request_module(bool wait, const char *fmt, ...)
89847 if (ret)
89848 return ret;
89849
89850+#ifdef CONFIG_GRKERNSEC_MODHARDEN
89851+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
89852+ /* hack to workaround consolekit/udisks stupidity */
89853+ read_lock(&tasklist_lock);
89854+ if (!strcmp(current->comm, "mount") &&
89855+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
89856+ read_unlock(&tasklist_lock);
89857+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
89858+ return -EPERM;
89859+ }
89860+ read_unlock(&tasklist_lock);
89861+ }
89862+#endif
89863+
89864 /* If modprobe needs a service that is in a module, we get a recursive
89865 * loop. Limit the number of running kmod threads to max_threads/2 or
89866 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
89867@@ -181,16 +193,61 @@ int __request_module(bool wait, const char *fmt, ...)
89868
89869 trace_module_request(module_name, wait, _RET_IP_);
89870
89871- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
89872+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
89873
89874 atomic_dec(&kmod_concurrent);
89875 return ret;
89876 }
89877+
89878+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
89879+{
89880+ va_list args;
89881+ int ret;
89882+
89883+ va_start(args, fmt);
89884+ ret = ____request_module(wait, module_param, fmt, args);
89885+ va_end(args);
89886+
89887+ return ret;
89888+}
89889+
89890+int __request_module(bool wait, const char *fmt, ...)
89891+{
89892+ va_list args;
89893+ int ret;
89894+
89895+#ifdef CONFIG_GRKERNSEC_MODHARDEN
89896+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
89897+ char module_param[MODULE_NAME_LEN];
89898+
89899+ memset(module_param, 0, sizeof(module_param));
89900+
89901+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
89902+
89903+ va_start(args, fmt);
89904+ ret = ____request_module(wait, module_param, fmt, args);
89905+ va_end(args);
89906+
89907+ return ret;
89908+ }
89909+#endif
89910+
89911+ va_start(args, fmt);
89912+ ret = ____request_module(wait, NULL, fmt, args);
89913+ va_end(args);
89914+
89915+ return ret;
89916+}
89917+
89918 EXPORT_SYMBOL(__request_module);
89919 #endif /* CONFIG_MODULES */
89920
89921 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
89922 {
89923+#ifdef CONFIG_GRKERNSEC
89924+ kfree(info->path);
89925+ info->path = info->origpath;
89926+#endif
89927 if (info->cleanup)
89928 (*info->cleanup)(info);
89929 kfree(info);
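/*
 * Editor's sketch (user-space, illustrative names): the rework above splits
 * __request_module() into a va_list-taking worker so that the new
 * ___request_module() entry point can pass an extra modprobe argument while
 * both public varargs wrappers share one implementation. The same pattern
 * in miniature:
 */
#include <stdarg.h>
#include <stdio.h>

static int vdo_request(const char *extra, const char *fmt, va_list ap)
{
        char name[64];

        vsnprintf(name, sizeof(name), fmt, ap);  /* worker consumes the va_list */
        printf("request %s (extra arg: %s)\n", name, extra ? extra : "none");
        return 0;
}

int do_request_param(const char *extra, const char *fmt, ...)
{
        va_list ap;
        int ret;

        va_start(ap, fmt);
        ret = vdo_request(extra, fmt, ap);
        va_end(ap);
        return ret;
}

int do_request(const char *fmt, ...)
{
        va_list ap;
        int ret;

        va_start(ap, fmt);
        ret = vdo_request(NULL, fmt, ap);        /* no extra argument */
        va_end(ap);
        return ret;
}

int main(void)
{
        do_request("fs-%s", "ext4");
        do_request_param("grsec_modharden_normal1000_", "fs-%s", "ext4");
        return 0;
}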
89930@@ -232,6 +289,21 @@ static int ____call_usermodehelper(void *data)
89931 */
89932 set_user_nice(current, 0);
89933
89934+#ifdef CONFIG_GRKERNSEC
89935+ /* this is race-free as far as userland is concerned: we copied
89936+ out the path to be used before reaching this point and are now
89937+ operating on that copy
89938+ */
89939+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
89940+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
89941+ strncmp(sub_info->path, "/usr/libexec/", 13) && strncmp(sub_info->path, "/usr/bin/", 9) &&
89942+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
89943+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of permitted system paths\n", sub_info->path);
89944+ retval = -EPERM;
89945+ goto out;
89946+ }
89947+#endif
89948+
89949 retval = -ENOMEM;
89950 new = prepare_kernel_cred(current);
89951 if (!new)
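/*
 * Editor's sketch: the check above is a prefix whitelist plus a ".."
 * substring test, evaluated on the kernel's private copy of the path. The
 * same policy as a standalone helper (names illustrative; the list mirrors
 * the prefixes hard-coded in the hunk):
 */
#include <stdbool.h>
#include <string.h>

static const char * const helper_prefixes[] = {
        "/sbin/", "/usr/lib/", "/lib/", "/lib64/", "/usr/libexec/", "/usr/bin/",
};

bool helper_path_allowed(const char *path)
{
        size_t i;

        if (strstr(path, ".."))                  /* no traversal components */
                return false;
        if (!strcmp(path, "/usr/share/apport/apport"))
                return true;                     /* single whitelisted exact path */
        for (i = 0; i < sizeof(helper_prefixes) / sizeof(helper_prefixes[0]); i++)
                if (!strncmp(path, helper_prefixes[i], strlen(helper_prefixes[i])))
                        return true;
        return false;
}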
89952@@ -254,8 +326,8 @@ static int ____call_usermodehelper(void *data)
89953 commit_creds(new);
89954
89955 retval = do_execve(getname_kernel(sub_info->path),
89956- (const char __user *const __user *)sub_info->argv,
89957- (const char __user *const __user *)sub_info->envp);
89958+ (const char __user *const __force_user *)sub_info->argv,
89959+ (const char __user *const __force_user *)sub_info->envp);
89960 out:
89961 sub_info->retval = retval;
89962 /* wait_for_helper() will call umh_complete if UHM_WAIT_PROC. */
89963@@ -288,7 +360,7 @@ static int wait_for_helper(void *data)
89964 *
89965 * Thus the __user pointer cast is valid here.
89966 */
89967- sys_wait4(pid, (int __user *)&ret, 0, NULL);
89968+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
89969
89970 /*
89971 * If ret is 0, either ____call_usermodehelper failed and the
89972@@ -510,7 +582,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
89973 goto out;
89974
89975 INIT_WORK(&sub_info->work, __call_usermodehelper);
89976+#ifdef CONFIG_GRKERNSEC
89977+ sub_info->origpath = path;
89978+ sub_info->path = kstrdup(path, gfp_mask);
89979+#else
89980 sub_info->path = path;
89981+#endif
89982 sub_info->argv = argv;
89983 sub_info->envp = envp;
89984
89985@@ -612,7 +689,7 @@ EXPORT_SYMBOL(call_usermodehelper);
89986 static int proc_cap_handler(struct ctl_table *table, int write,
89987 void __user *buffer, size_t *lenp, loff_t *ppos)
89988 {
89989- struct ctl_table t;
89990+ ctl_table_no_const t;
89991 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
89992 kernel_cap_t new_cap;
89993 int err, i;
89994diff --git a/kernel/kprobes.c b/kernel/kprobes.c
89995index ee61992..62142b1 100644
89996--- a/kernel/kprobes.c
89997+++ b/kernel/kprobes.c
89998@@ -31,6 +31,9 @@
89999 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
90000 * <prasanna@in.ibm.com> added function-return probes.
90001 */
90002+#ifdef CONFIG_GRKERNSEC_HIDESYM
90003+#define __INCLUDED_BY_HIDESYM 1
90004+#endif
90005 #include <linux/kprobes.h>
90006 #include <linux/hash.h>
90007 #include <linux/init.h>
90008@@ -122,12 +125,12 @@ enum kprobe_slot_state {
90009
90010 static void *alloc_insn_page(void)
90011 {
90012- return module_alloc(PAGE_SIZE);
90013+ return module_alloc_exec(PAGE_SIZE);
90014 }
90015
90016 static void free_insn_page(void *page)
90017 {
90018- module_memfree(page);
90019+ module_memfree_exec(page);
90020 }
90021
90022 struct kprobe_insn_cache kprobe_insn_slots = {
90023@@ -2191,11 +2194,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
90024 kprobe_type = "k";
90025
90026 if (sym)
90027- seq_printf(pi, "%p %s %s+0x%x %s ",
90028+ seq_printf(pi, "%pK %s %s+0x%x %s ",
90029 p->addr, kprobe_type, sym, offset,
90030 (modname ? modname : " "));
90031 else
90032- seq_printf(pi, "%p %s %p ",
90033+ seq_printf(pi, "%pK %s %pK ",
90034 p->addr, kprobe_type, p->addr);
90035
90036 if (!pp)
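/*
 * Editor's note: %pK (unlike %p) consults the kptr_restrict sysctl, so the
 * hunks above blank out kprobe addresses in debugfs for readers without
 * CAP_SYSLOG. Quick user-space check of the knob (sketch):
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/kernel/kptr_restrict", "r");
        int v = -1;

        if (f && fscanf(f, "%d", &v) != 1)
                v = -1;
        if (f)
                fclose(f);
        printf("kptr_restrict=%d: %%pK %s\n", v,
               v > 0 ? "hides addresses from unprivileged readers"
                     : "prints real addresses");
        return 0;
}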
90037diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
90038index 6683cce..daf8999 100644
90039--- a/kernel/ksysfs.c
90040+++ b/kernel/ksysfs.c
90041@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
90042 {
90043 if (count+1 > UEVENT_HELPER_PATH_LEN)
90044 return -ENOENT;
90045+ if (!capable(CAP_SYS_ADMIN))
90046+ return -EPERM;
90047 memcpy(uevent_helper, buf, count);
90048 uevent_helper[count] = '\0';
90049 if (count && uevent_helper[count-1] == '\n')
90050@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
90051 return count;
90052 }
90053
90054-static struct bin_attribute notes_attr = {
90055+static bin_attribute_no_const notes_attr __read_only = {
90056 .attr = {
90057 .name = "notes",
90058 .mode = S_IRUGO,
90059diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
90060index 88d0d44..e9ce0ee 100644
90061--- a/kernel/locking/lockdep.c
90062+++ b/kernel/locking/lockdep.c
90063@@ -599,6 +599,10 @@ static int static_obj(void *obj)
90064 end = (unsigned long) &_end,
90065 addr = (unsigned long) obj;
90066
90067+#ifdef CONFIG_PAX_KERNEXEC
90068+ start = ktla_ktva(start);
90069+#endif
90070+
90071 /*
90072 * static variable?
90073 */
90074@@ -740,6 +744,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
90075 if (!static_obj(lock->key)) {
90076 debug_locks_off();
90077 printk("INFO: trying to register non-static key.\n");
90078+ printk("lock:%pS key:%pS.\n", lock, lock->key);
90079 printk("the code is fine but needs lockdep annotation.\n");
90080 printk("turning off the locking correctness validator.\n");
90081 dump_stack();
90082@@ -3081,7 +3086,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
90083 if (!class)
90084 return 0;
90085 }
90086- atomic_inc((atomic_t *)&class->ops);
90087+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
90088 if (very_verbose(class)) {
90089 printk("\nacquire class [%p] %s", class->key, class->name);
90090 if (class->name_version > 1)
90091diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
90092index ef43ac4..2720dfa 100644
90093--- a/kernel/locking/lockdep_proc.c
90094+++ b/kernel/locking/lockdep_proc.c
90095@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
90096 return 0;
90097 }
90098
90099- seq_printf(m, "%p", class->key);
90100+ seq_printf(m, "%pK", class->key);
90101 #ifdef CONFIG_DEBUG_LOCKDEP
90102 seq_printf(m, " OPS:%8ld", class->ops);
90103 #endif
90104@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
90105
90106 list_for_each_entry(entry, &class->locks_after, entry) {
90107 if (entry->distance == 1) {
90108- seq_printf(m, " -> [%p] ", entry->class->key);
90109+ seq_printf(m, " -> [%pK] ", entry->class->key);
90110 print_name(m, entry->class);
90111 seq_puts(m, "\n");
90112 }
90113@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
90114 if (!class->key)
90115 continue;
90116
90117- seq_printf(m, "[%p] ", class->key);
90118+ seq_printf(m, "[%pK] ", class->key);
90119 print_name(m, class);
90120 seq_puts(m, "\n");
90121 }
90122@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
90123 if (!i)
90124 seq_line(m, '-', 40-namelen, namelen);
90125
90126- snprintf(ip, sizeof(ip), "[<%p>]",
90127+ snprintf(ip, sizeof(ip), "[<%pK>]",
90128 (void *)class->contention_point[i]);
90129 seq_printf(m, "%40s %14lu %29s %pS\n",
90130 name, stats->contention_point[i],
90131@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
90132 if (!i)
90133 seq_line(m, '-', 40-namelen, namelen);
90134
90135- snprintf(ip, sizeof(ip), "[<%p>]",
90136+ snprintf(ip, sizeof(ip), "[<%pK>]",
90137 (void *)class->contending_point[i]);
90138 seq_printf(m, "%40s %14lu %29s %pS\n",
90139 name, stats->contending_point[i],
90140diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
90141index 9887a90..0cd2b1d 100644
90142--- a/kernel/locking/mcs_spinlock.c
90143+++ b/kernel/locking/mcs_spinlock.c
90144@@ -100,7 +100,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
90145
90146 prev = decode_cpu(old);
90147 node->prev = prev;
90148- ACCESS_ONCE(prev->next) = node;
90149+ ACCESS_ONCE_RW(prev->next) = node;
90150
90151 /*
90152 * Normally @prev is untouchable after the above store; because at that
90153@@ -172,8 +172,8 @@ unqueue:
90154 * it will wait in Step-A.
90155 */
90156
90157- ACCESS_ONCE(next->prev) = prev;
90158- ACCESS_ONCE(prev->next) = next;
90159+ ACCESS_ONCE_RW(next->prev) = prev;
90160+ ACCESS_ONCE_RW(prev->next) = next;
90161
90162 return false;
90163 }
90164@@ -195,13 +195,13 @@ void osq_unlock(struct optimistic_spin_queue *lock)
90165 node = this_cpu_ptr(&osq_node);
90166 next = xchg(&node->next, NULL);
90167 if (next) {
90168- ACCESS_ONCE(next->locked) = 1;
90169+ ACCESS_ONCE_RW(next->locked) = 1;
90170 return;
90171 }
90172
90173 next = osq_wait_next(lock, node, NULL);
90174 if (next)
90175- ACCESS_ONCE(next->locked) = 1;
90176+ ACCESS_ONCE_RW(next->locked) = 1;
90177 }
90178
90179 #endif
90180diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
90181index 4d60986..5d351c1 100644
90182--- a/kernel/locking/mcs_spinlock.h
90183+++ b/kernel/locking/mcs_spinlock.h
90184@@ -78,7 +78,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
90185 */
90186 return;
90187 }
90188- ACCESS_ONCE(prev->next) = node;
90189+ ACCESS_ONCE_RW(prev->next) = node;
90190
90191 /* Wait until the lock holder passes the lock down. */
90192 arch_mcs_spin_lock_contended(&node->locked);
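/*
 * Editor's note: elsewhere in this patch ACCESS_ONCE() is constified so the
 * compiler rejects stores through it; writes must use the _RW variant.
 * Simplified sketch of the two macros, assuming they match this patch's
 * include/linux/compiler.h changes:
 */
#define ACCESS_ONCE(x)          (*(volatile const typeof(x) *)&(x))   /* reads only */
#define ACCESS_ONCE_RW(x)       (*(volatile typeof(x) *)&(x))         /* read or write */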
90193diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
90194index 3ef3736..9c951fa 100644
90195--- a/kernel/locking/mutex-debug.c
90196+++ b/kernel/locking/mutex-debug.c
90197@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
90198 }
90199
90200 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90201- struct thread_info *ti)
90202+ struct task_struct *task)
90203 {
90204 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
90205
90206 /* Mark the current thread as blocked on the lock: */
90207- ti->task->blocked_on = waiter;
90208+ task->blocked_on = waiter;
90209 }
90210
90211 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90212- struct thread_info *ti)
90213+ struct task_struct *task)
90214 {
90215 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
90216- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
90217- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
90218- ti->task->blocked_on = NULL;
90219+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
90220+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
90221+ task->blocked_on = NULL;
90222
90223 list_del_init(&waiter->list);
90224 waiter->task = NULL;
90225diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
90226index 0799fd3..d06ae3b 100644
90227--- a/kernel/locking/mutex-debug.h
90228+++ b/kernel/locking/mutex-debug.h
90229@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
90230 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
90231 extern void debug_mutex_add_waiter(struct mutex *lock,
90232 struct mutex_waiter *waiter,
90233- struct thread_info *ti);
90234+ struct task_struct *task);
90235 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90236- struct thread_info *ti);
90237+ struct task_struct *task);
90238 extern void debug_mutex_unlock(struct mutex *lock);
90239 extern void debug_mutex_init(struct mutex *lock, const char *name,
90240 struct lock_class_key *key);
90241diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
90242index 4541951..39fe90a 100644
90243--- a/kernel/locking/mutex.c
90244+++ b/kernel/locking/mutex.c
90245@@ -524,7 +524,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
90246 goto skip_wait;
90247
90248 debug_mutex_lock_common(lock, &waiter);
90249- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
90250+ debug_mutex_add_waiter(lock, &waiter, task);
90251
90252 /* add waiting tasks to the end of the waitqueue (FIFO): */
90253 list_add_tail(&waiter.list, &lock->wait_list);
90254@@ -569,7 +569,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
90255 schedule_preempt_disabled();
90256 spin_lock_mutex(&lock->wait_lock, flags);
90257 }
90258- mutex_remove_waiter(lock, &waiter, current_thread_info());
90259+ mutex_remove_waiter(lock, &waiter, task);
90260 /* set it to 0 if there are no waiters left: */
90261 if (likely(list_empty(&lock->wait_list)))
90262 atomic_set(&lock->count, 0);
90263@@ -606,7 +606,7 @@ skip_wait:
90264 return 0;
90265
90266 err:
90267- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
90268+ mutex_remove_waiter(lock, &waiter, task);
90269 spin_unlock_mutex(&lock->wait_lock, flags);
90270 debug_mutex_free_waiter(&waiter);
90271 mutex_release(&lock->dep_map, 1, ip);
90272diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
90273index 1d96dd0..994ff19 100644
90274--- a/kernel/locking/rtmutex-tester.c
90275+++ b/kernel/locking/rtmutex-tester.c
90276@@ -22,7 +22,7 @@
90277 #define MAX_RT_TEST_MUTEXES 8
90278
90279 static spinlock_t rttest_lock;
90280-static atomic_t rttest_event;
90281+static atomic_unchecked_t rttest_event;
90282
90283 struct test_thread_data {
90284 int opcode;
90285@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90286
90287 case RTTEST_LOCKCONT:
90288 td->mutexes[td->opdata] = 1;
90289- td->event = atomic_add_return(1, &rttest_event);
90290+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90291 return 0;
90292
90293 case RTTEST_RESET:
90294@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90295 return 0;
90296
90297 case RTTEST_RESETEVENT:
90298- atomic_set(&rttest_event, 0);
90299+ atomic_set_unchecked(&rttest_event, 0);
90300 return 0;
90301
90302 default:
90303@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90304 return ret;
90305
90306 td->mutexes[id] = 1;
90307- td->event = atomic_add_return(1, &rttest_event);
90308+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90309 rt_mutex_lock(&mutexes[id]);
90310- td->event = atomic_add_return(1, &rttest_event);
90311+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90312 td->mutexes[id] = 4;
90313 return 0;
90314
90315@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90316 return ret;
90317
90318 td->mutexes[id] = 1;
90319- td->event = atomic_add_return(1, &rttest_event);
90320+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90321 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
90322- td->event = atomic_add_return(1, &rttest_event);
90323+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90324 td->mutexes[id] = ret ? 0 : 4;
90325 return ret ? -EINTR : 0;
90326
90327@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90328 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
90329 return ret;
90330
90331- td->event = atomic_add_return(1, &rttest_event);
90332+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90333 rt_mutex_unlock(&mutexes[id]);
90334- td->event = atomic_add_return(1, &rttest_event);
90335+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90336 td->mutexes[id] = 0;
90337 return 0;
90338
90339@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90340 break;
90341
90342 td->mutexes[dat] = 2;
90343- td->event = atomic_add_return(1, &rttest_event);
90344+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90345 break;
90346
90347 default:
90348@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90349 return;
90350
90351 td->mutexes[dat] = 3;
90352- td->event = atomic_add_return(1, &rttest_event);
90353+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90354 break;
90355
90356 case RTTEST_LOCKNOWAIT:
90357@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90358 return;
90359
90360 td->mutexes[dat] = 1;
90361- td->event = atomic_add_return(1, &rttest_event);
90362+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90363 return;
90364
90365 default:
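/*
 * Editor's note: under PaX REFCOUNT, plain atomic_t arithmetic traps on
 * overflow to stop refcount-wraparound exploits; atomic_unchecked_t opts a
 * counter out when wrapping is harmless, as with this tester's event
 * sequence number. Conceptual sketch (simplified, not the real headers):
 */
typedef struct { int counter; } atomic_unchecked_t_sketch;

static inline int atomic_add_return_unchecked_sketch(int i,
                                                     atomic_unchecked_t_sketch *v)
{
        /* plain wrapping add, no overflow trap */
        return __sync_add_and_fetch(&v->counter, i);
}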
90366diff --git a/kernel/module.c b/kernel/module.c
90367index d856e96..b82225c 100644
90368--- a/kernel/module.c
90369+++ b/kernel/module.c
90370@@ -59,6 +59,7 @@
90371 #include <linux/jump_label.h>
90372 #include <linux/pfn.h>
90373 #include <linux/bsearch.h>
90374+#include <linux/grsecurity.h>
90375 #include <uapi/linux/module.h>
90376 #include "module-internal.h"
90377
90378@@ -155,7 +156,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
90379
90380 /* Bounds of module allocation, for speeding __module_address.
90381 * Protected by module_mutex. */
90382-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
90383+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
90384+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
90385
90386 int register_module_notifier(struct notifier_block *nb)
90387 {
90388@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90389 return true;
90390
90391 list_for_each_entry_rcu(mod, &modules, list) {
90392- struct symsearch arr[] = {
90393+ struct symsearch modarr[] = {
90394 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
90395 NOT_GPL_ONLY, false },
90396 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
90397@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90398 if (mod->state == MODULE_STATE_UNFORMED)
90399 continue;
90400
90401- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
90402+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
90403 return true;
90404 }
90405 return false;
90406@@ -487,7 +489,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
90407 if (!pcpusec->sh_size)
90408 return 0;
90409
90410- if (align > PAGE_SIZE) {
90411+ if (align-1 >= PAGE_SIZE) {
90412 pr_warn("%s: per-cpu alignment %li > %li\n",
90413 mod->name, align, PAGE_SIZE);
90414 align = PAGE_SIZE;
90415@@ -1053,7 +1055,7 @@ struct module_attribute module_uevent =
90416 static ssize_t show_coresize(struct module_attribute *mattr,
90417 struct module_kobject *mk, char *buffer)
90418 {
90419- return sprintf(buffer, "%u\n", mk->mod->core_size);
90420+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
90421 }
90422
90423 static struct module_attribute modinfo_coresize =
90424@@ -1062,7 +1064,7 @@ static struct module_attribute modinfo_coresize =
90425 static ssize_t show_initsize(struct module_attribute *mattr,
90426 struct module_kobject *mk, char *buffer)
90427 {
90428- return sprintf(buffer, "%u\n", mk->mod->init_size);
90429+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
90430 }
90431
90432 static struct module_attribute modinfo_initsize =
90433@@ -1154,12 +1156,29 @@ static int check_version(Elf_Shdr *sechdrs,
90434 goto bad_version;
90435 }
90436
90437+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90438+ /*
90439+ * avoid potentially printing gibberish on attempted load
90440+ * of a module randomized with a different seed
90441+ */
90442+ pr_warn("no symbol version for %s\n", symname);
90443+#else
90444 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
90445+#endif
90446 return 0;
90447
90448 bad_version:
90449+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90450+ /*
90451+ * avoid potentially printing gibberish on attempted load
90452+ * of a module randomized with a different seed
90453+ */
90454+ pr_warn("attempted module disagrees about version of symbol %s\n",
90455+ symname);
90456+#else
90457 pr_warn("%s: disagrees about version of symbol %s\n",
90458 mod->name, symname);
90459+#endif
90460 return 0;
90461 }
90462
90463@@ -1275,7 +1294,7 @@ resolve_symbol_wait(struct module *mod,
90464 */
90465 #ifdef CONFIG_SYSFS
90466
90467-#ifdef CONFIG_KALLSYMS
90468+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
90469 static inline bool sect_empty(const Elf_Shdr *sect)
90470 {
90471 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
90472@@ -1413,7 +1432,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
90473 {
90474 unsigned int notes, loaded, i;
90475 struct module_notes_attrs *notes_attrs;
90476- struct bin_attribute *nattr;
90477+ bin_attribute_no_const *nattr;
90478
90479 /* failed to create section attributes, so can't create notes */
90480 if (!mod->sect_attrs)
90481@@ -1525,7 +1544,7 @@ static void del_usage_links(struct module *mod)
90482 static int module_add_modinfo_attrs(struct module *mod)
90483 {
90484 struct module_attribute *attr;
90485- struct module_attribute *temp_attr;
90486+ module_attribute_no_const *temp_attr;
90487 int error = 0;
90488 int i;
90489
90490@@ -1735,21 +1754,21 @@ static void set_section_ro_nx(void *base,
90491
90492 static void unset_module_core_ro_nx(struct module *mod)
90493 {
90494- set_page_attributes(mod->module_core + mod->core_text_size,
90495- mod->module_core + mod->core_size,
90496+ set_page_attributes(mod->module_core_rw,
90497+ mod->module_core_rw + mod->core_size_rw,
90498 set_memory_x);
90499- set_page_attributes(mod->module_core,
90500- mod->module_core + mod->core_ro_size,
90501+ set_page_attributes(mod->module_core_rx,
90502+ mod->module_core_rx + mod->core_size_rx,
90503 set_memory_rw);
90504 }
90505
90506 static void unset_module_init_ro_nx(struct module *mod)
90507 {
90508- set_page_attributes(mod->module_init + mod->init_text_size,
90509- mod->module_init + mod->init_size,
90510+ set_page_attributes(mod->module_init_rw,
90511+ mod->module_init_rw + mod->init_size_rw,
90512 set_memory_x);
90513- set_page_attributes(mod->module_init,
90514- mod->module_init + mod->init_ro_size,
90515+ set_page_attributes(mod->module_init_rx,
90516+ mod->module_init_rx + mod->init_size_rx,
90517 set_memory_rw);
90518 }
90519
90520@@ -1762,14 +1781,14 @@ void set_all_modules_text_rw(void)
90521 list_for_each_entry_rcu(mod, &modules, list) {
90522 if (mod->state == MODULE_STATE_UNFORMED)
90523 continue;
90524- if ((mod->module_core) && (mod->core_text_size)) {
90525- set_page_attributes(mod->module_core,
90526- mod->module_core + mod->core_text_size,
90527+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
90528+ set_page_attributes(mod->module_core_rx,
90529+ mod->module_core_rx + mod->core_size_rx,
90530 set_memory_rw);
90531 }
90532- if ((mod->module_init) && (mod->init_text_size)) {
90533- set_page_attributes(mod->module_init,
90534- mod->module_init + mod->init_text_size,
90535+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
90536+ set_page_attributes(mod->module_init_rx,
90537+ mod->module_init_rx + mod->init_size_rx,
90538 set_memory_rw);
90539 }
90540 }
90541@@ -1785,14 +1804,14 @@ void set_all_modules_text_ro(void)
90542 list_for_each_entry_rcu(mod, &modules, list) {
90543 if (mod->state == MODULE_STATE_UNFORMED)
90544 continue;
90545- if ((mod->module_core) && (mod->core_text_size)) {
90546- set_page_attributes(mod->module_core,
90547- mod->module_core + mod->core_text_size,
90548+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
90549+ set_page_attributes(mod->module_core_rx,
90550+ mod->module_core_rx + mod->core_size_rx,
90551 set_memory_ro);
90552 }
90553- if ((mod->module_init) && (mod->init_text_size)) {
90554- set_page_attributes(mod->module_init,
90555- mod->module_init + mod->init_text_size,
90556+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
90557+ set_page_attributes(mod->module_init_rx,
90558+ mod->module_init_rx + mod->init_size_rx,
90559 set_memory_ro);
90560 }
90561 }
90562@@ -1801,7 +1820,15 @@ void set_all_modules_text_ro(void)
90563 #else
90564 static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
90565 static void unset_module_core_ro_nx(struct module *mod) { }
90566-static void unset_module_init_ro_nx(struct module *mod) { }
90567+static void unset_module_init_ro_nx(struct module *mod)
90568+{
90569+
90570+#ifdef CONFIG_PAX_KERNEXEC
90571+ set_memory_nx((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
90572+ set_memory_rw((unsigned long)mod->module_init_rx, PFN_UP(mod->init_size_rx));
90573+#endif
90574+
90575+}
90576 #endif
90577
90578 void __weak module_memfree(void *module_region)
90579@@ -1855,16 +1882,19 @@ static void free_module(struct module *mod)
90580 /* This may be NULL, but that's OK */
90581 unset_module_init_ro_nx(mod);
90582 module_arch_freeing_init(mod);
90583- module_memfree(mod->module_init);
90584+ module_memfree(mod->module_init_rw);
90585+ module_memfree_exec(mod->module_init_rx);
90586 kfree(mod->args);
90587 percpu_modfree(mod);
90588
90589 /* Free lock-classes: */
90590- lockdep_free_key_range(mod->module_core, mod->core_size);
90591+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
90592+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
90593
90594 /* Finally, free the core (containing the module structure) */
90595 unset_module_core_ro_nx(mod);
90596- module_memfree(mod->module_core);
90597+ module_memfree_exec(mod->module_core_rx);
90598+ module_memfree(mod->module_core_rw);
90599
90600 #ifdef CONFIG_MPU
90601 update_protections(current->mm);
90602@@ -1933,9 +1963,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90603 int ret = 0;
90604 const struct kernel_symbol *ksym;
90605
90606+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90607+ int is_fs_load = 0;
90608+ int register_filesystem_found = 0;
90609+ char *p;
90610+
90611+ p = strstr(mod->args, "grsec_modharden_fs");
90612+ if (p) {
90613+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
90614+ /* copy \0 as well */
90615+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
90616+ is_fs_load = 1;
90617+ }
90618+#endif
90619+
90620 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
90621 const char *name = info->strtab + sym[i].st_name;
90622
90623+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90624+ /* it's a real shame this will never get ripped and copied
90625+ upstream! ;(
90626+ */
90627+ if (is_fs_load && !strcmp(name, "register_filesystem"))
90628+ register_filesystem_found = 1;
90629+#endif
90630+
90631 switch (sym[i].st_shndx) {
90632 case SHN_COMMON:
90633 /* Ignore common symbols */
90634@@ -1960,7 +2012,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90635 ksym = resolve_symbol_wait(mod, info, name);
90636 /* Ok if resolved. */
90637 if (ksym && !IS_ERR(ksym)) {
90638+ pax_open_kernel();
90639 sym[i].st_value = ksym->value;
90640+ pax_close_kernel();
90641 break;
90642 }
90643
90644@@ -1979,11 +2033,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90645 secbase = (unsigned long)mod_percpu(mod);
90646 else
90647 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
90648+ pax_open_kernel();
90649 sym[i].st_value += secbase;
90650+ pax_close_kernel();
90651 break;
90652 }
90653 }
90654
90655+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90656+ if (is_fs_load && !register_filesystem_found) {
90657+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
90658+ ret = -EPERM;
90659+ }
90660+#endif
90661+
90662 return ret;
90663 }
90664
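/*
 * Editor's note: pax_open_kernel()/pax_close_kernel() bracket writes to data
 * that KERNEXEC keeps read-only, such as the symbol values patched above. On
 * x86 this amounts to toggling CR0.WP around the store. Conceptual sketch
 * only; the real versions live in the PaX arch headers and also handle
 * preemption details:
 */
static inline void pax_open_kernel_sketch(void)
{
        unsigned long cr0 = read_cr0();

        write_cr0(cr0 & ~X86_CR0_WP);           /* let the kernel write RO pages */
        barrier();
}

static inline void pax_close_kernel_sketch(void)
{
        barrier();
        write_cr0(read_cr0() | X86_CR0_WP);     /* re-arm write protection */
}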
90665@@ -2067,22 +2130,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
90666 || s->sh_entsize != ~0UL
90667 || strstarts(sname, ".init"))
90668 continue;
90669- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
90670+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
90671+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
90672+ else
90673+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
90674 pr_debug("\t%s\n", sname);
90675 }
90676- switch (m) {
90677- case 0: /* executable */
90678- mod->core_size = debug_align(mod->core_size);
90679- mod->core_text_size = mod->core_size;
90680- break;
90681- case 1: /* RO: text and ro-data */
90682- mod->core_size = debug_align(mod->core_size);
90683- mod->core_ro_size = mod->core_size;
90684- break;
90685- case 3: /* whole core */
90686- mod->core_size = debug_align(mod->core_size);
90687- break;
90688- }
90689 }
90690
90691 pr_debug("Init section allocation order:\n");
90692@@ -2096,23 +2149,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
90693 || s->sh_entsize != ~0UL
90694 || !strstarts(sname, ".init"))
90695 continue;
90696- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
90697- | INIT_OFFSET_MASK);
90698+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
90699+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
90700+ else
90701+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
90702+ s->sh_entsize |= INIT_OFFSET_MASK;
90703 pr_debug("\t%s\n", sname);
90704 }
90705- switch (m) {
90706- case 0: /* executable */
90707- mod->init_size = debug_align(mod->init_size);
90708- mod->init_text_size = mod->init_size;
90709- break;
90710- case 1: /* RO: text and ro-data */
90711- mod->init_size = debug_align(mod->init_size);
90712- mod->init_ro_size = mod->init_size;
90713- break;
90714- case 3: /* whole init */
90715- mod->init_size = debug_align(mod->init_size);
90716- break;
90717- }
90718 }
90719 }
90720
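/*
 * Editor's note: the layout rewrite above replaces the single core/init
 * image (with RO/NX boundaries computed afterwards) by two images chosen per
 * section from the ELF flags. The whole split reduces to one predicate
 * (user-space sketch using the standard ELF types):
 */
#include <elf.h>
#include <stdbool.h>

bool section_goes_rw(const Elf64_Shdr *s)
{
        /* writable or non-allocated sections -> RW image;
         * read-only and executable sections  -> RX image */
        return (s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC);
}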
90721@@ -2285,7 +2328,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
90722
90723 /* Put symbol section at end of init part of module. */
90724 symsect->sh_flags |= SHF_ALLOC;
90725- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
90726+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
90727 info->index.sym) | INIT_OFFSET_MASK;
90728 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
90729
90730@@ -2302,13 +2345,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
90731 }
90732
90733 /* Append room for core symbols at end of core part. */
90734- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
90735- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
90736- mod->core_size += strtab_size;
90737+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
90738+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
90739+ mod->core_size_rx += strtab_size;
90740
90741 /* Put string table section at end of init part of module. */
90742 strsect->sh_flags |= SHF_ALLOC;
90743- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
90744+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
90745 info->index.str) | INIT_OFFSET_MASK;
90746 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
90747 }
90748@@ -2326,12 +2369,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
90749 /* Make sure we get permanent strtab: don't use info->strtab. */
90750 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
90751
90752+ pax_open_kernel();
90753+
90754 /* Set types up while we still have access to sections. */
90755 for (i = 0; i < mod->num_symtab; i++)
90756 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
90757
90758- mod->core_symtab = dst = mod->module_core + info->symoffs;
90759- mod->core_strtab = s = mod->module_core + info->stroffs;
90760+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
90761+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
90762 src = mod->symtab;
90763 for (ndst = i = 0; i < mod->num_symtab; i++) {
90764 if (i == 0 ||
90765@@ -2343,6 +2388,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
90766 }
90767 }
90768 mod->core_num_syms = ndst;
90769+
90770+ pax_close_kernel();
90771 }
90772 #else
90773 static inline void layout_symtab(struct module *mod, struct load_info *info)
90774@@ -2376,17 +2423,33 @@ void * __weak module_alloc(unsigned long size)
90775 return vmalloc_exec(size);
90776 }
90777
90778-static void *module_alloc_update_bounds(unsigned long size)
90779+static void *module_alloc_update_bounds_rw(unsigned long size)
90780 {
90781 void *ret = module_alloc(size);
90782
90783 if (ret) {
90784 mutex_lock(&module_mutex);
90785 /* Update module bounds. */
90786- if ((unsigned long)ret < module_addr_min)
90787- module_addr_min = (unsigned long)ret;
90788- if ((unsigned long)ret + size > module_addr_max)
90789- module_addr_max = (unsigned long)ret + size;
90790+ if ((unsigned long)ret < module_addr_min_rw)
90791+ module_addr_min_rw = (unsigned long)ret;
90792+ if ((unsigned long)ret + size > module_addr_max_rw)
90793+ module_addr_max_rw = (unsigned long)ret + size;
90794+ mutex_unlock(&module_mutex);
90795+ }
90796+ return ret;
90797+}
90798+
90799+static void *module_alloc_update_bounds_rx(unsigned long size)
90800+{
90801+ void *ret = module_alloc_exec(size);
90802+
90803+ if (ret) {
90804+ mutex_lock(&module_mutex);
90805+ /* Update module bounds. */
90806+ if ((unsigned long)ret < module_addr_min_rx)
90807+ module_addr_min_rx = (unsigned long)ret;
90808+ if ((unsigned long)ret + size > module_addr_max_rx)
90809+ module_addr_max_rx = (unsigned long)ret + size;
90810 mutex_unlock(&module_mutex);
90811 }
90812 return ret;
90813@@ -2640,7 +2703,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
90814 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
90815
90816 if (info->index.sym == 0) {
90817+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90818+ /*
90819+ * avoid potentially printing gibberish on attempted load
90820+ * of a module randomized with a different seed
90821+ */
90822+ pr_warn("module has no symbols (stripped?)\n");
90823+#else
90824 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
90825+#endif
90826 return ERR_PTR(-ENOEXEC);
90827 }
90828
90829@@ -2656,8 +2727,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
90830 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
90831 {
90832 const char *modmagic = get_modinfo(info, "vermagic");
90833+ const char *license = get_modinfo(info, "license");
90834 int err;
90835
90836+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
90837+ if (!license || !license_is_gpl_compatible(license))
90838+ return -ENOEXEC;
90839+#endif
90840+
90841 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
90842 modmagic = NULL;
90843
90844@@ -2682,7 +2759,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
90845 }
90846
90847 /* Set up license info based on the info section */
90848- set_license(mod, get_modinfo(info, "license"));
90849+ set_license(mod, license);
90850
90851 return 0;
90852 }
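/*
 * Editor's note: under KERNEXEC's OR plugin method the patch refuses modules
 * whose license string is not GPL-compatible before any code is mapped. For
 * reference, upstream's matcher is a plain string comparison (abridged from
 * include/linux/license.h):
 */
#include <string.h>

static inline int license_is_gpl_compatible(const char *license)
{
        return (strcmp(license, "GPL") == 0
                || strcmp(license, "GPL v2") == 0
                || strcmp(license, "GPL and additional rights") == 0
                || strcmp(license, "Dual BSD/GPL") == 0
                || strcmp(license, "Dual MIT/GPL") == 0
                || strcmp(license, "Dual MPL/GPL") == 0);
}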
90853@@ -2776,7 +2853,7 @@ static int move_module(struct module *mod, struct load_info *info)
90854 void *ptr;
90855
90856 /* Do the allocs. */
90857- ptr = module_alloc_update_bounds(mod->core_size);
90858+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
90859 /*
90860 * The pointer to this block is stored in the module structure
90861 * which is inside the block. Just mark it as not being a
90862@@ -2786,11 +2863,11 @@ static int move_module(struct module *mod, struct load_info *info)
90863 if (!ptr)
90864 return -ENOMEM;
90865
90866- memset(ptr, 0, mod->core_size);
90867- mod->module_core = ptr;
90868+ memset(ptr, 0, mod->core_size_rw);
90869+ mod->module_core_rw = ptr;
90870
90871- if (mod->init_size) {
90872- ptr = module_alloc_update_bounds(mod->init_size);
90873+ if (mod->init_size_rw) {
90874+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
90875 /*
90876 * The pointer to this block is stored in the module structure
90877 * which is inside the block. This block doesn't need to be
90878@@ -2799,13 +2876,45 @@ static int move_module(struct module *mod, struct load_info *info)
90879 */
90880 kmemleak_ignore(ptr);
90881 if (!ptr) {
90882- module_memfree(mod->module_core);
90883+ module_memfree(mod->module_core_rw);
90884 return -ENOMEM;
90885 }
90886- memset(ptr, 0, mod->init_size);
90887- mod->module_init = ptr;
90888+ memset(ptr, 0, mod->init_size_rw);
90889+ mod->module_init_rw = ptr;
90890 } else
90891- mod->module_init = NULL;
90892+ mod->module_init_rw = NULL;
90893+
90894+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
90895+ kmemleak_not_leak(ptr);
90896+ if (!ptr) {
90897+ if (mod->module_init_rw)
90898+ module_memfree(mod->module_init_rw);
90899+ module_memfree(mod->module_core_rw);
90900+ return -ENOMEM;
90901+ }
90902+
90903+ pax_open_kernel();
90904+ memset(ptr, 0, mod->core_size_rx);
90905+ pax_close_kernel();
90906+ mod->module_core_rx = ptr;
90907+
90908+ if (mod->init_size_rx) {
90909+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
90910+ kmemleak_ignore(ptr);
90911+ if (!ptr && mod->init_size_rx) {
90912+ module_memfree_exec(mod->module_core_rx);
90913+ if (mod->module_init_rw)
90914+ module_memfree(mod->module_init_rw);
90915+ module_memfree(mod->module_core_rw);
90916+ return -ENOMEM;
90917+ }
90918+
90919+ pax_open_kernel();
90920+ memset(ptr, 0, mod->init_size_rx);
90921+ pax_close_kernel();
90922+ mod->module_init_rx = ptr;
90923+ } else
90924+ mod->module_init_rx = NULL;
90925
90926 /* Transfer each section which specifies SHF_ALLOC */
90927 pr_debug("final section addresses:\n");
90928@@ -2816,16 +2925,45 @@ static int move_module(struct module *mod, struct load_info *info)
90929 if (!(shdr->sh_flags & SHF_ALLOC))
90930 continue;
90931
90932- if (shdr->sh_entsize & INIT_OFFSET_MASK)
90933- dest = mod->module_init
90934- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90935- else
90936- dest = mod->module_core + shdr->sh_entsize;
90937+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
90938+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
90939+ dest = mod->module_init_rw
90940+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90941+ else
90942+ dest = mod->module_init_rx
90943+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90944+ } else {
90945+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
90946+ dest = mod->module_core_rw + shdr->sh_entsize;
90947+ else
90948+ dest = mod->module_core_rx + shdr->sh_entsize;
90949+ }
90950+
90951+ if (shdr->sh_type != SHT_NOBITS) {
90952+
90953+#ifdef CONFIG_PAX_KERNEXEC
90954+#ifdef CONFIG_X86_64
90955+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
90956+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
90957+#endif
90958+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
90959+ pax_open_kernel();
90960+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
90961+ pax_close_kernel();
90962+ } else
90963+#endif
90964
90965- if (shdr->sh_type != SHT_NOBITS)
90966 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
90967+ }
90968 /* Update sh_addr to point to copy in image. */
90969- shdr->sh_addr = (unsigned long)dest;
90970+
90971+#ifdef CONFIG_PAX_KERNEXEC
90972+ if (shdr->sh_flags & SHF_EXECINSTR)
90973+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
90974+ else
90975+#endif
90976+
90977+ shdr->sh_addr = (unsigned long)dest;
90978 pr_debug("\t0x%lx %s\n",
90979 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
90980 }
90981@@ -2882,12 +3020,12 @@ static void flush_module_icache(const struct module *mod)
90982 * Do it before processing of module parameters, so the module
90983 * can provide parameter accessor functions of its own.
90984 */
90985- if (mod->module_init)
90986- flush_icache_range((unsigned long)mod->module_init,
90987- (unsigned long)mod->module_init
90988- + mod->init_size);
90989- flush_icache_range((unsigned long)mod->module_core,
90990- (unsigned long)mod->module_core + mod->core_size);
90991+ if (mod->module_init_rx)
90992+ flush_icache_range((unsigned long)mod->module_init_rx,
90993+ (unsigned long)mod->module_init_rx
90994+ + mod->init_size_rx);
90995+ flush_icache_range((unsigned long)mod->module_core_rx,
90996+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
90997
90998 set_fs(old_fs);
90999 }
91000@@ -2945,8 +3083,10 @@ static void module_deallocate(struct module *mod, struct load_info *info)
91001 {
91002 percpu_modfree(mod);
91003 module_arch_freeing_init(mod);
91004- module_memfree(mod->module_init);
91005- module_memfree(mod->module_core);
91006+ module_memfree_exec(mod->module_init_rx);
91007+ module_memfree_exec(mod->module_core_rx);
91008+ module_memfree(mod->module_init_rw);
91009+ module_memfree(mod->module_core_rw);
91010 }
91011
91012 int __weak module_finalize(const Elf_Ehdr *hdr,
91013@@ -2959,7 +3099,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
91014 static int post_relocation(struct module *mod, const struct load_info *info)
91015 {
91016 /* Sort exception table now relocations are done. */
91017+ pax_open_kernel();
91018 sort_extable(mod->extable, mod->extable + mod->num_exentries);
91019+ pax_close_kernel();
91020
91021 /* Copy relocated percpu area over. */
91022 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
91023@@ -3001,13 +3143,15 @@ static void do_mod_ctors(struct module *mod)
91024 /* For freeing module_init on success, in case kallsyms traversing */
91025 struct mod_initfree {
91026 struct rcu_head rcu;
91027- void *module_init;
91028+ void *module_init_rw;
91029+ void *module_init_rx;
91030 };
91031
91032 static void do_free_init(struct rcu_head *head)
91033 {
91034 struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
91035- module_memfree(m->module_init);
91036+ module_memfree(m->module_init_rw);
91037+ module_memfree_exec(m->module_init_rx);
91038 kfree(m);
91039 }
91040
91041@@ -3022,7 +3166,8 @@ static int do_init_module(struct module *mod)
91042 ret = -ENOMEM;
91043 goto fail;
91044 }
91045- freeinit->module_init = mod->module_init;
91046+ freeinit->module_init_rw = mod->module_init_rw;
91047+ freeinit->module_init_rx = mod->module_init_rx;
91048
91049 /*
91050 * We want to find out whether @mod uses async during init. Clear
91051@@ -3081,10 +3226,10 @@ static int do_init_module(struct module *mod)
91052 #endif
91053 unset_module_init_ro_nx(mod);
91054 module_arch_freeing_init(mod);
91055- mod->module_init = NULL;
91056- mod->init_size = 0;
91057- mod->init_ro_size = 0;
91058- mod->init_text_size = 0;
91059+ mod->module_init_rw = NULL;
91060+ mod->module_init_rx = NULL;
91061+ mod->init_size_rw = 0;
91062+ mod->init_size_rx = 0;
91063 /*
91064 * We want to free module_init, but be aware that kallsyms may be
91065 * walking this with preempt disabled. In all the failure paths,
91066@@ -3198,16 +3343,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
91067 module_bug_finalize(info->hdr, info->sechdrs, mod);
91068
91069 /* Set RO and NX regions for core */
91070- set_section_ro_nx(mod->module_core,
91071- mod->core_text_size,
91072- mod->core_ro_size,
91073- mod->core_size);
91074+ set_section_ro_nx(mod->module_core_rx,
91075+ mod->core_size_rx,
91076+ mod->core_size_rx,
91077+ mod->core_size_rx);
91078
91079 /* Set RO and NX regions for init */
91080- set_section_ro_nx(mod->module_init,
91081- mod->init_text_size,
91082- mod->init_ro_size,
91083- mod->init_size);
91084+ set_section_ro_nx(mod->module_init_rx,
91085+ mod->init_size_rx,
91086+ mod->init_size_rx,
91087+ mod->init_size_rx);
91088
91089 /* Mark state as coming so strong_try_module_get() ignores us,
91090 * but kallsyms etc. can see us. */
91091@@ -3291,9 +3436,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
91092 if (err)
91093 goto free_unload;
91094
91095+ /* Now copy in args */
91096+ mod->args = strndup_user(uargs, ~0UL >> 1);
91097+ if (IS_ERR(mod->args)) {
91098+ err = PTR_ERR(mod->args);
91099+ goto free_unload;
91100+ }
91101+
91102 /* Set up MODINFO_ATTR fields */
91103 setup_modinfo(mod, info);
91104
91105+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91106+ {
91107+ char *p, *p2;
91108+
91109+ if (strstr(mod->args, "grsec_modharden_netdev")) {
91110+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
91111+ err = -EPERM;
91112+ goto free_modinfo;
91113+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
91114+ p += sizeof("grsec_modharden_normal") - 1;
91115+ p2 = strstr(p, "_");
91116+ if (p2) {
91117+ *p2 = '\0';
91118+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
91119+ *p2 = '_';
91120+ }
91121+ err = -EPERM;
91122+ goto free_modinfo;
91123+ }
91124+ }
91125+#endif
91126+
91127 /* Fix up syms, so that st_value is a pointer to location. */
91128 err = simplify_symbols(mod, info);
91129 if (err < 0)
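/*
 * Editor's sketch (user-space, illustrative): the denial path above recovers
 * the requesting uid from the "grsec_modharden_normal<uid>_" marker that the
 * hardened __request_module() appended to the modprobe arguments. The parse
 * in miniature:
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char args[64];
        char *p, *p2;

        snprintf(args, sizeof(args), "grsec_modharden_normal%u_", 1000u);

        p = strstr(args, "grsec_modharden_normal");
        if (p) {
                p += sizeof("grsec_modharden_normal") - 1;  /* skip the tag */
                p2 = strchr(p, '_');
                if (p2) {
                        *p2 = '\0';                         /* isolate the uid digits */
                        printf("auto-load requested by uid %s\n", p);
                }
        }
        return 0;
}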
91130@@ -3309,13 +3483,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
91131
91132 flush_module_icache(mod);
91133
91134- /* Now copy in args */
91135- mod->args = strndup_user(uargs, ~0UL >> 1);
91136- if (IS_ERR(mod->args)) {
91137- err = PTR_ERR(mod->args);
91138- goto free_arch_cleanup;
91139- }
91140-
91141 dynamic_debug_setup(info->debug, info->num_debug);
91142
91143 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
91144@@ -3363,11 +3530,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
91145 ddebug_cleanup:
91146 dynamic_debug_remove(info->debug);
91147 synchronize_sched();
91148- kfree(mod->args);
91149- free_arch_cleanup:
91150 module_arch_cleanup(mod);
91151 free_modinfo:
91152 free_modinfo(mod);
91153+ kfree(mod->args);
91154 free_unload:
91155 module_unload_free(mod);
91156 unlink_mod:
91157@@ -3454,10 +3620,16 @@ static const char *get_ksymbol(struct module *mod,
91158 unsigned long nextval;
91159
91160 /* At worst, next value is at end of module */
91161- if (within_module_init(addr, mod))
91162- nextval = (unsigned long)mod->module_init+mod->init_text_size;
91163+ if (within_module_init_rx(addr, mod))
91164+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
91165+ else if (within_module_init_rw(addr, mod))
91166+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
91167+ else if (within_module_core_rx(addr, mod))
91168+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
91169+ else if (within_module_core_rw(addr, mod))
91170+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
91171 else
91172- nextval = (unsigned long)mod->module_core+mod->core_text_size;
91173+ return NULL;
91174
91175 /* Scan for closest preceding symbol, and next symbol. (ELF
91176 starts real symbols at 1). */
91177@@ -3705,7 +3877,7 @@ static int m_show(struct seq_file *m, void *p)
91178 return 0;
91179
91180 seq_printf(m, "%s %u",
91181- mod->name, mod->init_size + mod->core_size);
91182+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
91183 print_unload_info(m, mod);
91184
91185 /* Informative for users. */
91186@@ -3714,7 +3886,7 @@ static int m_show(struct seq_file *m, void *p)
91187 mod->state == MODULE_STATE_COMING ? "Loading" :
91188 "Live");
91189 /* Used by oprofile and other similar tools. */
91190- seq_printf(m, " 0x%pK", mod->module_core);
91191+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
91192
91193 /* Taints info */
91194 if (mod->taints)
91195@@ -3750,7 +3922,17 @@ static const struct file_operations proc_modules_operations = {
91196
91197 static int __init proc_modules_init(void)
91198 {
91199+#ifndef CONFIG_GRKERNSEC_HIDESYM
91200+#ifdef CONFIG_GRKERNSEC_PROC_USER
91201+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91202+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
91203+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
91204+#else
91205 proc_create("modules", 0, NULL, &proc_modules_operations);
91206+#endif
91207+#else
91208+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91209+#endif
91210 return 0;
91211 }
91212 module_init(proc_modules_init);
91213@@ -3811,7 +3993,8 @@ struct module *__module_address(unsigned long addr)
91214 {
91215 struct module *mod;
91216
91217- if (addr < module_addr_min || addr > module_addr_max)
91218+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
91219+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
91220 return NULL;
91221
91222 list_for_each_entry_rcu(mod, &modules, list) {
91223@@ -3852,11 +4035,20 @@ bool is_module_text_address(unsigned long addr)
91224 */
91225 struct module *__module_text_address(unsigned long addr)
91226 {
91227- struct module *mod = __module_address(addr);
91228+ struct module *mod;
91229+
91230+#ifdef CONFIG_X86_32
91231+ addr = ktla_ktva(addr);
91232+#endif
91233+
91234+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
91235+ return NULL;
91236+
91237+ mod = __module_address(addr);
91238+
91239 if (mod) {
91240 /* Make sure it's within the text section. */
91241- if (!within(addr, mod->module_init, mod->init_text_size)
91242- && !within(addr, mod->module_core, mod->core_text_size))
91243+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
91244 mod = NULL;
91245 }
91246 return mod;
91247diff --git a/kernel/notifier.c b/kernel/notifier.c
91248index 4803da6..1c5eea6 100644
91249--- a/kernel/notifier.c
91250+++ b/kernel/notifier.c
91251@@ -5,6 +5,7 @@
91252 #include <linux/rcupdate.h>
91253 #include <linux/vmalloc.h>
91254 #include <linux/reboot.h>
91255+#include <linux/mm.h>
91256
91257 /*
91258 * Notifier list for kernel code which wants to be called
91259@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
91260 while ((*nl) != NULL) {
91261 if (n->priority > (*nl)->priority)
91262 break;
91263- nl = &((*nl)->next);
91264+ nl = (struct notifier_block **)&((*nl)->next);
91265 }
91266- n->next = *nl;
91267+ pax_open_kernel();
91268+ *(const void **)&n->next = *nl;
91269 rcu_assign_pointer(*nl, n);
91270+ pax_close_kernel();
91271 return 0;
91272 }
91273
91274@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
91275 return 0;
91276 if (n->priority > (*nl)->priority)
91277 break;
91278- nl = &((*nl)->next);
91279+ nl = (struct notifier_block **)&((*nl)->next);
91280 }
91281- n->next = *nl;
91282+ pax_open_kernel();
91283+ *(const void **)&n->next = *nl;
91284 rcu_assign_pointer(*nl, n);
91285+ pax_close_kernel();
91286 return 0;
91287 }
91288
91289@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
91290 {
91291 while ((*nl) != NULL) {
91292 if ((*nl) == n) {
91293+ pax_open_kernel();
91294 rcu_assign_pointer(*nl, n->next);
91295+ pax_close_kernel();
91296 return 0;
91297 }
91298- nl = &((*nl)->next);
91299+ nl = (struct notifier_block **)&((*nl)->next);
91300 }
91301 return -ENOENT;
91302 }
91303diff --git a/kernel/padata.c b/kernel/padata.c
91304index 161402f..598814c 100644
91305--- a/kernel/padata.c
91306+++ b/kernel/padata.c
91307@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
91308 * seq_nr mod. number of cpus in use.
91309 */
91310
91311- seq_nr = atomic_inc_return(&pd->seq_nr);
91312+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
91313 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
91314
91315 return padata_index_to_cpu(pd, cpu_index);
91316@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
91317 padata_init_pqueues(pd);
91318 padata_init_squeues(pd);
91319 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
91320- atomic_set(&pd->seq_nr, -1);
91321+ atomic_set_unchecked(&pd->seq_nr, -1);
91322 atomic_set(&pd->reorder_objects, 0);
91323 atomic_set(&pd->refcnt, 0);
91324 pd->pinst = pinst;
91325diff --git a/kernel/panic.c b/kernel/panic.c
91326index 4d8d6f9..97b9b9c 100644
91327--- a/kernel/panic.c
91328+++ b/kernel/panic.c
91329@@ -54,7 +54,7 @@ EXPORT_SYMBOL(panic_blink);
91330 /*
91331 * Stop ourselves in panic -- architecture code may override this
91332 */
91333-void __weak panic_smp_self_stop(void)
91334+void __weak __noreturn panic_smp_self_stop(void)
91335 {
91336 while (1)
91337 cpu_relax();
91338@@ -423,7 +423,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
91339 disable_trace_on_warning();
91340
91341 pr_warn("------------[ cut here ]------------\n");
91342- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
91343+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
91344 raw_smp_processor_id(), current->pid, file, line, caller);
91345
91346 if (args)
91347@@ -488,7 +488,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
91348 */
91349 __visible void __stack_chk_fail(void)
91350 {
91351- panic("stack-protector: Kernel stack is corrupted in: %p\n",
91352+ dump_stack();
91353+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
91354 __builtin_return_address(0));
91355 }
91356 EXPORT_SYMBOL(__stack_chk_fail);
91357diff --git a/kernel/pid.c b/kernel/pid.c
91358index cd36a5e..11f185d 100644
91359--- a/kernel/pid.c
91360+++ b/kernel/pid.c
91361@@ -33,6 +33,7 @@
91362 #include <linux/rculist.h>
91363 #include <linux/bootmem.h>
91364 #include <linux/hash.h>
91365+#include <linux/security.h>
91366 #include <linux/pid_namespace.h>
91367 #include <linux/init_task.h>
91368 #include <linux/syscalls.h>
91369@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
91370
91371 int pid_max = PID_MAX_DEFAULT;
91372
91373-#define RESERVED_PIDS 300
91374+#define RESERVED_PIDS 500
91375
91376 int pid_max_min = RESERVED_PIDS + 1;
91377 int pid_max_max = PID_MAX_LIMIT;
91378@@ -450,10 +451,18 @@ EXPORT_SYMBOL(pid_task);
91379 */
91380 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
91381 {
91382+ struct task_struct *task;
91383+
91384 rcu_lockdep_assert(rcu_read_lock_held(),
91385 "find_task_by_pid_ns() needs rcu_read_lock()"
91386 " protection");
91387- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91388+
91389+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91390+
91391+ if (gr_pid_is_chrooted(task))
91392+ return NULL;
91393+
91394+ return task;
91395 }
91396
91397 struct task_struct *find_task_by_vpid(pid_t vnr)
91398@@ -461,6 +470,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
91399 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
91400 }
91401
91402+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
91403+{
91404+ rcu_lockdep_assert(rcu_read_lock_held(),
91405+ "find_task_by_vpid_unrestricted() needs rcu_read_lock()"
91406+ " protection");
91407+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
91408+}
91409+
91410 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
91411 {
91412 struct pid *pid;
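
The pid.c hunk wraps the standard lookup in a policy filter: find_task_by_pid_ns() still resolves the PID, but gr_pid_is_chrooted() can veto the result so a chrooted caller never sees tasks outside its chroot, while the new find_task_by_vpid_unrestricted() preserves unfiltered behaviour for internal callers. A hedged userspace sketch of that filter-after-lookup shape (the names below are stand-ins, not kernel APIs):

#include <stdio.h>
#include <stddef.h>

struct task { int pid; int outside_chroot; };

static struct task tasks[] = { { 1, 0 }, { 100, 1 } };

/* stand-in for gr_pid_is_chrooted(): purely illustrative policy */
static int hidden_from_caller(const struct task *t)
{
        return t->outside_chroot;
}

static struct task *raw_lookup(int pid)
{
        for (size_t i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
                if (tasks[i].pid == pid)
                        return &tasks[i];
        return NULL;
}

/* the pattern from the patch: normal lookup, then a policy veto */
static struct task *filtered_lookup(int pid)
{
        struct task *t = raw_lookup(pid);

        if (t && hidden_from_caller(t))
                return NULL;
        return t;
}

int main(void)
{
        printf("pid 1:   %p\n", (void *)filtered_lookup(1));
        printf("pid 100: %p\n", (void *)filtered_lookup(100));
        return 0;
}
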
91413diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
91414index a65ba13..f600dbb 100644
91415--- a/kernel/pid_namespace.c
91416+++ b/kernel/pid_namespace.c
91417@@ -274,7 +274,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
91418 void __user *buffer, size_t *lenp, loff_t *ppos)
91419 {
91420 struct pid_namespace *pid_ns = task_active_pid_ns(current);
91421- struct ctl_table tmp = *table;
91422+ ctl_table_no_const tmp = *table;
91423
91424 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
91425 return -EPERM;
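
ctl_table_no_const, substituted here and in many sysctl handlers below, is the writable twin of a structure that PaX constification otherwise forces read-only: the global table stays const, and per-request adjustments happen on a stack copy of the no-const type. A compact userspace model of the idea:

#include <stdio.h>

struct table { const char *name; int mode; };

static const struct table global_tbl = { "pid_max", 0644 };

static void handle_request(int privileged)
{
        struct table tmp = global_tbl; /* writable copy; global stays const */

        if (!privileged)
                tmp.mode = 0444;       /* degrade to read-only for this call */
        printf("%s served with mode %o\n", tmp.name, tmp.mode);
}

int main(void)
{
        handle_request(1);
        handle_request(0);
        return 0;
}
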
91426diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
91427index 48b28d3..c63ccaf 100644
91428--- a/kernel/power/Kconfig
91429+++ b/kernel/power/Kconfig
91430@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
91431 config HIBERNATION
91432 bool "Hibernation (aka 'suspend to disk')"
91433 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
91434+ depends on !GRKERNSEC_KMEM
91435+ depends on !PAX_MEMORY_SANITIZE
91436 select HIBERNATE_CALLBACKS
91437 select LZO_COMPRESS
91438 select LZO_DECOMPRESS
91439diff --git a/kernel/power/process.c b/kernel/power/process.c
91440index 5a6ec86..3a8c884 100644
91441--- a/kernel/power/process.c
91442+++ b/kernel/power/process.c
91443@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
91444 unsigned int elapsed_msecs;
91445 bool wakeup = false;
91446 int sleep_usecs = USEC_PER_MSEC;
91447+ bool timedout = false;
91448
91449 do_gettimeofday(&start);
91450
91451@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
91452
91453 while (true) {
91454 todo = 0;
91455+ if (time_after(jiffies, end_time))
91456+ timedout = true;
91457 read_lock(&tasklist_lock);
91458 for_each_process_thread(g, p) {
91459 if (p == current || !freeze_task(p))
91460 continue;
91461
91462- if (!freezer_should_skip(p))
91463+ if (!freezer_should_skip(p)) {
91464 todo++;
91465+ if (timedout) {
91466+ printk(KERN_ERR "Task refusing to freeze:\n");
91467+ sched_show_task(p);
91468+ }
91469+ }
91470 }
91471 read_unlock(&tasklist_lock);
91472
91473@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
91474 todo += wq_busy;
91475 }
91476
91477- if (!todo || time_after(jiffies, end_time))
91478+ if (!todo || timedout)
91479 break;
91480
91481 if (pm_wakeup_pending()) {
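
The freezer change latches the deadline into a local timedout flag once per scan pass, so the new "Task refusing to freeze" diagnostic and the loop-exit condition are guaranteed to agree on the same decision instead of each sampling jiffies separately. A tiny userspace model (time() stands in for jiffies; the pre-expired deadline is just so the demo terminates immediately):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        time_t end_time = time(NULL) - 1;   /* already expired, for the demo */
        bool timedout = false;
        int todo;

        do {
                if (time(NULL) > end_time)
                        timedout = true;    /* latched once per pass */

                todo = 1;                   /* pretend one task won't freeze */
                if (todo && timedout)
                        printf("task refusing to freeze\n");
        } while (todo && !timedout);        /* same decision as the printout */

        return 0;
}
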
91482diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
91483index 2cdd353..7df1786 100644
91484--- a/kernel/printk/printk.c
91485+++ b/kernel/printk/printk.c
91486@@ -486,6 +486,11 @@ int check_syslog_permissions(int type, bool from_file)
91487 if (from_file && type != SYSLOG_ACTION_OPEN)
91488 return 0;
91489
91490+#ifdef CONFIG_GRKERNSEC_DMESG
91491+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
91492+ return -EPERM;
91493+#endif
91494+
91495 if (syslog_action_restricted(type)) {
91496 if (capable(CAP_SYSLOG))
91497 return 0;
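
The check_syslog_permissions() hunk inserts grsecurity's dmesg restriction ahead of the stock policy: with GRKERNSEC_DMESG enabled, a caller lacking the relevant capabilities fails closed with -EPERM before any other branch is consulted. A toy model of that early-deny gate (cap_ok() is a stand-in privilege check, not a kernel API):

#include <errno.h>
#include <stdio.h>

static int restrict_dmesg = 1;          /* models grsec_enable_dmesg */

static int cap_ok(int uid) { return uid == 0; }  /* toy privilege model */

static int check_syslog_permissions(int uid)
{
        if (restrict_dmesg && !cap_ok(uid))
                return -EPERM;          /* deny before any other policy */
        return 0;                       /* fall through to stock checks */
}

int main(void)
{
        printf("root: %d\n", check_syslog_permissions(0));
        printf("user: %d\n", check_syslog_permissions(1000));
        return 0;
}
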
91498diff --git a/kernel/profile.c b/kernel/profile.c
91499index 54bf5ba..df6e0a2 100644
91500--- a/kernel/profile.c
91501+++ b/kernel/profile.c
91502@@ -37,7 +37,7 @@ struct profile_hit {
91503 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
91504 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
91505
91506-static atomic_t *prof_buffer;
91507+static atomic_unchecked_t *prof_buffer;
91508 static unsigned long prof_len, prof_shift;
91509
91510 int prof_on __read_mostly;
91511@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
91512 hits[i].pc = 0;
91513 continue;
91514 }
91515- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
91516+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
91517 hits[i].hits = hits[i].pc = 0;
91518 }
91519 }
91520@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
91521 * Add the current hit(s) and flush the write-queue out
91522 * to the global buffer:
91523 */
91524- atomic_add(nr_hits, &prof_buffer[pc]);
91525+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
91526 for (i = 0; i < NR_PROFILE_HIT; ++i) {
91527- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
91528+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
91529 hits[i].pc = hits[i].hits = 0;
91530 }
91531 out:
91532@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
91533 {
91534 unsigned long pc;
91535 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
91536- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
91537+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
91538 }
91539 #endif /* !CONFIG_SMP */
91540
91541@@ -490,7 +490,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
91542 return -EFAULT;
91543 buf++; p++; count--; read++;
91544 }
91545- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
91546+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
91547 if (copy_to_user(buf, (void *)pnt, count))
91548 return -EFAULT;
91549 read += count;
91550@@ -521,7 +521,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
91551 }
91552 #endif
91553 profile_discard_flip_buffers();
91554- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
91555+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
91556 return count;
91557 }
91558
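
Note how the prof_buffer type change drags two sizeof sites along with it in read_profile() and write_profile(). Deriving the size from the object rather than naming the type would have made those sites track the change automatically; a small sketch of that idiom:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        long *buf = malloc(8 * sizeof(*buf));   /* follows buf's type */

        if (!buf)
                return 1;
        memset(buf, 0, 8 * sizeof(*buf));       /* stays correct even if the
                                                   element type changes */
        printf("%zu bytes cleared\n", 8 * sizeof(*buf));
        free(buf);
        return 0;
}
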
91559diff --git a/kernel/ptrace.c b/kernel/ptrace.c
91560index 1eb9d90..d40d21e 100644
91561--- a/kernel/ptrace.c
91562+++ b/kernel/ptrace.c
91563@@ -321,7 +321,7 @@ static int ptrace_attach(struct task_struct *task, long request,
91564 if (seize)
91565 flags |= PT_SEIZED;
91566 rcu_read_lock();
91567- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
91568+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
91569 flags |= PT_PTRACE_CAP;
91570 rcu_read_unlock();
91571 task->ptrace = flags;
91572@@ -515,7 +515,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
91573 break;
91574 return -EIO;
91575 }
91576- if (copy_to_user(dst, buf, retval))
91577+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
91578 return -EFAULT;
91579 copied += retval;
91580 src += retval;
91581@@ -783,7 +783,7 @@ int ptrace_request(struct task_struct *child, long request,
91582 bool seized = child->ptrace & PT_SEIZED;
91583 int ret = -EIO;
91584 siginfo_t siginfo, *si;
91585- void __user *datavp = (void __user *) data;
91586+ void __user *datavp = (__force void __user *) data;
91587 unsigned long __user *datalp = datavp;
91588 unsigned long flags;
91589
91590@@ -1029,14 +1029,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
91591 goto out;
91592 }
91593
91594+ if (gr_handle_ptrace(child, request)) {
91595+ ret = -EPERM;
91596+ goto out_put_task_struct;
91597+ }
91598+
91599 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
91600 ret = ptrace_attach(child, request, addr, data);
91601 /*
91602 * Some architectures need to do book-keeping after
91603 * a ptrace attach.
91604 */
91605- if (!ret)
91606+ if (!ret) {
91607 arch_ptrace_attach(child);
91608+ gr_audit_ptrace(child);
91609+ }
91610 goto out_put_task_struct;
91611 }
91612
91613@@ -1064,7 +1071,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
91614 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
91615 if (copied != sizeof(tmp))
91616 return -EIO;
91617- return put_user(tmp, (unsigned long __user *)data);
91618+ return put_user(tmp, (__force unsigned long __user *)data);
91619 }
91620
91621 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
91622@@ -1158,7 +1165,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
91623 }
91624
91625 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
91626- compat_long_t, addr, compat_long_t, data)
91627+ compat_ulong_t, addr, compat_ulong_t, data)
91628 {
91629 struct task_struct *child;
91630 long ret;
91631@@ -1174,14 +1181,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
91632 goto out;
91633 }
91634
91635+ if (gr_handle_ptrace(child, request)) {
91636+ ret = -EPERM;
91637+ goto out_put_task_struct;
91638+ }
91639+
91640 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
91641 ret = ptrace_attach(child, request, addr, data);
91642 /*
91643 * Some architectures need to do book-keeping after
91644 * a ptrace attach.
91645 */
91646- if (!ret)
91647+ if (!ret) {
91648 arch_ptrace_attach(child);
91649+ gr_audit_ptrace(child);
91650+ }
91651 goto out_put_task_struct;
91652 }
91653
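
Beyond the gr_handle_ptrace()/gr_audit_ptrace() hooks, ptrace_readdata() gains a belt-and-braces bound: even though retval should never exceed sizeof(buf), the copy to userspace now refuses impossible lengths outright. A hedged userspace sketch of that fail-closed copy:

#include <stdio.h>
#include <string.h>

static int copy_out(char *dst, size_t dst_len,
                    const char *src, size_t src_len, size_t retval)
{
        /* fail closed if the producer reports an impossible length */
        if (retval > src_len || retval > dst_len)
                return -1;
        memcpy(dst, src, retval);
        return 0;
}

int main(void)
{
        char buf[128] = "hello";
        char out[16];

        printf("ok:  %d\n", copy_out(out, sizeof(out), buf, sizeof(buf), 6));
        printf("bad: %d\n", copy_out(out, sizeof(out), buf, sizeof(buf), 4096));
        return 0;
}
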
91654diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
91655index 4d559ba..053da37 100644
91656--- a/kernel/rcu/rcutorture.c
91657+++ b/kernel/rcu/rcutorture.c
91658@@ -134,12 +134,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
91659 rcu_torture_count) = { 0 };
91660 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
91661 rcu_torture_batch) = { 0 };
91662-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
91663-static atomic_t n_rcu_torture_alloc;
91664-static atomic_t n_rcu_torture_alloc_fail;
91665-static atomic_t n_rcu_torture_free;
91666-static atomic_t n_rcu_torture_mberror;
91667-static atomic_t n_rcu_torture_error;
91668+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
91669+static atomic_unchecked_t n_rcu_torture_alloc;
91670+static atomic_unchecked_t n_rcu_torture_alloc_fail;
91671+static atomic_unchecked_t n_rcu_torture_free;
91672+static atomic_unchecked_t n_rcu_torture_mberror;
91673+static atomic_unchecked_t n_rcu_torture_error;
91674 static long n_rcu_torture_barrier_error;
91675 static long n_rcu_torture_boost_ktrerror;
91676 static long n_rcu_torture_boost_rterror;
91677@@ -148,7 +148,7 @@ static long n_rcu_torture_boosts;
91678 static long n_rcu_torture_timers;
91679 static long n_barrier_attempts;
91680 static long n_barrier_successes;
91681-static atomic_long_t n_cbfloods;
91682+static atomic_long_unchecked_t n_cbfloods;
91683 static struct list_head rcu_torture_removed;
91684
91685 static int rcu_torture_writer_state;
91686@@ -211,11 +211,11 @@ rcu_torture_alloc(void)
91687
91688 spin_lock_bh(&rcu_torture_lock);
91689 if (list_empty(&rcu_torture_freelist)) {
91690- atomic_inc(&n_rcu_torture_alloc_fail);
91691+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
91692 spin_unlock_bh(&rcu_torture_lock);
91693 return NULL;
91694 }
91695- atomic_inc(&n_rcu_torture_alloc);
91696+ atomic_inc_unchecked(&n_rcu_torture_alloc);
91697 p = rcu_torture_freelist.next;
91698 list_del_init(p);
91699 spin_unlock_bh(&rcu_torture_lock);
91700@@ -228,7 +228,7 @@ rcu_torture_alloc(void)
91701 static void
91702 rcu_torture_free(struct rcu_torture *p)
91703 {
91704- atomic_inc(&n_rcu_torture_free);
91705+ atomic_inc_unchecked(&n_rcu_torture_free);
91706 spin_lock_bh(&rcu_torture_lock);
91707 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
91708 spin_unlock_bh(&rcu_torture_lock);
91709@@ -312,7 +312,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
91710 i = rp->rtort_pipe_count;
91711 if (i > RCU_TORTURE_PIPE_LEN)
91712 i = RCU_TORTURE_PIPE_LEN;
91713- atomic_inc(&rcu_torture_wcount[i]);
91714+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
91715 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
91716 rp->rtort_mbtest = 0;
91717 return true;
91718@@ -799,7 +799,7 @@ rcu_torture_cbflood(void *arg)
91719 VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
91720 do {
91721 schedule_timeout_interruptible(cbflood_inter_holdoff);
91722- atomic_long_inc(&n_cbfloods);
91723+ atomic_long_inc_unchecked(&n_cbfloods);
91724 WARN_ON(signal_pending(current));
91725 for (i = 0; i < cbflood_n_burst; i++) {
91726 for (j = 0; j < cbflood_n_per_burst; j++) {
91727@@ -918,7 +918,7 @@ rcu_torture_writer(void *arg)
91728 i = old_rp->rtort_pipe_count;
91729 if (i > RCU_TORTURE_PIPE_LEN)
91730 i = RCU_TORTURE_PIPE_LEN;
91731- atomic_inc(&rcu_torture_wcount[i]);
91732+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
91733 old_rp->rtort_pipe_count++;
91734 switch (synctype[torture_random(&rand) % nsynctypes]) {
91735 case RTWS_DEF_FREE:
91736@@ -1036,7 +1036,7 @@ static void rcu_torture_timer(unsigned long unused)
91737 return;
91738 }
91739 if (p->rtort_mbtest == 0)
91740- atomic_inc(&n_rcu_torture_mberror);
91741+ atomic_inc_unchecked(&n_rcu_torture_mberror);
91742 spin_lock(&rand_lock);
91743 cur_ops->read_delay(&rand);
91744 n_rcu_torture_timers++;
91745@@ -1106,7 +1106,7 @@ rcu_torture_reader(void *arg)
91746 continue;
91747 }
91748 if (p->rtort_mbtest == 0)
91749- atomic_inc(&n_rcu_torture_mberror);
91750+ atomic_inc_unchecked(&n_rcu_torture_mberror);
91751 cur_ops->read_delay(&rand);
91752 preempt_disable();
91753 pipe_count = p->rtort_pipe_count;
91754@@ -1173,11 +1173,11 @@ rcu_torture_stats_print(void)
91755 rcu_torture_current,
91756 rcu_torture_current_version,
91757 list_empty(&rcu_torture_freelist),
91758- atomic_read(&n_rcu_torture_alloc),
91759- atomic_read(&n_rcu_torture_alloc_fail),
91760- atomic_read(&n_rcu_torture_free));
91761+ atomic_read_unchecked(&n_rcu_torture_alloc),
91762+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
91763+ atomic_read_unchecked(&n_rcu_torture_free));
91764 pr_cont("rtmbe: %d rtbke: %ld rtbre: %ld ",
91765- atomic_read(&n_rcu_torture_mberror),
91766+ atomic_read_unchecked(&n_rcu_torture_mberror),
91767 n_rcu_torture_boost_ktrerror,
91768 n_rcu_torture_boost_rterror);
91769 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
91770@@ -1189,17 +1189,17 @@ rcu_torture_stats_print(void)
91771 n_barrier_successes,
91772 n_barrier_attempts,
91773 n_rcu_torture_barrier_error);
91774- pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
91775+ pr_cont("cbflood: %ld\n", atomic_long_read_unchecked(&n_cbfloods));
91776
91777 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
91778- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
91779+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
91780 n_rcu_torture_barrier_error != 0 ||
91781 n_rcu_torture_boost_ktrerror != 0 ||
91782 n_rcu_torture_boost_rterror != 0 ||
91783 n_rcu_torture_boost_failure != 0 ||
91784 i > 1) {
91785 pr_cont("%s", "!!! ");
91786- atomic_inc(&n_rcu_torture_error);
91787+ atomic_inc_unchecked(&n_rcu_torture_error);
91788 WARN_ON_ONCE(1);
91789 }
91790 pr_cont("Reader Pipe: ");
91791@@ -1216,7 +1216,7 @@ rcu_torture_stats_print(void)
91792 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
91793 pr_cont("Free-Block Circulation: ");
91794 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
91795- pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
91796+ pr_cont(" %d", atomic_read_unchecked(&rcu_torture_wcount[i]));
91797 }
91798 pr_cont("\n");
91799
91800@@ -1560,7 +1560,7 @@ rcu_torture_cleanup(void)
91801
91802 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
91803
91804- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
91805+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
91806 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
91807 else if (torture_onoff_failures())
91808 rcu_torture_print_module_parms(cur_ops,
91809@@ -1685,18 +1685,18 @@ rcu_torture_init(void)
91810
91811 rcu_torture_current = NULL;
91812 rcu_torture_current_version = 0;
91813- atomic_set(&n_rcu_torture_alloc, 0);
91814- atomic_set(&n_rcu_torture_alloc_fail, 0);
91815- atomic_set(&n_rcu_torture_free, 0);
91816- atomic_set(&n_rcu_torture_mberror, 0);
91817- atomic_set(&n_rcu_torture_error, 0);
91818+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
91819+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
91820+ atomic_set_unchecked(&n_rcu_torture_free, 0);
91821+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
91822+ atomic_set_unchecked(&n_rcu_torture_error, 0);
91823 n_rcu_torture_barrier_error = 0;
91824 n_rcu_torture_boost_ktrerror = 0;
91825 n_rcu_torture_boost_rterror = 0;
91826 n_rcu_torture_boost_failure = 0;
91827 n_rcu_torture_boosts = 0;
91828 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
91829- atomic_set(&rcu_torture_wcount[i], 0);
91830+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
91831 for_each_possible_cpu(cpu) {
91832 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
91833 per_cpu(rcu_torture_count, cpu)[i] = 0;
91834diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
91835index 0db5649..e6ec167 100644
91836--- a/kernel/rcu/tiny.c
91837+++ b/kernel/rcu/tiny.c
91838@@ -42,7 +42,7 @@
91839 /* Forward declarations for tiny_plugin.h. */
91840 struct rcu_ctrlblk;
91841 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
91842-static void rcu_process_callbacks(struct softirq_action *unused);
91843+static void rcu_process_callbacks(void);
91844 static void __call_rcu(struct rcu_head *head,
91845 void (*func)(struct rcu_head *rcu),
91846 struct rcu_ctrlblk *rcp);
91847@@ -310,7 +310,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
91848 false));
91849 }
91850
91851-static void rcu_process_callbacks(struct softirq_action *unused)
91852+static __latent_entropy void rcu_process_callbacks(void)
91853 {
91854 __rcu_process_callbacks(&rcu_sched_ctrlblk);
91855 __rcu_process_callbacks(&rcu_bh_ctrlblk);
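
rcu_process_callbacks() loses its never-used softirq_action argument and picks up __latent_entropy, the PaX plugin attribute that makes the function contribute timing-dependent state to the entropy pool. Plugin-only attributes like this are typically defined away when the instrumenting compiler is absent; a sketch of that guard (the feature-macro name below is illustrative, not the kernel's exact spelling):

#include <stdio.h>

#ifdef LATENT_ENTROPY_PLUGIN
#define __latent_entropy __attribute__((latent_entropy))
#else
#define __latent_entropy        /* expands to nothing */
#endif

static __latent_entropy void process_callbacks(void)
{
        puts("callbacks processed");
}

int main(void)
{
        process_callbacks();
        return 0;
}
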
91856diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
91857index 858c565..7efd915 100644
91858--- a/kernel/rcu/tiny_plugin.h
91859+++ b/kernel/rcu/tiny_plugin.h
91860@@ -152,17 +152,17 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
91861 dump_stack();
91862 }
91863 if (*rcp->curtail && ULONG_CMP_GE(j, js))
91864- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
91865+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
91866 3 * rcu_jiffies_till_stall_check() + 3;
91867 else if (ULONG_CMP_GE(j, js))
91868- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91869+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91870 }
91871
91872 static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
91873 {
91874 rcp->ticks_this_gp = 0;
91875 rcp->gp_start = jiffies;
91876- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91877+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91878 }
91879
91880 static void check_cpu_stalls(void)
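
ACCESS_ONCE_RW, used pervasively from here on, exists because the patch's hardened ACCESS_ONCE casts through a const-qualified volatile pointer, which is fine for loads but rejected for stores once constification is enforced; the _RW variant drops the const for the write sites. Simplified stand-in definitions modelled on the grsecurity approach (assumes the GCC/Clang __typeof__ extension):

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
        unsigned long stall = 0;

        ACCESS_ONCE_RW(stall) = 42;             /* volatile store */
        printf("%lu\n", ACCESS_ONCE(stall));    /* volatile load  */
        return 0;
}
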
91881diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
91882index 7680fc2..b8e9161 100644
91883--- a/kernel/rcu/tree.c
91884+++ b/kernel/rcu/tree.c
91885@@ -261,7 +261,7 @@ static void rcu_momentary_dyntick_idle(void)
91886 */
91887 rdtp = this_cpu_ptr(&rcu_dynticks);
91888 smp_mb__before_atomic(); /* Earlier stuff before QS. */
91889- atomic_add(2, &rdtp->dynticks); /* QS. */
91890+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
91891 smp_mb__after_atomic(); /* Later stuff after QS. */
91892 break;
91893 }
91894@@ -521,9 +521,9 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
91895 rcu_prepare_for_idle();
91896 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
91897 smp_mb__before_atomic(); /* See above. */
91898- atomic_inc(&rdtp->dynticks);
91899+ atomic_inc_unchecked(&rdtp->dynticks);
91900 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
91901- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
91902+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
91903 rcu_dynticks_task_enter();
91904
91905 /*
91906@@ -644,10 +644,10 @@ static void rcu_eqs_exit_common(long long oldval, int user)
91907
91908 rcu_dynticks_task_exit();
91909 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
91910- atomic_inc(&rdtp->dynticks);
91911+ atomic_inc_unchecked(&rdtp->dynticks);
91912 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
91913 smp_mb__after_atomic(); /* See above. */
91914- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
91915+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
91916 rcu_cleanup_after_idle();
91917 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
91918 if (!user && !is_idle_task(current)) {
91919@@ -768,14 +768,14 @@ void rcu_nmi_enter(void)
91920 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
91921
91922 if (rdtp->dynticks_nmi_nesting == 0 &&
91923- (atomic_read(&rdtp->dynticks) & 0x1))
91924+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
91925 return;
91926 rdtp->dynticks_nmi_nesting++;
91927 smp_mb__before_atomic(); /* Force delay from prior write. */
91928- atomic_inc(&rdtp->dynticks);
91929+ atomic_inc_unchecked(&rdtp->dynticks);
91930 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
91931 smp_mb__after_atomic(); /* See above. */
91932- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
91933+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
91934 }
91935
91936 /**
91937@@ -794,9 +794,9 @@ void rcu_nmi_exit(void)
91938 return;
91939 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
91940 smp_mb__before_atomic(); /* See above. */
91941- atomic_inc(&rdtp->dynticks);
91942+ atomic_inc_unchecked(&rdtp->dynticks);
91943 smp_mb__after_atomic(); /* Force delay to next write. */
91944- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
91945+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
91946 }
91947
91948 /**
91949@@ -809,7 +809,7 @@ void rcu_nmi_exit(void)
91950 */
91951 bool notrace __rcu_is_watching(void)
91952 {
91953- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
91954+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
91955 }
91956
91957 /**
91958@@ -892,7 +892,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
91959 static int dyntick_save_progress_counter(struct rcu_data *rdp,
91960 bool *isidle, unsigned long *maxj)
91961 {
91962- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
91963+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
91964 rcu_sysidle_check_cpu(rdp, isidle, maxj);
91965 if ((rdp->dynticks_snap & 0x1) == 0) {
91966 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
91967@@ -921,7 +921,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
91968 int *rcrmp;
91969 unsigned int snap;
91970
91971- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
91972+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
91973 snap = (unsigned int)rdp->dynticks_snap;
91974
91975 /*
91976@@ -984,10 +984,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
91977 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
91978 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
91979 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
91980- ACCESS_ONCE(rdp->cond_resched_completed) =
91981+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
91982 ACCESS_ONCE(rdp->mynode->completed);
91983 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
91984- ACCESS_ONCE(*rcrmp) =
91985+ ACCESS_ONCE_RW(*rcrmp) =
91986 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
91987 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
91988 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
91989@@ -1009,7 +1009,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
91990 rsp->gp_start = j;
91991 smp_wmb(); /* Record start time before stall time. */
91992 j1 = rcu_jiffies_till_stall_check();
91993- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
91994+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
91995 rsp->jiffies_resched = j + j1 / 2;
91996 }
91997
91998@@ -1050,7 +1050,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
91999 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92000 return;
92001 }
92002- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
92003+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
92004 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92005
92006 /*
92007@@ -1127,7 +1127,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
92008
92009 raw_spin_lock_irqsave(&rnp->lock, flags);
92010 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
92011- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
92012+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
92013 3 * rcu_jiffies_till_stall_check() + 3;
92014 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92015
92016@@ -1211,7 +1211,7 @@ void rcu_cpu_stall_reset(void)
92017 struct rcu_state *rsp;
92018
92019 for_each_rcu_flavor(rsp)
92020- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
92021+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
92022 }
92023
92024 /*
92025@@ -1597,7 +1597,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
92026 raw_spin_unlock_irq(&rnp->lock);
92027 return 0;
92028 }
92029- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
92030+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
92031
92032 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
92033 /*
92034@@ -1638,9 +1638,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
92035 rdp = this_cpu_ptr(rsp->rda);
92036 rcu_preempt_check_blocked_tasks(rnp);
92037 rnp->qsmask = rnp->qsmaskinit;
92038- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
92039+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
92040 WARN_ON_ONCE(rnp->completed != rsp->completed);
92041- ACCESS_ONCE(rnp->completed) = rsp->completed;
92042+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
92043 if (rnp == rdp->mynode)
92044 (void)__note_gp_changes(rsp, rnp, rdp);
92045 rcu_preempt_boost_start_gp(rnp);
92046@@ -1685,7 +1685,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
92047 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
92048 raw_spin_lock_irq(&rnp->lock);
92049 smp_mb__after_unlock_lock();
92050- ACCESS_ONCE(rsp->gp_flags) =
92051+ ACCESS_ONCE_RW(rsp->gp_flags) =
92052 ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
92053 raw_spin_unlock_irq(&rnp->lock);
92054 }
92055@@ -1731,7 +1731,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
92056 rcu_for_each_node_breadth_first(rsp, rnp) {
92057 raw_spin_lock_irq(&rnp->lock);
92058 smp_mb__after_unlock_lock();
92059- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
92060+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
92061 rdp = this_cpu_ptr(rsp->rda);
92062 if (rnp == rdp->mynode)
92063 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
92064@@ -1746,14 +1746,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
92065 rcu_nocb_gp_set(rnp, nocb);
92066
92067 /* Declare grace period done. */
92068- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
92069+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
92070 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
92071 rsp->fqs_state = RCU_GP_IDLE;
92072 rdp = this_cpu_ptr(rsp->rda);
92073 /* Advance CBs to reduce false positives below. */
92074 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
92075 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
92076- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92077+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92078 trace_rcu_grace_period(rsp->name,
92079 ACCESS_ONCE(rsp->gpnum),
92080 TPS("newreq"));
92081@@ -1878,7 +1878,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
92082 */
92083 return false;
92084 }
92085- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92086+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92087 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
92088 TPS("newreq"));
92089
92090@@ -2099,7 +2099,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
92091 rsp->qlen += rdp->qlen;
92092 rdp->n_cbs_orphaned += rdp->qlen;
92093 rdp->qlen_lazy = 0;
92094- ACCESS_ONCE(rdp->qlen) = 0;
92095+ ACCESS_ONCE_RW(rdp->qlen) = 0;
92096 }
92097
92098 /*
92099@@ -2344,7 +2344,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
92100 }
92101 smp_mb(); /* List handling before counting for rcu_barrier(). */
92102 rdp->qlen_lazy -= count_lazy;
92103- ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
92104+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen - count;
92105 rdp->n_cbs_invoked += count;
92106
92107 /* Reinstate batch limit if we have worked down the excess. */
92108@@ -2507,7 +2507,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
92109 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
92110 return; /* Someone beat us to it. */
92111 }
92112- ACCESS_ONCE(rsp->gp_flags) =
92113+ ACCESS_ONCE_RW(rsp->gp_flags) =
92114 ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
92115 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
92116 rcu_gp_kthread_wake(rsp);
92117@@ -2553,7 +2553,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
92118 /*
92119 * Do RCU core processing for the current CPU.
92120 */
92121-static void rcu_process_callbacks(struct softirq_action *unused)
92122+static void rcu_process_callbacks(void)
92123 {
92124 struct rcu_state *rsp;
92125
92126@@ -2665,7 +2665,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
92127 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
92128 if (debug_rcu_head_queue(head)) {
92129 /* Probable double call_rcu(), so leak the callback. */
92130- ACCESS_ONCE(head->func) = rcu_leak_callback;
92131+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
92132 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
92133 return;
92134 }
92135@@ -2693,7 +2693,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
92136 local_irq_restore(flags);
92137 return;
92138 }
92139- ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
92140+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen + 1;
92141 if (lazy)
92142 rdp->qlen_lazy++;
92143 else
92144@@ -2966,11 +2966,11 @@ void synchronize_sched_expedited(void)
92145 * counter wrap on a 32-bit system. Quite a few more CPUs would of
92146 * course be required on a 64-bit system.
92147 */
92148- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
92149+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
92150 (ulong)atomic_long_read(&rsp->expedited_done) +
92151 ULONG_MAX / 8)) {
92152 synchronize_sched();
92153- atomic_long_inc(&rsp->expedited_wrap);
92154+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
92155 return;
92156 }
92157
92158@@ -2978,12 +2978,12 @@ void synchronize_sched_expedited(void)
92159 * Take a ticket. Note that atomic_inc_return() implies a
92160 * full memory barrier.
92161 */
92162- snap = atomic_long_inc_return(&rsp->expedited_start);
92163+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
92164 firstsnap = snap;
92165 if (!try_get_online_cpus()) {
92166 /* CPU hotplug operation in flight, fall back to normal GP. */
92167 wait_rcu_gp(call_rcu_sched);
92168- atomic_long_inc(&rsp->expedited_normal);
92169+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92170 return;
92171 }
92172 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
92173@@ -2996,7 +2996,7 @@ void synchronize_sched_expedited(void)
92174 for_each_cpu(cpu, cm) {
92175 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
92176
92177- if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
92178+ if (!(atomic_add_return_unchecked(0, &rdtp->dynticks) & 0x1))
92179 cpumask_clear_cpu(cpu, cm);
92180 }
92181 if (cpumask_weight(cm) == 0)
92182@@ -3011,14 +3011,14 @@ void synchronize_sched_expedited(void)
92183 synchronize_sched_expedited_cpu_stop,
92184 NULL) == -EAGAIN) {
92185 put_online_cpus();
92186- atomic_long_inc(&rsp->expedited_tryfail);
92187+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
92188
92189 /* Check to see if someone else did our work for us. */
92190 s = atomic_long_read(&rsp->expedited_done);
92191 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92192 /* ensure test happens before caller kfree */
92193 smp_mb__before_atomic(); /* ^^^ */
92194- atomic_long_inc(&rsp->expedited_workdone1);
92195+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
92196 free_cpumask_var(cm);
92197 return;
92198 }
92199@@ -3028,7 +3028,7 @@ void synchronize_sched_expedited(void)
92200 udelay(trycount * num_online_cpus());
92201 } else {
92202 wait_rcu_gp(call_rcu_sched);
92203- atomic_long_inc(&rsp->expedited_normal);
92204+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92205 free_cpumask_var(cm);
92206 return;
92207 }
92208@@ -3038,7 +3038,7 @@ void synchronize_sched_expedited(void)
92209 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92210 /* ensure test happens before caller kfree */
92211 smp_mb__before_atomic(); /* ^^^ */
92212- atomic_long_inc(&rsp->expedited_workdone2);
92213+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
92214 free_cpumask_var(cm);
92215 return;
92216 }
92217@@ -3053,14 +3053,14 @@ void synchronize_sched_expedited(void)
92218 if (!try_get_online_cpus()) {
92219 /* CPU hotplug operation in flight, use normal GP. */
92220 wait_rcu_gp(call_rcu_sched);
92221- atomic_long_inc(&rsp->expedited_normal);
92222+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92223 free_cpumask_var(cm);
92224 return;
92225 }
92226- snap = atomic_long_read(&rsp->expedited_start);
92227+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
92228 smp_mb(); /* ensure read is before try_stop_cpus(). */
92229 }
92230- atomic_long_inc(&rsp->expedited_stoppedcpus);
92231+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
92232
92233 all_cpus_idle:
92234 free_cpumask_var(cm);
92235@@ -3072,16 +3072,16 @@ all_cpus_idle:
92236 * than we did already did their update.
92237 */
92238 do {
92239- atomic_long_inc(&rsp->expedited_done_tries);
92240+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
92241 s = atomic_long_read(&rsp->expedited_done);
92242 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
92243 /* ensure test happens before caller kfree */
92244 smp_mb__before_atomic(); /* ^^^ */
92245- atomic_long_inc(&rsp->expedited_done_lost);
92246+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
92247 break;
92248 }
92249 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
92250- atomic_long_inc(&rsp->expedited_done_exit);
92251+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
92252
92253 put_online_cpus();
92254 }
92255@@ -3287,7 +3287,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92256 * ACCESS_ONCE() to prevent the compiler from speculating
92257 * the increment to precede the early-exit check.
92258 */
92259- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92260+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92261 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
92262 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
92263 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
92264@@ -3342,7 +3342,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92265
92266 /* Increment ->n_barrier_done to prevent duplicate work. */
92267 smp_mb(); /* Keep increment after above mechanism. */
92268- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92269+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92270 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
92271 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
92272 smp_mb(); /* Keep increment before caller's subsequent code. */
92273@@ -3387,10 +3387,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
92274 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
92275 init_callback_list(rdp);
92276 rdp->qlen_lazy = 0;
92277- ACCESS_ONCE(rdp->qlen) = 0;
92278+ ACCESS_ONCE_RW(rdp->qlen) = 0;
92279 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
92280 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
92281- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
92282+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
92283 rdp->cpu = cpu;
92284 rdp->rsp = rsp;
92285 rcu_boot_init_nocb_percpu_data(rdp);
92286@@ -3423,8 +3423,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
92287 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
92288 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
92289 rcu_sysidle_init_percpu_data(rdp->dynticks);
92290- atomic_set(&rdp->dynticks->dynticks,
92291- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
92292+ atomic_set_unchecked(&rdp->dynticks->dynticks,
92293+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
92294 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
92295
92296 /* Add CPU to rcu_node bitmasks. */
92297diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
92298index 8e7b184..9c55768 100644
92299--- a/kernel/rcu/tree.h
92300+++ b/kernel/rcu/tree.h
92301@@ -87,11 +87,11 @@ struct rcu_dynticks {
92302 long long dynticks_nesting; /* Track irq/process nesting level. */
92303 /* Process level is worth LLONG_MAX/2. */
92304 int dynticks_nmi_nesting; /* Track NMI nesting level. */
92305- atomic_t dynticks; /* Even value for idle, else odd. */
92306+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
92307 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
92308 long long dynticks_idle_nesting;
92309 /* irq/process nesting level from idle. */
92310- atomic_t dynticks_idle; /* Even value for idle, else odd. */
92311+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
92312 /* "Idle" excludes userspace execution. */
92313 unsigned long dynticks_idle_jiffies;
92314 /* End of last non-NMI non-idle period. */
92315@@ -466,17 +466,17 @@ struct rcu_state {
92316 /* _rcu_barrier(). */
92317 /* End of fields guarded by barrier_mutex. */
92318
92319- atomic_long_t expedited_start; /* Starting ticket. */
92320- atomic_long_t expedited_done; /* Done ticket. */
92321- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
92322- atomic_long_t expedited_tryfail; /* # acquisition failures. */
92323- atomic_long_t expedited_workdone1; /* # done by others #1. */
92324- atomic_long_t expedited_workdone2; /* # done by others #2. */
92325- atomic_long_t expedited_normal; /* # fallbacks to normal. */
92326- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
92327- atomic_long_t expedited_done_tries; /* # tries to update _done. */
92328- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
92329- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
92330+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
92331+ atomic_long_t expedited_done; /* Done ticket. */
92332+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
92333+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
92334+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
92335+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
92336+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
92337+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
92338+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
92339+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
92340+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
92341
92342 unsigned long jiffies_force_qs; /* Time at which to invoke */
92343 /* force_quiescent_state(). */
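
It is worth noticing what the tree.h hunk leaves alone: every expedited statistic becomes atomic_long_unchecked_t, but expedited_done stays a checked atomic_long_t because it is a ticket compared and advanced with cmpxchg, where silent wraparound would be a real correctness bug rather than a cosmetic one. A C11 sketch of that split:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long done = 0;        /* correctness-critical ticket */
static atomic_ulong normal_falls;   /* diagnostic only; wrap is harmless */

static void advance_done(long snap)
{
        long s = atomic_load(&done);

        /* only move the ticket forward, never backward */
        while (s < snap &&
               !atomic_compare_exchange_weak(&done, &s, snap))
                ;
}

int main(void)
{
        atomic_fetch_add(&normal_falls, 1);
        advance_done(42);
        advance_done(7);    /* stale snapshot: ticket must not regress */
        printf("done=%ld falls=%lu\n",
               atomic_load(&done), atomic_load(&normal_falls));
        return 0;
}
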
92344diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
92345index 3ec85cb..3687925 100644
92346--- a/kernel/rcu/tree_plugin.h
92347+++ b/kernel/rcu/tree_plugin.h
92348@@ -709,7 +709,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
92349 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
92350 {
92351 return !rcu_preempted_readers_exp(rnp) &&
92352- ACCESS_ONCE(rnp->expmask) == 0;
92353+ ACCESS_ONCE_RW(rnp->expmask) == 0;
92354 }
92355
92356 /*
92357@@ -870,7 +870,7 @@ void synchronize_rcu_expedited(void)
92358
92359 /* Clean up and exit. */
92360 smp_mb(); /* ensure expedited GP seen before counter increment. */
92361- ACCESS_ONCE(sync_rcu_preempt_exp_count) =
92362+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count) =
92363 sync_rcu_preempt_exp_count + 1;
92364 unlock_mb_ret:
92365 mutex_unlock(&sync_rcu_preempt_exp_mutex);
92366@@ -1426,7 +1426,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
92367 free_cpumask_var(cm);
92368 }
92369
92370-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
92371+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
92372 .store = &rcu_cpu_kthread_task,
92373 .thread_should_run = rcu_cpu_kthread_should_run,
92374 .thread_fn = rcu_cpu_kthread,
92375@@ -1900,7 +1900,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
92376 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
92377 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
92378 cpu, ticks_value, ticks_title,
92379- atomic_read(&rdtp->dynticks) & 0xfff,
92380+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
92381 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
92382 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
92383 fast_no_hz);
92384@@ -2044,7 +2044,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
92385 return;
92386 if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
92387 /* Prior smp_mb__after_atomic() orders against prior enqueue. */
92388- ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
92389+ ACCESS_ONCE_RW(rdp_leader->nocb_leader_sleep) = false;
92390 wake_up(&rdp_leader->nocb_wq);
92391 }
92392 }
92393@@ -2096,7 +2096,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
92394
92395 /* Enqueue the callback on the nocb list and update counts. */
92396 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
92397- ACCESS_ONCE(*old_rhpp) = rhp;
92398+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
92399 atomic_long_add(rhcount, &rdp->nocb_q_count);
92400 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
92401 smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
92402@@ -2286,7 +2286,7 @@ wait_again:
92403 continue; /* No CBs here, try next follower. */
92404
92405 /* Move callbacks to wait-for-GP list, which is empty. */
92406- ACCESS_ONCE(rdp->nocb_head) = NULL;
92407+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
92408 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
92409 rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
92410 rdp->nocb_gp_count_lazy =
92411@@ -2413,7 +2413,7 @@ static int rcu_nocb_kthread(void *arg)
92412 list = ACCESS_ONCE(rdp->nocb_follower_head);
92413 BUG_ON(!list);
92414 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
92415- ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
92416+ ACCESS_ONCE_RW(rdp->nocb_follower_head) = NULL;
92417 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
92418 c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
92419 cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
92420@@ -2443,8 +2443,8 @@ static int rcu_nocb_kthread(void *arg)
92421 list = next;
92422 }
92423 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
92424- ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c;
92425- ACCESS_ONCE(rdp->nocb_p_count_lazy) =
92426+ ACCESS_ONCE_RW(rdp->nocb_p_count) = rdp->nocb_p_count - c;
92427+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) =
92428 rdp->nocb_p_count_lazy - cl;
92429 rdp->n_nocbs_invoked += c;
92430 }
92431@@ -2465,7 +2465,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
92432 if (!rcu_nocb_need_deferred_wakeup(rdp))
92433 return;
92434 ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
92435- ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
92436+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
92437 wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
92438 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
92439 }
92440@@ -2588,7 +2588,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
92441 t = kthread_run(rcu_nocb_kthread, rdp_spawn,
92442 "rcuo%c/%d", rsp->abbr, cpu);
92443 BUG_ON(IS_ERR(t));
92444- ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
92445+ ACCESS_ONCE_RW(rdp_spawn->nocb_kthread) = t;
92446 }
92447
92448 /*
92449@@ -2793,11 +2793,11 @@ static void rcu_sysidle_enter(int irq)
92450
92451 /* Record start of fully idle period. */
92452 j = jiffies;
92453- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
92454+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
92455 smp_mb__before_atomic();
92456- atomic_inc(&rdtp->dynticks_idle);
92457+ atomic_inc_unchecked(&rdtp->dynticks_idle);
92458 smp_mb__after_atomic();
92459- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
92460+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
92461 }
92462
92463 /*
92464@@ -2868,9 +2868,9 @@ static void rcu_sysidle_exit(int irq)
92465
92466 /* Record end of idle period. */
92467 smp_mb__before_atomic();
92468- atomic_inc(&rdtp->dynticks_idle);
92469+ atomic_inc_unchecked(&rdtp->dynticks_idle);
92470 smp_mb__after_atomic();
92471- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
92472+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
92473
92474 /*
92475 * If we are the timekeeping CPU, we are permitted to be non-idle
92476@@ -2915,7 +2915,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
92477 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
92478
92479 /* Pick up current idle and NMI-nesting counter and check. */
92480- cur = atomic_read(&rdtp->dynticks_idle);
92481+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
92482 if (cur & 0x1) {
92483 *isidle = false; /* We are not idle! */
92484 return;
92485@@ -2964,7 +2964,7 @@ static void rcu_sysidle(unsigned long j)
92486 case RCU_SYSIDLE_NOT:
92487
92488 /* First time all are idle, so note a short idle period. */
92489- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
92490+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
92491 break;
92492
92493 case RCU_SYSIDLE_SHORT:
92494@@ -3002,7 +3002,7 @@ static void rcu_sysidle_cancel(void)
92495 {
92496 smp_mb();
92497 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
92498- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
92499+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
92500 }
92501
92502 /*
92503@@ -3054,7 +3054,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
92504 smp_mb(); /* grace period precedes setting inuse. */
92505
92506 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
92507- ACCESS_ONCE(rshp->inuse) = 0;
92508+ ACCESS_ONCE_RW(rshp->inuse) = 0;
92509 }
92510
92511 /*
92512@@ -3207,7 +3207,7 @@ static void rcu_bind_gp_kthread(void)
92513 static void rcu_dynticks_task_enter(void)
92514 {
92515 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
92516- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
92517+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = smp_processor_id();
92518 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
92519 }
92520
92521@@ -3215,6 +3215,6 @@ static void rcu_dynticks_task_enter(void)
92522 static void rcu_dynticks_task_exit(void)
92523 {
92524 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
92525- ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
92526+ ACCESS_ONCE_RW(current->rcu_tasks_idle_cpu) = -1;
92527 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
92528 }
92529diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
92530index 5cdc62e..cc52e88 100644
92531--- a/kernel/rcu/tree_trace.c
92532+++ b/kernel/rcu/tree_trace.c
92533@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
92534 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
92535 rdp->passed_quiesce, rdp->qs_pending);
92536 seq_printf(m, " dt=%d/%llx/%d df=%lu",
92537- atomic_read(&rdp->dynticks->dynticks),
92538+ atomic_read_unchecked(&rdp->dynticks->dynticks),
92539 rdp->dynticks->dynticks_nesting,
92540 rdp->dynticks->dynticks_nmi_nesting,
92541 rdp->dynticks_fqs);
92542@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
92543 struct rcu_state *rsp = (struct rcu_state *)m->private;
92544
92545 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
92546- atomic_long_read(&rsp->expedited_start),
92547+ atomic_long_read_unchecked(&rsp->expedited_start),
92548 atomic_long_read(&rsp->expedited_done),
92549- atomic_long_read(&rsp->expedited_wrap),
92550- atomic_long_read(&rsp->expedited_tryfail),
92551- atomic_long_read(&rsp->expedited_workdone1),
92552- atomic_long_read(&rsp->expedited_workdone2),
92553- atomic_long_read(&rsp->expedited_normal),
92554- atomic_long_read(&rsp->expedited_stoppedcpus),
92555- atomic_long_read(&rsp->expedited_done_tries),
92556- atomic_long_read(&rsp->expedited_done_lost),
92557- atomic_long_read(&rsp->expedited_done_exit));
92558+ atomic_long_read_unchecked(&rsp->expedited_wrap),
92559+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
92560+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
92561+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
92562+ atomic_long_read_unchecked(&rsp->expedited_normal),
92563+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
92564+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
92565+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
92566+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
92567 return 0;
92568 }
92569
92570diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
92571index e0d31a3..f4dafe3 100644
92572--- a/kernel/rcu/update.c
92573+++ b/kernel/rcu/update.c
92574@@ -342,10 +342,10 @@ int rcu_jiffies_till_stall_check(void)
92575 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
92576 */
92577 if (till_stall_check < 3) {
92578- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
92579+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
92580 till_stall_check = 3;
92581 } else if (till_stall_check > 300) {
92582- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
92583+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
92584 till_stall_check = 300;
92585 }
92586 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
92587@@ -501,7 +501,7 @@ static void check_holdout_task(struct task_struct *t,
92588 !ACCESS_ONCE(t->on_rq) ||
92589 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
92590 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
92591- ACCESS_ONCE(t->rcu_tasks_holdout) = false;
92592+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = false;
92593 list_del_init(&t->rcu_tasks_holdout_list);
92594 put_task_struct(t);
92595 return;
92596@@ -589,7 +589,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
92597 !is_idle_task(t)) {
92598 get_task_struct(t);
92599 t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
92600- ACCESS_ONCE(t->rcu_tasks_holdout) = true;
92601+ ACCESS_ONCE_RW(t->rcu_tasks_holdout) = true;
92602 list_add(&t->rcu_tasks_holdout_list,
92603 &rcu_tasks_holdouts);
92604 }
92605@@ -686,7 +686,7 @@ static void rcu_spawn_tasks_kthread(void)
92606 t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
92607 BUG_ON(IS_ERR(t));
92608 smp_mb(); /* Ensure others see full kthread. */
92609- ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
92610+ ACCESS_ONCE_RW(rcu_tasks_kthread_ptr) = t;
92611 mutex_unlock(&rcu_tasks_kthread_mutex);
92612 }
92613
92614diff --git a/kernel/resource.c b/kernel/resource.c
92615index 0bcebff..e7cd5b2 100644
92616--- a/kernel/resource.c
92617+++ b/kernel/resource.c
92618@@ -161,8 +161,18 @@ static const struct file_operations proc_iomem_operations = {
92619
92620 static int __init ioresources_init(void)
92621 {
92622+#ifdef CONFIG_GRKERNSEC_PROC_ADD
92623+#ifdef CONFIG_GRKERNSEC_PROC_USER
92624+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
92625+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
92626+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92627+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
92628+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
92629+#endif
92630+#else
92631 proc_create("ioports", 0, NULL, &proc_ioports_operations);
92632 proc_create("iomem", 0, NULL, &proc_iomem_operations);
92633+#endif
92634 return 0;
92635 }
92636 __initcall(ioresources_init);
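
The ioresources_init() ladder tightens /proc/ioports and /proc/iomem from the stock world-readable default (a mode of 0 expands to 0444 in proc_create()) to owner-only or owner-plus-group, depending on which GRKERNSEC_PROC flavour is configured. A plain C model of the mode selection:

#include <stdio.h>

enum policy { WORLD_READABLE, USER_ONLY, USER_AND_GROUP };

static unsigned int proc_mode(enum policy p)
{
        switch (p) {
        case USER_ONLY:      return 0400;  /* S_IRUSR */
        case USER_AND_GROUP: return 0440;  /* S_IRUSR | S_IRGRP */
        default:             return 0444;  /* stock: world-readable */
        }
}

int main(void)
{
        printf("stock: %o grsec-user: %o grsec-group: %o\n",
               proc_mode(WORLD_READABLE), proc_mode(USER_ONLY),
               proc_mode(USER_AND_GROUP));
        return 0;
}
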
92637diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
92638index eae160d..c9aa22e 100644
92639--- a/kernel/sched/auto_group.c
92640+++ b/kernel/sched/auto_group.c
92641@@ -11,7 +11,7 @@
92642
92643 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
92644 static struct autogroup autogroup_default;
92645-static atomic_t autogroup_seq_nr;
92646+static atomic_unchecked_t autogroup_seq_nr;
92647
92648 void __init autogroup_init(struct task_struct *init_task)
92649 {
92650@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
92651
92652 kref_init(&ag->kref);
92653 init_rwsem(&ag->lock);
92654- ag->id = atomic_inc_return(&autogroup_seq_nr);
92655+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
92656 ag->tg = tg;
92657 #ifdef CONFIG_RT_GROUP_SCHED
92658 /*
92659diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
92660index 607f852..486bc87 100644
92661--- a/kernel/sched/completion.c
92662+++ b/kernel/sched/completion.c
92663@@ -205,7 +205,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
92664 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
92665 * or number of jiffies left till timeout) if completed.
92666 */
92667-long __sched
92668+long __sched __intentional_overflow(-1)
92669 wait_for_completion_interruptible_timeout(struct completion *x,
92670 unsigned long timeout)
92671 {
92672@@ -222,7 +222,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
92673 *
92674 * Return: -ERESTARTSYS if interrupted, 0 if completed.
92675 */
92676-int __sched wait_for_completion_killable(struct completion *x)
92677+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
92678 {
92679 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
92680 if (t == -ERESTARTSYS)
92681@@ -243,7 +243,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
92682 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
92683 * or number of jiffies left till timeout) if completed.
92684 */
92685-long __sched
92686+long __sched __intentional_overflow(-1)
92687 wait_for_completion_killable_timeout(struct completion *x,
92688 unsigned long timeout)
92689 {
92690diff --git a/kernel/sched/core.c b/kernel/sched/core.c
92691index d400c82..50fca96 100644
92692--- a/kernel/sched/core.c
92693+++ b/kernel/sched/core.c
92694@@ -1902,7 +1902,7 @@ void set_numabalancing_state(bool enabled)
92695 int sysctl_numa_balancing(struct ctl_table *table, int write,
92696 void __user *buffer, size_t *lenp, loff_t *ppos)
92697 {
92698- struct ctl_table t;
92699+ ctl_table_no_const t;
92700 int err;
92701 int state = numabalancing_enabled;
92702
92703@@ -2352,8 +2352,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
92704 next->active_mm = oldmm;
92705 atomic_inc(&oldmm->mm_count);
92706 enter_lazy_tlb(oldmm, next);
92707- } else
92708+ } else {
92709 switch_mm(oldmm, mm, next);
92710+ populate_stack();
92711+ }
92712
92713 if (!prev->mm) {
92714 prev->active_mm = NULL;
92715@@ -3154,6 +3156,8 @@ int can_nice(const struct task_struct *p, const int nice)
92716 /* convert nice value [19,-20] to rlimit style value [1,40] */
92717 int nice_rlim = nice_to_rlimit(nice);
92718
92719+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
92720+
92721 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
92722 capable(CAP_SYS_NICE));
92723 }
92724@@ -3180,7 +3184,8 @@ SYSCALL_DEFINE1(nice, int, increment)
92725 nice = task_nice(current) + increment;
92726
92727 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
92728- if (increment < 0 && !can_nice(current, nice))
92729+ if (increment < 0 && (!can_nice(current, nice) ||
92730+ gr_handle_chroot_nice()))
92731 return -EPERM;
92732
92733 retval = security_task_setnice(current, nice);
92734@@ -3475,6 +3480,7 @@ recheck:
92735 if (policy != p->policy && !rlim_rtprio)
92736 return -EPERM;
92737
92738+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
92739 /* can't increase priority */
92740 if (attr->sched_priority > p->rt_priority &&
92741 attr->sched_priority > rlim_rtprio)
92742@@ -4975,6 +4981,7 @@ void idle_task_exit(void)
92743
92744 if (mm != &init_mm) {
92745 switch_mm(mm, &init_mm, current);
92746+ populate_stack();
92747 finish_arch_post_lock_switch();
92748 }
92749 mmdrop(mm);
92750@@ -5070,7 +5077,7 @@ static void migrate_tasks(unsigned int dead_cpu)
92751
92752 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
92753
92754-static struct ctl_table sd_ctl_dir[] = {
92755+static ctl_table_no_const sd_ctl_dir[] __read_only = {
92756 {
92757 .procname = "sched_domain",
92758 .mode = 0555,
92759@@ -5087,17 +5094,17 @@ static struct ctl_table sd_ctl_root[] = {
92760 {}
92761 };
92762
92763-static struct ctl_table *sd_alloc_ctl_entry(int n)
92764+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
92765 {
92766- struct ctl_table *entry =
92767+ ctl_table_no_const *entry =
92768 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
92769
92770 return entry;
92771 }
92772
92773-static void sd_free_ctl_entry(struct ctl_table **tablep)
92774+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
92775 {
92776- struct ctl_table *entry;
92777+ ctl_table_no_const *entry;
92778
92779 /*
92780 * In the intermediate directories, both the child directory and
92781@@ -5105,22 +5112,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
92782 * will always be set. In the lowest directory the names are
92783 * static strings and all have proc handlers.
92784 */
92785- for (entry = *tablep; entry->mode; entry++) {
92786- if (entry->child)
92787- sd_free_ctl_entry(&entry->child);
92788+ for (entry = tablep; entry->mode; entry++) {
92789+ if (entry->child) {
92790+ sd_free_ctl_entry(entry->child);
92791+ pax_open_kernel();
92792+ entry->child = NULL;
92793+ pax_close_kernel();
92794+ }
92795 if (entry->proc_handler == NULL)
92796 kfree(entry->procname);
92797 }
92798
92799- kfree(*tablep);
92800- *tablep = NULL;
92801+ kfree(tablep);
92802 }
92803
92804 static int min_load_idx = 0;
92805 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
92806
92807 static void
92808-set_table_entry(struct ctl_table *entry,
92809+set_table_entry(ctl_table_no_const *entry,
92810 const char *procname, void *data, int maxlen,
92811 umode_t mode, proc_handler *proc_handler,
92812 bool load_idx)
92813@@ -5140,7 +5150,7 @@ set_table_entry(struct ctl_table *entry,
92814 static struct ctl_table *
92815 sd_alloc_ctl_domain_table(struct sched_domain *sd)
92816 {
92817- struct ctl_table *table = sd_alloc_ctl_entry(14);
92818+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
92819
92820 if (table == NULL)
92821 return NULL;
92822@@ -5178,9 +5188,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
92823 return table;
92824 }
92825
92826-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
92827+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
92828 {
92829- struct ctl_table *entry, *table;
92830+ ctl_table_no_const *entry, *table;
92831 struct sched_domain *sd;
92832 int domain_num = 0, i;
92833 char buf[32];
92834@@ -5207,11 +5217,13 @@ static struct ctl_table_header *sd_sysctl_header;
92835 static void register_sched_domain_sysctl(void)
92836 {
92837 int i, cpu_num = num_possible_cpus();
92838- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
92839+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
92840 char buf[32];
92841
92842 WARN_ON(sd_ctl_dir[0].child);
92843+ pax_open_kernel();
92844 sd_ctl_dir[0].child = entry;
92845+ pax_close_kernel();
92846
92847 if (entry == NULL)
92848 return;
92849@@ -5234,8 +5246,12 @@ static void unregister_sched_domain_sysctl(void)
92850 if (sd_sysctl_header)
92851 unregister_sysctl_table(sd_sysctl_header);
92852 sd_sysctl_header = NULL;
92853- if (sd_ctl_dir[0].child)
92854- sd_free_ctl_entry(&sd_ctl_dir[0].child);
92855+ if (sd_ctl_dir[0].child) {
92856+ sd_free_ctl_entry(sd_ctl_dir[0].child);
92857+ pax_open_kernel();
92858+ sd_ctl_dir[0].child = NULL;
92859+ pax_close_kernel();
92860+ }
92861 }
92862 #else
92863 static void register_sched_domain_sysctl(void)
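
The core.c hunks above all follow one pattern: tables built at runtime switch to a non-const alias so they stay writable, while the statically declared directory is placed in read-only memory and is only modified inside a pax_open_kernel()/pax_close_kernel() window. A minimal sketch of the idiom, assuming the grsecurity definitions (the typedef matches what this patch set uses; the CR0.WP detail is an assumption about the x86 implementation):

/* Writable-at-compile-time alias; declared tables are otherwise constified. */
typedef struct ctl_table __no_const ctl_table_no_const;

/* Writable type, but the object itself lives in a read-only section. */
static ctl_table_no_const my_dir[] __read_only = { { .procname = "demo" }, {} };

static void attach_child(ctl_table_no_const *child)
{
	pax_open_kernel();	/* lift write protection (e.g. CR0.WP on x86) */
	my_dir[0].child = child;
	pax_close_kernel();	/* restore write protection */
}
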
92864diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
92865index fe331fc..29d620e 100644
92866--- a/kernel/sched/fair.c
92867+++ b/kernel/sched/fair.c
92868@@ -2089,7 +2089,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
92869
92870 static void reset_ptenuma_scan(struct task_struct *p)
92871 {
92872- ACCESS_ONCE(p->mm->numa_scan_seq)++;
92873+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
92874 p->mm->numa_scan_offset = 0;
92875 }
92876
92877@@ -7651,7 +7651,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
92878 * run_rebalance_domains is triggered when needed from the scheduler tick.
92879 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
92880 */
92881-static void run_rebalance_domains(struct softirq_action *h)
92882+static __latent_entropy void run_rebalance_domains(void)
92883 {
92884 struct rq *this_rq = this_rq();
92885 enum cpu_idle_type idle = this_rq->idle_balance ?
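
The numa_scan_seq increment needs ACCESS_ONCE_RW because grsecurity's constification makes the plain ACCESS_ONCE read-only. Conceptually the pair looks like the sketch below (the in-tree macros of this era are wrapped slightly differently, so treat this as an approximation):

/* The plain accessor gains a const qualifier, so writes through it no
 * longer compile; intentional writers use the _RW form. */
#define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
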
92886diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
92887index 9a2a45c..bb91ace 100644
92888--- a/kernel/sched/sched.h
92889+++ b/kernel/sched/sched.h
92890@@ -1182,7 +1182,7 @@ struct sched_class {
92891 #ifdef CONFIG_FAIR_GROUP_SCHED
92892 void (*task_move_group) (struct task_struct *p, int on_rq);
92893 #endif
92894-};
92895+} __do_const;
92896
92897 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
92898 {
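
Marking struct sched_class with __do_const lets the constify plugin move every scheduler-class vtable into read-only memory, so a kernel-write primitive cannot redirect its function pointers. A minimal sketch of the effect, with the attribute stubbed out for non-plugin builds (the stub is an assumption):

#ifndef __do_const
#define __do_const	/* no-op without the constify plugin (assumption) */
#endif

struct ops {
	void (*run)(void);
} __do_const;		/* instances become implicitly const -> .rodata */
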
92899diff --git a/kernel/seccomp.c b/kernel/seccomp.c
92900index 4ef9687..4f44028 100644
92901--- a/kernel/seccomp.c
92902+++ b/kernel/seccomp.c
92903@@ -629,7 +629,9 @@ static u32 __seccomp_phase1_filter(int this_syscall, struct seccomp_data *sd)
92904
92905 switch (action) {
92906 case SECCOMP_RET_ERRNO:
92907- /* Set the low-order 16-bits as a errno. */
92908+ /* Set low-order bits as an errno, capped at MAX_ERRNO. */
92909+ if (data > MAX_ERRNO)
92910+ data = MAX_ERRNO;
92911 syscall_set_return_value(current, task_pt_regs(current),
92912 -data, 0);
92913 goto skip;
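
SECCOMP_RET_DATA gives a filter 16 bits (0..65535) of payload, but valid errnos end at MAX_ERRNO (4095); without the clamp a filter could make a syscall "fail" with a value outside the range that error-checking conventions recognize as an error. A small user-space illustration of the clamp (the helper name is hypothetical):

#include <stdio.h>

#define MAX_ERRNO 4095		/* matches the kernel's definition */

static unsigned int clamp_seccomp_errno(unsigned int data)
{
	return data > MAX_ERRNO ? MAX_ERRNO : data;	/* mirrors the hunk above */
}

int main(void)
{
	printf("%u -> %u\n", 70u, clamp_seccomp_errno(70u));		/* 70 */
	printf("%u -> %u\n", 65535u, clamp_seccomp_errno(65535u));	/* 4095 */
	return 0;
}
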
92914diff --git a/kernel/signal.c b/kernel/signal.c
92915index 16a30529..25ad033 100644
92916--- a/kernel/signal.c
92917+++ b/kernel/signal.c
92918@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
92919
92920 int print_fatal_signals __read_mostly;
92921
92922-static void __user *sig_handler(struct task_struct *t, int sig)
92923+static __sighandler_t sig_handler(struct task_struct *t, int sig)
92924 {
92925 return t->sighand->action[sig - 1].sa.sa_handler;
92926 }
92927
92928-static int sig_handler_ignored(void __user *handler, int sig)
92929+static int sig_handler_ignored(__sighandler_t handler, int sig)
92930 {
92931 /* Is it explicitly or implicitly ignored? */
92932 return handler == SIG_IGN ||
92933@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
92934
92935 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
92936 {
92937- void __user *handler;
92938+ __sighandler_t handler;
92939
92940 handler = sig_handler(t, sig);
92941
92942@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
92943 atomic_inc(&user->sigpending);
92944 rcu_read_unlock();
92945
92946+ if (!override_rlimit)
92947+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
92948+
92949 if (override_rlimit ||
92950 atomic_read(&user->sigpending) <=
92951 task_rlimit(t, RLIMIT_SIGPENDING)) {
92952@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
92953
92954 int unhandled_signal(struct task_struct *tsk, int sig)
92955 {
92956- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
92957+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
92958 if (is_global_init(tsk))
92959 return 1;
92960 if (handler != SIG_IGN && handler != SIG_DFL)
92961@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
92962 }
92963 }
92964
92965+ /* allow glibc communication via tgkill to other threads in our
92966+ thread group */
92967+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
92968+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
92969+ && gr_handle_signal(t, sig))
92970+ return -EPERM;
92971+
92972 return security_task_kill(t, info, sig, 0);
92973 }
92974
92975@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
92976 return send_signal(sig, info, p, 1);
92977 }
92978
92979-static int
92980+int
92981 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92982 {
92983 return send_signal(sig, info, t, 0);
92984@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92985 unsigned long int flags;
92986 int ret, blocked, ignored;
92987 struct k_sigaction *action;
92988+ int is_unhandled = 0;
92989
92990 spin_lock_irqsave(&t->sighand->siglock, flags);
92991 action = &t->sighand->action[sig-1];
92992@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92993 }
92994 if (action->sa.sa_handler == SIG_DFL)
92995 t->signal->flags &= ~SIGNAL_UNKILLABLE;
92996+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
92997+ is_unhandled = 1;
92998 ret = specific_send_sig_info(sig, info, t);
92999 spin_unlock_irqrestore(&t->sighand->siglock, flags);
93000
93001+ /* only deal with unhandled signals; Java etc. trigger SIGSEGV during
93002+ normal operation */
93003+ if (is_unhandled) {
93004+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
93005+ gr_handle_crash(t, sig);
93006+ }
93007+
93008 return ret;
93009 }
93010
93011@@ -1310,8 +1330,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
93012 ret = check_kill_permission(sig, info, p);
93013 rcu_read_unlock();
93014
93015- if (!ret && sig)
93016+ if (!ret && sig) {
93017 ret = do_send_sig_info(sig, info, p, true);
93018+ if (!ret)
93019+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
93020+ }
93021
93022 return ret;
93023 }
93024@@ -2915,7 +2938,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
93025 int error = -ESRCH;
93026
93027 rcu_read_lock();
93028- p = find_task_by_vpid(pid);
93029+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
93030+ /* allow glibc communication via tgkill to other threads in our
93031+ thread group */
93032+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
93033+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
93034+ p = find_task_by_vpid_unrestricted(pid);
93035+ else
93036+#endif
93037+ p = find_task_by_vpid(pid);
93038 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
93039 error = check_kill_permission(sig, info, p);
93040 /*
93041@@ -3248,8 +3279,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
93042 }
93043 seg = get_fs();
93044 set_fs(KERNEL_DS);
93045- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
93046- (stack_t __force __user *) &uoss,
93047+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
93048+ (stack_t __force_user *) &uoss,
93049 compat_user_stack_pointer());
93050 set_fs(seg);
93051 if (ret >= 0 && uoss_ptr) {
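
The check_kill_permission hunk above exempts exactly one pattern from gr_handle_signal: SI_TKILL delivery of kernel signal SIGRTMIN+1 to a thread in the caller's own thread group, which is how glibc implements its internal setxid synchronization. A user-space sketch of the call shape being whitelisted (illustrative only; glibc's internals differ, and note that glibc remaps the SIGRTMIN it exposes above its reserved signals):

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <unistd.h>

/* Send a signal to one thread in our own thread group via tgkill(2). */
static int signal_sibling(pid_t tid, int sig)
{
	return syscall(SYS_tgkill, (long)getpid(), (long)tid, sig);
}
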
93052diff --git a/kernel/smpboot.c b/kernel/smpboot.c
93053index 40190f2..8861d40 100644
93054--- a/kernel/smpboot.c
93055+++ b/kernel/smpboot.c
93056@@ -290,7 +290,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
93057 }
93058 smpboot_unpark_thread(plug_thread, cpu);
93059 }
93060- list_add(&plug_thread->list, &hotplug_threads);
93061+ pax_list_add(&plug_thread->list, &hotplug_threads);
93062 out:
93063 mutex_unlock(&smpboot_threads_lock);
93064 put_online_cpus();
93065@@ -308,7 +308,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
93066 {
93067 get_online_cpus();
93068 mutex_lock(&smpboot_threads_lock);
93069- list_del(&plug_thread->list);
93070+ pax_list_del(&plug_thread->list);
93071 smpboot_destroy_threads(plug_thread);
93072 mutex_unlock(&smpboot_threads_lock);
93073 put_online_cpus();
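
The hotplug-thread descriptors become read-only elsewhere in this patch set, so their list linkage has to go through pax_list_add/pax_list_del, which perform the pointer writes inside an open-kernel window (and, in the grsecurity tree, also harden against list corruption). A conceptual sketch, assuming the open/close primitives shown earlier:

#include <linux/list.h>

/* Sketch only: link a node whose neighbours may live in read-only data. */
static inline void pax_list_add_sketch(struct list_head *new,
				       struct list_head *head)
{
	pax_open_kernel();
	__list_add(new, head, head->next);	/* ordinary list splice */
	pax_close_kernel();
}
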
93074diff --git a/kernel/softirq.c b/kernel/softirq.c
93075index c497fcd..e8f90a9 100644
93076--- a/kernel/softirq.c
93077+++ b/kernel/softirq.c
93078@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
93079 EXPORT_SYMBOL(irq_stat);
93080 #endif
93081
93082-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
93083+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
93084
93085 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
93086
93087@@ -266,7 +266,7 @@ restart:
93088 kstat_incr_softirqs_this_cpu(vec_nr);
93089
93090 trace_softirq_entry(vec_nr);
93091- h->action(h);
93092+ h->action();
93093 trace_softirq_exit(vec_nr);
93094 if (unlikely(prev_count != preempt_count())) {
93095 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
93096@@ -426,7 +426,7 @@ void __raise_softirq_irqoff(unsigned int nr)
93097 or_softirq_pending(1UL << nr);
93098 }
93099
93100-void open_softirq(int nr, void (*action)(struct softirq_action *))
93101+void __init open_softirq(int nr, void (*action)(void))
93102 {
93103 softirq_vec[nr].action = action;
93104 }
93105@@ -478,7 +478,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
93106 }
93107 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
93108
93109-static void tasklet_action(struct softirq_action *a)
93110+static void tasklet_action(void)
93111 {
93112 struct tasklet_struct *list;
93113
93114@@ -514,7 +514,7 @@ static void tasklet_action(struct softirq_action *a)
93115 }
93116 }
93117
93118-static void tasklet_hi_action(struct softirq_action *a)
93119+static __latent_entropy void tasklet_hi_action(void)
93120 {
93121 struct tasklet_struct *list;
93122
93123@@ -745,7 +745,7 @@ static struct notifier_block cpu_nfb = {
93124 .notifier_call = cpu_callback
93125 };
93126
93127-static struct smp_hotplug_thread softirq_threads = {
93128+static struct smp_hotplug_thread softirq_threads __read_only = {
93129 .store = &ksoftirqd,
93130 .thread_should_run = ksoftirqd_should_run,
93131 .thread_fn = run_ksoftirqd,
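
With softirq_vec now page-aligned and read-only, registration only makes sense during boot, so open_softirq() gains __init and handlers drop the struct softirq_action * argument nobody used. A sketch of a handler under the changed signature (the softirq slot is chosen purely for illustration):

static __latent_entropy void demo_softirq(void)	/* no unused argument */
{
	/* bottom-half work runs here */
}

static int __init demo_init(void)
{
	open_softirq(HI_SOFTIRQ, demo_softirq);	/* placeholder slot */
	return 0;
}
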
93132diff --git a/kernel/sys.c b/kernel/sys.c
93133index ea9c881..2194af5 100644
93134--- a/kernel/sys.c
93135+++ b/kernel/sys.c
93136@@ -154,6 +154,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
93137 error = -EACCES;
93138 goto out;
93139 }
93140+
93141+ if (gr_handle_chroot_setpriority(p, niceval)) {
93142+ error = -EACCES;
93143+ goto out;
93144+ }
93145+
93146 no_nice = security_task_setnice(p, niceval);
93147 if (no_nice) {
93148 error = no_nice;
93149@@ -359,6 +365,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
93150 goto error;
93151 }
93152
93153+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
93154+ goto error;
93155+
93156+ if (!gid_eq(new->gid, old->gid)) {
93157+ /* make sure we generate a learn log for what will
93158+ end up being a role transition after a full-learning
93159+ policy is generated.
93160+ CAP_SETGID is required to perform a transition;
93161+ we may not log a CAP_SETGID check above, e.g.
93162+ in the case where new rgid = old egid.
93163+ */
93164+ gr_learn_cap(current, new, CAP_SETGID);
93165+ }
93166+
93167 if (rgid != (gid_t) -1 ||
93168 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
93169 new->sgid = new->egid;
93170@@ -394,6 +414,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
93171 old = current_cred();
93172
93173 retval = -EPERM;
93174+
93175+ if (gr_check_group_change(kgid, kgid, kgid))
93176+ goto error;
93177+
93178 if (ns_capable(old->user_ns, CAP_SETGID))
93179 new->gid = new->egid = new->sgid = new->fsgid = kgid;
93180 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
93181@@ -411,7 +435,7 @@ error:
93182 /*
93183 * change the user struct in a credentials set to match the new UID
93184 */
93185-static int set_user(struct cred *new)
93186+int set_user(struct cred *new)
93187 {
93188 struct user_struct *new_user;
93189
93190@@ -491,7 +515,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
93191 goto error;
93192 }
93193
93194+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
93195+ goto error;
93196+
93197 if (!uid_eq(new->uid, old->uid)) {
93198+ /* make sure we generate a learn log for what will
93199+ end up being a role transition after a full-learning
93200+ policy is generated.
93201+ CAP_SETUID is required to perform a transition;
93202+ we may not log a CAP_SETUID check above, e.g.
93203+ in the case where new ruid = old euid.
93204+ */
93205+ gr_learn_cap(current, new, CAP_SETUID);
93206 retval = set_user(new);
93207 if (retval < 0)
93208 goto error;
93209@@ -541,6 +576,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
93210 old = current_cred();
93211
93212 retval = -EPERM;
93213+
93214+ if (gr_check_crash_uid(kuid))
93215+ goto error;
93216+ if (gr_check_user_change(kuid, kuid, kuid))
93217+ goto error;
93218+
93219 if (ns_capable(old->user_ns, CAP_SETUID)) {
93220 new->suid = new->uid = kuid;
93221 if (!uid_eq(kuid, old->uid)) {
93222@@ -610,6 +651,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
93223 goto error;
93224 }
93225
93226+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
93227+ goto error;
93228+
93229 if (ruid != (uid_t) -1) {
93230 new->uid = kruid;
93231 if (!uid_eq(kruid, old->uid)) {
93232@@ -694,6 +738,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
93233 goto error;
93234 }
93235
93236+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
93237+ goto error;
93238+
93239 if (rgid != (gid_t) -1)
93240 new->gid = krgid;
93241 if (egid != (gid_t) -1)
93242@@ -758,12 +805,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
93243 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
93244 ns_capable(old->user_ns, CAP_SETUID)) {
93245 if (!uid_eq(kuid, old->fsuid)) {
93246+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
93247+ goto error;
93248+
93249 new->fsuid = kuid;
93250 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
93251 goto change_okay;
93252 }
93253 }
93254
93255+error:
93256 abort_creds(new);
93257 return old_fsuid;
93258
93259@@ -796,12 +847,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
93260 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
93261 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
93262 ns_capable(old->user_ns, CAP_SETGID)) {
93263+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
93264+ goto error;
93265+
93266 if (!gid_eq(kgid, old->fsgid)) {
93267 new->fsgid = kgid;
93268 goto change_okay;
93269 }
93270 }
93271
93272+error:
93273 abort_creds(new);
93274 return old_fsgid;
93275
93276@@ -1178,19 +1233,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
93277 return -EFAULT;
93278
93279 down_read(&uts_sem);
93280- error = __copy_to_user(&name->sysname, &utsname()->sysname,
93281+ error = __copy_to_user(name->sysname, &utsname()->sysname,
93282 __OLD_UTS_LEN);
93283 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
93284- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
93285+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
93286 __OLD_UTS_LEN);
93287 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
93288- error |= __copy_to_user(&name->release, &utsname()->release,
93289+ error |= __copy_to_user(name->release, &utsname()->release,
93290 __OLD_UTS_LEN);
93291 error |= __put_user(0, name->release + __OLD_UTS_LEN);
93292- error |= __copy_to_user(&name->version, &utsname()->version,
93293+ error |= __copy_to_user(name->version, &utsname()->version,
93294 __OLD_UTS_LEN);
93295 error |= __put_user(0, name->version + __OLD_UTS_LEN);
93296- error |= __copy_to_user(&name->machine, &utsname()->machine,
93297+ error |= __copy_to_user(name->machine, &utsname()->machine,
93298 __OLD_UTS_LEN);
93299 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
93300 up_read(&uts_sem);
93301@@ -1391,6 +1446,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
93302 */
93303 new_rlim->rlim_cur = 1;
93304 }
93305+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
93306+ is changed to a lower value. Since tasks can be created by the same
93307+ user in between this limit change and an execve by this task, force
93308+ a recheck only for this task by setting PF_NPROC_EXCEEDED
93309+ */
93310+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
93311+ tsk->flags |= PF_NPROC_EXCEEDED;
93312 }
93313 if (!retval) {
93314 if (old_rlim)
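
The PF_NPROC_EXCEEDED flag set here is consumed at execve time; the recheck it forces looks roughly like the mainline logic below (paraphrased from fs/exec.c of this era, so treat the exact shape as an approximation):

/* In do_execve_common(): refuse the exec if the limit is still exceeded. */
if ((current->flags & PF_NPROC_EXCEEDED) &&
    atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
	retval = -EAGAIN;
	goto out_ret;
}
current->flags &= ~PF_NPROC_EXCEEDED;	/* recheck passed; clear the flag */
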
93315diff --git a/kernel/sysctl.c b/kernel/sysctl.c
93316index 88ea2d6..88acc77 100644
93317--- a/kernel/sysctl.c
93318+++ b/kernel/sysctl.c
93319@@ -94,7 +94,6 @@
93320
93321
93322 #if defined(CONFIG_SYSCTL)
93323-
93324 /* External variables not in a header file. */
93325 extern int max_threads;
93326 extern int suid_dumpable;
93327@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
93328
93329 /* Constants used for minimum and maximum */
93330 #ifdef CONFIG_LOCKUP_DETECTOR
93331-static int sixty = 60;
93332+static int sixty __read_only = 60;
93333 #endif
93334
93335-static int __maybe_unused neg_one = -1;
93336+static int __maybe_unused neg_one __read_only = -1;
93337
93338-static int zero;
93339-static int __maybe_unused one = 1;
93340-static int __maybe_unused two = 2;
93341-static int __maybe_unused four = 4;
93342-static unsigned long one_ul = 1;
93343-static int one_hundred = 100;
93344+static int zero __read_only = 0;
93345+static int __maybe_unused one __read_only = 1;
93346+static int __maybe_unused two __read_only = 2;
93347+static int __maybe_unused three __read_only = 3;
93348+static int __maybe_unused four __read_only = 4;
93349+static unsigned long one_ul __read_only = 1;
93350+static int one_hundred __read_only = 100;
93351 #ifdef CONFIG_PRINTK
93352-static int ten_thousand = 10000;
93353+static int ten_thousand __read_only = 10000;
93354 #endif
93355
93356 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
93357@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
93358 void __user *buffer, size_t *lenp, loff_t *ppos);
93359 #endif
93360
93361-#ifdef CONFIG_PRINTK
93362 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93363 void __user *buffer, size_t *lenp, loff_t *ppos);
93364-#endif
93365
93366 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
93367 void __user *buffer, size_t *lenp, loff_t *ppos);
93368@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
93369
93370 #endif
93371
93372+extern struct ctl_table grsecurity_table[];
93373+
93374 static struct ctl_table kern_table[];
93375 static struct ctl_table vm_table[];
93376 static struct ctl_table fs_table[];
93377@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
93378 int sysctl_legacy_va_layout;
93379 #endif
93380
93381+#ifdef CONFIG_PAX_SOFTMODE
93382+static struct ctl_table pax_table[] = {
93383+ {
93384+ .procname = "softmode",
93385+ .data = &pax_softmode,
93386+ .maxlen = sizeof(unsigned int),
93387+ .mode = 0600,
93388+ .proc_handler = &proc_dointvec,
93389+ },
93390+
93391+ { }
93392+};
93393+#endif
93394+
93395 /* The default sysctl tables: */
93396
93397 static struct ctl_table sysctl_base_table[] = {
93398@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
93399 #endif
93400
93401 static struct ctl_table kern_table[] = {
93402+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
93403+ {
93404+ .procname = "grsecurity",
93405+ .mode = 0500,
93406+ .child = grsecurity_table,
93407+ },
93408+#endif
93409+
93410+#ifdef CONFIG_PAX_SOFTMODE
93411+ {
93412+ .procname = "pax",
93413+ .mode = 0500,
93414+ .child = pax_table,
93415+ },
93416+#endif
93417+
93418 {
93419 .procname = "sched_child_runs_first",
93420 .data = &sysctl_sched_child_runs_first,
93421@@ -649,7 +679,7 @@ static struct ctl_table kern_table[] = {
93422 .data = &modprobe_path,
93423 .maxlen = KMOD_PATH_LEN,
93424 .mode = 0644,
93425- .proc_handler = proc_dostring,
93426+ .proc_handler = proc_dostring_modpriv,
93427 },
93428 {
93429 .procname = "modules_disabled",
93430@@ -816,16 +846,20 @@ static struct ctl_table kern_table[] = {
93431 .extra1 = &zero,
93432 .extra2 = &one,
93433 },
93434+#endif
93435 {
93436 .procname = "kptr_restrict",
93437 .data = &kptr_restrict,
93438 .maxlen = sizeof(int),
93439 .mode = 0644,
93440 .proc_handler = proc_dointvec_minmax_sysadmin,
93441+#ifdef CONFIG_GRKERNSEC_HIDESYM
93442+ .extra1 = &two,
93443+#else
93444 .extra1 = &zero,
93445+#endif
93446 .extra2 = &two,
93447 },
93448-#endif
93449 {
93450 .procname = "ngroups_max",
93451 .data = &ngroups_max,
93452@@ -1072,10 +1106,17 @@ static struct ctl_table kern_table[] = {
93453 */
93454 {
93455 .procname = "perf_event_paranoid",
93456- .data = &sysctl_perf_event_paranoid,
93457- .maxlen = sizeof(sysctl_perf_event_paranoid),
93458+ .data = &sysctl_perf_event_legitimately_concerned,
93459+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
93460 .mode = 0644,
93461- .proc_handler = proc_dointvec,
93462+ /* go ahead, be a hero */
93463+ .proc_handler = proc_dointvec_minmax_sysadmin,
93464+ .extra1 = &neg_one,
93465+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
93466+ .extra2 = &three,
93467+#else
93468+ .extra2 = &two,
93469+#endif
93470 },
93471 {
93472 .procname = "perf_event_mlock_kb",
93473@@ -1340,6 +1381,13 @@ static struct ctl_table vm_table[] = {
93474 .proc_handler = proc_dointvec_minmax,
93475 .extra1 = &zero,
93476 },
93477+ {
93478+ .procname = "heap_stack_gap",
93479+ .data = &sysctl_heap_stack_gap,
93480+ .maxlen = sizeof(sysctl_heap_stack_gap),
93481+ .mode = 0644,
93482+ .proc_handler = proc_doulongvec_minmax,
93483+ },
93484 #else
93485 {
93486 .procname = "nr_trim_pages",
93487@@ -1822,6 +1870,16 @@ int proc_dostring(struct ctl_table *table, int write,
93488 (char __user *)buffer, lenp, ppos);
93489 }
93490
93491+int proc_dostring_modpriv(struct ctl_table *table, int write,
93492+ void __user *buffer, size_t *lenp, loff_t *ppos)
93493+{
93494+ if (write && !capable(CAP_SYS_MODULE))
93495+ return -EPERM;
93496+
93497+ return _proc_do_string(table->data, table->maxlen, write,
93498+ buffer, lenp, ppos);
93499+}
93500+
93501 static size_t proc_skip_spaces(char **buf)
93502 {
93503 size_t ret;
93504@@ -1927,6 +1985,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
93505 len = strlen(tmp);
93506 if (len > *size)
93507 len = *size;
93508+ if (len > sizeof(tmp))
93509+ len = sizeof(tmp);
93510 if (copy_to_user(*buf, tmp, len))
93511 return -EFAULT;
93512 *size -= len;
93513@@ -2104,7 +2164,7 @@ int proc_dointvec(struct ctl_table *table, int write,
93514 static int proc_taint(struct ctl_table *table, int write,
93515 void __user *buffer, size_t *lenp, loff_t *ppos)
93516 {
93517- struct ctl_table t;
93518+ ctl_table_no_const t;
93519 unsigned long tmptaint = get_taint();
93520 int err;
93521
93522@@ -2132,7 +2192,6 @@ static int proc_taint(struct ctl_table *table, int write,
93523 return err;
93524 }
93525
93526-#ifdef CONFIG_PRINTK
93527 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93528 void __user *buffer, size_t *lenp, loff_t *ppos)
93529 {
93530@@ -2141,7 +2200,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93531
93532 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
93533 }
93534-#endif
93535
93536 struct do_proc_dointvec_minmax_conv_param {
93537 int *min;
93538@@ -2701,6 +2759,12 @@ int proc_dostring(struct ctl_table *table, int write,
93539 return -ENOSYS;
93540 }
93541
93542+int proc_dostring_modpriv(struct ctl_table *table, int write,
93543+ void __user *buffer, size_t *lenp, loff_t *ppos)
93544+{
93545+ return -ENOSYS;
93546+}
93547+
93548 int proc_dointvec(struct ctl_table *table, int write,
93549 void __user *buffer, size_t *lenp, loff_t *ppos)
93550 {
93551@@ -2757,5 +2821,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
93552 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
93553 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
93554 EXPORT_SYMBOL(proc_dostring);
93555+EXPORT_SYMBOL(proc_dostring_modpriv);
93556 EXPORT_SYMBOL(proc_doulongvec_minmax);
93557 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
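
The sysctl hunks reuse proc_dointvec_minmax_sysadmin (un-#ifdef'd above) for kptr_restrict and perf_event_paranoid; its body, as it appears in kernels of this vintage, simply gates writes on CAP_SYS_ADMIN before falling through to the range check. Note that with GRKERNSEC_HIDESYM both bounds of kptr_restrict are &two, so even a capable writer cannot lower it below 2.

static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	if (write && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
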
93558diff --git a/kernel/taskstats.c b/kernel/taskstats.c
93559index 670fff8..a247812 100644
93560--- a/kernel/taskstats.c
93561+++ b/kernel/taskstats.c
93562@@ -28,9 +28,12 @@
93563 #include <linux/fs.h>
93564 #include <linux/file.h>
93565 #include <linux/pid_namespace.h>
93566+#include <linux/grsecurity.h>
93567 #include <net/genetlink.h>
93568 #include <linux/atomic.h>
93569
93570+extern int gr_is_taskstats_denied(int pid);
93571+
93572 /*
93573 * Maximum length of a cpumask that can be specified in
93574 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
93575@@ -576,6 +579,9 @@ err:
93576
93577 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
93578 {
93579+ if (gr_is_taskstats_denied(current->pid))
93580+ return -EACCES;
93581+
93582 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
93583 return cmd_attr_register_cpumask(info);
93584 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
93585diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
93586index a7077d3..dd48a49 100644
93587--- a/kernel/time/alarmtimer.c
93588+++ b/kernel/time/alarmtimer.c
93589@@ -823,7 +823,7 @@ static int __init alarmtimer_init(void)
93590 struct platform_device *pdev;
93591 int error = 0;
93592 int i;
93593- struct k_clock alarm_clock = {
93594+ static struct k_clock alarm_clock = {
93595 .clock_getres = alarm_clock_getres,
93596 .clock_get = alarm_clock_get,
93597 .timer_create = alarm_timer_create,
93598diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
93599index d8c724c..6b331a4 100644
93600--- a/kernel/time/hrtimer.c
93601+++ b/kernel/time/hrtimer.c
93602@@ -1399,7 +1399,7 @@ void hrtimer_peek_ahead_timers(void)
93603 local_irq_restore(flags);
93604 }
93605
93606-static void run_hrtimer_softirq(struct softirq_action *h)
93607+static __latent_entropy void run_hrtimer_softirq(void)
93608 {
93609 hrtimer_peek_ahead_timers();
93610 }
93611diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
93612index a16b678..8c5bd9d 100644
93613--- a/kernel/time/posix-cpu-timers.c
93614+++ b/kernel/time/posix-cpu-timers.c
93615@@ -1450,14 +1450,14 @@ struct k_clock clock_posix_cpu = {
93616
93617 static __init int init_posix_cpu_timers(void)
93618 {
93619- struct k_clock process = {
93620+ static struct k_clock process = {
93621 .clock_getres = process_cpu_clock_getres,
93622 .clock_get = process_cpu_clock_get,
93623 .timer_create = process_cpu_timer_create,
93624 .nsleep = process_cpu_nsleep,
93625 .nsleep_restart = process_cpu_nsleep_restart,
93626 };
93627- struct k_clock thread = {
93628+ static struct k_clock thread = {
93629 .clock_getres = thread_cpu_clock_getres,
93630 .clock_get = thread_cpu_clock_get,
93631 .timer_create = thread_cpu_timer_create,
93632diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
93633index 31ea01f..7fc61ef 100644
93634--- a/kernel/time/posix-timers.c
93635+++ b/kernel/time/posix-timers.c
93636@@ -43,6 +43,7 @@
93637 #include <linux/hash.h>
93638 #include <linux/posix-clock.h>
93639 #include <linux/posix-timers.h>
93640+#include <linux/grsecurity.h>
93641 #include <linux/syscalls.h>
93642 #include <linux/wait.h>
93643 #include <linux/workqueue.h>
93644@@ -124,7 +125,7 @@ static DEFINE_SPINLOCK(hash_lock);
93645 * which we beg off on and pass to do_sys_settimeofday().
93646 */
93647
93648-static struct k_clock posix_clocks[MAX_CLOCKS];
93649+static struct k_clock *posix_clocks[MAX_CLOCKS];
93650
93651 /*
93652 * These ones are defined below.
93653@@ -277,7 +278,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
93654 */
93655 static __init int init_posix_timers(void)
93656 {
93657- struct k_clock clock_realtime = {
93658+ static struct k_clock clock_realtime = {
93659 .clock_getres = hrtimer_get_res,
93660 .clock_get = posix_clock_realtime_get,
93661 .clock_set = posix_clock_realtime_set,
93662@@ -289,7 +290,7 @@ static __init int init_posix_timers(void)
93663 .timer_get = common_timer_get,
93664 .timer_del = common_timer_del,
93665 };
93666- struct k_clock clock_monotonic = {
93667+ static struct k_clock clock_monotonic = {
93668 .clock_getres = hrtimer_get_res,
93669 .clock_get = posix_ktime_get_ts,
93670 .nsleep = common_nsleep,
93671@@ -299,19 +300,19 @@ static __init int init_posix_timers(void)
93672 .timer_get = common_timer_get,
93673 .timer_del = common_timer_del,
93674 };
93675- struct k_clock clock_monotonic_raw = {
93676+ static struct k_clock clock_monotonic_raw = {
93677 .clock_getres = hrtimer_get_res,
93678 .clock_get = posix_get_monotonic_raw,
93679 };
93680- struct k_clock clock_realtime_coarse = {
93681+ static struct k_clock clock_realtime_coarse = {
93682 .clock_getres = posix_get_coarse_res,
93683 .clock_get = posix_get_realtime_coarse,
93684 };
93685- struct k_clock clock_monotonic_coarse = {
93686+ static struct k_clock clock_monotonic_coarse = {
93687 .clock_getres = posix_get_coarse_res,
93688 .clock_get = posix_get_monotonic_coarse,
93689 };
93690- struct k_clock clock_tai = {
93691+ static struct k_clock clock_tai = {
93692 .clock_getres = hrtimer_get_res,
93693 .clock_get = posix_get_tai,
93694 .nsleep = common_nsleep,
93695@@ -321,7 +322,7 @@ static __init int init_posix_timers(void)
93696 .timer_get = common_timer_get,
93697 .timer_del = common_timer_del,
93698 };
93699- struct k_clock clock_boottime = {
93700+ static struct k_clock clock_boottime = {
93701 .clock_getres = hrtimer_get_res,
93702 .clock_get = posix_get_boottime,
93703 .nsleep = common_nsleep,
93704@@ -533,7 +534,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
93705 return;
93706 }
93707
93708- posix_clocks[clock_id] = *new_clock;
93709+ posix_clocks[clock_id] = new_clock;
93710 }
93711 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
93712
93713@@ -579,9 +580,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
93714 return (id & CLOCKFD_MASK) == CLOCKFD ?
93715 &clock_posix_dynamic : &clock_posix_cpu;
93716
93717- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
93718+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
93719 return NULL;
93720- return &posix_clocks[id];
93721+ return posix_clocks[id];
93722 }
93723
93724 static int common_timer_create(struct k_itimer *new_timer)
93725@@ -599,7 +600,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
93726 struct k_clock *kc = clockid_to_kclock(which_clock);
93727 struct k_itimer *new_timer;
93728 int error, new_timer_id;
93729- sigevent_t event;
93730+ sigevent_t event = { };
93731 int it_id_set = IT_ID_NOT_SET;
93732
93733 if (!kc)
93734@@ -1014,6 +1015,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
93735 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
93736 return -EFAULT;
93737
93738+ /* only the CLOCK_REALTIME clock can be set; all other clocks
93739+ have their clock_set fptr set to a nosettime dummy function.
93740+ CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
93741+ call common_clock_set, which calls do_sys_settimeofday, which
93742+ we hook.
93743+ */
93744+
93745 return kc->clock_set(which_clock, &new_tp);
93746 }
93747
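
Because posix_clocks[] now stores pointers rather than structure copies, posix_timers_register_clock() must be handed objects that outlive the init function; that is why every on-stack k_clock above gains static. A sketch of registering a clock under the new scheme (the clockid and ops are placeholders):

static __init int demo_clock_init(void)
{
	static struct k_clock demo_clock = {	/* must not live on the stack */
		.clock_getres = hrtimer_get_res,
		.clock_get    = posix_ktime_get_ts,
	};

	posix_timers_register_clock(CLOCK_MONOTONIC, &demo_clock);
	return 0;
}
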
93748diff --git a/kernel/time/time.c b/kernel/time/time.c
93749index 2c85b77..6530536 100644
93750--- a/kernel/time/time.c
93751+++ b/kernel/time/time.c
93752@@ -173,6 +173,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
93753 return error;
93754
93755 if (tz) {
93756+ /* we log in do_settimeofday called below, so don't log twice
93757+ */
93758+ if (!tv)
93759+ gr_log_timechange();
93760+
93761 sys_tz = *tz;
93762 update_vsyscall_tz();
93763 if (firsttime) {
93764diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
93765index 6a93185..288c331 100644
93766--- a/kernel/time/timekeeping.c
93767+++ b/kernel/time/timekeeping.c
93768@@ -15,6 +15,7 @@
93769 #include <linux/init.h>
93770 #include <linux/mm.h>
93771 #include <linux/sched.h>
93772+#include <linux/grsecurity.h>
93773 #include <linux/syscore_ops.h>
93774 #include <linux/clocksource.h>
93775 #include <linux/jiffies.h>
93776@@ -775,6 +776,8 @@ int do_settimeofday64(const struct timespec64 *ts)
93777 if (!timespec64_valid_strict(ts))
93778 return -EINVAL;
93779
93780+ gr_log_timechange();
93781+
93782 raw_spin_lock_irqsave(&timekeeper_lock, flags);
93783 write_seqcount_begin(&tk_core.seq);
93784
93785diff --git a/kernel/time/timer.c b/kernel/time/timer.c
93786index 2d3f5c5..7ed7dc5 100644
93787--- a/kernel/time/timer.c
93788+++ b/kernel/time/timer.c
93789@@ -1393,7 +1393,7 @@ void update_process_times(int user_tick)
93790 /*
93791 * This function runs timers and the timer-tq in bottom half context.
93792 */
93793-static void run_timer_softirq(struct softirq_action *h)
93794+static __latent_entropy void run_timer_softirq(void)
93795 {
93796 struct tvec_base *base = __this_cpu_read(tvec_bases);
93797
93798@@ -1456,7 +1456,7 @@ static void process_timeout(unsigned long __data)
93799 *
93800 * In all cases the return value is guaranteed to be non-negative.
93801 */
93802-signed long __sched schedule_timeout(signed long timeout)
93803+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
93804 {
93805 struct timer_list timer;
93806 unsigned long expire;
93807diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
93808index 61ed862..3b52c65 100644
93809--- a/kernel/time/timer_list.c
93810+++ b/kernel/time/timer_list.c
93811@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
93812
93813 static void print_name_offset(struct seq_file *m, void *sym)
93814 {
93815+#ifdef CONFIG_GRKERNSEC_HIDESYM
93816+ SEQ_printf(m, "<%p>", NULL);
93817+#else
93818 char symname[KSYM_NAME_LEN];
93819
93820 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
93821 SEQ_printf(m, "<%pK>", sym);
93822 else
93823 SEQ_printf(m, "%s", symname);
93824+#endif
93825 }
93826
93827 static void
93828@@ -119,7 +123,11 @@ next_one:
93829 static void
93830 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
93831 {
93832+#ifdef CONFIG_GRKERNSEC_HIDESYM
93833+ SEQ_printf(m, " .base: %p\n", NULL);
93834+#else
93835 SEQ_printf(m, " .base: %pK\n", base);
93836+#endif
93837 SEQ_printf(m, " .index: %d\n",
93838 base->index);
93839 SEQ_printf(m, " .resolution: %Lu nsecs\n",
93840@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
93841 {
93842 struct proc_dir_entry *pe;
93843
93844+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93845+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
93846+#else
93847 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
93848+#endif
93849 if (!pe)
93850 return -ENOMEM;
93851 return 0;
93852diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
93853index 1fb08f2..ca4bb1e 100644
93854--- a/kernel/time/timer_stats.c
93855+++ b/kernel/time/timer_stats.c
93856@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
93857 static unsigned long nr_entries;
93858 static struct entry entries[MAX_ENTRIES];
93859
93860-static atomic_t overflow_count;
93861+static atomic_unchecked_t overflow_count;
93862
93863 /*
93864 * The entries are in a hash-table, for fast lookup:
93865@@ -140,7 +140,7 @@ static void reset_entries(void)
93866 nr_entries = 0;
93867 memset(entries, 0, sizeof(entries));
93868 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
93869- atomic_set(&overflow_count, 0);
93870+ atomic_set_unchecked(&overflow_count, 0);
93871 }
93872
93873 static struct entry *alloc_entry(void)
93874@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
93875 if (likely(entry))
93876 entry->count++;
93877 else
93878- atomic_inc(&overflow_count);
93879+ atomic_inc_unchecked(&overflow_count);
93880
93881 out_unlock:
93882 raw_spin_unlock_irqrestore(lock, flags);
93883@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
93884
93885 static void print_name_offset(struct seq_file *m, unsigned long addr)
93886 {
93887+#ifdef CONFIG_GRKERNSEC_HIDESYM
93888+ seq_printf(m, "<%p>", NULL);
93889+#else
93890 char symname[KSYM_NAME_LEN];
93891
93892 if (lookup_symbol_name(addr, symname) < 0)
93893- seq_printf(m, "<%p>", (void *)addr);
93894+ seq_printf(m, "<%pK>", (void *)addr);
93895 else
93896 seq_printf(m, "%s", symname);
93897+#endif
93898 }
93899
93900 static int tstats_show(struct seq_file *m, void *v)
93901@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
93902
93903 seq_puts(m, "Timer Stats Version: v0.3\n");
93904 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
93905- if (atomic_read(&overflow_count))
93906- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
93907+ if (atomic_read_unchecked(&overflow_count))
93908+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
93909 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
93910
93911 for (i = 0; i < nr_entries; i++) {
93912@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
93913 {
93914 struct proc_dir_entry *pe;
93915
93916+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93917+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
93918+#else
93919 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
93920+#endif
93921 if (!pe)
93922 return -ENOMEM;
93923 return 0;
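
Under the PaX REFCOUNT hardening, the ordinary atomic_t operations trap on overflow to stop reference-count wraps; purely statistical counters like overflow_count, where wrapping is harmless, move to the _unchecked variants that keep plain modular arithmetic. A minimal sketch of the split, using the type and accessor names this patch uses throughout:

static atomic_unchecked_t stat_overflows;	/* may wrap: statistics only */

static void note_overflow(void)
{
	atomic_inc_unchecked(&stat_overflows);		/* never traps */
}

static int read_overflows(void)
{
	return atomic_read_unchecked(&stat_overflows);
}
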
93924diff --git a/kernel/torture.c b/kernel/torture.c
93925index dd70993..0bf694b 100644
93926--- a/kernel/torture.c
93927+++ b/kernel/torture.c
93928@@ -482,7 +482,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
93929 mutex_lock(&fullstop_mutex);
93930 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
93931 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
93932- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
93933+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
93934 } else {
93935 pr_warn("Concurrent rmmod and shutdown illegal!\n");
93936 }
93937@@ -549,14 +549,14 @@ static int torture_stutter(void *arg)
93938 if (!torture_must_stop()) {
93939 if (stutter > 1) {
93940 schedule_timeout_interruptible(stutter - 1);
93941- ACCESS_ONCE(stutter_pause_test) = 2;
93942+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
93943 }
93944 schedule_timeout_interruptible(1);
93945- ACCESS_ONCE(stutter_pause_test) = 1;
93946+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
93947 }
93948 if (!torture_must_stop())
93949 schedule_timeout_interruptible(stutter);
93950- ACCESS_ONCE(stutter_pause_test) = 0;
93951+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
93952 torture_shutdown_absorb("torture_stutter");
93953 } while (!torture_must_stop());
93954 torture_kthread_stopping("torture_stutter");
93955@@ -648,7 +648,7 @@ bool torture_cleanup_begin(void)
93956 schedule_timeout_uninterruptible(10);
93957 return true;
93958 }
93959- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
93960+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
93961 mutex_unlock(&fullstop_mutex);
93962 torture_shutdown_cleanup();
93963 torture_shuffle_cleanup();
93964diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
93965index 483cecf..ac46091 100644
93966--- a/kernel/trace/blktrace.c
93967+++ b/kernel/trace/blktrace.c
93968@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
93969 struct blk_trace *bt = filp->private_data;
93970 char buf[16];
93971
93972- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
93973+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
93974
93975 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
93976 }
93977@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
93978 return 1;
93979
93980 bt = buf->chan->private_data;
93981- atomic_inc(&bt->dropped);
93982+ atomic_inc_unchecked(&bt->dropped);
93983 return 0;
93984 }
93985
93986@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
93987
93988 bt->dir = dir;
93989 bt->dev = dev;
93990- atomic_set(&bt->dropped, 0);
93991+ atomic_set_unchecked(&bt->dropped, 0);
93992 INIT_LIST_HEAD(&bt->running_list);
93993
93994 ret = -EIO;
93995diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
93996index af5bffd..57664b8 100644
93997--- a/kernel/trace/ftrace.c
93998+++ b/kernel/trace/ftrace.c
93999@@ -2382,12 +2382,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
94000 if (unlikely(ftrace_disabled))
94001 return 0;
94002
94003+ ret = ftrace_arch_code_modify_prepare();
94004+ FTRACE_WARN_ON(ret);
94005+ if (ret)
94006+ return 0;
94007+
94008 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
94009+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
94010 if (ret) {
94011 ftrace_bug(ret, rec);
94012- return 0;
94013 }
94014- return 1;
94015+ return ret ? 0 : 1;
94016 }
94017
94018 /*
94019@@ -4776,8 +4781,10 @@ static int ftrace_process_locs(struct module *mod,
94020 if (!count)
94021 return 0;
94022
94023+ pax_open_kernel();
94024 sort(start, count, sizeof(*start),
94025 ftrace_cmp_ips, ftrace_swap_ips);
94026+ pax_close_kernel();
94027
94028 start_pg = ftrace_allocate_pages(count);
94029 if (!start_pg)
94030@@ -5653,7 +5660,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
94031
94032 if (t->ret_stack == NULL) {
94033 atomic_set(&t->tracing_graph_pause, 0);
94034- atomic_set(&t->trace_overrun, 0);
94035+ atomic_set_unchecked(&t->trace_overrun, 0);
94036 t->curr_ret_stack = -1;
94037 /* Make sure the tasks see the -1 first: */
94038 smp_wmb();
94039@@ -5876,7 +5883,7 @@ static void
94040 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
94041 {
94042 atomic_set(&t->tracing_graph_pause, 0);
94043- atomic_set(&t->trace_overrun, 0);
94044+ atomic_set_unchecked(&t->trace_overrun, 0);
94045 t->ftrace_timestamp = 0;
94046 /* make curr_ret_stack visible before we add the ret_stack */
94047 smp_wmb();
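
ftrace_code_disable patches kernel text, which is no longer writable once KERNEXEC-style protections are active, so the hunk brackets the nop rewrite with the arch prepare/post hooks; note they are restructured so post_process runs even when ftrace_make_nop fails. For reference, the x86 implementations of the hooks around this kernel version looked like the sketch below (verify against the tree before relying on it):

int ftrace_arch_code_modify_prepare(void)
{
	set_kernel_text_rw();		/* make .text writable */
	set_all_modules_text_rw();
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_all_modules_text_ro();	/* re-protect */
	set_kernel_text_ro();
	return 0;
}
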
94048diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
94049index d2e151c..1498973 100644
94050--- a/kernel/trace/ring_buffer.c
94051+++ b/kernel/trace/ring_buffer.c
94052@@ -350,9 +350,9 @@ struct buffer_data_page {
94053 */
94054 struct buffer_page {
94055 struct list_head list; /* list of buffer pages */
94056- local_t write; /* index for next write */
94057+ local_unchecked_t write; /* index for next write */
94058 unsigned read; /* index for next read */
94059- local_t entries; /* entries on this page */
94060+ local_unchecked_t entries; /* entries on this page */
94061 unsigned long real_end; /* real end of data */
94062 struct buffer_data_page *page; /* Actual data page */
94063 };
94064@@ -473,11 +473,11 @@ struct ring_buffer_per_cpu {
94065 unsigned long last_overrun;
94066 local_t entries_bytes;
94067 local_t entries;
94068- local_t overrun;
94069- local_t commit_overrun;
94070- local_t dropped_events;
94071+ local_unchecked_t overrun;
94072+ local_unchecked_t commit_overrun;
94073+ local_unchecked_t dropped_events;
94074 local_t committing;
94075- local_t commits;
94076+ local_unchecked_t commits;
94077 unsigned long read;
94078 unsigned long read_bytes;
94079 u64 write_stamp;
94080@@ -1047,8 +1047,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
94081 *
94082 * We add a counter to the write field to denote this.
94083 */
94084- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
94085- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
94086+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
94087+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
94088
94089 /*
94090 * Just make sure we have seen our old_write and synchronize
94091@@ -1076,8 +1076,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
94092 * cmpxchg to only update if an interrupt did not already
94093 * do it for us. If the cmpxchg fails, we don't care.
94094 */
94095- (void)local_cmpxchg(&next_page->write, old_write, val);
94096- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
94097+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
94098+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
94099
94100 /*
94101 * No need to worry about races with clearing out the commit.
94102@@ -1445,12 +1445,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
94103
94104 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
94105 {
94106- return local_read(&bpage->entries) & RB_WRITE_MASK;
94107+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
94108 }
94109
94110 static inline unsigned long rb_page_write(struct buffer_page *bpage)
94111 {
94112- return local_read(&bpage->write) & RB_WRITE_MASK;
94113+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
94114 }
94115
94116 static int
94117@@ -1545,7 +1545,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
94118 * bytes consumed in ring buffer from here.
94119 * Increment overrun to account for the lost events.
94120 */
94121- local_add(page_entries, &cpu_buffer->overrun);
94122+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
94123 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
94124 }
94125
94126@@ -2107,7 +2107,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
94127 * it is our responsibility to update
94128 * the counters.
94129 */
94130- local_add(entries, &cpu_buffer->overrun);
94131+ local_add_unchecked(entries, &cpu_buffer->overrun);
94132 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
94133
94134 /*
94135@@ -2257,7 +2257,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94136 if (tail == BUF_PAGE_SIZE)
94137 tail_page->real_end = 0;
94138
94139- local_sub(length, &tail_page->write);
94140+ local_sub_unchecked(length, &tail_page->write);
94141 return;
94142 }
94143
94144@@ -2292,7 +2292,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94145 rb_event_set_padding(event);
94146
94147 /* Set the write back to the previous setting */
94148- local_sub(length, &tail_page->write);
94149+ local_sub_unchecked(length, &tail_page->write);
94150 return;
94151 }
94152
94153@@ -2304,7 +2304,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94154
94155 /* Set write to end of buffer */
94156 length = (tail + length) - BUF_PAGE_SIZE;
94157- local_sub(length, &tail_page->write);
94158+ local_sub_unchecked(length, &tail_page->write);
94159 }
94160
94161 /*
94162@@ -2330,7 +2330,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94163 * about it.
94164 */
94165 if (unlikely(next_page == commit_page)) {
94166- local_inc(&cpu_buffer->commit_overrun);
94167+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94168 goto out_reset;
94169 }
94170
94171@@ -2360,7 +2360,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94172 * this is easy, just stop here.
94173 */
94174 if (!(buffer->flags & RB_FL_OVERWRITE)) {
94175- local_inc(&cpu_buffer->dropped_events);
94176+ local_inc_unchecked(&cpu_buffer->dropped_events);
94177 goto out_reset;
94178 }
94179
94180@@ -2386,7 +2386,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94181 cpu_buffer->tail_page) &&
94182 (cpu_buffer->commit_page ==
94183 cpu_buffer->reader_page))) {
94184- local_inc(&cpu_buffer->commit_overrun);
94185+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94186 goto out_reset;
94187 }
94188 }
94189@@ -2434,7 +2434,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94190 length += RB_LEN_TIME_EXTEND;
94191
94192 tail_page = cpu_buffer->tail_page;
94193- write = local_add_return(length, &tail_page->write);
94194+ write = local_add_return_unchecked(length, &tail_page->write);
94195
94196 /* set write to only the index of the write */
94197 write &= RB_WRITE_MASK;
94198@@ -2458,7 +2458,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94199 kmemcheck_annotate_bitfield(event, bitfield);
94200 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
94201
94202- local_inc(&tail_page->entries);
94203+ local_inc_unchecked(&tail_page->entries);
94204
94205 /*
94206 * If this is the first commit on the page, then update
94207@@ -2491,7 +2491,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94208
94209 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
94210 unsigned long write_mask =
94211- local_read(&bpage->write) & ~RB_WRITE_MASK;
94212+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
94213 unsigned long event_length = rb_event_length(event);
94214 /*
94215 * This is on the tail page. It is possible that
94216@@ -2501,7 +2501,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94217 */
94218 old_index += write_mask;
94219 new_index += write_mask;
94220- index = local_cmpxchg(&bpage->write, old_index, new_index);
94221+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
94222 if (index == old_index) {
94223 /* update counters */
94224 local_sub(event_length, &cpu_buffer->entries_bytes);
94225@@ -2516,7 +2516,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94226 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
94227 {
94228 local_inc(&cpu_buffer->committing);
94229- local_inc(&cpu_buffer->commits);
94230+ local_inc_unchecked(&cpu_buffer->commits);
94231 }
94232
94233 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
94234@@ -2528,7 +2528,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
94235 return;
94236
94237 again:
94238- commits = local_read(&cpu_buffer->commits);
94239+ commits = local_read_unchecked(&cpu_buffer->commits);
94240 /* synchronize with interrupts */
94241 barrier();
94242 if (local_read(&cpu_buffer->committing) == 1)
94243@@ -2544,7 +2544,7 @@ static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
94244 * updating of the commit page and the clearing of the
94245 * committing counter.
94246 */
94247- if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
94248+ if (unlikely(local_read_unchecked(&cpu_buffer->commits) != commits) &&
94249 !local_read(&cpu_buffer->committing)) {
94250 local_inc(&cpu_buffer->committing);
94251 goto again;
94252@@ -2574,7 +2574,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
94253 barrier();
94254 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
94255 local_dec(&cpu_buffer->committing);
94256- local_dec(&cpu_buffer->commits);
94257+ local_dec_unchecked(&cpu_buffer->commits);
94258 return NULL;
94259 }
94260 #endif
94261@@ -2904,7 +2904,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94262
94263 /* Do the likely case first */
94264 if (likely(bpage->page == (void *)addr)) {
94265- local_dec(&bpage->entries);
94266+ local_dec_unchecked(&bpage->entries);
94267 return;
94268 }
94269
94270@@ -2916,7 +2916,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94271 start = bpage;
94272 do {
94273 if (bpage->page == (void *)addr) {
94274- local_dec(&bpage->entries);
94275+ local_dec_unchecked(&bpage->entries);
94276 return;
94277 }
94278 rb_inc_page(cpu_buffer, &bpage);
94279@@ -3200,7 +3200,7 @@ static inline unsigned long
94280 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
94281 {
94282 return local_read(&cpu_buffer->entries) -
94283- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
94284+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
94285 }
94286
94287 /**
94288@@ -3289,7 +3289,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
94289 return 0;
94290
94291 cpu_buffer = buffer->buffers[cpu];
94292- ret = local_read(&cpu_buffer->overrun);
94293+ ret = local_read_unchecked(&cpu_buffer->overrun);
94294
94295 return ret;
94296 }
94297@@ -3312,7 +3312,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
94298 return 0;
94299
94300 cpu_buffer = buffer->buffers[cpu];
94301- ret = local_read(&cpu_buffer->commit_overrun);
94302+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
94303
94304 return ret;
94305 }
94306@@ -3334,7 +3334,7 @@ ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
94307 return 0;
94308
94309 cpu_buffer = buffer->buffers[cpu];
94310- ret = local_read(&cpu_buffer->dropped_events);
94311+ ret = local_read_unchecked(&cpu_buffer->dropped_events);
94312
94313 return ret;
94314 }
94315@@ -3397,7 +3397,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
94316 /* if you care about this being correct, lock the buffer */
94317 for_each_buffer_cpu(buffer, cpu) {
94318 cpu_buffer = buffer->buffers[cpu];
94319- overruns += local_read(&cpu_buffer->overrun);
94320+ overruns += local_read_unchecked(&cpu_buffer->overrun);
94321 }
94322
94323 return overruns;
94324@@ -3568,8 +3568,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94325 /*
94326 * Reset the reader page to size zero.
94327 */
94328- local_set(&cpu_buffer->reader_page->write, 0);
94329- local_set(&cpu_buffer->reader_page->entries, 0);
94330+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94331+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94332 local_set(&cpu_buffer->reader_page->page->commit, 0);
94333 cpu_buffer->reader_page->real_end = 0;
94334
94335@@ -3603,7 +3603,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94336 * want to compare with the last_overrun.
94337 */
94338 smp_mb();
94339- overwrite = local_read(&(cpu_buffer->overrun));
94340+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
94341
94342 /*
94343 * Here's the tricky part.
94344@@ -4175,8 +4175,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94345
94346 cpu_buffer->head_page
94347 = list_entry(cpu_buffer->pages, struct buffer_page, list);
94348- local_set(&cpu_buffer->head_page->write, 0);
94349- local_set(&cpu_buffer->head_page->entries, 0);
94350+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
94351+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
94352 local_set(&cpu_buffer->head_page->page->commit, 0);
94353
94354 cpu_buffer->head_page->read = 0;
94355@@ -4186,18 +4186,18 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94356
94357 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
94358 INIT_LIST_HEAD(&cpu_buffer->new_pages);
94359- local_set(&cpu_buffer->reader_page->write, 0);
94360- local_set(&cpu_buffer->reader_page->entries, 0);
94361+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94362+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94363 local_set(&cpu_buffer->reader_page->page->commit, 0);
94364 cpu_buffer->reader_page->read = 0;
94365
94366 local_set(&cpu_buffer->entries_bytes, 0);
94367- local_set(&cpu_buffer->overrun, 0);
94368- local_set(&cpu_buffer->commit_overrun, 0);
94369- local_set(&cpu_buffer->dropped_events, 0);
94370+ local_set_unchecked(&cpu_buffer->overrun, 0);
94371+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
94372+ local_set_unchecked(&cpu_buffer->dropped_events, 0);
94373 local_set(&cpu_buffer->entries, 0);
94374 local_set(&cpu_buffer->committing, 0);
94375- local_set(&cpu_buffer->commits, 0);
94376+ local_set_unchecked(&cpu_buffer->commits, 0);
94377 cpu_buffer->read = 0;
94378 cpu_buffer->read_bytes = 0;
94379
94380@@ -4598,8 +4598,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
94381 rb_init_page(bpage);
94382 bpage = reader->page;
94383 reader->page = *data_page;
94384- local_set(&reader->write, 0);
94385- local_set(&reader->entries, 0);
94386+ local_set_unchecked(&reader->write, 0);
94387+ local_set_unchecked(&reader->entries, 0);
94388 reader->read = 0;
94389 *data_page = bpage;
94390
94391diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
94392index 361a827..6a319a3 100644
94393--- a/kernel/trace/trace.c
94394+++ b/kernel/trace/trace.c
94395@@ -3499,7 +3499,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
94396 return 0;
94397 }
94398
94399-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
94400+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
94401 {
94402 /* do nothing if flag is already set */
94403 if (!!(trace_flags & mask) == !!enabled)
94404diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
94405index 8de48ba..3e5b4fa 100644
94406--- a/kernel/trace/trace.h
94407+++ b/kernel/trace/trace.h
94408@@ -1271,7 +1271,7 @@ extern const char *__stop___tracepoint_str[];
94409 void trace_printk_init_buffers(void);
94410 void trace_printk_start_comm(void);
94411 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
94412-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
94413+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
94414
94415 /*
94416 * Normal trace_printk() and friends allocates special buffers
94417diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
94418index 57b67b1..66082a9 100644
94419--- a/kernel/trace/trace_clock.c
94420+++ b/kernel/trace/trace_clock.c
94421@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
94422 return now;
94423 }
94424
94425-static atomic64_t trace_counter;
94426+static atomic64_unchecked_t trace_counter;
94427
94428 /*
94429 * trace_clock_counter(): simply an atomic counter.
94430@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
94431 */
94432 u64 notrace trace_clock_counter(void)
94433 {
94434- return atomic64_add_return(1, &trace_counter);
94435+ return atomic64_inc_return_unchecked(&trace_counter);
94436 }
94437diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
94438index b03a0ea..2df3168 100644
94439--- a/kernel/trace/trace_events.c
94440+++ b/kernel/trace/trace_events.c
94441@@ -1755,7 +1755,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
94442 return 0;
94443 }
94444
94445-struct ftrace_module_file_ops;
94446 static void __add_event_to_tracers(struct ftrace_event_call *call);
94447
94448 /* Add an additional event_call dynamically */
94449diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
94450index ba47600..d0e47fa 100644
94451--- a/kernel/trace/trace_functions_graph.c
94452+++ b/kernel/trace/trace_functions_graph.c
94453@@ -133,7 +133,7 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
94454
94455 /* The return trace stack is full */
94456 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
94457- atomic_inc(&current->trace_overrun);
94458+ atomic_inc_unchecked(&current->trace_overrun);
94459 return -EBUSY;
94460 }
94461
94462@@ -230,7 +230,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
94463 *ret = current->ret_stack[index].ret;
94464 trace->func = current->ret_stack[index].func;
94465 trace->calltime = current->ret_stack[index].calltime;
94466- trace->overrun = atomic_read(&current->trace_overrun);
94467+ trace->overrun = atomic_read_unchecked(&current->trace_overrun);
94468 trace->depth = index;
94469 }
94470
94471diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
94472index 7a9ba62..2e0e4a1 100644
94473--- a/kernel/trace/trace_mmiotrace.c
94474+++ b/kernel/trace/trace_mmiotrace.c
94475@@ -24,7 +24,7 @@ struct header_iter {
94476 static struct trace_array *mmio_trace_array;
94477 static bool overrun_detected;
94478 static unsigned long prev_overruns;
94479-static atomic_t dropped_count;
94480+static atomic_unchecked_t dropped_count;
94481
94482 static void mmio_reset_data(struct trace_array *tr)
94483 {
94484@@ -124,7 +124,7 @@ static void mmio_close(struct trace_iterator *iter)
94485
94486 static unsigned long count_overruns(struct trace_iterator *iter)
94487 {
94488- unsigned long cnt = atomic_xchg(&dropped_count, 0);
94489+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
94490 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
94491
94492 if (over > prev_overruns)
94493@@ -307,7 +307,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
94494 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
94495 sizeof(*entry), 0, pc);
94496 if (!event) {
94497- atomic_inc(&dropped_count);
94498+ atomic_inc_unchecked(&dropped_count);
94499 return;
94500 }
94501 entry = ring_buffer_event_data(event);
94502@@ -337,7 +337,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
94503 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
94504 sizeof(*entry), 0, pc);
94505 if (!event) {
94506- atomic_inc(&dropped_count);
94507+ atomic_inc_unchecked(&dropped_count);
94508 return;
94509 }
94510 entry = ring_buffer_event_data(event);
94511diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
94512index b77b9a6..82f19bd 100644
94513--- a/kernel/trace/trace_output.c
94514+++ b/kernel/trace/trace_output.c
94515@@ -707,14 +707,16 @@ int register_ftrace_event(struct trace_event *event)
94516 goto out;
94517 }
94518
94519+ pax_open_kernel();
94520 if (event->funcs->trace == NULL)
94521- event->funcs->trace = trace_nop_print;
94522+ *(void **)&event->funcs->trace = trace_nop_print;
94523 if (event->funcs->raw == NULL)
94524- event->funcs->raw = trace_nop_print;
94525+ *(void **)&event->funcs->raw = trace_nop_print;
94526 if (event->funcs->hex == NULL)
94527- event->funcs->hex = trace_nop_print;
94528+ *(void **)&event->funcs->hex = trace_nop_print;
94529 if (event->funcs->binary == NULL)
94530- event->funcs->binary = trace_nop_print;
94531+ *(void **)&event->funcs->binary = trace_nop_print;
94532+ pax_close_kernel();
94533
94534 key = event->type & (EVENT_HASHSIZE - 1);
94535
94536diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
94537index f8b45d8..70ff6c8 100644
94538--- a/kernel/trace/trace_seq.c
94539+++ b/kernel/trace/trace_seq.c
94540@@ -337,7 +337,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
94541 return 0;
94542 }
94543
94544- seq_buf_path(&s->seq, path, "\n");
94545+ seq_buf_path(&s->seq, path, "\n\\");
94546
94547 if (unlikely(seq_buf_has_overflowed(&s->seq))) {
94548 s->seq.len = save_len;
94549diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
94550index 16eddb3..758b308 100644
94551--- a/kernel/trace/trace_stack.c
94552+++ b/kernel/trace/trace_stack.c
94553@@ -90,7 +90,7 @@ check_stack(unsigned long ip, unsigned long *stack)
94554 return;
94555
94556 /* we do not handle interrupt stacks yet */
94557- if (!object_is_on_stack(stack))
94558+ if (!object_starts_on_stack(stack))
94559 return;
94560
94561 local_irq_save(flags);
94562diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
94563index c6ee36f..78513f3 100644
94564--- a/kernel/trace/trace_syscalls.c
94565+++ b/kernel/trace/trace_syscalls.c
94566@@ -590,6 +590,8 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
94567 int num;
94568
94569 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94570+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94571+ return -EINVAL;
94572
94573 mutex_lock(&syscall_trace_lock);
94574 if (!sys_perf_refcount_enter)
94575@@ -610,6 +612,8 @@ static void perf_sysenter_disable(struct ftrace_event_call *call)
94576 int num;
94577
94578 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94579+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94580+ return;
94581
94582 mutex_lock(&syscall_trace_lock);
94583 sys_perf_refcount_enter--;
94584@@ -662,6 +666,8 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
94585 int num;
94586
94587 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94588+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94589+ return -EINVAL;
94590
94591 mutex_lock(&syscall_trace_lock);
94592 if (!sys_perf_refcount_exit)
94593@@ -682,6 +688,8 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
94594 int num;
94595
94596 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94597+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94598+ return;
94599
94600 mutex_lock(&syscall_trace_lock);
94601 sys_perf_refcount_exit--;
94602diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
94603index 4109f83..fe1f830 100644
94604--- a/kernel/user_namespace.c
94605+++ b/kernel/user_namespace.c
94606@@ -83,6 +83,21 @@ int create_user_ns(struct cred *new)
94607 !kgid_has_mapping(parent_ns, group))
94608 return -EPERM;
94609
94610+#ifdef CONFIG_GRKERNSEC
94611+ /*
94612+ * This doesn't really inspire confidence:
94613+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
94614+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
94615+ * Increases kernel attack surface in areas developers
94616+ * previously cared little about ("low" importance due
94617+ * to requiring "root" capability)
94618+ * To be removed when this code receives *proper* review
94619+ */
94620+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
94621+ !capable(CAP_SETGID))
94622+ return -EPERM;
94623+#endif
94624+
94625 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
94626 if (!ns)
94627 return -ENOMEM;
94628@@ -980,7 +995,7 @@ static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
94629 if (atomic_read(&current->mm->mm_users) > 1)
94630 return -EINVAL;
94631
94632- if (current->fs->users != 1)
94633+ if (atomic_read(&current->fs->users) != 1)
94634 return -EINVAL;
94635
94636 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
94637diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
94638index c8eac43..4b5f08f 100644
94639--- a/kernel/utsname_sysctl.c
94640+++ b/kernel/utsname_sysctl.c
94641@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
94642 static int proc_do_uts_string(struct ctl_table *table, int write,
94643 void __user *buffer, size_t *lenp, loff_t *ppos)
94644 {
94645- struct ctl_table uts_table;
94646+ ctl_table_no_const uts_table;
94647 int r;
94648 memcpy(&uts_table, table, sizeof(uts_table));
94649 uts_table.data = get_uts(table, write);
94650diff --git a/kernel/watchdog.c b/kernel/watchdog.c
94651index 70bf118..4be3c37 100644
94652--- a/kernel/watchdog.c
94653+++ b/kernel/watchdog.c
94654@@ -572,7 +572,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
94655 static void watchdog_nmi_disable(unsigned int cpu) { return; }
94656 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
94657
94658-static struct smp_hotplug_thread watchdog_threads = {
94659+static struct smp_hotplug_thread watchdog_threads __read_only = {
94660 .store = &softlockup_watchdog,
94661 .thread_should_run = watchdog_should_run,
94662 .thread_fn = watchdog,
94663diff --git a/kernel/workqueue.c b/kernel/workqueue.c
94664index 82d0c8d..37f4222 100644
94665--- a/kernel/workqueue.c
94666+++ b/kernel/workqueue.c
94667@@ -4565,7 +4565,7 @@ static void rebind_workers(struct worker_pool *pool)
94668 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
94669 worker_flags |= WORKER_REBOUND;
94670 worker_flags &= ~WORKER_UNBOUND;
94671- ACCESS_ONCE(worker->flags) = worker_flags;
94672+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
94673 }
94674
94675 spin_unlock_irq(&pool->lock);
94676diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
94677index 5f2ce61..85a0b1b 100644
94678--- a/lib/Kconfig.debug
94679+++ b/lib/Kconfig.debug
94680@@ -910,7 +910,7 @@ config DEBUG_MUTEXES
94681
94682 config DEBUG_WW_MUTEX_SLOWPATH
94683 bool "Wait/wound mutex debugging: Slowpath testing"
94684- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94685+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94686 select DEBUG_LOCK_ALLOC
94687 select DEBUG_SPINLOCK
94688 select DEBUG_MUTEXES
94689@@ -927,7 +927,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
94690
94691 config DEBUG_LOCK_ALLOC
94692 bool "Lock debugging: detect incorrect freeing of live locks"
94693- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94694+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94695 select DEBUG_SPINLOCK
94696 select DEBUG_MUTEXES
94697 select LOCKDEP
94698@@ -941,7 +941,7 @@ config DEBUG_LOCK_ALLOC
94699
94700 config PROVE_LOCKING
94701 bool "Lock debugging: prove locking correctness"
94702- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94703+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94704 select LOCKDEP
94705 select DEBUG_SPINLOCK
94706 select DEBUG_MUTEXES
94707@@ -992,7 +992,7 @@ config LOCKDEP
94708
94709 config LOCK_STAT
94710 bool "Lock usage statistics"
94711- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94712+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94713 select LOCKDEP
94714 select DEBUG_SPINLOCK
94715 select DEBUG_MUTEXES
94716@@ -1453,6 +1453,7 @@ config LATENCYTOP
94717 depends on DEBUG_KERNEL
94718 depends on STACKTRACE_SUPPORT
94719 depends on PROC_FS
94720+ depends on !GRKERNSEC_HIDESYM
94721 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
94722 select KALLSYMS
94723 select KALLSYMS_ALL
94724@@ -1469,7 +1470,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
94725 config DEBUG_STRICT_USER_COPY_CHECKS
94726 bool "Strict user copy size checks"
94727 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
94728- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
94729+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
94730 help
94731 Enabling this option turns a certain set of sanity checks for user
94732 copy operations into compile time failures.
94733@@ -1597,7 +1598,7 @@ endmenu # runtime tests
94734
94735 config PROVIDE_OHCI1394_DMA_INIT
94736 bool "Remote debugging over FireWire early on boot"
94737- depends on PCI && X86
94738+ depends on PCI && X86 && !GRKERNSEC
94739 help
94740 If you want to debug problems which hang or crash the kernel early
94741 on boot and the crashing machine has a FireWire port, you can use
94742diff --git a/lib/Makefile b/lib/Makefile
94743index 3c3b30b..ca29102 100644
94744--- a/lib/Makefile
94745+++ b/lib/Makefile
94746@@ -55,7 +55,7 @@ obj-$(CONFIG_BTREE) += btree.o
94747 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
94748 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
94749 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
94750-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
94751+obj-y += list_debug.o
94752 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
94753
94754 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
94755diff --git a/lib/average.c b/lib/average.c
94756index 114d1be..ab0350c 100644
94757--- a/lib/average.c
94758+++ b/lib/average.c
94759@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
94760 {
94761 unsigned long internal = ACCESS_ONCE(avg->internal);
94762
94763- ACCESS_ONCE(avg->internal) = internal ?
94764+ ACCESS_ONCE_RW(avg->internal) = internal ?
94765 (((internal << avg->weight) - internal) +
94766 (val << avg->factor)) >> avg->weight :
94767 (val << avg->factor);
94768diff --git a/lib/bitmap.c b/lib/bitmap.c
94769index 324ea9e..46b1ae2 100644
94770--- a/lib/bitmap.c
94771+++ b/lib/bitmap.c
94772@@ -271,7 +271,7 @@ int __bitmap_subset(const unsigned long *bitmap1,
94773 }
94774 EXPORT_SYMBOL(__bitmap_subset);
94775
94776-int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
94777+int __intentional_overflow(-1) __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
94778 {
94779 unsigned int k, lim = bits/BITS_PER_LONG;
94780 int w = 0;
94781@@ -437,7 +437,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
94782 {
94783 int c, old_c, totaldigits, ndigits, nchunks, nbits;
94784 u32 chunk;
94785- const char __user __force *ubuf = (const char __user __force *)buf;
94786+ const char __user *ubuf = (const char __force_user *)buf;
94787
94788 bitmap_zero(maskp, nmaskbits);
94789
94790@@ -522,7 +522,7 @@ int bitmap_parse_user(const char __user *ubuf,
94791 {
94792 if (!access_ok(VERIFY_READ, ubuf, ulen))
94793 return -EFAULT;
94794- return __bitmap_parse((const char __force *)ubuf,
94795+ return __bitmap_parse((const char __force_kernel *)ubuf,
94796 ulen, 1, maskp, nmaskbits);
94797
94798 }
94799@@ -640,7 +640,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
94800 {
94801 unsigned a, b;
94802 int c, old_c, totaldigits;
94803- const char __user __force *ubuf = (const char __user __force *)buf;
94804+ const char __user *ubuf = (const char __force_user *)buf;
94805 int exp_digit, in_range;
94806
94807 totaldigits = c = 0;
94808@@ -735,7 +735,7 @@ int bitmap_parselist_user(const char __user *ubuf,
94809 {
94810 if (!access_ok(VERIFY_READ, ubuf, ulen))
94811 return -EFAULT;
94812- return __bitmap_parselist((const char __force *)ubuf,
94813+ return __bitmap_parselist((const char __force_kernel *)ubuf,
94814 ulen, 1, maskp, nmaskbits);
94815 }
94816 EXPORT_SYMBOL(bitmap_parselist_user);
94817diff --git a/lib/bug.c b/lib/bug.c
94818index 0c3bd95..5a615a1 100644
94819--- a/lib/bug.c
94820+++ b/lib/bug.c
94821@@ -145,6 +145,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
94822 return BUG_TRAP_TYPE_NONE;
94823
94824 bug = find_bug(bugaddr);
94825+ if (!bug)
94826+ return BUG_TRAP_TYPE_NONE;
94827
94828 file = NULL;
94829 line = 0;
94830diff --git a/lib/debugobjects.c b/lib/debugobjects.c
94831index 547f7f9..a6d4ba0 100644
94832--- a/lib/debugobjects.c
94833+++ b/lib/debugobjects.c
94834@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
94835 if (limit > 4)
94836 return;
94837
94838- is_on_stack = object_is_on_stack(addr);
94839+ is_on_stack = object_starts_on_stack(addr);
94840 if (is_on_stack == onstack)
94841 return;
94842
94843diff --git a/lib/div64.c b/lib/div64.c
94844index 4382ad7..08aa558 100644
94845--- a/lib/div64.c
94846+++ b/lib/div64.c
94847@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
94848 EXPORT_SYMBOL(__div64_32);
94849
94850 #ifndef div_s64_rem
94851-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
94852+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
94853 {
94854 u64 quotient;
94855
94856@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
94857 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
94858 */
94859 #ifndef div64_u64
94860-u64 div64_u64(u64 dividend, u64 divisor)
94861+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
94862 {
94863 u32 high = divisor >> 32;
94864 u64 quot;
94865diff --git a/lib/dma-debug.c b/lib/dma-debug.c
94866index 9722bd2..0d826f4 100644
94867--- a/lib/dma-debug.c
94868+++ b/lib/dma-debug.c
94869@@ -979,7 +979,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
94870
94871 void dma_debug_add_bus(struct bus_type *bus)
94872 {
94873- struct notifier_block *nb;
94874+ notifier_block_no_const *nb;
94875
94876 if (dma_debug_disabled())
94877 return;
94878@@ -1161,7 +1161,7 @@ static void check_unmap(struct dma_debug_entry *ref)
94879
94880 static void check_for_stack(struct device *dev, void *addr)
94881 {
94882- if (object_is_on_stack(addr))
94883+ if (object_starts_on_stack(addr))
94884 err_printk(dev, NULL, "DMA-API: device driver maps memory from "
94885 "stack [addr=%p]\n", addr);
94886 }
94887diff --git a/lib/inflate.c b/lib/inflate.c
94888index 013a761..c28f3fc 100644
94889--- a/lib/inflate.c
94890+++ b/lib/inflate.c
94891@@ -269,7 +269,7 @@ static void free(void *where)
94892 malloc_ptr = free_mem_ptr;
94893 }
94894 #else
94895-#define malloc(a) kmalloc(a, GFP_KERNEL)
94896+#define malloc(a) kmalloc((a), GFP_KERNEL)
94897 #define free(a) kfree(a)
94898 #endif
94899
94900diff --git a/lib/ioremap.c b/lib/ioremap.c
94901index 0c9216c..863bd89 100644
94902--- a/lib/ioremap.c
94903+++ b/lib/ioremap.c
94904@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
94905 unsigned long next;
94906
94907 phys_addr -= addr;
94908- pmd = pmd_alloc(&init_mm, pud, addr);
94909+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
94910 if (!pmd)
94911 return -ENOMEM;
94912 do {
94913@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
94914 unsigned long next;
94915
94916 phys_addr -= addr;
94917- pud = pud_alloc(&init_mm, pgd, addr);
94918+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
94919 if (!pud)
94920 return -ENOMEM;
94921 do {
94922diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
94923index bd2bea9..6b3c95e 100644
94924--- a/lib/is_single_threaded.c
94925+++ b/lib/is_single_threaded.c
94926@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
94927 struct task_struct *p, *t;
94928 bool ret;
94929
94930+ if (!mm)
94931+ return true;
94932+
94933 if (atomic_read(&task->signal->live) != 1)
94934 return false;
94935
94936diff --git a/lib/kobject.c b/lib/kobject.c
94937index 03d4ab3..46f6374 100644
94938--- a/lib/kobject.c
94939+++ b/lib/kobject.c
94940@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
94941
94942
94943 static DEFINE_SPINLOCK(kobj_ns_type_lock);
94944-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
94945+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
94946
94947-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
94948+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
94949 {
94950 enum kobj_ns_type type = ops->type;
94951 int error;
94952diff --git a/lib/list_debug.c b/lib/list_debug.c
94953index c24c2f7..f0296f4 100644
94954--- a/lib/list_debug.c
94955+++ b/lib/list_debug.c
94956@@ -11,7 +11,9 @@
94957 #include <linux/bug.h>
94958 #include <linux/kernel.h>
94959 #include <linux/rculist.h>
94960+#include <linux/mm.h>
94961
94962+#ifdef CONFIG_DEBUG_LIST
94963 /*
94964 * Insert a new entry between two known consecutive entries.
94965 *
94966@@ -19,21 +21,40 @@
94967 * the prev/next entries already!
94968 */
94969
94970+static bool __list_add_debug(struct list_head *new,
94971+ struct list_head *prev,
94972+ struct list_head *next)
94973+{
94974+ if (unlikely(next->prev != prev)) {
94975+ printk(KERN_ERR "list_add corruption. next->prev should be "
94976+ "prev (%p), but was %p. (next=%p).\n",
94977+ prev, next->prev, next);
94978+ BUG();
94979+ return false;
94980+ }
94981+ if (unlikely(prev->next != next)) {
94982+ printk(KERN_ERR "list_add corruption. prev->next should be "
94983+ "next (%p), but was %p. (prev=%p).\n",
94984+ next, prev->next, prev);
94985+ BUG();
94986+ return false;
94987+ }
94988+ if (unlikely(new == prev || new == next)) {
94989+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
94990+ new, prev, next);
94991+ BUG();
94992+ return false;
94993+ }
94994+ return true;
94995+}
94996+
94997 void __list_add(struct list_head *new,
94998- struct list_head *prev,
94999- struct list_head *next)
95000+ struct list_head *prev,
95001+ struct list_head *next)
95002 {
95003- WARN(next->prev != prev,
95004- "list_add corruption. next->prev should be "
95005- "prev (%p), but was %p. (next=%p).\n",
95006- prev, next->prev, next);
95007- WARN(prev->next != next,
95008- "list_add corruption. prev->next should be "
95009- "next (%p), but was %p. (prev=%p).\n",
95010- next, prev->next, prev);
95011- WARN(new == prev || new == next,
95012- "list_add double add: new=%p, prev=%p, next=%p.\n",
95013- new, prev, next);
95014+ if (!__list_add_debug(new, prev, next))
95015+ return;
95016+
95017 next->prev = new;
95018 new->next = next;
95019 new->prev = prev;
95020@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
95021 }
95022 EXPORT_SYMBOL(__list_add);
95023
95024-void __list_del_entry(struct list_head *entry)
95025+static bool __list_del_entry_debug(struct list_head *entry)
95026 {
95027 struct list_head *prev, *next;
95028
95029 prev = entry->prev;
95030 next = entry->next;
95031
95032- if (WARN(next == LIST_POISON1,
95033- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
95034- entry, LIST_POISON1) ||
95035- WARN(prev == LIST_POISON2,
95036- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
95037- entry, LIST_POISON2) ||
95038- WARN(prev->next != entry,
95039- "list_del corruption. prev->next should be %p, "
95040- "but was %p\n", entry, prev->next) ||
95041- WARN(next->prev != entry,
95042- "list_del corruption. next->prev should be %p, "
95043- "but was %p\n", entry, next->prev))
95044+ if (unlikely(next == LIST_POISON1)) {
95045+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
95046+ entry, LIST_POISON1);
95047+ BUG();
95048+ return false;
95049+ }
95050+ if (unlikely(prev == LIST_POISON2)) {
95051+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
95052+ entry, LIST_POISON2);
95053+ BUG();
95054+ return false;
95055+ }
95056+ if (unlikely(entry->prev->next != entry)) {
95057+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
95058+ "but was %p\n", entry, prev->next);
95059+ BUG();
95060+ return false;
95061+ }
95062+ if (unlikely(entry->next->prev != entry)) {
95063+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
95064+ "but was %p\n", entry, next->prev);
95065+ BUG();
95066+ return false;
95067+ }
95068+ return true;
95069+}
95070+
95071+void __list_del_entry(struct list_head *entry)
95072+{
95073+ if (!__list_del_entry_debug(entry))
95074 return;
95075
95076- __list_del(prev, next);
95077+ __list_del(entry->prev, entry->next);
95078 }
95079 EXPORT_SYMBOL(__list_del_entry);
95080
95081@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
95082 void __list_add_rcu(struct list_head *new,
95083 struct list_head *prev, struct list_head *next)
95084 {
95085- WARN(next->prev != prev,
95086- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
95087- prev, next->prev, next);
95088- WARN(prev->next != next,
95089- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
95090- next, prev->next, prev);
95091+ if (!__list_add_debug(new, prev, next))
95092+ return;
95093+
95094 new->next = next;
95095 new->prev = prev;
95096 rcu_assign_pointer(list_next_rcu(prev), new);
95097 next->prev = new;
95098 }
95099 EXPORT_SYMBOL(__list_add_rcu);
95100+#endif
95101+
95102+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
95103+{
95104+#ifdef CONFIG_DEBUG_LIST
95105+ if (!__list_add_debug(new, prev, next))
95106+ return;
95107+#endif
95108+
95109+ pax_open_kernel();
95110+ next->prev = new;
95111+ new->next = next;
95112+ new->prev = prev;
95113+ prev->next = new;
95114+ pax_close_kernel();
95115+}
95116+EXPORT_SYMBOL(__pax_list_add);
95117+
95118+void pax_list_del(struct list_head *entry)
95119+{
95120+#ifdef CONFIG_DEBUG_LIST
95121+ if (!__list_del_entry_debug(entry))
95122+ return;
95123+#endif
95124+
95125+ pax_open_kernel();
95126+ __list_del(entry->prev, entry->next);
95127+ entry->next = LIST_POISON1;
95128+ entry->prev = LIST_POISON2;
95129+ pax_close_kernel();
95130+}
95131+EXPORT_SYMBOL(pax_list_del);
95132+
95133+void pax_list_del_init(struct list_head *entry)
95134+{
95135+ pax_open_kernel();
95136+ __list_del(entry->prev, entry->next);
95137+ INIT_LIST_HEAD(entry);
95138+ pax_close_kernel();
95139+}
95140+EXPORT_SYMBOL(pax_list_del_init);
95141+
95142+void __pax_list_add_rcu(struct list_head *new,
95143+ struct list_head *prev, struct list_head *next)
95144+{
95145+#ifdef CONFIG_DEBUG_LIST
95146+ if (!__list_add_debug(new, prev, next))
95147+ return;
95148+#endif
95149+
95150+ pax_open_kernel();
95151+ new->next = next;
95152+ new->prev = prev;
95153+ rcu_assign_pointer(list_next_rcu(prev), new);
95154+ next->prev = new;
95155+ pax_close_kernel();
95156+}
95157+EXPORT_SYMBOL(__pax_list_add_rcu);
95158+
95159+void pax_list_del_rcu(struct list_head *entry)
95160+{
95161+#ifdef CONFIG_DEBUG_LIST
95162+ if (!__list_del_entry_debug(entry))
95163+ return;
95164+#endif
95165+
95166+ pax_open_kernel();
95167+ __list_del(entry->prev, entry->next);
95168+ entry->next = LIST_POISON1;
95169+ entry->prev = LIST_POISON2;
95170+ pax_close_kernel();
95171+}
95172+EXPORT_SYMBOL(pax_list_del_rcu);
95173diff --git a/lib/lockref.c b/lib/lockref.c
95174index d2233de..fa1a2f6 100644
95175--- a/lib/lockref.c
95176+++ b/lib/lockref.c
95177@@ -48,13 +48,13 @@
95178 void lockref_get(struct lockref *lockref)
95179 {
95180 CMPXCHG_LOOP(
95181- new.count++;
95182+ __lockref_inc(&new);
95183 ,
95184 return;
95185 );
95186
95187 spin_lock(&lockref->lock);
95188- lockref->count++;
95189+ __lockref_inc(lockref);
95190 spin_unlock(&lockref->lock);
95191 }
95192 EXPORT_SYMBOL(lockref_get);
95193@@ -69,7 +69,7 @@ int lockref_get_not_zero(struct lockref *lockref)
95194 int retval;
95195
95196 CMPXCHG_LOOP(
95197- new.count++;
95198+ __lockref_inc(&new);
95199 if (!old.count)
95200 return 0;
95201 ,
95202@@ -79,7 +79,7 @@ int lockref_get_not_zero(struct lockref *lockref)
95203 spin_lock(&lockref->lock);
95204 retval = 0;
95205 if (lockref->count) {
95206- lockref->count++;
95207+ __lockref_inc(lockref);
95208 retval = 1;
95209 }
95210 spin_unlock(&lockref->lock);
95211@@ -96,7 +96,7 @@ EXPORT_SYMBOL(lockref_get_not_zero);
95212 int lockref_get_or_lock(struct lockref *lockref)
95213 {
95214 CMPXCHG_LOOP(
95215- new.count++;
95216+ __lockref_inc(&new);
95217 if (!old.count)
95218 break;
95219 ,
95220@@ -106,7 +106,7 @@ int lockref_get_or_lock(struct lockref *lockref)
95221 spin_lock(&lockref->lock);
95222 if (!lockref->count)
95223 return 0;
95224- lockref->count++;
95225+ __lockref_inc(lockref);
95226 spin_unlock(&lockref->lock);
95227 return 1;
95228 }
95229@@ -120,7 +120,7 @@ EXPORT_SYMBOL(lockref_get_or_lock);
95230 int lockref_put_or_lock(struct lockref *lockref)
95231 {
95232 CMPXCHG_LOOP(
95233- new.count--;
95234+ __lockref_dec(&new);
95235 if (old.count <= 1)
95236 break;
95237 ,
95238@@ -130,7 +130,7 @@ int lockref_put_or_lock(struct lockref *lockref)
95239 spin_lock(&lockref->lock);
95240 if (lockref->count <= 1)
95241 return 0;
95242- lockref->count--;
95243+ __lockref_dec(lockref);
95244 spin_unlock(&lockref->lock);
95245 return 1;
95246 }
95247@@ -157,7 +157,7 @@ int lockref_get_not_dead(struct lockref *lockref)
95248 int retval;
95249
95250 CMPXCHG_LOOP(
95251- new.count++;
95252+ __lockref_inc(&new);
95253 if ((int)old.count < 0)
95254 return 0;
95255 ,
95256@@ -167,7 +167,7 @@ int lockref_get_not_dead(struct lockref *lockref)
95257 spin_lock(&lockref->lock);
95258 retval = 0;
95259 if ((int) lockref->count >= 0) {
95260- lockref->count++;
95261+ __lockref_inc(lockref);
95262 retval = 1;
95263 }
95264 spin_unlock(&lockref->lock);
95265diff --git a/lib/nlattr.c b/lib/nlattr.c
95266index 9c3e85f..0affd1b 100644
95267--- a/lib/nlattr.c
95268+++ b/lib/nlattr.c
95269@@ -279,7 +279,11 @@ int nla_memcpy(void *dest, const struct nlattr *src, int count)
95270 {
95271 int minlen = min_t(int, count, nla_len(src));
95272
95273+ BUG_ON(minlen < 0);
95274+
95275 memcpy(dest, nla_data(src), minlen);
95276+ if (count > minlen)
95277+ memset(dest + minlen, 0, count - minlen);
95278
95279 return minlen;
95280 }
95281diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
95282index 6111bcb..02e816b 100644
95283--- a/lib/percpu-refcount.c
95284+++ b/lib/percpu-refcount.c
95285@@ -31,7 +31,7 @@
95286 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
95287 */
95288
95289-#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
95290+#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 2))
95291
95292 static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
95293
95294diff --git a/lib/radix-tree.c b/lib/radix-tree.c
95295index 3291a8e..346a91e 100644
95296--- a/lib/radix-tree.c
95297+++ b/lib/radix-tree.c
95298@@ -67,7 +67,7 @@ struct radix_tree_preload {
95299 int nr;
95300 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
95301 };
95302-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
95303+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
95304
95305 static inline void *ptr_to_indirect(void *ptr)
95306 {
95307diff --git a/lib/random32.c b/lib/random32.c
95308index 0bee183..526f12f 100644
95309--- a/lib/random32.c
95310+++ b/lib/random32.c
95311@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
95312 }
95313 #endif
95314
95315-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
95316+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
95317
95318 /**
95319 * prandom_u32_state - seeded pseudo-random number generator.
95320diff --git a/lib/rbtree.c b/lib/rbtree.c
95321index c16c81a..4dcbda1 100644
95322--- a/lib/rbtree.c
95323+++ b/lib/rbtree.c
95324@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
95325 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
95326
95327 static const struct rb_augment_callbacks dummy_callbacks = {
95328- dummy_propagate, dummy_copy, dummy_rotate
95329+ .propagate = dummy_propagate,
95330+ .copy = dummy_copy,
95331+ .rotate = dummy_rotate
95332 };
95333
95334 void rb_insert_color(struct rb_node *node, struct rb_root *root)
95335diff --git a/lib/show_mem.c b/lib/show_mem.c
95336index 7de89f4..00d70b7 100644
95337--- a/lib/show_mem.c
95338+++ b/lib/show_mem.c
95339@@ -50,6 +50,6 @@ void show_mem(unsigned int filter)
95340 quicklist_total_size());
95341 #endif
95342 #ifdef CONFIG_MEMORY_FAILURE
95343- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
95344+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
95345 #endif
95346 }
95347diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
95348index bb2b201..46abaf9 100644
95349--- a/lib/strncpy_from_user.c
95350+++ b/lib/strncpy_from_user.c
95351@@ -21,7 +21,7 @@
95352 */
95353 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
95354 {
95355- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95356+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95357 long res = 0;
95358
95359 /*
95360diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
95361index a28df52..3d55877 100644
95362--- a/lib/strnlen_user.c
95363+++ b/lib/strnlen_user.c
95364@@ -26,7 +26,7 @@
95365 */
95366 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
95367 {
95368- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95369+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95370 long align, res = 0;
95371 unsigned long c;
95372
95373diff --git a/lib/swiotlb.c b/lib/swiotlb.c
95374index 4abda07..b9d3765 100644
95375--- a/lib/swiotlb.c
95376+++ b/lib/swiotlb.c
95377@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
95378
95379 void
95380 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
95381- dma_addr_t dev_addr)
95382+ dma_addr_t dev_addr, struct dma_attrs *attrs)
95383 {
95384 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
95385
95386diff --git a/lib/usercopy.c b/lib/usercopy.c
95387index 4f5b1dd..7cab418 100644
95388--- a/lib/usercopy.c
95389+++ b/lib/usercopy.c
95390@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
95391 WARN(1, "Buffer overflow detected!\n");
95392 }
95393 EXPORT_SYMBOL(copy_from_user_overflow);
95394+
95395+void copy_to_user_overflow(void)
95396+{
95397+ WARN(1, "Buffer overflow detected!\n");
95398+}
95399+EXPORT_SYMBOL(copy_to_user_overflow);
95400diff --git a/lib/vsprintf.c b/lib/vsprintf.c
95401index ec337f6..8484eb2 100644
95402--- a/lib/vsprintf.c
95403+++ b/lib/vsprintf.c
95404@@ -16,6 +16,9 @@
95405 * - scnprintf and vscnprintf
95406 */
95407
95408+#ifdef CONFIG_GRKERNSEC_HIDESYM
95409+#define __INCLUDED_BY_HIDESYM 1
95410+#endif
95411 #include <stdarg.h>
95412 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
95413 #include <linux/types.h>
95414@@ -625,7 +628,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
95415 #ifdef CONFIG_KALLSYMS
95416 if (*fmt == 'B')
95417 sprint_backtrace(sym, value);
95418- else if (*fmt != 'f' && *fmt != 's')
95419+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
95420 sprint_symbol(sym, value);
95421 else
95422 sprint_symbol_no_offset(sym, value);
95423@@ -1240,7 +1243,11 @@ char *address_val(char *buf, char *end, const void *addr,
95424 return number(buf, end, num, spec);
95425 }
95426
95427+#ifdef CONFIG_GRKERNSEC_HIDESYM
95428+int kptr_restrict __read_mostly = 2;
95429+#else
95430 int kptr_restrict __read_mostly;
95431+#endif
95432
95433 /*
95434 * Show a '%p' thing. A kernel extension is that the '%p' is followed
95435@@ -1251,8 +1258,10 @@ int kptr_restrict __read_mostly;
95436 *
95437 * - 'F' For symbolic function descriptor pointers with offset
95438 * - 'f' For simple symbolic function names without offset
95439+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
95440 * - 'S' For symbolic direct pointers with offset
95441 * - 's' For symbolic direct pointers without offset
95442+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
95443 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
95444 * - 'B' For backtraced symbolic direct pointers with offset
95445 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
95446@@ -1331,12 +1340,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95447
95448 if (!ptr && *fmt != 'K') {
95449 /*
95450- * Print (null) with the same width as a pointer so it makes
95451+ * Print (nil) with the same width as a pointer so it makes
95452 * tabular output look nice.
95453 */
95454 if (spec.field_width == -1)
95455 spec.field_width = default_width;
95456- return string(buf, end, "(null)", spec);
95457+ return string(buf, end, "(nil)", spec);
95458 }
95459
95460 switch (*fmt) {
95461@@ -1346,6 +1355,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95462 /* Fallthrough */
95463 case 'S':
95464 case 's':
95465+#ifdef CONFIG_GRKERNSEC_HIDESYM
95466+ break;
95467+#else
95468+ return symbol_string(buf, end, ptr, spec, fmt);
95469+#endif
95470+ case 'X':
95471+ ptr = dereference_function_descriptor(ptr);
95472+ case 'A':
95473 case 'B':
95474 return symbol_string(buf, end, ptr, spec, fmt);
95475 case 'R':
95476@@ -1403,6 +1420,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95477 va_end(va);
95478 return buf;
95479 }
95480+ case 'P':
95481+ break;
95482 case 'K':
95483 /*
95484 * %pK cannot be used in IRQ context because its test
95485@@ -1460,6 +1479,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95486 ((const struct file *)ptr)->f_path.dentry,
95487 spec, fmt);
95488 }
95489+
95490+#ifdef CONFIG_GRKERNSEC_HIDESYM
95491+ /* 'P' = approved pointers to copy to userland,
95492+ as in the /proc/kallsyms case, as we make it display nothing
95493+ for non-root users, and the real contents for root users
95494+ 'X' = approved simple symbols
95495+ Also ignore 'K' pointers, since we force their NULLing for non-root users
95496+ above
95497+ */
95498+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
95499+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
95500+ dump_stack();
95501+ ptr = NULL;
95502+ }
95503+#endif
95504+
95505 spec.flags |= SMALL;
95506 if (spec.field_width == -1) {
95507 spec.field_width = default_width;
95508@@ -2160,11 +2195,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95509 typeof(type) value; \
95510 if (sizeof(type) == 8) { \
95511 args = PTR_ALIGN(args, sizeof(u32)); \
95512- *(u32 *)&value = *(u32 *)args; \
95513- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
95514+ *(u32 *)&value = *(const u32 *)args; \
95515+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
95516 } else { \
95517 args = PTR_ALIGN(args, sizeof(type)); \
95518- value = *(typeof(type) *)args; \
95519+ value = *(const typeof(type) *)args; \
95520 } \
95521 args += sizeof(type); \
95522 value; \
95523@@ -2227,7 +2262,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95524 case FORMAT_TYPE_STR: {
95525 const char *str_arg = args;
95526 args += strlen(str_arg) + 1;
95527- str = string(str, end, (char *)str_arg, spec);
95528+ str = string(str, end, str_arg, spec);
95529 break;
95530 }
95531
95532diff --git a/localversion-grsec b/localversion-grsec
95533new file mode 100644
95534index 0000000..7cd6065
95535--- /dev/null
95536+++ b/localversion-grsec
95537@@ -0,0 +1 @@
95538+-grsec
95539diff --git a/mm/Kconfig b/mm/Kconfig
95540index 1d1ae6b..0f05885 100644
95541--- a/mm/Kconfig
95542+++ b/mm/Kconfig
95543@@ -341,10 +341,11 @@ config KSM
95544 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
95545
95546 config DEFAULT_MMAP_MIN_ADDR
95547- int "Low address space to protect from user allocation"
95548+ int "Low address space to protect from user allocation"
95549 depends on MMU
95550- default 4096
95551- help
95552+ default 32768 if ALPHA || ARM || PARISC || SPARC32
95553+ default 65536
95554+ help
95555 This is the portion of low virtual memory which should be protected
95556 from userspace allocation. Keeping a user from writing to low pages
95557 can help reduce the impact of kernel NULL pointer bugs.
95558@@ -375,7 +376,7 @@ config MEMORY_FAILURE
95559
95560 config HWPOISON_INJECT
95561 tristate "HWPoison pages injector"
95562- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
95563+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
95564 select PROC_PAGE_MONITOR
95565
95566 config NOMMU_INITIAL_TRIM_EXCESS
95567diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
95568index 957d3da..1d34e20 100644
95569--- a/mm/Kconfig.debug
95570+++ b/mm/Kconfig.debug
95571@@ -10,6 +10,7 @@ config PAGE_EXTENSION
95572 config DEBUG_PAGEALLOC
95573 bool "Debug page memory allocations"
95574 depends on DEBUG_KERNEL
95575+ depends on !PAX_MEMORY_SANITIZE
95576 depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
95577 depends on !KMEMCHECK
95578 select PAGE_EXTENSION
95579diff --git a/mm/backing-dev.c b/mm/backing-dev.c
95580index 0ae0df5..82ac56b 100644
95581--- a/mm/backing-dev.c
95582+++ b/mm/backing-dev.c
95583@@ -12,7 +12,7 @@
95584 #include <linux/device.h>
95585 #include <trace/events/writeback.h>
95586
95587-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
95588+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
95589
95590 struct backing_dev_info default_backing_dev_info = {
95591 .name = "default",
95592@@ -525,7 +525,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
95593 return err;
95594
95595 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
95596- atomic_long_inc_return(&bdi_seq));
95597+ atomic_long_inc_return_unchecked(&bdi_seq));
95598 if (err) {
95599 bdi_destroy(bdi);
95600 return err;
95601diff --git a/mm/filemap.c b/mm/filemap.c
95602index 673e458..7192013 100644
95603--- a/mm/filemap.c
95604+++ b/mm/filemap.c
95605@@ -2097,7 +2097,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
95606 struct address_space *mapping = file->f_mapping;
95607
95608 if (!mapping->a_ops->readpage)
95609- return -ENOEXEC;
95610+ return -ENODEV;
95611 file_accessed(file);
95612 vma->vm_ops = &generic_file_vm_ops;
95613 return 0;
95614@@ -2275,6 +2275,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
95615 *pos = i_size_read(inode);
95616
95617 if (limit != RLIM_INFINITY) {
95618+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
95619 if (*pos >= limit) {
95620 send_sig(SIGXFSZ, current, 0);
95621 return -EFBIG;
95622diff --git a/mm/fremap.c b/mm/fremap.c
95623index 2805d71..8b56e7d 100644
95624--- a/mm/fremap.c
95625+++ b/mm/fremap.c
95626@@ -180,6 +180,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
95627 retry:
95628 vma = find_vma(mm, start);
95629
95630+#ifdef CONFIG_PAX_SEGMEXEC
95631+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
95632+ goto out;
95633+#endif
95634+
95635 /*
95636 * Make sure the vma is shared, that it supports prefaulting,
95637 * and that the remapped range is valid and fully within
95638diff --git a/mm/gup.c b/mm/gup.c
95639index 9b2afbf..647297c 100644
95640--- a/mm/gup.c
95641+++ b/mm/gup.c
95642@@ -274,11 +274,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
95643 unsigned int fault_flags = 0;
95644 int ret;
95645
95646- /* For mlock, just skip the stack guard page. */
95647- if ((*flags & FOLL_MLOCK) &&
95648- (stack_guard_page_start(vma, address) ||
95649- stack_guard_page_end(vma, address + PAGE_SIZE)))
95650- return -ENOENT;
95651 if (*flags & FOLL_WRITE)
95652 fault_flags |= FAULT_FLAG_WRITE;
95653 if (nonblocking)
95654@@ -444,14 +439,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95655 if (!(gup_flags & FOLL_FORCE))
95656 gup_flags |= FOLL_NUMA;
95657
95658- do {
95659+ while (nr_pages) {
95660 struct page *page;
95661 unsigned int foll_flags = gup_flags;
95662 unsigned int page_increm;
95663
95664 /* first iteration or cross vma bound */
95665 if (!vma || start >= vma->vm_end) {
95666- vma = find_extend_vma(mm, start);
95667+ vma = find_vma(mm, start);
95668 if (!vma && in_gate_area(mm, start)) {
95669 int ret;
95670 ret = get_gate_page(mm, start & PAGE_MASK,
95671@@ -463,7 +458,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95672 goto next_page;
95673 }
95674
95675- if (!vma || check_vma_flags(vma, gup_flags))
95676+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
95677 return i ? : -EFAULT;
95678 if (is_vm_hugetlb_page(vma)) {
95679 i = follow_hugetlb_page(mm, vma, pages, vmas,
95680@@ -518,7 +513,7 @@ next_page:
95681 i += page_increm;
95682 start += page_increm * PAGE_SIZE;
95683 nr_pages -= page_increm;
95684- } while (nr_pages);
95685+ }
95686 return i;
95687 }
95688 EXPORT_SYMBOL(__get_user_pages);
95689diff --git a/mm/highmem.c b/mm/highmem.c
95690index 123bcd3..0de52ba 100644
95691--- a/mm/highmem.c
95692+++ b/mm/highmem.c
95693@@ -195,8 +195,9 @@ static void flush_all_zero_pkmaps(void)
95694 * So no dangers, even with speculative execution.
95695 */
95696 page = pte_page(pkmap_page_table[i]);
95697+ pax_open_kernel();
95698 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
95699-
95700+ pax_close_kernel();
95701 set_page_address(page, NULL);
95702 need_flush = 1;
95703 }
95704@@ -259,9 +260,11 @@ start:
95705 }
95706 }
95707 vaddr = PKMAP_ADDR(last_pkmap_nr);
95708+
95709+ pax_open_kernel();
95710 set_pte_at(&init_mm, vaddr,
95711 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
95712-
95713+ pax_close_kernel();
95714 pkmap_count[last_pkmap_nr] = 1;
95715 set_page_address(page, (void *)vaddr);
95716
95717diff --git a/mm/hugetlb.c b/mm/hugetlb.c
95718index 267e419..394bed9 100644
95719--- a/mm/hugetlb.c
95720+++ b/mm/hugetlb.c
95721@@ -2258,6 +2258,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
95722 struct ctl_table *table, int write,
95723 void __user *buffer, size_t *length, loff_t *ppos)
95724 {
95725+ ctl_table_no_const t;
95726 struct hstate *h = &default_hstate;
95727 unsigned long tmp = h->max_huge_pages;
95728 int ret;
95729@@ -2265,9 +2266,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
95730 if (!hugepages_supported())
95731 return -ENOTSUPP;
95732
95733- table->data = &tmp;
95734- table->maxlen = sizeof(unsigned long);
95735- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
95736+ t = *table;
95737+ t.data = &tmp;
95738+ t.maxlen = sizeof(unsigned long);
95739+ ret = proc_doulongvec_minmax(&t, write, buffer, length, ppos);
95740 if (ret)
95741 goto out;
95742
95743@@ -2302,6 +2304,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
95744 struct hstate *h = &default_hstate;
95745 unsigned long tmp;
95746 int ret;
95747+ ctl_table_no_const hugetlb_table;
95748
95749 if (!hugepages_supported())
95750 return -ENOTSUPP;
95751@@ -2311,9 +2314,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
95752 if (write && hstate_is_gigantic(h))
95753 return -EINVAL;
95754
95755- table->data = &tmp;
95756- table->maxlen = sizeof(unsigned long);
95757- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
95758+ hugetlb_table = *table;
95759+ hugetlb_table.data = &tmp;
95760+ hugetlb_table.maxlen = sizeof(unsigned long);
95761+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
95762 if (ret)
95763 goto out;
95764
95765@@ -2798,6 +2802,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
95766 i_mmap_unlock_write(mapping);
95767 }
95768
95769+#ifdef CONFIG_PAX_SEGMEXEC
95770+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
95771+{
95772+ struct mm_struct *mm = vma->vm_mm;
95773+ struct vm_area_struct *vma_m;
95774+ unsigned long address_m;
95775+ pte_t *ptep_m;
95776+
95777+ vma_m = pax_find_mirror_vma(vma);
95778+ if (!vma_m)
95779+ return;
95780+
95781+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
95782+ address_m = address + SEGMEXEC_TASK_SIZE;
95783+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
95784+ get_page(page_m);
95785+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
95786+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
95787+}
95788+#endif
95789+
95790 /*
95791 * Hugetlb_cow() should be called with page lock of the original hugepage held.
95792 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
95793@@ -2910,6 +2935,11 @@ retry_avoidcopy:
95794 make_huge_pte(vma, new_page, 1));
95795 page_remove_rmap(old_page);
95796 hugepage_add_new_anon_rmap(new_page, vma, address);
95797+
95798+#ifdef CONFIG_PAX_SEGMEXEC
95799+ pax_mirror_huge_pte(vma, address, new_page);
95800+#endif
95801+
95802 /* Make the old page be freed below */
95803 new_page = old_page;
95804 }
95805@@ -3070,6 +3100,10 @@ retry:
95806 && (vma->vm_flags & VM_SHARED)));
95807 set_huge_pte_at(mm, address, ptep, new_pte);
95808
95809+#ifdef CONFIG_PAX_SEGMEXEC
95810+ pax_mirror_huge_pte(vma, address, page);
95811+#endif
95812+
95813 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
95814 /* Optimization, do the COW without a second fault */
95815 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
95816@@ -3137,6 +3171,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95817 struct address_space *mapping;
95818 int need_wait_lock = 0;
95819
95820+#ifdef CONFIG_PAX_SEGMEXEC
95821+ struct vm_area_struct *vma_m;
95822+#endif
95823+
95824 address &= huge_page_mask(h);
95825
95826 ptep = huge_pte_offset(mm, address);
95827@@ -3150,6 +3188,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95828 VM_FAULT_SET_HINDEX(hstate_index(h));
95829 }
95830
95831+#ifdef CONFIG_PAX_SEGMEXEC
95832+ vma_m = pax_find_mirror_vma(vma);
95833+ if (vma_m) {
95834+ unsigned long address_m;
95835+
95836+ if (vma->vm_start > vma_m->vm_start) {
95837+ address_m = address;
95838+ address -= SEGMEXEC_TASK_SIZE;
95839+ vma = vma_m;
95840+ h = hstate_vma(vma);
95841+ } else
95842+ address_m = address + SEGMEXEC_TASK_SIZE;
95843+
95844+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
95845+ return VM_FAULT_OOM;
95846+ address_m &= HPAGE_MASK;
95847+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
95848+ }
95849+#endif
95850+
95851 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
95852 if (!ptep)
95853 return VM_FAULT_OOM;
95854diff --git a/mm/internal.h b/mm/internal.h
95855index efad241..57ae4ca 100644
95856--- a/mm/internal.h
95857+++ b/mm/internal.h
95858@@ -134,6 +134,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
95859
95860 extern int __isolate_free_page(struct page *page, unsigned int order);
95861 extern void __free_pages_bootmem(struct page *page, unsigned int order);
95862+extern void free_compound_page(struct page *page);
95863 extern void prep_compound_page(struct page *page, unsigned long order);
95864 #ifdef CONFIG_MEMORY_FAILURE
95865 extern bool is_free_buddy_page(struct page *page);
95866@@ -387,7 +388,7 @@ extern u32 hwpoison_filter_enable;
95867
95868 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
95869 unsigned long, unsigned long,
95870- unsigned long, unsigned long);
95871+ unsigned long, unsigned long) __intentional_overflow(-1);
95872
95873 extern void set_pageblock_order(void);
95874 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
95875diff --git a/mm/kmemleak.c b/mm/kmemleak.c
95876index 3cda50c..032ba634 100644
95877--- a/mm/kmemleak.c
95878+++ b/mm/kmemleak.c
95879@@ -364,7 +364,7 @@ static void print_unreferenced(struct seq_file *seq,
95880
95881 for (i = 0; i < object->trace_len; i++) {
95882 void *ptr = (void *)object->trace[i];
95883- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
95884+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
95885 }
95886 }
95887
95888@@ -1905,7 +1905,7 @@ static int __init kmemleak_late_init(void)
95889 return -ENOMEM;
95890 }
95891
95892- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
95893+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
95894 &kmemleak_fops);
95895 if (!dentry)
95896 pr_warning("Failed to create the debugfs kmemleak file\n");
95897diff --git a/mm/maccess.c b/mm/maccess.c
95898index d53adf9..03a24bf 100644
95899--- a/mm/maccess.c
95900+++ b/mm/maccess.c
95901@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
95902 set_fs(KERNEL_DS);
95903 pagefault_disable();
95904 ret = __copy_from_user_inatomic(dst,
95905- (__force const void __user *)src, size);
95906+ (const void __force_user *)src, size);
95907 pagefault_enable();
95908 set_fs(old_fs);
95909
95910@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
95911
95912 set_fs(KERNEL_DS);
95913 pagefault_disable();
95914- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
95915+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
95916 pagefault_enable();
95917 set_fs(old_fs);
95918
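__probe_kernel_read()/__probe_kernel_write() cast a kernel pointer into the user address space for the duration of a set_fs(KERNEL_DS) window; the hunks above only respell upstream's (__force ... __user) cast through a single __force_user token so the stricter sparse checking in this tree sees one canonical form. A sketch of the annotation plumbing, assuming the usual sparse idiom — the macro bodies below are inferred, not quoted from the patch:

/* Under sparse (__CHECKER__), __user/__force mark pointer address spaces and
 * deliberate cross-space casts; in a normal compile they expand to nothing. */
#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* assumed grsecurity helper: one token for "forced cast into user space" */
#define __force_user __force __user

/* call-site shape from the hunk above:
 *   ret = __copy_from_user_inatomic(dst, (const void __force_user *)src, size);
 */
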
95919diff --git a/mm/madvise.c b/mm/madvise.c
95920index a271adc..5e1a2b4 100644
95921--- a/mm/madvise.c
95922+++ b/mm/madvise.c
95923@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
95924 pgoff_t pgoff;
95925 unsigned long new_flags = vma->vm_flags;
95926
95927+#ifdef CONFIG_PAX_SEGMEXEC
95928+ struct vm_area_struct *vma_m;
95929+#endif
95930+
95931 switch (behavior) {
95932 case MADV_NORMAL:
95933 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
95934@@ -126,6 +130,13 @@ success:
95935 /*
95936 * vm_flags is protected by the mmap_sem held in write mode.
95937 */
95938+
95939+#ifdef CONFIG_PAX_SEGMEXEC
95940+ vma_m = pax_find_mirror_vma(vma);
95941+ if (vma_m)
95942+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
95943+#endif
95944+
95945 vma->vm_flags = new_flags;
95946
95947 out:
95948@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
95949 struct vm_area_struct **prev,
95950 unsigned long start, unsigned long end)
95951 {
95952+
95953+#ifdef CONFIG_PAX_SEGMEXEC
95954+ struct vm_area_struct *vma_m;
95955+#endif
95956+
95957 *prev = vma;
95958 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
95959 return -EINVAL;
95960@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
95961 zap_page_range(vma, start, end - start, &details);
95962 } else
95963 zap_page_range(vma, start, end - start, NULL);
95964+
95965+#ifdef CONFIG_PAX_SEGMEXEC
95966+ vma_m = pax_find_mirror_vma(vma);
95967+ if (vma_m) {
95968+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
95969+ struct zap_details details = {
95970+ .nonlinear_vma = vma_m,
95971+ .last_index = ULONG_MAX,
95972+ };
95973+ zap_page_range(vma_m, start + SEGMEXEC_TASK_SIZE, end - start, &details);
95974+ } else
95975+ zap_page_range(vma_m, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
95976+ }
95977+#endif
95978+
95979 return 0;
95980 }
95981
95982@@ -488,6 +519,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
95983 if (end < start)
95984 return error;
95985
95986+#ifdef CONFIG_PAX_SEGMEXEC
95987+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
95988+ if (end > SEGMEXEC_TASK_SIZE)
95989+ return error;
95990+ } else
95991+#endif
95992+
95993+ if (end > TASK_SIZE)
95994+ return error;
95995+
95996 error = 0;
95997 if (end == start)
95998 return error;
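The madvise changes keep the SEGMEXEC mirror coherent — madvise_behavior() copies the new flags minus the write bits to the mirror vma, and madvise_dontneed() zaps the mirrored range at start + SEGMEXEC_TASK_SIZE — and sys_madvise() now rejects ranges past the effective top of the address space. A sketch of that bounds check as plain logic; both constants are assumed i386 values:

#include <stdbool.h>

#define TASK_SIZE          0xC0000000UL      /* assumed i386 3 GB/1 GB split */
#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)   /* 1.5 GB */

/* Mirrors the new sys_madvise() bound: under SEGMEXEC userland may only
 * address the lower half; otherwise the full task size applies. */
static bool madvise_range_ok(unsigned long end, bool segmexec)
{
    return end <= (segmexec ? SEGMEXEC_TASK_SIZE : TASK_SIZE);
}
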
95999diff --git a/mm/memory-failure.c b/mm/memory-failure.c
96000index 20c29dd..22bd8e2 100644
96001--- a/mm/memory-failure.c
96002+++ b/mm/memory-failure.c
96003@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
96004
96005 int sysctl_memory_failure_recovery __read_mostly = 1;
96006
96007-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
96008+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
96009
96010 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
96011
96012@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
96013 pfn, t->comm, t->pid);
96014 si.si_signo = SIGBUS;
96015 si.si_errno = 0;
96016- si.si_addr = (void *)addr;
96017+ si.si_addr = (void __user *)addr;
96018 #ifdef __ARCH_SI_TRAPNO
96019 si.si_trapno = trapno;
96020 #endif
96021@@ -786,7 +786,7 @@ static struct page_state {
96022 unsigned long res;
96023 char *msg;
96024 int (*action)(struct page *p, unsigned long pfn);
96025-} error_states[] = {
96026+} __do_const error_states[] = {
96027 { reserved, reserved, "reserved kernel", me_kernel },
96028 /*
96029 * free pages are specially detected outside this table:
96030@@ -1094,7 +1094,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
96031 nr_pages = 1 << compound_order(hpage);
96032 else /* normal page or thp */
96033 nr_pages = 1;
96034- atomic_long_add(nr_pages, &num_poisoned_pages);
96035+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
96036
96037 /*
96038 * We need/can do nothing about count=0 pages.
96039@@ -1123,7 +1123,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
96040 if (PageHWPoison(hpage)) {
96041 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
96042 || (p != hpage && TestSetPageHWPoison(hpage))) {
96043- atomic_long_sub(nr_pages, &num_poisoned_pages);
96044+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96045 unlock_page(hpage);
96046 return 0;
96047 }
96048@@ -1191,14 +1191,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
96049 */
96050 if (!PageHWPoison(p)) {
96051 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
96052- atomic_long_sub(nr_pages, &num_poisoned_pages);
96053+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96054 put_page(hpage);
96055 res = 0;
96056 goto out;
96057 }
96058 if (hwpoison_filter(p)) {
96059 if (TestClearPageHWPoison(p))
96060- atomic_long_sub(nr_pages, &num_poisoned_pages);
96061+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96062 unlock_page(hpage);
96063 put_page(hpage);
96064 return 0;
96065@@ -1428,7 +1428,7 @@ int unpoison_memory(unsigned long pfn)
96066 return 0;
96067 }
96068 if (TestClearPageHWPoison(p))
96069- atomic_long_dec(&num_poisoned_pages);
96070+ atomic_long_dec_unchecked(&num_poisoned_pages);
96071 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
96072 return 0;
96073 }
96074@@ -1442,7 +1442,7 @@ int unpoison_memory(unsigned long pfn)
96075 */
96076 if (TestClearPageHWPoison(page)) {
96077 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
96078- atomic_long_sub(nr_pages, &num_poisoned_pages);
96079+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
96080 freeit = 1;
96081 if (PageHuge(page))
96082 clear_page_hwpoison_huge_page(page);
96083@@ -1567,11 +1567,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
96084 if (PageHuge(page)) {
96085 set_page_hwpoison_huge_page(hpage);
96086 dequeue_hwpoisoned_huge_page(hpage);
96087- atomic_long_add(1 << compound_order(hpage),
96088+ atomic_long_add_unchecked(1 << compound_order(hpage),
96089 &num_poisoned_pages);
96090 } else {
96091 SetPageHWPoison(page);
96092- atomic_long_inc(&num_poisoned_pages);
96093+ atomic_long_inc_unchecked(&num_poisoned_pages);
96094 }
96095 }
96096 return ret;
96097@@ -1610,7 +1610,7 @@ static int __soft_offline_page(struct page *page, int flags)
96098 put_page(page);
96099 pr_info("soft_offline: %#lx: invalidated\n", pfn);
96100 SetPageHWPoison(page);
96101- atomic_long_inc(&num_poisoned_pages);
96102+ atomic_long_inc_unchecked(&num_poisoned_pages);
96103 return 0;
96104 }
96105
96106@@ -1659,7 +1659,7 @@ static int __soft_offline_page(struct page *page, int flags)
96107 if (!is_free_buddy_page(page))
96108 pr_info("soft offline: %#lx: page leaked\n",
96109 pfn);
96110- atomic_long_inc(&num_poisoned_pages);
96111+ atomic_long_inc_unchecked(&num_poisoned_pages);
96112 }
96113 } else {
96114 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
96115@@ -1729,11 +1729,11 @@ int soft_offline_page(struct page *page, int flags)
96116 if (PageHuge(page)) {
96117 set_page_hwpoison_huge_page(hpage);
96118 dequeue_hwpoisoned_huge_page(hpage);
96119- atomic_long_add(1 << compound_order(hpage),
96120+ atomic_long_add_unchecked(1 << compound_order(hpage),
96121 &num_poisoned_pages);
96122 } else {
96123 SetPageHWPoison(page);
96124- atomic_long_inc(&num_poisoned_pages);
96125+ atomic_long_inc_unchecked(&num_poisoned_pages);
96126 }
96127 }
96128 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
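Throughout mm/memory-failure.c, num_poisoned_pages becomes atomic_long_unchecked_t and every update switches to a *_unchecked operation. Under PAX_REFCOUNT, plain atomics trap on overflow to stop reference-count wraps; pure statistics counters like this one are opted out, since wrapping a statistic is harmless while a trap would be a false positive. A rough user-space model of the split — the real protected variant is arch-specific asm, so this is illustrative only:

/* Model only: the PAX_REFCOUNT-protected atomic_long_t uses arch asm that
 * raises a trap when an operation overflows; *_unchecked skips that check. */
typedef struct { volatile long counter; } atomic_long_unchecked_t;

static void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *v)
{
    /* relaxed add with no overflow trap -- fine for a statistics counter */
    __atomic_fetch_add(&v->counter, i, __ATOMIC_RELAXED);
}

static long atomic_long_read_unchecked(const atomic_long_unchecked_t *v)
{
    return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}
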
96129diff --git a/mm/memory.c b/mm/memory.c
96130index 6aa7822..3c76005 100644
96131--- a/mm/memory.c
96132+++ b/mm/memory.c
96133@@ -414,6 +414,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
96134 free_pte_range(tlb, pmd, addr);
96135 } while (pmd++, addr = next, addr != end);
96136
96137+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
96138 start &= PUD_MASK;
96139 if (start < floor)
96140 return;
96141@@ -428,6 +429,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
96142 pmd = pmd_offset(pud, start);
96143 pud_clear(pud);
96144 pmd_free_tlb(tlb, pmd, start);
96145+#endif
96146+
96147 }
96148
96149 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96150@@ -447,6 +450,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96151 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
96152 } while (pud++, addr = next, addr != end);
96153
96154+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
96155 start &= PGDIR_MASK;
96156 if (start < floor)
96157 return;
96158@@ -461,6 +465,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96159 pud = pud_offset(pgd, start);
96160 pgd_clear(pgd);
96161 pud_free_tlb(tlb, pud, start);
96162+#endif
96163+
96164 }
96165
96166 /*
96167@@ -690,10 +696,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
96168 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
96169 */
96170 if (vma->vm_ops)
96171- printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
96172+ printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n",
96173 vma->vm_ops->fault);
96174 if (vma->vm_file)
96175- printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
96176+ printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n",
96177 vma->vm_file->f_op->mmap);
96178 dump_stack();
96179 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
96180@@ -1488,6 +1494,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
96181 page_add_file_rmap(page);
96182 set_pte_at(mm, addr, pte, mk_pte(page, prot));
96183
96184+#ifdef CONFIG_PAX_SEGMEXEC
96185+ pax_mirror_file_pte(vma, addr, page, ptl);
96186+#endif
96187+
96188 retval = 0;
96189 pte_unmap_unlock(pte, ptl);
96190 return retval;
96191@@ -1532,9 +1542,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
96192 if (!page_count(page))
96193 return -EINVAL;
96194 if (!(vma->vm_flags & VM_MIXEDMAP)) {
96195+
96196+#ifdef CONFIG_PAX_SEGMEXEC
96197+ struct vm_area_struct *vma_m;
96198+#endif
96199+
96200 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
96201 BUG_ON(vma->vm_flags & VM_PFNMAP);
96202 vma->vm_flags |= VM_MIXEDMAP;
96203+
96204+#ifdef CONFIG_PAX_SEGMEXEC
96205+ vma_m = pax_find_mirror_vma(vma);
96206+ if (vma_m)
96207+ vma_m->vm_flags |= VM_MIXEDMAP;
96208+#endif
96209+
96210 }
96211 return insert_page(vma, addr, page, vma->vm_page_prot);
96212 }
96213@@ -1617,6 +1639,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
96214 unsigned long pfn)
96215 {
96216 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
96217+ BUG_ON(vma->vm_mirror);
96218
96219 if (addr < vma->vm_start || addr >= vma->vm_end)
96220 return -EFAULT;
96221@@ -1864,7 +1887,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
96222
96223 BUG_ON(pud_huge(*pud));
96224
96225- pmd = pmd_alloc(mm, pud, addr);
96226+ pmd = (mm == &init_mm) ?
96227+ pmd_alloc_kernel(mm, pud, addr) :
96228+ pmd_alloc(mm, pud, addr);
96229 if (!pmd)
96230 return -ENOMEM;
96231 do {
96232@@ -1884,7 +1909,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
96233 unsigned long next;
96234 int err;
96235
96236- pud = pud_alloc(mm, pgd, addr);
96237+ pud = (mm == &init_mm) ?
96238+ pud_alloc_kernel(mm, pgd, addr) :
96239+ pud_alloc(mm, pgd, addr);
96240 if (!pud)
96241 return -ENOMEM;
96242 do {
96243@@ -2006,6 +2033,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
96244 return ret;
96245 }
96246
96247+#ifdef CONFIG_PAX_SEGMEXEC
96248+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
96249+{
96250+ struct mm_struct *mm = vma->vm_mm;
96251+ spinlock_t *ptl;
96252+ pte_t *pte, entry;
96253+
96254+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
96255+ entry = *pte;
96256+ if (!pte_present(entry)) {
96257+ if (!pte_none(entry)) {
96258+ BUG_ON(pte_file(entry));
96259+ free_swap_and_cache(pte_to_swp_entry(entry));
96260+ pte_clear_not_present_full(mm, address, pte, 0);
96261+ }
96262+ } else {
96263+ struct page *page;
96264+
96265+ flush_cache_page(vma, address, pte_pfn(entry));
96266+ entry = ptep_clear_flush(vma, address, pte);
96267+ BUG_ON(pte_dirty(entry));
96268+ page = vm_normal_page(vma, address, entry);
96269+ if (page) {
96270+ update_hiwater_rss(mm);
96271+ if (PageAnon(page))
96272+ dec_mm_counter_fast(mm, MM_ANONPAGES);
96273+ else
96274+ dec_mm_counter_fast(mm, MM_FILEPAGES);
96275+ page_remove_rmap(page);
96276+ page_cache_release(page);
96277+ }
96278+ }
96279+ pte_unmap_unlock(pte, ptl);
96280+}
96281+
96282+/* PaX: if the vma is mirrored, synchronize the mirror's PTE.
96283+ *
96284+ * The ptl of the lower mapped page is held on entry and is not released on exit
96285+ * or inside, to ensure atomic changes to the PTE state (swapout, mremap, munmap, etc.).
96286+ */
96287+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96288+{
96289+ struct mm_struct *mm = vma->vm_mm;
96290+ unsigned long address_m;
96291+ spinlock_t *ptl_m;
96292+ struct vm_area_struct *vma_m;
96293+ pmd_t *pmd_m;
96294+ pte_t *pte_m, entry_m;
96295+
96296+ BUG_ON(!page_m || !PageAnon(page_m));
96297+
96298+ vma_m = pax_find_mirror_vma(vma);
96299+ if (!vma_m)
96300+ return;
96301+
96302+ BUG_ON(!PageLocked(page_m));
96303+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96304+ address_m = address + SEGMEXEC_TASK_SIZE;
96305+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96306+ pte_m = pte_offset_map(pmd_m, address_m);
96307+ ptl_m = pte_lockptr(mm, pmd_m);
96308+ if (ptl != ptl_m) {
96309+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96310+ if (!pte_none(*pte_m))
96311+ goto out;
96312+ }
96313+
96314+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96315+ page_cache_get(page_m);
96316+ page_add_anon_rmap(page_m, vma_m, address_m);
96317+ inc_mm_counter_fast(mm, MM_ANONPAGES);
96318+ set_pte_at(mm, address_m, pte_m, entry_m);
96319+ update_mmu_cache(vma_m, address_m, pte_m);
96320+out:
96321+ if (ptl != ptl_m)
96322+ spin_unlock(ptl_m);
96323+ pte_unmap(pte_m);
96324+ unlock_page(page_m);
96325+}
96326+
96327+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96328+{
96329+ struct mm_struct *mm = vma->vm_mm;
96330+ unsigned long address_m;
96331+ spinlock_t *ptl_m;
96332+ struct vm_area_struct *vma_m;
96333+ pmd_t *pmd_m;
96334+ pte_t *pte_m, entry_m;
96335+
96336+ BUG_ON(!page_m || PageAnon(page_m));
96337+
96338+ vma_m = pax_find_mirror_vma(vma);
96339+ if (!vma_m)
96340+ return;
96341+
96342+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96343+ address_m = address + SEGMEXEC_TASK_SIZE;
96344+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96345+ pte_m = pte_offset_map(pmd_m, address_m);
96346+ ptl_m = pte_lockptr(mm, pmd_m);
96347+ if (ptl != ptl_m) {
96348+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96349+ if (!pte_none(*pte_m))
96350+ goto out;
96351+ }
96352+
96353+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96354+ page_cache_get(page_m);
96355+ page_add_file_rmap(page_m);
96356+ inc_mm_counter_fast(mm, MM_FILEPAGES);
96357+ set_pte_at(mm, address_m, pte_m, entry_m);
96358+ update_mmu_cache(vma_m, address_m, pte_m);
96359+out:
96360+ if (ptl != ptl_m)
96361+ spin_unlock(ptl_m);
96362+ pte_unmap(pte_m);
96363+}
96364+
96365+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
96366+{
96367+ struct mm_struct *mm = vma->vm_mm;
96368+ unsigned long address_m;
96369+ spinlock_t *ptl_m;
96370+ struct vm_area_struct *vma_m;
96371+ pmd_t *pmd_m;
96372+ pte_t *pte_m, entry_m;
96373+
96374+ vma_m = pax_find_mirror_vma(vma);
96375+ if (!vma_m)
96376+ return;
96377+
96378+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96379+ address_m = address + SEGMEXEC_TASK_SIZE;
96380+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96381+ pte_m = pte_offset_map(pmd_m, address_m);
96382+ ptl_m = pte_lockptr(mm, pmd_m);
96383+ if (ptl != ptl_m) {
96384+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96385+ if (!pte_none(*pte_m))
96386+ goto out;
96387+ }
96388+
96389+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
96390+ set_pte_at(mm, address_m, pte_m, entry_m);
96391+out:
96392+ if (ptl != ptl_m)
96393+ spin_unlock(ptl_m);
96394+ pte_unmap(pte_m);
96395+}
96396+
96397+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
96398+{
96399+ struct page *page_m;
96400+ pte_t entry;
96401+
96402+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
96403+ goto out;
96404+
96405+ entry = *pte;
96406+ page_m = vm_normal_page(vma, address, entry);
96407+ if (!page_m)
96408+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
96409+ else if (PageAnon(page_m)) {
96410+ if (pax_find_mirror_vma(vma)) {
96411+ pte_unmap_unlock(pte, ptl);
96412+ lock_page(page_m);
96413+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
96414+ if (pte_same(entry, *pte))
96415+ pax_mirror_anon_pte(vma, address, page_m, ptl);
96416+ else
96417+ unlock_page(page_m);
96418+ }
96419+ } else
96420+ pax_mirror_file_pte(vma, address, page_m, ptl);
96421+
96422+out:
96423+ pte_unmap_unlock(pte, ptl);
96424+}
96425+#endif
96426+
96427 /*
96428 * This routine handles present pages, when users try to write
96429 * to a shared page. It is done by copying the page to a new address
96430@@ -2212,6 +2419,12 @@ gotten:
96431 */
96432 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96433 if (likely(pte_same(*page_table, orig_pte))) {
96434+
96435+#ifdef CONFIG_PAX_SEGMEXEC
96436+ if (pax_find_mirror_vma(vma))
96437+ BUG_ON(!trylock_page(new_page));
96438+#endif
96439+
96440 if (old_page) {
96441 if (!PageAnon(old_page)) {
96442 dec_mm_counter_fast(mm, MM_FILEPAGES);
96443@@ -2265,6 +2478,10 @@ gotten:
96444 page_remove_rmap(old_page);
96445 }
96446
96447+#ifdef CONFIG_PAX_SEGMEXEC
96448+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96449+#endif
96450+
96451 /* Free the old page.. */
96452 new_page = old_page;
96453 ret |= VM_FAULT_WRITE;
96454@@ -2539,6 +2756,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96455 swap_free(entry);
96456 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
96457 try_to_free_swap(page);
96458+
96459+#ifdef CONFIG_PAX_SEGMEXEC
96460+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
96461+#endif
96462+
96463 unlock_page(page);
96464 if (page != swapcache) {
96465 /*
96466@@ -2562,6 +2784,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96467
96468 /* No need to invalidate - it was non-present before */
96469 update_mmu_cache(vma, address, page_table);
96470+
96471+#ifdef CONFIG_PAX_SEGMEXEC
96472+ pax_mirror_anon_pte(vma, address, page, ptl);
96473+#endif
96474+
96475 unlock:
96476 pte_unmap_unlock(page_table, ptl);
96477 out:
96478@@ -2581,40 +2808,6 @@ out_release:
96479 }
96480
96481 /*
96482- * This is like a special single-page "expand_{down|up}wards()",
96483- * except we must first make sure that 'address{-|+}PAGE_SIZE'
96484- * doesn't hit another vma.
96485- */
96486-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
96487-{
96488- address &= PAGE_MASK;
96489- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
96490- struct vm_area_struct *prev = vma->vm_prev;
96491-
96492- /*
96493- * Is there a mapping abutting this one below?
96494- *
96495- * That's only ok if it's the same stack mapping
96496- * that has gotten split..
96497- */
96498- if (prev && prev->vm_end == address)
96499- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
96500-
96501- return expand_downwards(vma, address - PAGE_SIZE);
96502- }
96503- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
96504- struct vm_area_struct *next = vma->vm_next;
96505-
96506- /* As VM_GROWSDOWN but s/below/above/ */
96507- if (next && next->vm_start == address + PAGE_SIZE)
96508- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
96509-
96510- return expand_upwards(vma, address + PAGE_SIZE);
96511- }
96512- return 0;
96513-}
96514-
96515-/*
96516 * We enter with non-exclusive mmap_sem (to exclude vma changes,
96517 * but allow concurrent faults), and pte mapped but not yet locked.
96518 * We return with mmap_sem still held, but pte unmapped and unlocked.
96519@@ -2624,27 +2817,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96520 unsigned int flags)
96521 {
96522 struct mem_cgroup *memcg;
96523- struct page *page;
96524+ struct page *page = NULL;
96525 spinlock_t *ptl;
96526 pte_t entry;
96527
96528- pte_unmap(page_table);
96529-
96530- /* Check if we need to add a guard page to the stack */
96531- if (check_stack_guard_page(vma, address) < 0)
96532- return VM_FAULT_SIGSEGV;
96533-
96534- /* Use the zero-page for reads */
96535 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
96536 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
96537 vma->vm_page_prot));
96538- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96539+ ptl = pte_lockptr(mm, pmd);
96540+ spin_lock(ptl);
96541 if (!pte_none(*page_table))
96542 goto unlock;
96543 goto setpte;
96544 }
96545
96546 /* Allocate our own private page. */
96547+ pte_unmap(page_table);
96548+
96549 if (unlikely(anon_vma_prepare(vma)))
96550 goto oom;
96551 page = alloc_zeroed_user_highpage_movable(vma, address);
96552@@ -2668,6 +2857,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96553 if (!pte_none(*page_table))
96554 goto release;
96555
96556+#ifdef CONFIG_PAX_SEGMEXEC
96557+ if (pax_find_mirror_vma(vma))
96558+ BUG_ON(!trylock_page(page));
96559+#endif
96560+
96561 inc_mm_counter_fast(mm, MM_ANONPAGES);
96562 page_add_new_anon_rmap(page, vma, address);
96563 mem_cgroup_commit_charge(page, memcg, false);
96564@@ -2677,6 +2871,12 @@ setpte:
96565
96566 /* No need to invalidate - it was non-present before */
96567 update_mmu_cache(vma, address, page_table);
96568+
96569+#ifdef CONFIG_PAX_SEGMEXEC
96570+ if (page)
96571+ pax_mirror_anon_pte(vma, address, page, ptl);
96572+#endif
96573+
96574 unlock:
96575 pte_unmap_unlock(page_table, ptl);
96576 return 0;
96577@@ -2907,6 +3107,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96578 return ret;
96579 }
96580 do_set_pte(vma, address, fault_page, pte, false, false);
96581+
96582+#ifdef CONFIG_PAX_SEGMEXEC
96583+ pax_mirror_file_pte(vma, address, fault_page, ptl);
96584+#endif
96585+
96586 unlock_page(fault_page);
96587 unlock_out:
96588 pte_unmap_unlock(pte, ptl);
96589@@ -2949,7 +3154,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96590 page_cache_release(fault_page);
96591 goto uncharge_out;
96592 }
96593+
96594+#ifdef CONFIG_PAX_SEGMEXEC
96595+ if (pax_find_mirror_vma(vma))
96596+ BUG_ON(!trylock_page(new_page));
96597+#endif
96598+
96599 do_set_pte(vma, address, new_page, pte, true, true);
96600+
96601+#ifdef CONFIG_PAX_SEGMEXEC
96602+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96603+#endif
96604+
96605 mem_cgroup_commit_charge(new_page, memcg, false);
96606 lru_cache_add_active_or_unevictable(new_page, vma);
96607 pte_unmap_unlock(pte, ptl);
96608@@ -2999,6 +3215,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96609 return ret;
96610 }
96611 do_set_pte(vma, address, fault_page, pte, true, false);
96612+
96613+#ifdef CONFIG_PAX_SEGMEXEC
96614+ pax_mirror_file_pte(vma, address, fault_page, ptl);
96615+#endif
96616+
96617 pte_unmap_unlock(pte, ptl);
96618
96619 if (set_page_dirty(fault_page))
96620@@ -3255,6 +3476,12 @@ static int handle_pte_fault(struct mm_struct *mm,
96621 if (flags & FAULT_FLAG_WRITE)
96622 flush_tlb_fix_spurious_fault(vma, address);
96623 }
96624+
96625+#ifdef CONFIG_PAX_SEGMEXEC
96626+ pax_mirror_pte(vma, address, pte, pmd, ptl);
96627+ return 0;
96628+#endif
96629+
96630 unlock:
96631 pte_unmap_unlock(pte, ptl);
96632 return 0;
96633@@ -3274,9 +3501,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96634 pmd_t *pmd;
96635 pte_t *pte;
96636
96637+#ifdef CONFIG_PAX_SEGMEXEC
96638+ struct vm_area_struct *vma_m;
96639+#endif
96640+
96641 if (unlikely(is_vm_hugetlb_page(vma)))
96642 return hugetlb_fault(mm, vma, address, flags);
96643
96644+#ifdef CONFIG_PAX_SEGMEXEC
96645+ vma_m = pax_find_mirror_vma(vma);
96646+ if (vma_m) {
96647+ unsigned long address_m;
96648+ pgd_t *pgd_m;
96649+ pud_t *pud_m;
96650+ pmd_t *pmd_m;
96651+
96652+ if (vma->vm_start > vma_m->vm_start) {
96653+ address_m = address;
96654+ address -= SEGMEXEC_TASK_SIZE;
96655+ vma = vma_m;
96656+ } else
96657+ address_m = address + SEGMEXEC_TASK_SIZE;
96658+
96659+ pgd_m = pgd_offset(mm, address_m);
96660+ pud_m = pud_alloc(mm, pgd_m, address_m);
96661+ if (!pud_m)
96662+ return VM_FAULT_OOM;
96663+ pmd_m = pmd_alloc(mm, pud_m, address_m);
96664+ if (!pmd_m)
96665+ return VM_FAULT_OOM;
96666+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
96667+ return VM_FAULT_OOM;
96668+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
96669+ }
96670+#endif
96671+
96672 pgd = pgd_offset(mm, address);
96673 pud = pud_alloc(mm, pgd, address);
96674 if (!pud)
96675@@ -3411,6 +3670,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
96676 spin_unlock(&mm->page_table_lock);
96677 return 0;
96678 }
96679+
96680+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
96681+{
96682+ pud_t *new = pud_alloc_one(mm, address);
96683+ if (!new)
96684+ return -ENOMEM;
96685+
96686+ smp_wmb(); /* See comment in __pte_alloc */
96687+
96688+ spin_lock(&mm->page_table_lock);
96689+ if (pgd_present(*pgd)) /* Another has populated it */
96690+ pud_free(mm, new);
96691+ else
96692+ pgd_populate_kernel(mm, pgd, new);
96693+ spin_unlock(&mm->page_table_lock);
96694+ return 0;
96695+}
96696 #endif /* __PAGETABLE_PUD_FOLDED */
96697
96698 #ifndef __PAGETABLE_PMD_FOLDED
96699@@ -3441,6 +3717,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
96700 spin_unlock(&mm->page_table_lock);
96701 return 0;
96702 }
96703+
96704+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
96705+{
96706+ pmd_t *new = pmd_alloc_one(mm, address);
96707+ if (!new)
96708+ return -ENOMEM;
96709+
96710+ smp_wmb(); /* See comment in __pte_alloc */
96711+
96712+ spin_lock(&mm->page_table_lock);
96713+#ifndef __ARCH_HAS_4LEVEL_HACK
96714+ if (pud_present(*pud)) /* Another has populated it */
96715+ pmd_free(mm, new);
96716+ else
96717+ pud_populate_kernel(mm, pud, new);
96718+#else
96719+ if (pgd_present(*pud)) /* Another has populated it */
96720+ pmd_free(mm, new);
96721+ else
96722+ pgd_populate_kernel(mm, pud, new);
96723+#endif /* __ARCH_HAS_4LEVEL_HACK */
96724+ spin_unlock(&mm->page_table_lock);
96725+ return 0;
96726+}
96727 #endif /* __PAGETABLE_PMD_FOLDED */
96728
96729 static int __follow_pte(struct mm_struct *mm, unsigned long address,
96730@@ -3550,8 +3850,8 @@ out:
96731 return ret;
96732 }
96733
96734-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
96735- void *buf, int len, int write)
96736+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
96737+ void *buf, size_t len, int write)
96738 {
96739 resource_size_t phys_addr;
96740 unsigned long prot = 0;
96741@@ -3577,8 +3877,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
96742 * Access another process' address space as given in mm. If non-NULL, use the
96743 * given task for page fault accounting.
96744 */
96745-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96746- unsigned long addr, void *buf, int len, int write)
96747+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96748+ unsigned long addr, void *buf, size_t len, int write)
96749 {
96750 struct vm_area_struct *vma;
96751 void *old_buf = buf;
96752@@ -3586,7 +3886,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96753 down_read(&mm->mmap_sem);
96754 /* ignore errors, just check how much was successfully transferred */
96755 while (len) {
96756- int bytes, ret, offset;
96757+ ssize_t bytes, ret, offset;
96758 void *maddr;
96759 struct page *page = NULL;
96760
96761@@ -3647,8 +3947,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96762 *
96763 * The caller must hold a reference on @mm.
96764 */
96765-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
96766- void *buf, int len, int write)
96767+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
96768+ void *buf, size_t len, int write)
96769 {
96770 return __access_remote_vm(NULL, mm, addr, buf, len, write);
96771 }
96772@@ -3658,11 +3958,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
96773 * Source/target buffer must be kernel space,
96774 * Do not walk the page table directly, use get_user_pages
96775 */
96776-int access_process_vm(struct task_struct *tsk, unsigned long addr,
96777- void *buf, int len, int write)
96778+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
96779+ void *buf, size_t len, int write)
96780 {
96781 struct mm_struct *mm;
96782- int ret;
96783+ ssize_t ret;
96784
96785 mm = get_task_mm(tsk);
96786 if (!mm)
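The mm/memory.c section carries the core of the SEGMEXEC machinery: pax_mirror_{anon,file,pfn}_pte() replay a freshly installed PTE at address + SEGMEXEC_TASK_SIZE, __handle_mm_fault() pre-allocates the mirror's page tables and clears any stale mirror PTE, and the old single-page check_stack_guard_page() is dropped in favor of the configurable heap/stack gap added in mm/mmap.c further down. The access_*_vm() family also widens int to ssize_t/size_t so a large len cannot go negative. The one subtle part is the locking: the mirror's pte lock is taken nested only when it differs from the lock already held, avoiding self-deadlock when both PTEs share a page-table lock. A sketch of that pattern, using pthread mutexes in place of ptl spinlocks:

#include <pthread.h>

/* Locking shape of pax_mirror_*_pte() above: the lower page's ptl is held on
 * entry; the mirror's ptl is taken nested only if it is a different lock.
 * The kernel version uses spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING). */
static void with_mirror_lock(pthread_mutex_t *ptl, pthread_mutex_t *ptl_m,
                             void (*update_mirror)(void))
{
    if (ptl != ptl_m)
        pthread_mutex_lock(ptl_m);
    update_mirror();                 /* install entry_m into the mirror PTE */
    if (ptl != ptl_m)
        pthread_mutex_unlock(ptl_m);
}
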
96787diff --git a/mm/mempolicy.c b/mm/mempolicy.c
96788index 0e0961b..c9143b9 100644
96789--- a/mm/mempolicy.c
96790+++ b/mm/mempolicy.c
96791@@ -744,6 +744,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
96792 unsigned long vmstart;
96793 unsigned long vmend;
96794
96795+#ifdef CONFIG_PAX_SEGMEXEC
96796+ struct vm_area_struct *vma_m;
96797+#endif
96798+
96799 vma = find_vma(mm, start);
96800 if (!vma || vma->vm_start > start)
96801 return -EFAULT;
96802@@ -787,6 +791,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
96803 err = vma_replace_policy(vma, new_pol);
96804 if (err)
96805 goto out;
96806+
96807+#ifdef CONFIG_PAX_SEGMEXEC
96808+ vma_m = pax_find_mirror_vma(vma);
96809+ if (vma_m) {
96810+ err = vma_replace_policy(vma_m, new_pol);
96811+ if (err)
96812+ goto out;
96813+ }
96814+#endif
96815+
96816 }
96817
96818 out:
96819@@ -1201,6 +1215,17 @@ static long do_mbind(unsigned long start, unsigned long len,
96820
96821 if (end < start)
96822 return -EINVAL;
96823+
96824+#ifdef CONFIG_PAX_SEGMEXEC
96825+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
96826+ if (end > SEGMEXEC_TASK_SIZE)
96827+ return -EINVAL;
96828+ } else
96829+#endif
96830+
96831+ if (end > TASK_SIZE)
96832+ return -EINVAL;
96833+
96834 if (end == start)
96835 return 0;
96836
96837@@ -1426,8 +1451,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96838 */
96839 tcred = __task_cred(task);
96840 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
96841- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
96842- !capable(CAP_SYS_NICE)) {
96843+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
96844 rcu_read_unlock();
96845 err = -EPERM;
96846 goto out_put;
96847@@ -1458,6 +1482,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96848 goto out;
96849 }
96850
96851+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96852+ if (mm != current->mm &&
96853+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
96854+ mmput(mm);
96855+ err = -EPERM;
96856+ goto out;
96857+ }
96858+#endif
96859+
96860 err = do_migrate_pages(mm, old, new,
96861 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
96862
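Besides propagating mbind() policies to the mirror vma and bounding the request range, the migrate_pages() hunk drops the uid_eq(cred->uid, tcred->uid) term from the permission test (the identical change appears in mm/migrate.c just below), so a mere real-uid match no longer authorizes rebinding another process's pages; the caller's euid must match the target's real or saved uid, or its real uid must match the target's saved uid, or it must hold CAP_SYS_NICE. The resulting denial predicate, written out as a self-contained sketch:

#include <stdbool.h>

struct cred_ids { unsigned int uid, euid, suid; };

/* Denial predicate after the hunk above; note the real-uid == real-uid
 * escape is gone. */
static bool migrate_denied(const struct cred_ids *c /* caller */,
                           const struct cred_ids *t /* target */,
                           bool cap_sys_nice)
{
    return c->euid != t->suid && c->euid != t->uid &&
           c->uid  != t->suid && !cap_sys_nice;
}
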
96863diff --git a/mm/migrate.c b/mm/migrate.c
96864index 344cdf6..07399500 100644
96865--- a/mm/migrate.c
96866+++ b/mm/migrate.c
96867@@ -1503,8 +1503,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
96868 */
96869 tcred = __task_cred(task);
96870 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
96871- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
96872- !capable(CAP_SYS_NICE)) {
96873+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
96874 rcu_read_unlock();
96875 err = -EPERM;
96876 goto out;
96877diff --git a/mm/mlock.c b/mm/mlock.c
96878index 73cf098..ab547c7 100644
96879--- a/mm/mlock.c
96880+++ b/mm/mlock.c
96881@@ -14,6 +14,7 @@
96882 #include <linux/pagevec.h>
96883 #include <linux/mempolicy.h>
96884 #include <linux/syscalls.h>
96885+#include <linux/security.h>
96886 #include <linux/sched.h>
96887 #include <linux/export.h>
96888 #include <linux/rmap.h>
96889@@ -613,7 +614,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
96890 {
96891 unsigned long nstart, end, tmp;
96892 struct vm_area_struct * vma, * prev;
96893- int error;
96894+ int error = 0;
96895
96896 VM_BUG_ON(start & ~PAGE_MASK);
96897 VM_BUG_ON(len != PAGE_ALIGN(len));
96898@@ -622,6 +623,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
96899 return -EINVAL;
96900 if (end == start)
96901 return 0;
96902+ if (end > TASK_SIZE)
96903+ return -EINVAL;
96904+
96905 vma = find_vma(current->mm, start);
96906 if (!vma || vma->vm_start > start)
96907 return -ENOMEM;
96908@@ -633,6 +637,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
96909 for (nstart = start ; ; ) {
96910 vm_flags_t newflags;
96911
96912+#ifdef CONFIG_PAX_SEGMEXEC
96913+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96914+ break;
96915+#endif
96916+
96917 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
96918
96919 newflags = vma->vm_flags & ~VM_LOCKED;
96920@@ -746,6 +755,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
96921 locked += current->mm->locked_vm;
96922
96923 /* check against resource limits */
96924+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
96925 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
96926 error = do_mlock(start, len, 1);
96927
96928@@ -783,6 +793,11 @@ static int do_mlockall(int flags)
96929 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
96930 vm_flags_t newflags;
96931
96932+#ifdef CONFIG_PAX_SEGMEXEC
96933+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96934+ break;
96935+#endif
96936+
96937 newflags = vma->vm_flags & ~VM_LOCKED;
96938 if (flags & MCL_CURRENT)
96939 newflags |= VM_LOCKED;
96940@@ -814,8 +829,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
96941 lock_limit >>= PAGE_SHIFT;
96942
96943 ret = -ENOMEM;
96944+
96945+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
96946+
96947 down_write(&current->mm->mmap_sem);
96948-
96949 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
96950 capable(CAP_IPC_LOCK))
96951 ret = do_mlockall(flags);
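The mlock hunks bound requests to TASK_SIZE, skip SEGMEXEC mirror vmas (everything at or above SEGMEXEC_TASK_SIZE), and report attempted sizes to gr_learn_resource() so the RLIMIT learning mode can propose limits. The limit comparison itself is done in pages; a worked example of the arithmetic around the locked <= lock_limit test, assuming 4 KiB pages and a 64 KiB RLIMIT_MEMLOCK:

#include <stdio.h>

#define PAGE_SHIFT 12 /* assumed 4 KiB pages */

int main(void)
{
    unsigned long rlim_memlock = 64 * 1024;                  /* bytes */
    unsigned long lock_limit = rlim_memlock >> PAGE_SHIFT;   /* -> 16 pages */
    unsigned long already = 10, request_pages = 8;

    /* mirrors "locked += current->mm->locked_vm; if (locked <= lock_limit)..." */
    unsigned long locked = already + request_pages;
    printf("%lu pages requested against a %lu-page limit: %s\n",
           locked, lock_limit,
           locked <= lock_limit ? "ok" : "needs CAP_IPC_LOCK");
    return 0;
}
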
96952diff --git a/mm/mmap.c b/mm/mmap.c
96953index 0bc66f1..2bfa432 100644
96954--- a/mm/mmap.c
96955+++ b/mm/mmap.c
96956@@ -41,6 +41,7 @@
96957 #include <linux/notifier.h>
96958 #include <linux/memory.h>
96959 #include <linux/printk.h>
96960+#include <linux/random.h>
96961
96962 #include <asm/uaccess.h>
96963 #include <asm/cacheflush.h>
96964@@ -57,6 +58,16 @@
96965 #define arch_rebalance_pgtables(addr, len) (addr)
96966 #endif
96967
96968+static inline void verify_mm_writelocked(struct mm_struct *mm)
96969+{
96970+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
96971+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
96972+ up_read(&mm->mmap_sem);
96973+ BUG();
96974+ }
96975+#endif
96976+}
96977+
96978 static void unmap_region(struct mm_struct *mm,
96979 struct vm_area_struct *vma, struct vm_area_struct *prev,
96980 unsigned long start, unsigned long end);
96981@@ -76,16 +87,25 @@ static void unmap_region(struct mm_struct *mm,
96982 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
96983 *
96984 */
96985-pgprot_t protection_map[16] = {
96986+pgprot_t protection_map[16] __read_only = {
96987 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
96988 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
96989 };
96990
96991-pgprot_t vm_get_page_prot(unsigned long vm_flags)
96992+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
96993 {
96994- return __pgprot(pgprot_val(protection_map[vm_flags &
96995+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
96996 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
96997 pgprot_val(arch_vm_get_page_prot(vm_flags)));
96998+
96999+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97000+ if (!(__supported_pte_mask & _PAGE_NX) &&
97001+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
97002+ (vm_flags & (VM_READ | VM_WRITE)))
97003+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
97004+#endif
97005+
97006+ return prot;
97007 }
97008 EXPORT_SYMBOL(vm_get_page_prot);
97009
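protection_map moves to __read_only and vm_get_page_prot() gains a PAGEEXEC fixup: on x86-32 without hardware NX, a readable or writable mapping that is VM_PAGEEXEC but not VM_EXEC has the execute bit stripped from its pte protection so PaX can emulate non-executable pages. The table lookup itself is a four-bit index; a sketch using the conventional vm_flags bit values (assumed here):

/* protection_map is indexed by the low four protection-relevant vm_flags
 * bits; the values below are the conventional kernel ones. */
#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL

static unsigned long prot_index(unsigned long vm_flags)
{
    return vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED); /* 0..15 */
}
/* e.g. a private PROT_READ|PROT_WRITE mapping -> index 3 -> __P011 */
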
97010@@ -114,6 +134,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
97011 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
97012 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
97013 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
97014+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
97015 /*
97016 * Make sure vm_committed_as in one cacheline and not cacheline shared with
97017 * other variables. It can be updated by several CPUs frequently.
97018@@ -274,6 +295,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
97019 struct vm_area_struct *next = vma->vm_next;
97020
97021 might_sleep();
97022+ BUG_ON(vma->vm_mirror);
97023 if (vma->vm_ops && vma->vm_ops->close)
97024 vma->vm_ops->close(vma);
97025 if (vma->vm_file)
97026@@ -287,6 +309,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len);
97027
97028 SYSCALL_DEFINE1(brk, unsigned long, brk)
97029 {
97030+ unsigned long rlim;
97031 unsigned long retval;
97032 unsigned long newbrk, oldbrk;
97033 struct mm_struct *mm = current->mm;
97034@@ -317,7 +340,13 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
97035 * segment grow beyond its set limit in the case where the limit is
97036 * not page aligned -Ram Gupta
97037 */
97038- if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
97039+ rlim = rlimit(RLIMIT_DATA);
97040+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
97041+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
97042+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
97043+ rlim = 4096 * PAGE_SIZE;
97044+#endif
97045+ if (check_data_rlimit(rlim, brk, mm->start_brk,
97046 mm->end_data, mm->start_data))
97047 goto out;
97048
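With GRKERNSEC_PROC_MEMMAP, a setuid/setgid binary whose RLIMIT_DATA is effectively unset (below one page) gets a floor of 4096 pages so the randomized brk still has room to grow. At the assumed 4 KiB page size that is exactly the 16 MB named in the comment:

#define PAGE_SIZE 4096UL /* assumed */

/* 4096 pages * 4 KiB == the "minimum 16MB brk heap" from the comment above */
_Static_assert(4096 * PAGE_SIZE == 16UL * 1024 * 1024, "16 MiB brk floor");
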
97049@@ -976,6 +1005,12 @@ static int
97050 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
97051 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
97052 {
97053+
97054+#ifdef CONFIG_PAX_SEGMEXEC
97055+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
97056+ return 0;
97057+#endif
97058+
97059 if (is_mergeable_vma(vma, file, vm_flags) &&
97060 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
97061 if (vma->vm_pgoff == vm_pgoff)
97062@@ -995,6 +1030,12 @@ static int
97063 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
97064 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
97065 {
97066+
97067+#ifdef CONFIG_PAX_SEGMEXEC
97068+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
97069+ return 0;
97070+#endif
97071+
97072 if (is_mergeable_vma(vma, file, vm_flags) &&
97073 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
97074 pgoff_t vm_pglen;
97075@@ -1044,6 +1085,13 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97076 struct vm_area_struct *area, *next;
97077 int err;
97078
97079+#ifdef CONFIG_PAX_SEGMEXEC
97080+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
97081+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
97082+
97083+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
97084+#endif
97085+
97086 /*
97087 * We later require that vma->vm_flags == vm_flags,
97088 * so this tests vma->vm_flags & VM_SPECIAL, too.
97089@@ -1059,6 +1107,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97090 if (next && next->vm_end == end) /* cases 6, 7, 8 */
97091 next = next->vm_next;
97092
97093+#ifdef CONFIG_PAX_SEGMEXEC
97094+ if (prev)
97095+ prev_m = pax_find_mirror_vma(prev);
97096+ if (area)
97097+ area_m = pax_find_mirror_vma(area);
97098+ if (next)
97099+ next_m = pax_find_mirror_vma(next);
97100+#endif
97101+
97102 /*
97103 * Can it merge with the predecessor?
97104 */
97105@@ -1078,9 +1135,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97106 /* cases 1, 6 */
97107 err = vma_adjust(prev, prev->vm_start,
97108 next->vm_end, prev->vm_pgoff, NULL);
97109- } else /* cases 2, 5, 7 */
97110+
97111+#ifdef CONFIG_PAX_SEGMEXEC
97112+ if (!err && prev_m)
97113+ err = vma_adjust(prev_m, prev_m->vm_start,
97114+ next_m->vm_end, prev_m->vm_pgoff, NULL);
97115+#endif
97116+
97117+ } else { /* cases 2, 5, 7 */
97118 err = vma_adjust(prev, prev->vm_start,
97119 end, prev->vm_pgoff, NULL);
97120+
97121+#ifdef CONFIG_PAX_SEGMEXEC
97122+ if (!err && prev_m)
97123+ err = vma_adjust(prev_m, prev_m->vm_start,
97124+ end_m, prev_m->vm_pgoff, NULL);
97125+#endif
97126+
97127+ }
97128 if (err)
97129 return NULL;
97130 khugepaged_enter_vma_merge(prev, vm_flags);
97131@@ -1094,12 +1166,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97132 mpol_equal(policy, vma_policy(next)) &&
97133 can_vma_merge_before(next, vm_flags,
97134 anon_vma, file, pgoff+pglen)) {
97135- if (prev && addr < prev->vm_end) /* case 4 */
97136+ if (prev && addr < prev->vm_end) { /* case 4 */
97137 err = vma_adjust(prev, prev->vm_start,
97138 addr, prev->vm_pgoff, NULL);
97139- else /* cases 3, 8 */
97140+
97141+#ifdef CONFIG_PAX_SEGMEXEC
97142+ if (!err && prev_m)
97143+ err = vma_adjust(prev_m, prev_m->vm_start,
97144+ addr_m, prev_m->vm_pgoff, NULL);
97145+#endif
97146+
97147+ } else { /* cases 3, 8 */
97148 err = vma_adjust(area, addr, next->vm_end,
97149 next->vm_pgoff - pglen, NULL);
97150+
97151+#ifdef CONFIG_PAX_SEGMEXEC
97152+ if (!err && area_m)
97153+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
97154+ next_m->vm_pgoff - pglen, NULL);
97155+#endif
97156+
97157+ }
97158 if (err)
97159 return NULL;
97160 khugepaged_enter_vma_merge(area, vm_flags);
97161@@ -1208,8 +1295,10 @@ none:
97162 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97163 struct file *file, long pages)
97164 {
97165- const unsigned long stack_flags
97166- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
97167+
97168+#ifdef CONFIG_PAX_RANDMMAP
97169+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
97170+#endif
97171
97172 mm->total_vm += pages;
97173
97174@@ -1217,7 +1306,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97175 mm->shared_vm += pages;
97176 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
97177 mm->exec_vm += pages;
97178- } else if (flags & stack_flags)
97179+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
97180 mm->stack_vm += pages;
97181 }
97182 #endif /* CONFIG_PROC_FS */
97183@@ -1247,6 +1336,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
97184 locked += mm->locked_vm;
97185 lock_limit = rlimit(RLIMIT_MEMLOCK);
97186 lock_limit >>= PAGE_SHIFT;
97187+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97188 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
97189 return -EAGAIN;
97190 }
97191@@ -1273,7 +1363,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97192 * (the exception is when the underlying filesystem is noexec
97193 * mounted, in which case we don't add PROT_EXEC.)
97194 */
97195- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
97196+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
97197 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
97198 prot |= PROT_EXEC;
97199
97200@@ -1299,7 +1389,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97201 /* Obtain the address to map to. we verify (or select) it and ensure
97202 * that it represents a valid section of the address space.
97203 */
97204- addr = get_unmapped_area(file, addr, len, pgoff, flags);
97205+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
97206 if (addr & ~PAGE_MASK)
97207 return addr;
97208
97209@@ -1310,6 +1400,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97210 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
97211 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
97212
97213+#ifdef CONFIG_PAX_MPROTECT
97214+ if (mm->pax_flags & MF_PAX_MPROTECT) {
97215+
97216+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
97217+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
97218+ mm->binfmt->handle_mmap)
97219+ mm->binfmt->handle_mmap(file);
97220+#endif
97221+
97222+#ifndef CONFIG_PAX_MPROTECT_COMPAT
97223+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
97224+ gr_log_rwxmmap(file);
97225+
97226+#ifdef CONFIG_PAX_EMUPLT
97227+ vm_flags &= ~VM_EXEC;
97228+#else
97229+ return -EPERM;
97230+#endif
97231+
97232+ }
97233+
97234+ if (!(vm_flags & VM_EXEC))
97235+ vm_flags &= ~VM_MAYEXEC;
97236+#else
97237+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
97238+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
97239+#endif
97240+ else
97241+ vm_flags &= ~VM_MAYWRITE;
97242+ }
97243+#endif
97244+
97245+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97246+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
97247+ vm_flags &= ~VM_PAGEEXEC;
97248+#endif
97249+
97250 if (flags & MAP_LOCKED)
97251 if (!can_do_mlock())
97252 return -EPERM;
97253@@ -1397,6 +1524,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97254 vm_flags |= VM_NORESERVE;
97255 }
97256
97257+ if (!gr_acl_handle_mmap(file, prot))
97258+ return -EACCES;
97259+
97260 addr = mmap_region(file, addr, len, vm_flags, pgoff);
97261 if (!IS_ERR_VALUE(addr) &&
97262 ((vm_flags & VM_LOCKED) ||
97263@@ -1490,7 +1620,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
97264 vm_flags_t vm_flags = vma->vm_flags;
97265
97266 /* If it was private or non-writable, the write bit is already clear */
97267- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
97268+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
97269 return 0;
97270
97271 /* The backer wishes to know when pages are first written to? */
97272@@ -1541,7 +1671,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97273 struct rb_node **rb_link, *rb_parent;
97274 unsigned long charged = 0;
97275
97276+#ifdef CONFIG_PAX_SEGMEXEC
97277+ struct vm_area_struct *vma_m = NULL;
97278+#endif
97279+
97280+ /*
97281+ * mm->mmap_sem is required to protect against another thread
97282+ * changing the mappings in case we sleep.
97283+ */
97284+ verify_mm_writelocked(mm);
97285+
97286 /* Check against address space limit. */
97287+
97288+#ifdef CONFIG_PAX_RANDMMAP
97289+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
97290+#endif
97291+
97292 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
97293 unsigned long nr_pages;
97294
97295@@ -1560,11 +1705,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97296
97297 /* Clear old maps */
97298 error = -ENOMEM;
97299-munmap_back:
97300 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
97301 if (do_munmap(mm, addr, len))
97302 return -ENOMEM;
97303- goto munmap_back;
97304+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
97305 }
97306
97307 /*
97308@@ -1595,6 +1739,16 @@ munmap_back:
97309 goto unacct_error;
97310 }
97311
97312+#ifdef CONFIG_PAX_SEGMEXEC
97313+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
97314+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97315+ if (!vma_m) {
97316+ error = -ENOMEM;
97317+ goto free_vma;
97318+ }
97319+ }
97320+#endif
97321+
97322 vma->vm_mm = mm;
97323 vma->vm_start = addr;
97324 vma->vm_end = addr + len;
97325@@ -1625,6 +1779,13 @@ munmap_back:
97326 if (error)
97327 goto unmap_and_free_vma;
97328
97329+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97330+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
97331+ vma->vm_flags |= VM_PAGEEXEC;
97332+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
97333+ }
97334+#endif
97335+
97336 /* Can addr have changed??
97337 *
97338 * Answer: Yes, several device drivers can do it in their
97339@@ -1643,6 +1804,12 @@ munmap_back:
97340 }
97341
97342 vma_link(mm, vma, prev, rb_link, rb_parent);
97343+
97344+#ifdef CONFIG_PAX_SEGMEXEC
97345+ if (vma_m)
97346+ BUG_ON(pax_mirror_vma(vma_m, vma));
97347+#endif
97348+
97349 /* Once vma denies write, undo our temporary denial count */
97350 if (file) {
97351 if (vm_flags & VM_SHARED)
97352@@ -1655,6 +1822,7 @@ out:
97353 perf_event_mmap(vma);
97354
97355 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
97356+ track_exec_limit(mm, addr, addr + len, vm_flags);
97357 if (vm_flags & VM_LOCKED) {
97358 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
97359 vma == get_gate_vma(current->mm)))
97360@@ -1692,6 +1860,12 @@ allow_write_and_free_vma:
97361 if (vm_flags & VM_DENYWRITE)
97362 allow_write_access(file);
97363 free_vma:
97364+
97365+#ifdef CONFIG_PAX_SEGMEXEC
97366+ if (vma_m)
97367+ kmem_cache_free(vm_area_cachep, vma_m);
97368+#endif
97369+
97370 kmem_cache_free(vm_area_cachep, vma);
97371 unacct_error:
97372 if (charged)
97373@@ -1699,7 +1873,63 @@ unacct_error:
97374 return error;
97375 }
97376
97377-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
97378+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
97379+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
97380+{
97381+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
97382+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
97383+
97384+ return 0;
97385+}
97386+#endif
97387+
97388+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
97389+{
97390+ if (!vma) {
97391+#ifdef CONFIG_STACK_GROWSUP
97392+ if (addr > sysctl_heap_stack_gap)
97393+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
97394+ else
97395+ vma = find_vma(current->mm, 0);
97396+ if (vma && (vma->vm_flags & VM_GROWSUP))
97397+ return false;
97398+#endif
97399+ return true;
97400+ }
97401+
97402+ if (addr + len > vma->vm_start)
97403+ return false;
97404+
97405+ if (vma->vm_flags & VM_GROWSDOWN)
97406+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
97407+#ifdef CONFIG_STACK_GROWSUP
97408+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
97409+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
97410+#endif
97411+ else if (offset)
97412+ return offset <= vma->vm_start - addr - len;
97413+
97414+ return true;
97415+}
97416+
97417+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
97418+{
97419+ if (vma->vm_start < len)
97420+ return -ENOMEM;
97421+
97422+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
97423+ if (offset <= vma->vm_start - len)
97424+ return vma->vm_start - len - offset;
97425+ else
97426+ return -ENOMEM;
97427+ }
97428+
97429+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
97430+ return vma->vm_start - len - sysctl_heap_stack_gap;
97431+ return -ENOMEM;
97432+}
97433+
97434+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
97435 {
97436 /*
97437 * We implement the search by looking for an rbtree node that
97438@@ -1747,11 +1977,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
97439 }
97440 }
97441
97442- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
97443+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
97444 check_current:
97445 /* Check if current node has a suitable gap */
97446 if (gap_start > high_limit)
97447 return -ENOMEM;
97448+
97449+ if (gap_end - gap_start > info->threadstack_offset)
97450+ gap_start += info->threadstack_offset;
97451+ else
97452+ gap_start = gap_end;
97453+
97454+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
97455+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97456+ gap_start += sysctl_heap_stack_gap;
97457+ else
97458+ gap_start = gap_end;
97459+ }
97460+ if (vma->vm_flags & VM_GROWSDOWN) {
97461+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97462+ gap_end -= sysctl_heap_stack_gap;
97463+ else
97464+ gap_end = gap_start;
97465+ }
97466 if (gap_end >= low_limit && gap_end - gap_start >= length)
97467 goto found;
97468
97469@@ -1801,7 +2049,7 @@ found:
97470 return gap_start;
97471 }
97472
97473-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
97474+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
97475 {
97476 struct mm_struct *mm = current->mm;
97477 struct vm_area_struct *vma;
97478@@ -1855,6 +2103,24 @@ check_current:
97479 gap_end = vma->vm_start;
97480 if (gap_end < low_limit)
97481 return -ENOMEM;
97482+
97483+ if (gap_end - gap_start > info->threadstack_offset)
97484+ gap_end -= info->threadstack_offset;
97485+ else
97486+ gap_end = gap_start;
97487+
97488+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
97489+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97490+ gap_start += sysctl_heap_stack_gap;
97491+ else
97492+ gap_start = gap_end;
97493+ }
97494+ if (vma->vm_flags & VM_GROWSDOWN) {
97495+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97496+ gap_end -= sysctl_heap_stack_gap;
97497+ else
97498+ gap_end = gap_start;
97499+ }
97500 if (gap_start <= high_limit && gap_end - gap_start >= length)
97501 goto found;
97502
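The check_heap_stack_gap()/skip_heap_stack_gap() helpers and the gap clamping in unmapped_area{,_topdown}() above generalize the old one-page stack guard: a candidate range is usable only if it keeps sysctl_heap_stack_gap bytes (64 KiB by default, per the earlier mm/mmap.c hunk) between itself and any adjoining stack vma, and the rbtree gap search shrinks each candidate gap accordingly. A worked sketch of the main growsdown case:

#include <stdbool.h>

static unsigned long heap_stack_gap = 64 * 1024; /* default from the hunk above */

/* Main case of check_heap_stack_gap(): the next vma above grows down (a
 * stack), so the candidate [addr, addr+len) must keep the full gap below it. */
static bool gap_ok(unsigned long addr, unsigned long len,
                   unsigned long next_vma_start)
{
    if (addr + len > next_vma_start)
        return false;                      /* overlaps the vma outright */
    return heap_stack_gap <= next_vma_start - addr - len;
}

/* gap_ok(0x10000, 0x1000, 0x20000) == false: only 0xf000 (~60 KiB) remains,
 * less than the 64 KiB gap, so the search must skip this hole. */
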
97503@@ -1918,6 +2184,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97504 struct mm_struct *mm = current->mm;
97505 struct vm_area_struct *vma;
97506 struct vm_unmapped_area_info info;
97507+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
97508
97509 if (len > TASK_SIZE - mmap_min_addr)
97510 return -ENOMEM;
97511@@ -1925,11 +2192,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97512 if (flags & MAP_FIXED)
97513 return addr;
97514
97515+#ifdef CONFIG_PAX_RANDMMAP
97516+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97517+#endif
97518+
97519 if (addr) {
97520 addr = PAGE_ALIGN(addr);
97521 vma = find_vma(mm, addr);
97522 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
97523- (!vma || addr + len <= vma->vm_start))
97524+ check_heap_stack_gap(vma, addr, len, offset))
97525 return addr;
97526 }
97527
97528@@ -1938,6 +2209,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97529 info.low_limit = mm->mmap_base;
97530 info.high_limit = TASK_SIZE;
97531 info.align_mask = 0;
97532+ info.threadstack_offset = offset;
97533 return vm_unmapped_area(&info);
97534 }
97535 #endif
97536@@ -1956,6 +2228,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97537 struct mm_struct *mm = current->mm;
97538 unsigned long addr = addr0;
97539 struct vm_unmapped_area_info info;
97540+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
97541
97542 /* requested length too big for entire address space */
97543 if (len > TASK_SIZE - mmap_min_addr)
97544@@ -1964,12 +2237,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97545 if (flags & MAP_FIXED)
97546 return addr;
97547
97548+#ifdef CONFIG_PAX_RANDMMAP
97549+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97550+#endif
97551+
97552 /* requesting a specific address */
97553 if (addr) {
97554 addr = PAGE_ALIGN(addr);
97555 vma = find_vma(mm, addr);
97556 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
97557- (!vma || addr + len <= vma->vm_start))
97558+ check_heap_stack_gap(vma, addr, len, offset))
97559 return addr;
97560 }
97561
97562@@ -1978,6 +2255,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97563 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
97564 info.high_limit = mm->mmap_base;
97565 info.align_mask = 0;
97566+ info.threadstack_offset = offset;
97567 addr = vm_unmapped_area(&info);
97568
97569 /*
97570@@ -1990,6 +2268,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97571 VM_BUG_ON(addr != -ENOMEM);
97572 info.flags = 0;
97573 info.low_limit = TASK_UNMAPPED_BASE;
97574+
97575+#ifdef CONFIG_PAX_RANDMMAP
97576+ if (mm->pax_flags & MF_PAX_RANDMMAP)
97577+ info.low_limit += mm->delta_mmap;
97578+#endif
97579+
97580 info.high_limit = TASK_SIZE;
97581 addr = vm_unmapped_area(&info);
97582 }
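
When the first top-down search fails and the allocator retries bottom-up from TASK_UNMAPPED_BASE, PAX_RANDMMAP raises the retry floor by mm->delta_mmap so the fallback keeps the randomized base chosen at exec time. delta_mmap is populated elsewhere in the patch; the sketch below only illustrates the usual shape of such a delta (the bit width, RNG, and pick_delta_mmap name are assumptions, not taken from this hunk):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT      12
#define DELTA_MMAP_BITS 16   /* assumed entropy width; arch-specific in PaX */

/* Page-aligned random offset added to the mmap base (and to low_limit on
 * the fallback path), so retries don't silently drop the randomization. */
static unsigned long pick_delta_mmap(void)
{
    return ((unsigned long)rand() & ((1UL << DELTA_MMAP_BITS) - 1)) << PAGE_SHIFT;
}

int main(void)
{
    srand((unsigned)time(NULL));
    unsigned long task_unmapped_base = 0x40000000UL; /* illustrative */
    unsigned long delta = pick_delta_mmap();
    printf("low_limit = %#lx (+%#lx)\n", task_unmapped_base + delta, delta);
    return 0;
}
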
97583@@ -2090,6 +2374,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
97584 return vma;
97585 }
97586
97587+#ifdef CONFIG_PAX_SEGMEXEC
97588+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
97589+{
97590+ struct vm_area_struct *vma_m;
97591+
97592+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
97593+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
97594+ BUG_ON(vma->vm_mirror);
97595+ return NULL;
97596+ }
97597+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
97598+ vma_m = vma->vm_mirror;
97599+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
97600+ BUG_ON(vma->vm_file != vma_m->vm_file);
97601+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
97602+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
97603+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
97604+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
97605+ return vma_m;
97606+}
97607+#endif
97608+
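
pax_find_mirror_vma() asserts the SEGMEXEC layout invariant: every executable vma lives entirely below SEGMEXEC_TASK_SIZE and has a mirror vma at exactly +SEGMEXEC_TASK_SIZE with the same file, length, and pgoff, differing only in write/accounting/locking flags. A sketch of the address relationship only (mirror_of is hypothetical and the split constant is illustrative; on i386 PaX halves the 3GiB user address space):

#include <assert.h>
#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL  /* 1.5GiB split, illustrative */

/* Translate an address in the data segment to its executable mirror. */
static unsigned long mirror_of(unsigned long addr)
{
    assert(addr < SEGMEXEC_TASK_SIZE);    /* original must sit below the split */
    return addr + SEGMEXEC_TASK_SIZE;     /* mirror sits in the upper half */
}

int main(void)
{
    unsigned long text = 0x08048000UL;    /* classic i386 ELF text base */
    printf("vma %#lx mirrored at %#lx\n", text, mirror_of(text));
    return 0;
}
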
97609 /*
97610 * Verify that the stack growth is acceptable and
97611 * update accounting. This is shared with both the
97612@@ -2107,8 +2413,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97613
97614 /* Stack limit test */
97615 actual_size = size;
97616- if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
97617- actual_size -= PAGE_SIZE;
97618+ gr_learn_resource(current, RLIMIT_STACK, actual_size, 1);
97619 if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
97620 return -ENOMEM;
97621
97622@@ -2119,6 +2424,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97623 locked = mm->locked_vm + grow;
97624 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
97625 limit >>= PAGE_SHIFT;
97626+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97627 if (locked > limit && !capable(CAP_IPC_LOCK))
97628 return -ENOMEM;
97629 }
97630@@ -2148,37 +2454,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97631 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
97632 * vma is the last one with address > vma->vm_end. Have to extend vma.
97633 */
97634+#ifndef CONFIG_IA64
97635+static
97636+#endif
97637 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97638 {
97639 int error;
97640+ bool locknext;
97641
97642 if (!(vma->vm_flags & VM_GROWSUP))
97643 return -EFAULT;
97644
97645+ /* Also guard against wrapping around to address 0. */
97646+ if (address < PAGE_ALIGN(address+1))
97647+ address = PAGE_ALIGN(address+1);
97648+ else
97649+ return -ENOMEM;
97650+
97651 /*
97652 * We must make sure the anon_vma is allocated
97653 * so that the anon_vma locking is not a noop.
97654 */
97655 if (unlikely(anon_vma_prepare(vma)))
97656 return -ENOMEM;
97657+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
97658+ if (locknext && anon_vma_prepare(vma->vm_next))
97659+ return -ENOMEM;
97660 vma_lock_anon_vma(vma);
97661+ if (locknext)
97662+ vma_lock_anon_vma(vma->vm_next);
97663
97664 /*
97665 * vma->vm_start/vm_end cannot change under us because the caller
97666 * is required to hold the mmap_sem in read mode. We need the
97667- * anon_vma lock to serialize against concurrent expand_stacks.
97668- * Also guard against wrapping around to address 0.
97669+ * anon_vma locks to serialize against concurrent expand_stacks
97670+ * and expand_upwards.
97671 */
97672- if (address < PAGE_ALIGN(address+4))
97673- address = PAGE_ALIGN(address+4);
97674- else {
97675- vma_unlock_anon_vma(vma);
97676- return -ENOMEM;
97677- }
97678 error = 0;
97679
97680 /* Somebody else might have raced and expanded it already */
97681- if (address > vma->vm_end) {
97682+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
97683+ error = -ENOMEM;
97684+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
97685 unsigned long size, grow;
97686
97687 size = address - vma->vm_start;
97688@@ -2213,6 +2530,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97689 }
97690 }
97691 }
97692+ if (locknext)
97693+ vma_unlock_anon_vma(vma->vm_next);
97694 vma_unlock_anon_vma(vma);
97695 khugepaged_enter_vma_merge(vma, vma->vm_flags);
97696 validate_mm(vma->vm_mm);
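
The relocated wraparound guard in expand_upwards() depends on unsigned overflow: when address sits in the last page of the address space, PAGE_ALIGN(address+1) wraps to 0, the comparison fails, and the function bails out with -ENOMEM before taking any anon_vma lock. A userspace demonstration, assuming a 4KiB page:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
    unsigned long ok  = 0x7ffffffff000UL;   /* normal case */
    unsigned long top = ~0UL;               /* last byte of the address space */

    /* Normal case: the aligned value is strictly greater, growth proceeds. */
    printf("%#lx -> %#lx (%s)\n", ok, PAGE_ALIGN(ok + 1),
           ok < PAGE_ALIGN(ok + 1) ? "grow" : "-ENOMEM");

    /* Wraparound: PAGE_ALIGN(top + 1) overflows to 0, which is < top,
     * so the guard rejects the expansion instead of wrapping to NULL. */
    printf("%#lx -> %#lx (%s)\n", top, PAGE_ALIGN(top + 1),
           top < PAGE_ALIGN(top + 1) ? "grow" : "-ENOMEM");
    return 0;
}
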
97697@@ -2227,6 +2546,8 @@ int expand_downwards(struct vm_area_struct *vma,
97698 unsigned long address)
97699 {
97700 int error;
97701+ bool lockprev = false;
97702+ struct vm_area_struct *prev;
97703
97704 /*
97705 * We must make sure the anon_vma is allocated
97706@@ -2240,6 +2561,15 @@ int expand_downwards(struct vm_area_struct *vma,
97707 if (error)
97708 return error;
97709
97710+ prev = vma->vm_prev;
97711+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
97712+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
97713+#endif
97714+ if (lockprev && anon_vma_prepare(prev))
97715+ return -ENOMEM;
97716+ if (lockprev)
97717+ vma_lock_anon_vma(prev);
97718+
97719 vma_lock_anon_vma(vma);
97720
97721 /*
97722@@ -2249,9 +2579,17 @@ int expand_downwards(struct vm_area_struct *vma,
97723 */
97724
97725 /* Somebody else might have raced and expanded it already */
97726- if (address < vma->vm_start) {
97727+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
97728+ error = -ENOMEM;
97729+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
97730 unsigned long size, grow;
97731
97732+#ifdef CONFIG_PAX_SEGMEXEC
97733+ struct vm_area_struct *vma_m;
97734+
97735+ vma_m = pax_find_mirror_vma(vma);
97736+#endif
97737+
97738 size = vma->vm_end - address;
97739 grow = (vma->vm_start - address) >> PAGE_SHIFT;
97740
97741@@ -2276,13 +2614,27 @@ int expand_downwards(struct vm_area_struct *vma,
97742 vma->vm_pgoff -= grow;
97743 anon_vma_interval_tree_post_update_vma(vma);
97744 vma_gap_update(vma);
97745+
97746+#ifdef CONFIG_PAX_SEGMEXEC
97747+ if (vma_m) {
97748+ anon_vma_interval_tree_pre_update_vma(vma_m);
97749+ vma_m->vm_start -= grow << PAGE_SHIFT;
97750+ vma_m->vm_pgoff -= grow;
97751+ anon_vma_interval_tree_post_update_vma(vma_m);
97752+ vma_gap_update(vma_m);
97753+ }
97754+#endif
97755+
97756 spin_unlock(&vma->vm_mm->page_table_lock);
97757
97758+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
97759 perf_event_mmap(vma);
97760 }
97761 }
97762 }
97763 vma_unlock_anon_vma(vma);
97764+ if (lockprev)
97765+ vma_unlock_anon_vma(prev);
97766 khugepaged_enter_vma_merge(vma, vma->vm_flags);
97767 validate_mm(vma->vm_mm);
97768 return error;
97769@@ -2382,6 +2734,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
97770 do {
97771 long nrpages = vma_pages(vma);
97772
97773+#ifdef CONFIG_PAX_SEGMEXEC
97774+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
97775+ vma = remove_vma(vma);
97776+ continue;
97777+ }
97778+#endif
97779+
97780 if (vma->vm_flags & VM_ACCOUNT)
97781 nr_accounted += nrpages;
97782 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
97783@@ -2426,6 +2785,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
97784 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
97785 vma->vm_prev = NULL;
97786 do {
97787+
97788+#ifdef CONFIG_PAX_SEGMEXEC
97789+ if (vma->vm_mirror) {
97790+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
97791+ vma->vm_mirror->vm_mirror = NULL;
97792+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
97793+ vma->vm_mirror = NULL;
97794+ }
97795+#endif
97796+
97797 vma_rb_erase(vma, &mm->mm_rb);
97798 mm->map_count--;
97799 tail_vma = vma;
97800@@ -2453,14 +2822,33 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97801 struct vm_area_struct *new;
97802 int err = -ENOMEM;
97803
97804+#ifdef CONFIG_PAX_SEGMEXEC
97805+ struct vm_area_struct *vma_m, *new_m = NULL;
97806+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
97807+#endif
97808+
97809 if (is_vm_hugetlb_page(vma) && (addr &
97810 ~(huge_page_mask(hstate_vma(vma)))))
97811 return -EINVAL;
97812
97813+#ifdef CONFIG_PAX_SEGMEXEC
97814+ vma_m = pax_find_mirror_vma(vma);
97815+#endif
97816+
97817 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97818 if (!new)
97819 goto out_err;
97820
97821+#ifdef CONFIG_PAX_SEGMEXEC
97822+ if (vma_m) {
97823+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97824+ if (!new_m) {
97825+ kmem_cache_free(vm_area_cachep, new);
97826+ goto out_err;
97827+ }
97828+ }
97829+#endif
97830+
97831 /* most fields are the same, copy all, and then fixup */
97832 *new = *vma;
97833
97834@@ -2473,6 +2861,22 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97835 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
97836 }
97837
97838+#ifdef CONFIG_PAX_SEGMEXEC
97839+ if (vma_m) {
97840+ *new_m = *vma_m;
97841+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
97842+ new_m->vm_mirror = new;
97843+ new->vm_mirror = new_m;
97844+
97845+ if (new_below)
97846+ new_m->vm_end = addr_m;
97847+ else {
97848+ new_m->vm_start = addr_m;
97849+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
97850+ }
97851+ }
97852+#endif
97853+
97854 err = vma_dup_policy(vma, new);
97855 if (err)
97856 goto out_free_vma;
97857@@ -2493,6 +2897,38 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97858 else
97859 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
97860
97861+#ifdef CONFIG_PAX_SEGMEXEC
97862+ if (!err && vma_m) {
97863+ struct mempolicy *pol = vma_policy(new);
97864+
97865+ if (anon_vma_clone(new_m, vma_m))
97866+ goto out_free_mpol;
97867+
97868+ mpol_get(pol);
97869+ set_vma_policy(new_m, pol);
97870+
97871+ if (new_m->vm_file)
97872+ get_file(new_m->vm_file);
97873+
97874+ if (new_m->vm_ops && new_m->vm_ops->open)
97875+ new_m->vm_ops->open(new_m);
97876+
97877+ if (new_below)
97878+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
97879+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
97880+ else
97881+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
97882+
97883+ if (err) {
97884+ if (new_m->vm_ops && new_m->vm_ops->close)
97885+ new_m->vm_ops->close(new_m);
97886+ if (new_m->vm_file)
97887+ fput(new_m->vm_file);
97888+ mpol_put(pol);
97889+ }
97890+ }
97891+#endif
97892+
97893 /* Success. */
97894 if (!err)
97895 return 0;
97896@@ -2502,10 +2938,18 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97897 new->vm_ops->close(new);
97898 if (new->vm_file)
97899 fput(new->vm_file);
97900- unlink_anon_vmas(new);
97901 out_free_mpol:
97902 mpol_put(vma_policy(new));
97903 out_free_vma:
97904+
97905+#ifdef CONFIG_PAX_SEGMEXEC
97906+ if (new_m) {
97907+ unlink_anon_vmas(new_m);
97908+ kmem_cache_free(vm_area_cachep, new_m);
97909+ }
97910+#endif
97911+
97912+ unlink_anon_vmas(new);
97913 kmem_cache_free(vm_area_cachep, new);
97914 out_err:
97915 return err;
97916@@ -2518,6 +2962,15 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97917 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97918 unsigned long addr, int new_below)
97919 {
97920+
97921+#ifdef CONFIG_PAX_SEGMEXEC
97922+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
97923+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
97924+ if (mm->map_count >= sysctl_max_map_count-1)
97925+ return -ENOMEM;
97926+ } else
97927+#endif
97928+
97929 if (mm->map_count >= sysctl_max_map_count)
97930 return -ENOMEM;
97931
97932@@ -2529,11 +2982,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97933 * work. This now handles partial unmappings.
97934 * Jeremy Fitzhardinge <jeremy@goop.org>
97935 */
97936+#ifdef CONFIG_PAX_SEGMEXEC
97937 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97938 {
97939+ int ret = __do_munmap(mm, start, len);
97940+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
97941+ return ret;
97942+
97943+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
97944+}
97945+
97946+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97947+#else
97948+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97949+#endif
97950+{
97951 unsigned long end;
97952 struct vm_area_struct *vma, *prev, *last;
97953
97954+ /*
97955+ * mm->mmap_sem is required to protect against another thread
97956+ * changing the mappings in case we sleep.
97957+ */
97958+ verify_mm_writelocked(mm);
97959+
97960 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
97961 return -EINVAL;
97962
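
Under SEGMEXEC, do_munmap() becomes a thin wrapper: it unmaps the requested range via __do_munmap() and, if that succeeds, repeats the call shifted by SEGMEXEC_TASK_SIZE so the executable mirror vanishes together with its data mapping (vm_munmap() above already rejects ranges that cross the split). A condensed sketch of the two-call pattern (unmap_range stands in for __do_munmap; the constant is illustrative):

#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL  /* illustrative split */

static int unmap_range(unsigned long start, unsigned long len)
{
    printf("unmap [%#lx, %#lx)\n", start, start + len);
    return 0;
}

/* Mirror-aware unmap: drop the data-segment range, then the mirror. */
static int do_munmap_segmexec(unsigned long start, unsigned long len)
{
    int ret = unmap_range(start, len);
    if (ret)
        return ret;            /* first unmap failed: report it */
    return unmap_range(start + SEGMEXEC_TASK_SIZE, len);
}

int main(void)
{
    return do_munmap_segmexec(0x08048000UL, 0x2000UL);
}
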
97963@@ -2611,6 +3083,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97964 /* Fix up all other VM information */
97965 remove_vma_list(mm, vma);
97966
97967+ track_exec_limit(mm, start, end, 0UL);
97968+
97969 return 0;
97970 }
97971
97972@@ -2619,6 +3093,13 @@ int vm_munmap(unsigned long start, size_t len)
97973 int ret;
97974 struct mm_struct *mm = current->mm;
97975
97976+
97977+#ifdef CONFIG_PAX_SEGMEXEC
97978+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
97979+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
97980+ return -EINVAL;
97981+#endif
97982+
97983 down_write(&mm->mmap_sem);
97984 ret = do_munmap(mm, start, len);
97985 up_write(&mm->mmap_sem);
97986@@ -2632,16 +3113,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
97987 return vm_munmap(addr, len);
97988 }
97989
97990-static inline void verify_mm_writelocked(struct mm_struct *mm)
97991-{
97992-#ifdef CONFIG_DEBUG_VM
97993- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
97994- WARN_ON(1);
97995- up_read(&mm->mmap_sem);
97996- }
97997-#endif
97998-}
97999-
98000 /*
98001 * this is really a simplified "do_mmap". it only handles
98002 * anonymous maps. eventually we may be able to do some
98003@@ -2655,6 +3126,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98004 struct rb_node **rb_link, *rb_parent;
98005 pgoff_t pgoff = addr >> PAGE_SHIFT;
98006 int error;
98007+ unsigned long charged;
98008
98009 len = PAGE_ALIGN(len);
98010 if (!len)
98011@@ -2662,10 +3134,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98012
98013 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
98014
98015+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
98016+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
98017+ flags &= ~VM_EXEC;
98018+
98019+#ifdef CONFIG_PAX_MPROTECT
98020+ if (mm->pax_flags & MF_PAX_MPROTECT)
98021+ flags &= ~VM_MAYEXEC;
98022+#endif
98023+
98024+ }
98025+#endif
98026+
98027 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
98028 if (error & ~PAGE_MASK)
98029 return error;
98030
98031+ charged = len >> PAGE_SHIFT;
98032+
98033 error = mlock_future_check(mm, mm->def_flags, len);
98034 if (error)
98035 return error;
98036@@ -2679,21 +3165,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98037 /*
98038 * Clear old maps. this also does some error checking for us
98039 */
98040- munmap_back:
98041 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
98042 if (do_munmap(mm, addr, len))
98043 return -ENOMEM;
98044- goto munmap_back;
98045+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
98046 }
98047
98048 /* Check against address space limits *after* clearing old maps... */
98049- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
98050+ if (!may_expand_vm(mm, charged))
98051 return -ENOMEM;
98052
98053 if (mm->map_count > sysctl_max_map_count)
98054 return -ENOMEM;
98055
98056- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
98057+ if (security_vm_enough_memory_mm(mm, charged))
98058 return -ENOMEM;
98059
98060 /* Can we just expand an old private anonymous mapping? */
98061@@ -2707,7 +3192,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98062 */
98063 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98064 if (!vma) {
98065- vm_unacct_memory(len >> PAGE_SHIFT);
98066+ vm_unacct_memory(charged);
98067 return -ENOMEM;
98068 }
98069
98070@@ -2721,10 +3206,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98071 vma_link(mm, vma, prev, rb_link, rb_parent);
98072 out:
98073 perf_event_mmap(vma);
98074- mm->total_vm += len >> PAGE_SHIFT;
98075+ mm->total_vm += charged;
98076 if (flags & VM_LOCKED)
98077- mm->locked_vm += (len >> PAGE_SHIFT);
98078+ mm->locked_vm += charged;
98079 vma->vm_flags |= VM_SOFTDIRTY;
98080+ track_exec_limit(mm, addr, addr + len, flags);
98081 return addr;
98082 }
98083
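
Under PAGEEXEC or SEGMEXEC, the anonymous brk mapping above is created non-executable, and with MPROTECT active it also loses VM_MAYEXEC, so a later mprotect(PROT_EXEC) on the heap is refused rather than quietly re-enabling execution. A condensed sketch of the flag surgery (harden_brk_flags is hypothetical; the bit values mirror the kernel's vm_flags definitions):

#include <stdio.h>

#define VM_EXEC     0x4UL
#define VM_MAYEXEC  0x40UL     /* same bit values as the kernel's vm_flags */

static unsigned long harden_brk_flags(unsigned long flags,
                                      int pageexec_or_segmexec, int mprotect)
{
    if (pageexec_or_segmexec) {
        flags &= ~VM_EXEC;                 /* heap starts non-executable */
        if (mprotect)
            flags &= ~VM_MAYEXEC;          /* ...and can never become so */
    }
    return flags;
}

int main(void)
{
    unsigned long f = VM_EXEC | VM_MAYEXEC;
    printf("flags: %#lx -> %#lx\n", f, harden_brk_flags(f, 1, 1));
    return 0;
}
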
98084@@ -2786,6 +3272,7 @@ void exit_mmap(struct mm_struct *mm)
98085 while (vma) {
98086 if (vma->vm_flags & VM_ACCOUNT)
98087 nr_accounted += vma_pages(vma);
98088+ vma->vm_mirror = NULL;
98089 vma = remove_vma(vma);
98090 }
98091 vm_unacct_memory(nr_accounted);
98092@@ -2803,6 +3290,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
98093 struct vm_area_struct *prev;
98094 struct rb_node **rb_link, *rb_parent;
98095
98096+#ifdef CONFIG_PAX_SEGMEXEC
98097+ struct vm_area_struct *vma_m = NULL;
98098+#endif
98099+
98100+ if (security_mmap_addr(vma->vm_start))
98101+ return -EPERM;
98102+
98103 /*
98104 * The vm_pgoff of a purely anonymous vma should be irrelevant
98105 * until its first write fault, when page's anon_vma and index
98106@@ -2826,7 +3320,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
98107 security_vm_enough_memory_mm(mm, vma_pages(vma)))
98108 return -ENOMEM;
98109
98110+#ifdef CONFIG_PAX_SEGMEXEC
98111+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
98112+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98113+ if (!vma_m)
98114+ return -ENOMEM;
98115+ }
98116+#endif
98117+
98118 vma_link(mm, vma, prev, rb_link, rb_parent);
98119+
98120+#ifdef CONFIG_PAX_SEGMEXEC
98121+ if (vma_m)
98122+ BUG_ON(pax_mirror_vma(vma_m, vma));
98123+#endif
98124+
98125 return 0;
98126 }
98127
98128@@ -2845,6 +3353,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98129 struct rb_node **rb_link, *rb_parent;
98130 bool faulted_in_anon_vma = true;
98131
98132+ BUG_ON(vma->vm_mirror);
98133+
98134 /*
98135 * If anonymous vma has not yet been faulted, update new pgoff
98136 * to match new location, to increase its chance of merging.
98137@@ -2909,6 +3419,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98138 return NULL;
98139 }
98140
98141+#ifdef CONFIG_PAX_SEGMEXEC
98142+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
98143+{
98144+ struct vm_area_struct *prev_m;
98145+ struct rb_node **rb_link_m, *rb_parent_m;
98146+ struct mempolicy *pol_m;
98147+
98148+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
98149+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
98150+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
98151+ *vma_m = *vma;
98152+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
98153+ if (anon_vma_clone(vma_m, vma))
98154+ return -ENOMEM;
98155+ pol_m = vma_policy(vma_m);
98156+ mpol_get(pol_m);
98157+ set_vma_policy(vma_m, pol_m);
98158+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
98159+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
98160+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
98161+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
98162+ if (vma_m->vm_file)
98163+ get_file(vma_m->vm_file);
98164+ if (vma_m->vm_ops && vma_m->vm_ops->open)
98165+ vma_m->vm_ops->open(vma_m);
98166+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
98167+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
98168+ vma_m->vm_mirror = vma;
98169+ vma->vm_mirror = vma_m;
98170+ return 0;
98171+}
98172+#endif
98173+
98174 /*
98175 * Return true if the calling process may expand its vm space by the passed
98176 * number of pages
98177@@ -2920,6 +3463,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
98178
98179 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
98180
98181+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
98182 if (cur + npages > lim)
98183 return 0;
98184 return 1;
98185@@ -3002,6 +3546,22 @@ static struct vm_area_struct *__install_special_mapping(
98186 vma->vm_start = addr;
98187 vma->vm_end = addr + len;
98188
98189+#ifdef CONFIG_PAX_MPROTECT
98190+ if (mm->pax_flags & MF_PAX_MPROTECT) {
98191+#ifndef CONFIG_PAX_MPROTECT_COMPAT
98192+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
98193+ return ERR_PTR(-EPERM);
98194+ if (!(vm_flags & VM_EXEC))
98195+ vm_flags &= ~VM_MAYEXEC;
98196+#else
98197+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
98198+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
98199+#endif
98200+ else
98201+ vm_flags &= ~VM_MAYWRITE;
98202+ }
98203+#endif
98204+
98205 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
98206 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
98207
98208diff --git a/mm/mprotect.c b/mm/mprotect.c
98209index ace9345..63320dc 100644
98210--- a/mm/mprotect.c
98211+++ b/mm/mprotect.c
98212@@ -24,10 +24,18 @@
98213 #include <linux/migrate.h>
98214 #include <linux/perf_event.h>
98215 #include <linux/ksm.h>
98216+#include <linux/sched/sysctl.h>
98217+
98218+#ifdef CONFIG_PAX_MPROTECT
98219+#include <linux/elf.h>
98220+#include <linux/binfmts.h>
98221+#endif
98222+
98223 #include <asm/uaccess.h>
98224 #include <asm/pgtable.h>
98225 #include <asm/cacheflush.h>
98226 #include <asm/tlbflush.h>
98227+#include <asm/mmu_context.h>
98228
98229 /*
98230 * For a prot_numa update we only hold mmap_sem for read so there is a
98231@@ -251,6 +259,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
98232 return pages;
98233 }
98234
98235+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98236+/* called while holding the mmap semaphore for writing, except during stack expansion */
98237+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
98238+{
98239+ unsigned long oldlimit, newlimit = 0UL;
98240+
98241+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
98242+ return;
98243+
98244+ spin_lock(&mm->page_table_lock);
98245+ oldlimit = mm->context.user_cs_limit;
98246+ if ((prot & VM_EXEC) && oldlimit < end)
98247+ /* USER_CS limit moved up */
98248+ newlimit = end;
98249+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
98250+ /* USER_CS limit moved down */
98251+ newlimit = start;
98252+
98253+ if (newlimit) {
98254+ mm->context.user_cs_limit = newlimit;
98255+
98256+#ifdef CONFIG_SMP
98257+ wmb();
98258+ cpus_clear(mm->context.cpu_user_cs_mask);
98259+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
98260+#endif
98261+
98262+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
98263+ }
98264+ spin_unlock(&mm->page_table_lock);
98265+ if (newlimit == end) {
98266+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
98267+
98268+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
98269+ if (is_vm_hugetlb_page(vma))
98270+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
98271+ else
98272+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
98273+ }
98274+}
98275+#endif
98276+
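
track_exec_limit() is the heart of PAGEEXEC's segment-based NX emulation on CPUs without a hardware NX bit: user_cs_limit tracks the end of the highest executable mapping, is raised when an executable range appears above it, lowered when the range holding it loses PROT_EXEC, and the per-CPU user code segment is then reloaded. The limit computation in isolation (new_cs_limit is hypothetical):

#include <stdio.h>

/* Recompute the user code-segment limit after a protection change on
 * [start, end).  Mirrors the newlimit selection in track_exec_limit(). */
static unsigned long new_cs_limit(unsigned long oldlimit,
                                  unsigned long start, unsigned long end,
                                  int becomes_exec)
{
    if (becomes_exec && oldlimit < end)
        return end;        /* exec range above the limit: raise it */
    if (!becomes_exec && start < oldlimit && oldlimit <= end)
        return start;      /* the limit fell inside a now-NX range: lower it */
    return oldlimit;       /* no change needed */
}

int main(void)
{
    unsigned long lim = 0x08050000UL;
    lim = new_cs_limit(lim, 0x40000000UL, 0x40010000UL, 1); /* new PROT_EXEC */
    printf("limit raised to %#lx\n", lim);
    lim = new_cs_limit(lim, 0x40000000UL, 0x40010000UL, 0); /* mprotect to NX */
    printf("limit lowered to %#lx\n", lim);
    return 0;
}
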
98277 int
98278 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98279 unsigned long start, unsigned long end, unsigned long newflags)
98280@@ -263,11 +313,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98281 int error;
98282 int dirty_accountable = 0;
98283
98284+#ifdef CONFIG_PAX_SEGMEXEC
98285+ struct vm_area_struct *vma_m = NULL;
98286+ unsigned long start_m, end_m;
98287+
98288+ start_m = start + SEGMEXEC_TASK_SIZE;
98289+ end_m = end + SEGMEXEC_TASK_SIZE;
98290+#endif
98291+
98292 if (newflags == oldflags) {
98293 *pprev = vma;
98294 return 0;
98295 }
98296
98297+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
98298+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
98299+
98300+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
98301+ return -ENOMEM;
98302+
98303+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
98304+ return -ENOMEM;
98305+ }
98306+
98307 /*
98308 * If we make a private mapping writable we increase our commit;
98309 * but (without finer accounting) cannot reduce our commit if we
98310@@ -284,6 +352,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98311 }
98312 }
98313
98314+#ifdef CONFIG_PAX_SEGMEXEC
98315+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
98316+ if (start != vma->vm_start) {
98317+ error = split_vma(mm, vma, start, 1);
98318+ if (error)
98319+ goto fail;
98320+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
98321+ *pprev = (*pprev)->vm_next;
98322+ }
98323+
98324+ if (end != vma->vm_end) {
98325+ error = split_vma(mm, vma, end, 0);
98326+ if (error)
98327+ goto fail;
98328+ }
98329+
98330+ if (pax_find_mirror_vma(vma)) {
98331+ error = __do_munmap(mm, start_m, end_m - start_m);
98332+ if (error)
98333+ goto fail;
98334+ } else {
98335+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98336+ if (!vma_m) {
98337+ error = -ENOMEM;
98338+ goto fail;
98339+ }
98340+ vma->vm_flags = newflags;
98341+ error = pax_mirror_vma(vma_m, vma);
98342+ if (error) {
98343+ vma->vm_flags = oldflags;
98344+ goto fail;
98345+ }
98346+ }
98347+ }
98348+#endif
98349+
98350 /*
98351 * First try to merge with previous and/or next vma.
98352 */
98353@@ -314,7 +418,19 @@ success:
98354 * vm_flags and vm_page_prot are protected by the mmap_sem
98355 * held in write mode.
98356 */
98357+
98358+#ifdef CONFIG_PAX_SEGMEXEC
98359+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
98360+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
98361+#endif
98362+
98363 vma->vm_flags = newflags;
98364+
98365+#ifdef CONFIG_PAX_MPROTECT
98366+ if (mm->binfmt && mm->binfmt->handle_mprotect)
98367+ mm->binfmt->handle_mprotect(vma, newflags);
98368+#endif
98369+
98370 dirty_accountable = vma_wants_writenotify(vma);
98371 vma_set_page_prot(vma);
98372
98373@@ -350,6 +466,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98374 end = start + len;
98375 if (end <= start)
98376 return -ENOMEM;
98377+
98378+#ifdef CONFIG_PAX_SEGMEXEC
98379+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
98380+ if (end > SEGMEXEC_TASK_SIZE)
98381+ return -EINVAL;
98382+ } else
98383+#endif
98384+
98385+ if (end > TASK_SIZE)
98386+ return -EINVAL;
98387+
98388 if (!arch_validate_prot(prot))
98389 return -EINVAL;
98390
98391@@ -357,7 +484,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98392 /*
98393 * Does the application expect PROT_READ to imply PROT_EXEC:
98394 */
98395- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
98396+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
98397 prot |= PROT_EXEC;
98398
98399 vm_flags = calc_vm_prot_bits(prot);
98400@@ -389,6 +516,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98401 if (start > vma->vm_start)
98402 prev = vma;
98403
98404+#ifdef CONFIG_PAX_MPROTECT
98405+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
98406+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
98407+#endif
98408+
98409 for (nstart = start ; ; ) {
98410 unsigned long newflags;
98411
98412@@ -399,6 +531,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98413
98414 /* newflags >> 4 shift VM_MAY% in place of VM_% */
98415 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
98416+ if (prot & (PROT_WRITE | PROT_EXEC))
98417+ gr_log_rwxmprotect(vma);
98418+
98419+ error = -EACCES;
98420+ goto out;
98421+ }
98422+
98423+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
98424 error = -EACCES;
98425 goto out;
98426 }
98427@@ -413,6 +553,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98428 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
98429 if (error)
98430 goto out;
98431+
98432+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
98433+
98434 nstart = tmp;
98435
98436 if (nstart < prev->vm_end)
98437diff --git a/mm/mremap.c b/mm/mremap.c
98438index 17fa018..6f7892b 100644
98439--- a/mm/mremap.c
98440+++ b/mm/mremap.c
98441@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
98442 continue;
98443 pte = ptep_get_and_clear(mm, old_addr, old_pte);
98444 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
98445+
98446+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98447+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
98448+ pte = pte_exprotect(pte);
98449+#endif
98450+
98451 pte = move_soft_dirty_pte(pte);
98452 set_pte_at(mm, new_addr, new_pte, pte);
98453 }
98454@@ -346,6 +352,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
98455 if (is_vm_hugetlb_page(vma))
98456 goto Einval;
98457
98458+#ifdef CONFIG_PAX_SEGMEXEC
98459+ if (pax_find_mirror_vma(vma))
98460+ goto Einval;
98461+#endif
98462+
98463 /* We can't remap across vm area boundaries */
98464 if (old_len > vma->vm_end - addr)
98465 goto Efault;
98466@@ -401,20 +412,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
98467 unsigned long ret = -EINVAL;
98468 unsigned long charged = 0;
98469 unsigned long map_flags;
98470+ unsigned long pax_task_size = TASK_SIZE;
98471
98472 if (new_addr & ~PAGE_MASK)
98473 goto out;
98474
98475- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
98476+#ifdef CONFIG_PAX_SEGMEXEC
98477+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98478+ pax_task_size = SEGMEXEC_TASK_SIZE;
98479+#endif
98480+
98481+ pax_task_size -= PAGE_SIZE;
98482+
98483+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
98484 goto out;
98485
98486 /* Check if the location we're moving into overlaps the
98487 * old location at all, and fail if it does.
98488 */
98489- if ((new_addr <= addr) && (new_addr+new_len) > addr)
98490- goto out;
98491-
98492- if ((addr <= new_addr) && (addr+old_len) > new_addr)
98493+ if (addr + old_len > new_addr && new_addr + new_len > addr)
98494 goto out;
98495
98496 ret = do_munmap(mm, new_addr, new_len);
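
The rewritten MREMAP_FIXED check collapses the two one-sided tests into the canonical interval-intersection predicate: [addr, addr+old_len) and [new_addr, new_addr+new_len) overlap iff each starts below the other's end. A quick exhaustive check that the two forms agree for non-empty ranges:

#include <assert.h>
#include <stdio.h>

static int overlaps_new(unsigned long a, unsigned long alen,
                        unsigned long b, unsigned long blen)
{
    return a + alen > b && b + blen > a;   /* single symmetric predicate */
}

static int overlaps_old(unsigned long a, unsigned long alen,
                        unsigned long b, unsigned long blen)
{
    /* the two one-sided checks the patch replaces */
    return (b <= a && b + blen > a) || (a <= b && a + alen > b);
}

int main(void)
{
    /* exhaustively compare both forms on small page-like intervals */
    for (unsigned long a = 0; a < 8; a++)
        for (unsigned long b = 0; b < 8; b++)
            for (unsigned long alen = 1; alen < 4; alen++)
                for (unsigned long blen = 1; blen < 4; blen++)
                    assert(overlaps_new(a, alen, b, blen) ==
                           overlaps_old(a, alen, b, blen));
    puts("both predicates agree");
    return 0;
}

The symmetric form also reads the same whichever range comes first, which is what lets the patch drop the duplicated goto.
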
98497@@ -483,6 +499,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98498 unsigned long ret = -EINVAL;
98499 unsigned long charged = 0;
98500 bool locked = false;
98501+ unsigned long pax_task_size = TASK_SIZE;
98502
98503 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
98504 return ret;
98505@@ -504,6 +521,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98506 if (!new_len)
98507 return ret;
98508
98509+#ifdef CONFIG_PAX_SEGMEXEC
98510+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98511+ pax_task_size = SEGMEXEC_TASK_SIZE;
98512+#endif
98513+
98514+ pax_task_size -= PAGE_SIZE;
98515+
98516+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
98517+ old_len > pax_task_size || addr > pax_task_size-old_len)
98518+ return ret;
98519+
98520 down_write(&current->mm->mmap_sem);
98521
98522 if (flags & MREMAP_FIXED) {
98523@@ -554,6 +582,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98524 new_addr = addr;
98525 }
98526 ret = addr;
98527+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
98528 goto out;
98529 }
98530 }
98531@@ -577,7 +606,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98532 goto out;
98533 }
98534
98535+ map_flags = vma->vm_flags;
98536 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
98537+ if (!(ret & ~PAGE_MASK)) {
98538+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
98539+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
98540+ }
98541 }
98542 out:
98543 if (ret & ~PAGE_MASK)
98544diff --git a/mm/nommu.c b/mm/nommu.c
98545index ae5baae..cbb2ed5 100644
98546--- a/mm/nommu.c
98547+++ b/mm/nommu.c
98548@@ -71,7 +71,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
98549 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
98550 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
98551 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
98552-int heap_stack_gap = 0;
98553
98554 atomic_long_t mmap_pages_allocated;
98555
98556@@ -858,15 +857,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
98557 EXPORT_SYMBOL(find_vma);
98558
98559 /*
98560- * find a VMA
98561- * - we don't extend stack VMAs under NOMMU conditions
98562- */
98563-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
98564-{
98565- return find_vma(mm, addr);
98566-}
98567-
98568-/*
98569 * expand a stack to a given address
98570 * - not supported under NOMMU conditions
98571 */
98572@@ -1560,6 +1550,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98573
98574 /* most fields are the same, copy all, and then fixup */
98575 *new = *vma;
98576+ INIT_LIST_HEAD(&new->anon_vma_chain);
98577 *region = *vma->vm_region;
98578 new->vm_region = region;
98579
98580@@ -1990,8 +1981,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
98581 }
98582 EXPORT_SYMBOL(generic_file_remap_pages);
98583
98584-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98585- unsigned long addr, void *buf, int len, int write)
98586+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98587+ unsigned long addr, void *buf, size_t len, int write)
98588 {
98589 struct vm_area_struct *vma;
98590
98591@@ -2032,8 +2023,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98592 *
98593 * The caller must hold a reference on @mm.
98594 */
98595-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98596- void *buf, int len, int write)
98597+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
98598+ void *buf, size_t len, int write)
98599 {
98600 return __access_remote_vm(NULL, mm, addr, buf, len, write);
98601 }
98602@@ -2042,7 +2033,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98603 * Access another process' address space.
98604 * - source/target buffer must be kernel space
98605 */
98606-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
98607+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
98608 {
98609 struct mm_struct *mm;
98610
98611diff --git a/mm/page-writeback.c b/mm/page-writeback.c
98612index f24d4c9..77820e3 100644
98613--- a/mm/page-writeback.c
98614+++ b/mm/page-writeback.c
98615@@ -664,7 +664,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
98616 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
98617 * - the bdi dirty thresh drops quickly due to change of JBOD workload
98618 */
98619-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
98620+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
98621 unsigned long thresh,
98622 unsigned long bg_thresh,
98623 unsigned long dirty,
98624diff --git a/mm/page_alloc.c b/mm/page_alloc.c
98625index 8bbef06..a8d1989 100644
98626--- a/mm/page_alloc.c
98627+++ b/mm/page_alloc.c
98628@@ -60,6 +60,7 @@
98629 #include <linux/hugetlb.h>
98630 #include <linux/sched/rt.h>
98631 #include <linux/page_owner.h>
98632+#include <linux/random.h>
98633
98634 #include <asm/sections.h>
98635 #include <asm/tlbflush.h>
98636@@ -358,7 +359,7 @@ out:
98637 * This usage means that zero-order pages may not be compound.
98638 */
98639
98640-static void free_compound_page(struct page *page)
98641+void free_compound_page(struct page *page)
98642 {
98643 __free_pages_ok(page, compound_order(page));
98644 }
98645@@ -511,7 +512,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
98646 __mod_zone_freepage_state(zone, (1 << order), migratetype);
98647 }
98648 #else
98649-struct page_ext_operations debug_guardpage_ops = { NULL, };
98650+struct page_ext_operations debug_guardpage_ops = { .need = NULL, .init = NULL };
98651 static inline void set_page_guard(struct zone *zone, struct page *page,
98652 unsigned int order, int migratetype) {}
98653 static inline void clear_page_guard(struct zone *zone, struct page *page,
98654@@ -802,6 +803,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
98655 int i;
98656 int bad = 0;
98657
98658+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98659+ unsigned long index = 1UL << order;
98660+#endif
98661+
98662 VM_BUG_ON_PAGE(PageTail(page), page);
98663 VM_BUG_ON_PAGE(PageHead(page) && compound_order(page) != order, page);
98664
98665@@ -823,6 +828,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
98666 debug_check_no_obj_freed(page_address(page),
98667 PAGE_SIZE << order);
98668 }
98669+
98670+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98671+ for (; index; --index)
98672+ sanitize_highpage(page + index - 1);
98673+#endif
98674+
98675 arch_free_page(page, order);
98676 kernel_map_pages(page, 1 << order, 0);
98677
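
With PAX_MEMORY_SANITIZE, every page of an order-n block is scrubbed on the free path, which is why the later prep_new_page() hunk can skip prep_zero_page() for __GFP_ZERO: freed pages re-enter the allocator already clean. A model of the loop, assuming sanitize_highpage() (defined elsewhere in the patch) amounts to clearing the page:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Model of sanitize-on-free: scrub each page of an order-n block so no
 * stale data survives into the next allocation. */
static void sanitize_block(unsigned char *block, unsigned int order)
{
    unsigned long index = 1UL << order;      /* same loop shape as the hunk */

    for (; index; --index)
        memset(block + (index - 1) * PAGE_SIZE, 0, PAGE_SIZE);
}

int main(void)
{
    static unsigned char block[4 * PAGE_SIZE];
    memset(block, 0xAA, sizeof(block));      /* pretend it held secrets */
    sanitize_block(block, 2);                /* free an order-2 block */
    printf("first byte after free: %#x\n", block[0]);
    return 0;
}
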
98678@@ -846,6 +857,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
98679 local_irq_restore(flags);
98680 }
98681
98682+#ifdef CONFIG_PAX_LATENT_ENTROPY
98683+bool __meminitdata extra_latent_entropy;
98684+
98685+static int __init setup_pax_extra_latent_entropy(char *str)
98686+{
98687+ extra_latent_entropy = true;
98688+ return 0;
98689+}
98690+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
98691+
98692+volatile u64 latent_entropy __latent_entropy;
98693+EXPORT_SYMBOL(latent_entropy);
98694+#endif
98695+
98696 void __init __free_pages_bootmem(struct page *page, unsigned int order)
98697 {
98698 unsigned int nr_pages = 1 << order;
98699@@ -861,6 +886,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
98700 __ClearPageReserved(p);
98701 set_page_count(p, 0);
98702
98703+#ifdef CONFIG_PAX_LATENT_ENTROPY
98704+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
98705+ u64 hash = 0;
98706+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
98707+ const u64 *data = lowmem_page_address(page);
98708+
98709+ for (index = 0; index < end; index++)
98710+ hash ^= hash + data[index];
98711+ latent_entropy ^= hash;
98712+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
98713+ }
98714+#endif
98715+
98716 page_zone(page)->managed_pages += nr_pages;
98717 set_page_refcounted(page);
98718 __free_pages(page, order);
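
The pax_extra_latent_entropy boot path folds the (largely uninitialized) contents of low memory into latent_entropy with a cheap xor-add mix and credits it via add_device_randomness(); the fold is explicitly not cryptographic, merely a way to condense a page into 64 bits. The same fold in isolation (fold_page is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Condense a buffer into 64 bits with the hunk's xor-add fold. */
static uint64_t fold_page(const uint64_t *data, size_t nwords)
{
    uint64_t hash = 0;

    for (size_t i = 0; i < nwords; i++)
        hash ^= hash + data[i];   /* cheap, non-cryptographic mixing */
    return hash;
}

int main(void)
{
    uint64_t page[512] = { 1, 2, 3 };   /* stands in for uninitialized RAM */
    printf("latent entropy word: %#llx\n",
           (unsigned long long)fold_page(page, 512));
    return 0;
}
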
98719@@ -986,8 +1024,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
98720 arch_alloc_page(page, order);
98721 kernel_map_pages(page, 1 << order, 1);
98722
98723+#ifndef CONFIG_PAX_MEMORY_SANITIZE
98724 if (gfp_flags & __GFP_ZERO)
98725 prep_zero_page(page, order, gfp_flags);
98726+#endif
98727
98728 if (order && (gfp_flags & __GFP_COMP))
98729 prep_compound_page(page, order);
98730@@ -1700,7 +1740,7 @@ again:
98731 }
98732
98733 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
98734- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
98735+ if (atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
98736 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
98737 set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
98738
98739@@ -2021,7 +2061,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
98740 do {
98741 mod_zone_page_state(zone, NR_ALLOC_BATCH,
98742 high_wmark_pages(zone) - low_wmark_pages(zone) -
98743- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
98744+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
98745 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
98746 } while (zone++ != preferred_zone);
98747 }
98748@@ -5781,7 +5821,7 @@ static void __setup_per_zone_wmarks(void)
98749
98750 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
98751 high_wmark_pages(zone) - low_wmark_pages(zone) -
98752- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
98753+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
98754
98755 setup_zone_migrate_reserve(zone);
98756 spin_unlock_irqrestore(&zone->lock, flags);
98757diff --git a/mm/percpu.c b/mm/percpu.c
98758index d39e2f4..de5f4b4 100644
98759--- a/mm/percpu.c
98760+++ b/mm/percpu.c
98761@@ -131,7 +131,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
98762 static unsigned int pcpu_high_unit_cpu __read_mostly;
98763
98764 /* the address of the first chunk which starts with the kernel static area */
98765-void *pcpu_base_addr __read_mostly;
98766+void *pcpu_base_addr __read_only;
98767 EXPORT_SYMBOL_GPL(pcpu_base_addr);
98768
98769 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
98770diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
98771index 5077afc..846c9ef 100644
98772--- a/mm/process_vm_access.c
98773+++ b/mm/process_vm_access.c
98774@@ -13,6 +13,7 @@
98775 #include <linux/uio.h>
98776 #include <linux/sched.h>
98777 #include <linux/highmem.h>
98778+#include <linux/security.h>
98779 #include <linux/ptrace.h>
98780 #include <linux/slab.h>
98781 #include <linux/syscalls.h>
98782@@ -157,19 +158,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
98783 ssize_t iov_len;
98784 size_t total_len = iov_iter_count(iter);
98785
98786+ return -ENOSYS; // PaX: until properly audited
98787+
98788 /*
98789 * Work out how many pages of struct pages we're going to need
98790 * when eventually calling get_user_pages
98791 */
98792 for (i = 0; i < riovcnt; i++) {
98793 iov_len = rvec[i].iov_len;
98794- if (iov_len > 0) {
98795- nr_pages_iov = ((unsigned long)rvec[i].iov_base
98796- + iov_len)
98797- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
98798- / PAGE_SIZE + 1;
98799- nr_pages = max(nr_pages, nr_pages_iov);
98800- }
98801+ if (iov_len <= 0)
98802+ continue;
98803+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
98804+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
98805+ nr_pages = max(nr_pages, nr_pages_iov);
98806 }
98807
98808 if (nr_pages == 0)
98809@@ -197,6 +198,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
98810 goto free_proc_pages;
98811 }
98812
98813+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
98814+ rc = -EPERM;
98815+ goto put_task_struct;
98816+ }
98817+
98818 mm = mm_access(task, PTRACE_MODE_ATTACH);
98819 if (!mm || IS_ERR(mm)) {
98820 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
98821diff --git a/mm/rmap.c b/mm/rmap.c
98822index ecb444a..0ff9eb3 100644
98823--- a/mm/rmap.c
98824+++ b/mm/rmap.c
98825@@ -166,6 +166,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98826 struct anon_vma *anon_vma = vma->anon_vma;
98827 struct anon_vma_chain *avc;
98828
98829+#ifdef CONFIG_PAX_SEGMEXEC
98830+ struct anon_vma_chain *avc_m = NULL;
98831+#endif
98832+
98833 might_sleep();
98834 if (unlikely(!anon_vma)) {
98835 struct mm_struct *mm = vma->vm_mm;
98836@@ -175,6 +179,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98837 if (!avc)
98838 goto out_enomem;
98839
98840+#ifdef CONFIG_PAX_SEGMEXEC
98841+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
98842+ if (!avc_m)
98843+ goto out_enomem_free_avc;
98844+#endif
98845+
98846 anon_vma = find_mergeable_anon_vma(vma);
98847 allocated = NULL;
98848 if (!anon_vma) {
98849@@ -188,6 +198,19 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98850 /* page_table_lock to protect against threads */
98851 spin_lock(&mm->page_table_lock);
98852 if (likely(!vma->anon_vma)) {
98853+
98854+#ifdef CONFIG_PAX_SEGMEXEC
98855+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
98856+
98857+ if (vma_m) {
98858+ BUG_ON(vma_m->anon_vma);
98859+ vma_m->anon_vma = anon_vma;
98860+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
98861+ anon_vma->degree++;
98862+ avc_m = NULL;
98863+ }
98864+#endif
98865+
98866 vma->anon_vma = anon_vma;
98867 anon_vma_chain_link(vma, avc, anon_vma);
98868 /* vma reference or self-parent link for new root */
98869@@ -200,12 +223,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98870
98871 if (unlikely(allocated))
98872 put_anon_vma(allocated);
98873+
98874+#ifdef CONFIG_PAX_SEGMEXEC
98875+ if (unlikely(avc_m))
98876+ anon_vma_chain_free(avc_m);
98877+#endif
98878+
98879 if (unlikely(avc))
98880 anon_vma_chain_free(avc);
98881 }
98882 return 0;
98883
98884 out_enomem_free_avc:
98885+
98886+#ifdef CONFIG_PAX_SEGMEXEC
98887+ if (avc_m)
98888+ anon_vma_chain_free(avc_m);
98889+#endif
98890+
98891 anon_vma_chain_free(avc);
98892 out_enomem:
98893 return -ENOMEM;
98894@@ -249,7 +284,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
98895 * good chance of avoiding scanning the whole hierarchy when it searches where
98896 * page is mapped.
98897 */
98898-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
98899+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
98900 {
98901 struct anon_vma_chain *avc, *pavc;
98902 struct anon_vma *root = NULL;
98903@@ -303,7 +338,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
98904 * the corresponding VMA in the parent process is attached to.
98905 * Returns 0 on success, non-zero on failure.
98906 */
98907-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
98908+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
98909 {
98910 struct anon_vma_chain *avc;
98911 struct anon_vma *anon_vma;
98912@@ -423,8 +458,10 @@ static void anon_vma_ctor(void *data)
98913 void __init anon_vma_init(void)
98914 {
98915 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
98916- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
98917- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
98918+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
98919+ anon_vma_ctor);
98920+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
98921+ SLAB_PANIC|SLAB_NO_SANITIZE);
98922 }
98923
98924 /*
98925diff --git a/mm/shmem.c b/mm/shmem.c
98926index 993e6ba..a962ba3 100644
98927--- a/mm/shmem.c
98928+++ b/mm/shmem.c
98929@@ -33,7 +33,7 @@
98930 #include <linux/swap.h>
98931 #include <linux/aio.h>
98932
98933-static struct vfsmount *shm_mnt;
98934+struct vfsmount *shm_mnt;
98935
98936 #ifdef CONFIG_SHMEM
98937 /*
98938@@ -80,7 +80,7 @@ static struct vfsmount *shm_mnt;
98939 #define BOGO_DIRENT_SIZE 20
98940
98941 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
98942-#define SHORT_SYMLINK_LEN 128
98943+#define SHORT_SYMLINK_LEN 64
98944
98945 /*
98946 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
98947@@ -2558,6 +2558,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
98948 static int shmem_xattr_validate(const char *name)
98949 {
98950 struct { const char *prefix; size_t len; } arr[] = {
98951+
98952+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
98953+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
98954+#endif
98955+
98956 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
98957 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
98958 };
98959@@ -2613,6 +2618,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
98960 if (err)
98961 return err;
98962
98963+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
98964+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
98965+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
98966+ return -EOPNOTSUPP;
98967+ if (size > 8)
98968+ return -EINVAL;
98969+ }
98970+#endif
98971+
98972 return simple_xattr_set(&info->xattrs, name, value, size, flags);
98973 }
98974
98975@@ -2996,8 +3010,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
98976 int err = -ENOMEM;
98977
98978 /* Round up to L1_CACHE_BYTES to resist false sharing */
98979- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
98980- L1_CACHE_BYTES), GFP_KERNEL);
98981+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
98982 if (!sbinfo)
98983 return -ENOMEM;
98984
98985diff --git a/mm/slab.c b/mm/slab.c
98986index 65b5dcb..d53d866 100644
98987--- a/mm/slab.c
98988+++ b/mm/slab.c
98989@@ -314,10 +314,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
98990 if ((x)->max_freeable < i) \
98991 (x)->max_freeable = i; \
98992 } while (0)
98993-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
98994-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
98995-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
98996-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
98997+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
98998+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
98999+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
99000+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
99001+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
99002+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
99003 #else
99004 #define STATS_INC_ACTIVE(x) do { } while (0)
99005 #define STATS_DEC_ACTIVE(x) do { } while (0)
99006@@ -334,6 +336,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
99007 #define STATS_INC_ALLOCMISS(x) do { } while (0)
99008 #define STATS_INC_FREEHIT(x) do { } while (0)
99009 #define STATS_INC_FREEMISS(x) do { } while (0)
99010+#define STATS_INC_SANITIZED(x) do { } while (0)
99011+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
99012 #endif
99013
99014 #if DEBUG
99015@@ -450,7 +454,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
99016 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
99017 */
99018 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
99019- const struct page *page, void *obj)
99020+ const struct page *page, const void *obj)
99021 {
99022 u32 offset = (obj - page->s_mem);
99023 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
99024@@ -1438,7 +1442,7 @@ void __init kmem_cache_init(void)
99025 * structures first. Without this, further allocations will bug.
99026 */
99027 kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
99028- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
99029+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
99030 slab_state = PARTIAL_NODE;
99031
99032 slab_early_init = 0;
99033@@ -2059,7 +2063,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99034
99035 cachep = find_mergeable(size, align, flags, name, ctor);
99036 if (cachep) {
99037- cachep->refcount++;
99038+ atomic_inc(&cachep->refcount);
99039
99040 /*
99041 * Adjust the object sizes so that we clear
99042@@ -3357,6 +3361,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
99043 struct array_cache *ac = cpu_cache_get(cachep);
99044
99045 check_irq_off();
99046+
99047+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99048+ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
99049+ STATS_INC_NOT_SANITIZED(cachep);
99050+ else {
99051+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
99052+
99053+ if (cachep->ctor)
99054+ cachep->ctor(objp);
99055+
99056+ STATS_INC_SANITIZED(cachep);
99057+ }
99058+#endif
99059+
99060 kmemleak_free_recursive(objp, cachep->flags);
99061 objp = cache_free_debugcheck(cachep, objp, caller);
99062
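
The sanitizing free path in __cache_free() poisons the object with PAX_MEMORY_SANITIZE_VALUE and then re-runs the cache constructor so constructed-state caches stay consistent; on x86_64 the 0xFE fill makes any stale pointer non-canonical, so a use-after-free dereference faults immediately. A model of the poison-then-reconstruct sequence (the struct and ctor are illustrative):

#include <stdio.h>
#include <string.h>

#define PAX_MEMORY_SANITIZE_VALUE 0xfe  /* x86_64 value: stale pointers read
                                           back as non-canonical addresses */

struct example_obj {
    void *next;            /* constructed state the ctor must restore */
    char payload[24];
};

static void example_ctor(void *p)
{
    struct example_obj *o = p;
    o->next = NULL;        /* re-establish the constructed invariant */
}

/* Model of the sanitizing free path: poison, then reconstruct. */
static void sanitize_free(void *obj, size_t size, void (*ctor)(void *))
{
    memset(obj, PAX_MEMORY_SANITIZE_VALUE, size);
    if (ctor)
        ctor(obj);
}

int main(void)
{
    struct example_obj o = { .next = &o };
    sanitize_free(&o, sizeof(o), example_ctor);
    printf("payload byte: %#x, next: %p\n",
           (unsigned char)o.payload[0], o.next);
    return 0;
}
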
99063@@ -3469,7 +3487,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
99064 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
99065 }
99066
99067-void *__kmalloc_node(size_t size, gfp_t flags, int node)
99068+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
99069 {
99070 return __do_kmalloc_node(size, flags, node, _RET_IP_);
99071 }
99072@@ -3489,7 +3507,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
99073 * @flags: the type of memory to allocate (see kmalloc).
99074 * @caller: function caller for debug tracking of the caller
99075 */
99076-static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
99077+static __always_inline void * __size_overflow(1) __do_kmalloc(size_t size, gfp_t flags,
99078 unsigned long caller)
99079 {
99080 struct kmem_cache *cachep;
99081@@ -3562,6 +3580,7 @@ void kfree(const void *objp)
99082
99083 if (unlikely(ZERO_OR_NULL_PTR(objp)))
99084 return;
99085+ VM_BUG_ON(!virt_addr_valid(objp));
99086 local_irq_save(flags);
99087 kfree_debugcheck(objp);
99088 c = virt_to_cache(objp);
99089@@ -3984,14 +4003,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
99090 }
99091 /* cpu stats */
99092 {
99093- unsigned long allochit = atomic_read(&cachep->allochit);
99094- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
99095- unsigned long freehit = atomic_read(&cachep->freehit);
99096- unsigned long freemiss = atomic_read(&cachep->freemiss);
99097+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
99098+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
99099+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
99100+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
99101
99102 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
99103 allochit, allocmiss, freehit, freemiss);
99104 }
99105+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99106+ {
99107+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
99108+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
99109+
99110+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
99111+ }
99112+#endif
99113 #endif
99114 }
99115
99116@@ -4199,13 +4226,69 @@ static const struct file_operations proc_slabstats_operations = {
99117 static int __init slab_proc_init(void)
99118 {
99119 #ifdef CONFIG_DEBUG_SLAB_LEAK
99120- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
99121+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
99122 #endif
99123 return 0;
99124 }
99125 module_init(slab_proc_init);
99126 #endif
99127
99128+bool is_usercopy_object(const void *ptr)
99129+{
99130+ struct page *page;
99131+ struct kmem_cache *cachep;
99132+
99133+ if (ZERO_OR_NULL_PTR(ptr))
99134+ return false;
99135+
99136+ if (!slab_is_available())
99137+ return false;
99138+
99139+ if (!virt_addr_valid(ptr))
99140+ return false;
99141+
99142+ page = virt_to_head_page(ptr);
99143+
99144+ if (!PageSlab(page))
99145+ return false;
99146+
99147+ cachep = page->slab_cache;
99148+ return cachep->flags & SLAB_USERCOPY;
99149+}
99150+
99151+#ifdef CONFIG_PAX_USERCOPY
99152+const char *check_heap_object(const void *ptr, unsigned long n)
99153+{
99154+ struct page *page;
99155+ struct kmem_cache *cachep;
99156+ unsigned int objnr;
99157+ unsigned long offset;
99158+
99159+ if (ZERO_OR_NULL_PTR(ptr))
99160+ return "<null>";
99161+
99162+ if (!virt_addr_valid(ptr))
99163+ return NULL;
99164+
99165+ page = virt_to_head_page(ptr);
99166+
99167+ if (!PageSlab(page))
99168+ return NULL;
99169+
99170+ cachep = page->slab_cache;
99171+ if (!(cachep->flags & SLAB_USERCOPY))
99172+ return cachep->name;
99173+
99174+ objnr = obj_to_index(cachep, page, ptr);
99175+ BUG_ON(objnr >= cachep->num);
99176+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
99177+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
99178+ return NULL;
99179+
99180+ return cachep->name;
99181+}
99182+#endif
99183+
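
check_heap_object() is the slab half of PAX_USERCOPY: a pointer into a slab page is traced to its containing object, and the copy is allowed only when the cache carries SLAB_USERCOPY and the span [ptr, ptr+n) fits inside that single object; otherwise the cache name is returned so the caller can report the violation. The bounds test in isolation (span_fits_object is hypothetical):

#include <assert.h>
#include <stdio.h>

/* A copy of [ptr_off, ptr_off + n) is safe iff it fits inside a single
 * object of `objsize` bytes, measuring from the object's start. */
static int span_fits_object(unsigned long ptr_off, unsigned long n,
                            unsigned long objsize)
{
    return ptr_off <= objsize && n <= objsize - ptr_off;
}

int main(void)
{
    assert(span_fits_object(0, 64, 64));     /* whole object: ok */
    assert(span_fits_object(32, 32, 64));    /* tail half: ok */
    assert(!span_fits_object(32, 33, 64));   /* one byte past the end: flagged */
    puts("usercopy bounds checks behave as expected");
    return 0;
}
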
99184 /**
99185 * ksize - get the actual amount of memory allocated for a given object
99186 * @objp: Pointer to the object
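
The SLAB halves of is_usercopy_object() and check_heap_object() added above reduce PAX_USERCOPY to two questions: is the cache flagged SLAB_USERCOPY at all, and does the n-byte copy stay inside a single object once the pointer's offset within that object is known. The final test is the load-bearing line. A minimal userspace sketch of just that test, with copy_fits_object() and struct fake_cache invented for illustration (the kernel derives the offset from the object index here; the SLUB version later does it with a modulo):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Invented stand-in for the kmem_cache fields the check relies on. */
struct fake_cache {
        const char *name;
        size_t object_size;   /* usable bytes per object */
};

/* The bounds test from check_heap_object(): an n-byte copy starting
 * at `offset` inside an object must also end inside that object. */
static bool copy_fits_object(const struct fake_cache *c,
                             size_t offset, size_t n)
{
        return offset <= c->object_size && n <= c->object_size - offset;
}

int main(void)
{
        struct fake_cache kmalloc_64 = { "kmalloc-64", 64 };

        printf("%d\n", copy_fits_object(&kmalloc_64, 0, 64));   /* 1: exact fit */
        printf("%d\n", copy_fits_object(&kmalloc_64, 32, 40));  /* 0: overflows */
        return 0;
}

Testing offset <= object_size before the subtraction also keeps object_size - offset from wrapping, which is why the patch orders the two comparisons this way.
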
99187diff --git a/mm/slab.h b/mm/slab.h
99188index 1cf40054..10ad563 100644
99189--- a/mm/slab.h
99190+++ b/mm/slab.h
99191@@ -22,7 +22,7 @@ struct kmem_cache {
99192 unsigned int align; /* Alignment as calculated */
99193 unsigned long flags; /* Active flags on the slab */
99194 const char *name; /* Slab name for sysfs */
99195- int refcount; /* Use counter */
99196+ atomic_t refcount; /* Use counter */
99197 void (*ctor)(void *); /* Called on object slot creation */
99198 struct list_head list; /* List of all slab caches on the system */
99199 };
99200@@ -66,6 +66,20 @@ extern struct list_head slab_caches;
99201 /* The slab cache that manages slab cache information */
99202 extern struct kmem_cache *kmem_cache;
99203
99204+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99205+#ifdef CONFIG_X86_64
99206+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
99207+#else
99208+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
99209+#endif
99210+enum pax_sanitize_mode {
99211+ PAX_SANITIZE_SLAB_OFF = 0,
99212+ PAX_SANITIZE_SLAB_FAST,
99213+ PAX_SANITIZE_SLAB_FULL,
99214+};
99215+extern enum pax_sanitize_mode pax_sanitize_slab;
99216+#endif
99217+
99218 unsigned long calculate_alignment(unsigned long flags,
99219 unsigned long align, unsigned long size);
99220
99221@@ -116,7 +130,8 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
99222
99223 /* Legal flag mask for kmem_cache_create(), for various configurations */
99224 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
99225- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
99226+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
99227+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
99228
99229 #if defined(CONFIG_DEBUG_SLAB)
99230 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
99231@@ -300,6 +315,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
99232 return s;
99233
99234 page = virt_to_head_page(x);
99235+
99236+ BUG_ON(!PageSlab(page));
99237+
99238 cachep = page->slab_cache;
99239 if (slab_equal_or_root(cachep, s))
99240 return cachep;
99241diff --git a/mm/slab_common.c b/mm/slab_common.c
99242index e03dd6f..c475838 100644
99243--- a/mm/slab_common.c
99244+++ b/mm/slab_common.c
99245@@ -25,11 +25,35 @@
99246
99247 #include "slab.h"
99248
99249-enum slab_state slab_state;
99250+enum slab_state slab_state __read_only;
99251 LIST_HEAD(slab_caches);
99252 DEFINE_MUTEX(slab_mutex);
99253 struct kmem_cache *kmem_cache;
99254
99255+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99256+enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
99257+static int __init pax_sanitize_slab_setup(char *str)
99258+{
99259+ if (!str)
99260+ return 0;
99261+
99262+ if (!strcmp(str, "0") || !strcmp(str, "off")) {
99263+ pr_info("PaX slab sanitization: %s\n", "disabled");
99264+ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
99265+ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
99266+ pr_info("PaX slab sanitization: %s\n", "fast");
99267+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
99268+ } else if (!strcmp(str, "full")) {
99269+ pr_info("PaX slab sanitization: %s\n", "full");
99270+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
99271+ } else
99272+ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
99273+
99274+ return 0;
99275+}
99276+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
99277+#endif
99278+
99279 /*
99280 * Set of flags that will prevent slab merging
99281 */
99282@@ -44,7 +68,7 @@ struct kmem_cache *kmem_cache;
99283 * Merge control. If this is set then no merging of slab caches will occur.
99284 * (Could be removed. This was introduced to pacify the merge skeptics.)
99285 */
99286-static int slab_nomerge;
99287+static int slab_nomerge = 1;
99288
99289 static int __init setup_slab_nomerge(char *str)
99290 {
99291@@ -218,7 +242,7 @@ int slab_unmergeable(struct kmem_cache *s)
99292 /*
99293 * We may have set a slab to be unmergeable during bootstrap.
99294 */
99295- if (s->refcount < 0)
99296+ if (atomic_read(&s->refcount) < 0)
99297 return 1;
99298
99299 return 0;
99300@@ -322,7 +346,7 @@ do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
99301 if (err)
99302 goto out_free_cache;
99303
99304- s->refcount = 1;
99305+ atomic_set(&s->refcount, 1);
99306 list_add(&s->list, &slab_caches);
99307 out:
99308 if (err)
99309@@ -386,6 +410,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
99310 */
99311 flags &= CACHE_CREATE_MASK;
99312
99313+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99314+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
99315+ flags |= SLAB_NO_SANITIZE;
99316+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
99317+ flags &= ~SLAB_NO_SANITIZE;
99318+#endif
99319+
99320 s = __kmem_cache_alias(name, size, align, flags, ctor);
99321 if (s)
99322 goto out_unlock;
99323@@ -505,8 +536,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
99324
99325 mutex_lock(&slab_mutex);
99326
99327- s->refcount--;
99328- if (s->refcount)
99329+ if (!atomic_dec_and_test(&s->refcount))
99330 goto out_unlock;
99331
99332 if (memcg_cleanup_cache_params(s) != 0)
99333@@ -526,7 +556,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
99334 rcu_barrier();
99335
99336 memcg_free_cache_params(s);
99337-#ifdef SLAB_SUPPORTS_SYSFS
99338+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99339 sysfs_slab_remove(s);
99340 #else
99341 slab_kmem_cache_release(s);
99342@@ -582,7 +612,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
99343 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
99344 name, size, err);
99345
99346- s->refcount = -1; /* Exempt from merging for now */
99347+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
99348 }
99349
99350 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99351@@ -595,7 +625,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99352
99353 create_boot_cache(s, name, size, flags);
99354 list_add(&s->list, &slab_caches);
99355- s->refcount = 1;
99356+ atomic_set(&s->refcount, 1);
99357 return s;
99358 }
99359
99360@@ -607,6 +637,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
99361 EXPORT_SYMBOL(kmalloc_dma_caches);
99362 #endif
99363
99364+#ifdef CONFIG_PAX_USERCOPY_SLABS
99365+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
99366+EXPORT_SYMBOL(kmalloc_usercopy_caches);
99367+#endif
99368+
99369 /*
99370 * Conversion table for small slabs sizes / 8 to the index in the
99371 * kmalloc array. This is necessary for slabs < 192 since we have non power
99372@@ -671,6 +706,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
99373 return kmalloc_dma_caches[index];
99374
99375 #endif
99376+
99377+#ifdef CONFIG_PAX_USERCOPY_SLABS
99378+ if (unlikely((flags & GFP_USERCOPY)))
99379+ return kmalloc_usercopy_caches[index];
99380+
99381+#endif
99382+
99383 return kmalloc_caches[index];
99384 }
99385
99386@@ -727,7 +769,7 @@ void __init create_kmalloc_caches(unsigned long flags)
99387 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
99388 if (!kmalloc_caches[i]) {
99389 kmalloc_caches[i] = create_kmalloc_cache(NULL,
99390- 1 << i, flags);
99391+ 1 << i, SLAB_USERCOPY | flags);
99392 }
99393
99394 /*
99395@@ -736,10 +778,10 @@ void __init create_kmalloc_caches(unsigned long flags)
99396 * earlier power of two caches
99397 */
99398 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
99399- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
99400+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
99401
99402 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
99403- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
99404+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
99405 }
99406
99407 /* Kmalloc array is now usable */
99408@@ -772,6 +814,23 @@ void __init create_kmalloc_caches(unsigned long flags)
99409 }
99410 }
99411 #endif
99412+
99413+#ifdef CONFIG_PAX_USERCOPY_SLABS
99414+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
99415+ struct kmem_cache *s = kmalloc_caches[i];
99416+
99417+ if (s) {
99418+ int size = kmalloc_size(i);
99419+ char *n = kasprintf(GFP_NOWAIT,
99420+ "usercopy-kmalloc-%d", size);
99421+
99422+ BUG_ON(!n);
99423+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
99424+ size, SLAB_USERCOPY | flags);
99425+ }
99426+ }
99427+#endif
99428+
99429 }
99430 #endif /* !CONFIG_SLOB */
99431
99432@@ -830,6 +889,9 @@ static void print_slabinfo_header(struct seq_file *m)
99433 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
99434 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
99435 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
99436+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99437+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
99438+#endif
99439 #endif
99440 seq_putc(m, '\n');
99441 }
99442@@ -964,7 +1026,7 @@ static int __init slab_proc_init(void)
99443 module_init(slab_proc_init);
99444 #endif /* CONFIG_SLABINFO */
99445
99446-static __always_inline void *__do_krealloc(const void *p, size_t new_size,
99447+static __always_inline void * __size_overflow(2) __do_krealloc(const void *p, size_t new_size,
99448 gfp_t flags)
99449 {
99450 void *ret;
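
The pax_sanitize_slab= early parameter above is a plain string dispatch with "fast" as the compiled-in default; the kmem_cache_create() hunk then resolves the mode into per-cache flags, forcing SLAB_NO_SANITIZE for "off" (and for SLAB_DESTROY_BY_RCU caches, whose objects must stay intact until the grace period ends) and stripping it for "full". A small userspace model of the parser, reusing the enum names from the hunk but otherwise illustrative:

#include <stdio.h>
#include <string.h>

enum pax_sanitize_mode {
        PAX_SANITIZE_SLAB_OFF = 0,
        PAX_SANITIZE_SLAB_FAST,
        PAX_SANITIZE_SLAB_FULL,
};

static enum pax_sanitize_mode mode = PAX_SANITIZE_SLAB_FAST; /* default */

/* Mirrors pax_sanitize_slab_setup(): accepts 0/off, 1/fast, full. */
static int parse_sanitize_arg(const char *str)
{
        if (!str)
                return -1;
        if (!strcmp(str, "0") || !strcmp(str, "off"))
                mode = PAX_SANITIZE_SLAB_OFF;
        else if (!strcmp(str, "1") || !strcmp(str, "fast"))
                mode = PAX_SANITIZE_SLAB_FAST;
        else if (!strcmp(str, "full"))
                mode = PAX_SANITIZE_SLAB_FULL;
        else {
                fprintf(stderr, "unsupported option '%s'\n", str);
                return -1;
        }
        return 0;
}

int main(int argc, char **argv)
{
        parse_sanitize_arg(argc > 1 ? argv[1] : "fast");
        printf("mode=%d\n", (int)mode);
        return 0;
}
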
99451diff --git a/mm/slob.c b/mm/slob.c
99452index 96a8620..46b3f12 100644
99453--- a/mm/slob.c
99454+++ b/mm/slob.c
99455@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
99456 /*
99457 * Return the size of a slob block.
99458 */
99459-static slobidx_t slob_units(slob_t *s)
99460+static slobidx_t slob_units(const slob_t *s)
99461 {
99462 if (s->units > 0)
99463 return s->units;
99464@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
99465 /*
99466 * Return the next free slob block pointer after this one.
99467 */
99468-static slob_t *slob_next(slob_t *s)
99469+static slob_t *slob_next(const slob_t *s)
99470 {
99471 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
99472 slobidx_t next;
99473@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
99474 /*
99475 * Returns true if s is the last free block in its page.
99476 */
99477-static int slob_last(slob_t *s)
99478+static int slob_last(const slob_t *s)
99479 {
99480 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
99481 }
99482
99483-static void *slob_new_pages(gfp_t gfp, int order, int node)
99484+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
99485 {
99486- void *page;
99487+ struct page *page;
99488
99489 #ifdef CONFIG_NUMA
99490 if (node != NUMA_NO_NODE)
99491@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
99492 if (!page)
99493 return NULL;
99494
99495- return page_address(page);
99496+ __SetPageSlab(page);
99497+ return page;
99498 }
99499
99500-static void slob_free_pages(void *b, int order)
99501+static void slob_free_pages(struct page *sp, int order)
99502 {
99503 if (current->reclaim_state)
99504 current->reclaim_state->reclaimed_slab += 1 << order;
99505- free_pages((unsigned long)b, order);
99506+ __ClearPageSlab(sp);
99507+ page_mapcount_reset(sp);
99508+ sp->private = 0;
99509+ __free_pages(sp, order);
99510 }
99511
99512 /*
99513@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
99514
99515 /* Not enough space: must allocate a new page */
99516 if (!b) {
99517- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
99518- if (!b)
99519+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
99520+ if (!sp)
99521 return NULL;
99522- sp = virt_to_page(b);
99523- __SetPageSlab(sp);
99524+ b = page_address(sp);
99525
99526 spin_lock_irqsave(&slob_lock, flags);
99527 sp->units = SLOB_UNITS(PAGE_SIZE);
99528 sp->freelist = b;
99529+ sp->private = 0;
99530 INIT_LIST_HEAD(&sp->lru);
99531 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
99532 set_slob_page_free(sp, slob_list);
99533@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
99534 /*
99535 * slob_free: entry point into the slob allocator.
99536 */
99537-static void slob_free(void *block, int size)
99538+static void slob_free(struct kmem_cache *c, void *block, int size)
99539 {
99540 struct page *sp;
99541 slob_t *prev, *next, *b = (slob_t *)block;
99542@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
99543 if (slob_page_free(sp))
99544 clear_slob_page_free(sp);
99545 spin_unlock_irqrestore(&slob_lock, flags);
99546- __ClearPageSlab(sp);
99547- page_mapcount_reset(sp);
99548- slob_free_pages(b, 0);
99549+ slob_free_pages(sp, 0);
99550 return;
99551 }
99552
99553+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99554+ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
99555+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
99556+#endif
99557+
99558 if (!slob_page_free(sp)) {
99559 /* This slob page is about to become partially free. Easy! */
99560 sp->units = units;
99561@@ -424,11 +431,10 @@ out:
99562 */
99563
99564 static __always_inline void *
99565-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99566+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
99567 {
99568- unsigned int *m;
99569- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99570- void *ret;
99571+ slob_t *m;
99572+ void *ret = NULL;
99573
99574 gfp &= gfp_allowed_mask;
99575
99576@@ -442,27 +448,45 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99577
99578 if (!m)
99579 return NULL;
99580- *m = size;
99581+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
99582+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
99583+ m[0].units = size;
99584+ m[1].units = align;
99585 ret = (void *)m + align;
99586
99587 trace_kmalloc_node(caller, ret,
99588 size, size + align, gfp, node);
99589 } else {
99590 unsigned int order = get_order(size);
99591+ struct page *page;
99592
99593 if (likely(order))
99594 gfp |= __GFP_COMP;
99595- ret = slob_new_pages(gfp, order, node);
99596+ page = slob_new_pages(gfp, order, node);
99597+ if (page) {
99598+ ret = page_address(page);
99599+ page->private = size;
99600+ }
99601
99602 trace_kmalloc_node(caller, ret,
99603 size, PAGE_SIZE << order, gfp, node);
99604 }
99605
99606- kmemleak_alloc(ret, size, 1, gfp);
99607 return ret;
99608 }
99609
99610-void *__kmalloc(size_t size, gfp_t gfp)
99611+static __always_inline void *
99612+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99613+{
99614+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99615+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
99616+
99617+ if (!ZERO_OR_NULL_PTR(ret))
99618+ kmemleak_alloc(ret, size, 1, gfp);
99619+ return ret;
99620+}
99621+
99622+void * __size_overflow(1) __kmalloc(size_t size, gfp_t gfp)
99623 {
99624 return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
99625 }
99626@@ -491,34 +515,112 @@ void kfree(const void *block)
99627 return;
99628 kmemleak_free(block);
99629
99630+ VM_BUG_ON(!virt_addr_valid(block));
99631 sp = virt_to_page(block);
99632- if (PageSlab(sp)) {
99633+ VM_BUG_ON(!PageSlab(sp));
99634+ if (!sp->private) {
99635 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99636- unsigned int *m = (unsigned int *)(block - align);
99637- slob_free(m, *m + align);
99638- } else
99639+ slob_t *m = (slob_t *)(block - align);
99640+ slob_free(NULL, m, m[0].units + align);
99641+ } else {
99642+ __ClearPageSlab(sp);
99643+ page_mapcount_reset(sp);
99644+ sp->private = 0;
99645 __free_pages(sp, compound_order(sp));
99646+ }
99647 }
99648 EXPORT_SYMBOL(kfree);
99649
99650+bool is_usercopy_object(const void *ptr)
99651+{
99652+ if (!slab_is_available())
99653+ return false;
99654+
99655+ // PAX: TODO
99656+
99657+ return false;
99658+}
99659+
99660+#ifdef CONFIG_PAX_USERCOPY
99661+const char *check_heap_object(const void *ptr, unsigned long n)
99662+{
99663+ struct page *page;
99664+ const slob_t *free;
99665+ const void *base;
99666+ unsigned long flags;
99667+
99668+ if (ZERO_OR_NULL_PTR(ptr))
99669+ return "<null>";
99670+
99671+ if (!virt_addr_valid(ptr))
99672+ return NULL;
99673+
99674+ page = virt_to_head_page(ptr);
99675+ if (!PageSlab(page))
99676+ return NULL;
99677+
99678+ if (page->private) {
99679+ base = page;
99680+ if (base <= ptr && n <= page->private - (ptr - base))
99681+ return NULL;
99682+ return "<slob>";
99683+ }
99684+
99685+ /* some tricky double walking to find the chunk */
99686+ spin_lock_irqsave(&slob_lock, flags);
99687+ base = (void *)((unsigned long)ptr & PAGE_MASK);
99688+ free = page->freelist;
99689+
99690+ while (!slob_last(free) && (void *)free <= ptr) {
99691+ base = free + slob_units(free);
99692+ free = slob_next(free);
99693+ }
99694+
99695+ while (base < (void *)free) {
99696+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
99697+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
99698+ int offset;
99699+
99700+ if (ptr < base + align)
99701+ break;
99702+
99703+ offset = ptr - base - align;
99704+ if (offset >= m) {
99705+ base += size;
99706+ continue;
99707+ }
99708+
99709+ if (n > m - offset)
99710+ break;
99711+
99712+ spin_unlock_irqrestore(&slob_lock, flags);
99713+ return NULL;
99714+ }
99715+
99716+ spin_unlock_irqrestore(&slob_lock, flags);
99717+ return "<slob>";
99718+}
99719+#endif
99720+
99721 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
99722 size_t ksize(const void *block)
99723 {
99724 struct page *sp;
99725 int align;
99726- unsigned int *m;
99727+ slob_t *m;
99728
99729 BUG_ON(!block);
99730 if (unlikely(block == ZERO_SIZE_PTR))
99731 return 0;
99732
99733 sp = virt_to_page(block);
99734- if (unlikely(!PageSlab(sp)))
99735- return PAGE_SIZE << compound_order(sp);
99736+ VM_BUG_ON(!PageSlab(sp));
99737+ if (sp->private)
99738+ return sp->private;
99739
99740 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99741- m = (unsigned int *)(block - align);
99742- return SLOB_UNITS(*m) * SLOB_UNIT;
99743+ m = (slob_t *)(block - align);
99744+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
99745 }
99746 EXPORT_SYMBOL(ksize);
99747
99748@@ -534,23 +636,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
99749
99750 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
99751 {
99752- void *b;
99753+ void *b = NULL;
99754
99755 flags &= gfp_allowed_mask;
99756
99757 lockdep_trace_alloc(flags);
99758
99759+#ifdef CONFIG_PAX_USERCOPY_SLABS
99760+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
99761+#else
99762 if (c->size < PAGE_SIZE) {
99763 b = slob_alloc(c->size, flags, c->align, node);
99764 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
99765 SLOB_UNITS(c->size) * SLOB_UNIT,
99766 flags, node);
99767 } else {
99768- b = slob_new_pages(flags, get_order(c->size), node);
99769+ struct page *sp;
99770+
99771+ sp = slob_new_pages(flags, get_order(c->size), node);
99772+ if (sp) {
99773+ b = page_address(sp);
99774+ sp->private = c->size;
99775+ }
99776 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
99777 PAGE_SIZE << get_order(c->size),
99778 flags, node);
99779 }
99780+#endif
99781
99782 if (b && c->ctor)
99783 c->ctor(b);
99784@@ -567,7 +679,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
99785 EXPORT_SYMBOL(kmem_cache_alloc);
99786
99787 #ifdef CONFIG_NUMA
99788-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
99789+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t gfp, int node)
99790 {
99791 return __do_kmalloc_node(size, gfp, node, _RET_IP_);
99792 }
99793@@ -580,12 +692,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
99794 EXPORT_SYMBOL(kmem_cache_alloc_node);
99795 #endif
99796
99797-static void __kmem_cache_free(void *b, int size)
99798+static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
99799 {
99800- if (size < PAGE_SIZE)
99801- slob_free(b, size);
99802+ struct page *sp;
99803+
99804+ sp = virt_to_page(b);
99805+ BUG_ON(!PageSlab(sp));
99806+ if (!sp->private)
99807+ slob_free(c, b, size);
99808 else
99809- slob_free_pages(b, get_order(size));
99810+ slob_free_pages(sp, get_order(size));
99811 }
99812
99813 static void kmem_rcu_free(struct rcu_head *head)
99814@@ -593,22 +709,36 @@ static void kmem_rcu_free(struct rcu_head *head)
99815 struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
99816 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
99817
99818- __kmem_cache_free(b, slob_rcu->size);
99819+ __kmem_cache_free(NULL, b, slob_rcu->size);
99820 }
99821
99822 void kmem_cache_free(struct kmem_cache *c, void *b)
99823 {
99824+ int size = c->size;
99825+
99826+#ifdef CONFIG_PAX_USERCOPY_SLABS
99827+ if (size + c->align < PAGE_SIZE) {
99828+ size += c->align;
99829+ b -= c->align;
99830+ }
99831+#endif
99832+
99833 kmemleak_free_recursive(b, c->flags);
99834 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
99835 struct slob_rcu *slob_rcu;
99836- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
99837- slob_rcu->size = c->size;
99838+ slob_rcu = b + (size - sizeof(struct slob_rcu));
99839+ slob_rcu->size = size;
99840 call_rcu(&slob_rcu->head, kmem_rcu_free);
99841 } else {
99842- __kmem_cache_free(b, c->size);
99843+ __kmem_cache_free(c, b, size);
99844 }
99845
99846+#ifdef CONFIG_PAX_USERCOPY_SLABS
99847+ trace_kfree(_RET_IP_, b);
99848+#else
99849 trace_kmem_cache_free(_RET_IP_, b);
99850+#endif
99851+
99852 }
99853 EXPORT_SYMBOL(kmem_cache_free);
99854
99855diff --git a/mm/slub.c b/mm/slub.c
99856index fe376fe..2f5757c 100644
99857--- a/mm/slub.c
99858+++ b/mm/slub.c
99859@@ -197,7 +197,7 @@ struct track {
99860
99861 enum track_item { TRACK_ALLOC, TRACK_FREE };
99862
99863-#ifdef CONFIG_SYSFS
99864+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99865 static int sysfs_slab_add(struct kmem_cache *);
99866 static int sysfs_slab_alias(struct kmem_cache *, const char *);
99867 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
99868@@ -535,7 +535,7 @@ static void print_track(const char *s, struct track *t)
99869 if (!t->addr)
99870 return;
99871
99872- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
99873+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
99874 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
99875 #ifdef CONFIG_STACKTRACE
99876 {
99877@@ -2652,6 +2652,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
99878
99879 slab_free_hook(s, x);
99880
99881+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99882+ if (!(s->flags & SLAB_NO_SANITIZE)) {
99883+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
99884+ if (s->ctor)
99885+ s->ctor(x);
99886+ }
99887+#endif
99888+
99889 redo:
99890 /*
99891 * Determine the currently cpus per cpu slab.
99892@@ -2989,6 +2997,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
99893 s->inuse = size;
99894
99895 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
99896+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99897+ (!(flags & SLAB_NO_SANITIZE)) ||
99898+#endif
99899 s->ctor)) {
99900 /*
99901 * Relocate free pointer after the object if it is not
99902@@ -3243,7 +3254,7 @@ static int __init setup_slub_min_objects(char *str)
99903
99904 __setup("slub_min_objects=", setup_slub_min_objects);
99905
99906-void *__kmalloc(size_t size, gfp_t flags)
99907+void * __size_overflow(1) __kmalloc(size_t size, gfp_t flags)
99908 {
99909 struct kmem_cache *s;
99910 void *ret;
99911@@ -3279,7 +3290,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
99912 return ptr;
99913 }
99914
99915-void *__kmalloc_node(size_t size, gfp_t flags, int node)
99916+void * __size_overflow(1) __kmalloc_node(size_t size, gfp_t flags, int node)
99917 {
99918 struct kmem_cache *s;
99919 void *ret;
99920@@ -3308,6 +3319,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
99921 EXPORT_SYMBOL(__kmalloc_node);
99922 #endif
99923
99924+bool is_usercopy_object(const void *ptr)
99925+{
99926+ struct page *page;
99927+ struct kmem_cache *s;
99928+
99929+ if (ZERO_OR_NULL_PTR(ptr))
99930+ return false;
99931+
99932+ if (!slab_is_available())
99933+ return false;
99934+
99935+ if (!virt_addr_valid(ptr))
99936+ return false;
99937+
99938+ page = virt_to_head_page(ptr);
99939+
99940+ if (!PageSlab(page))
99941+ return false;
99942+
99943+ s = page->slab_cache;
99944+ return s->flags & SLAB_USERCOPY;
99945+}
99946+
99947+#ifdef CONFIG_PAX_USERCOPY
99948+const char *check_heap_object(const void *ptr, unsigned long n)
99949+{
99950+ struct page *page;
99951+ struct kmem_cache *s;
99952+ unsigned long offset;
99953+
99954+ if (ZERO_OR_NULL_PTR(ptr))
99955+ return "<null>";
99956+
99957+ if (!virt_addr_valid(ptr))
99958+ return NULL;
99959+
99960+ page = virt_to_head_page(ptr);
99961+
99962+ if (!PageSlab(page))
99963+ return NULL;
99964+
99965+ s = page->slab_cache;
99966+ if (!(s->flags & SLAB_USERCOPY))
99967+ return s->name;
99968+
99969+ offset = (ptr - page_address(page)) % s->size;
99970+ if (offset <= s->object_size && n <= s->object_size - offset)
99971+ return NULL;
99972+
99973+ return s->name;
99974+}
99975+#endif
99976+
99977 size_t ksize(const void *object)
99978 {
99979 struct page *page;
99980@@ -3336,6 +3400,7 @@ void kfree(const void *x)
99981 if (unlikely(ZERO_OR_NULL_PTR(x)))
99982 return;
99983
99984+ VM_BUG_ON(!virt_addr_valid(x));
99985 page = virt_to_head_page(x);
99986 if (unlikely(!PageSlab(page))) {
99987 BUG_ON(!PageCompound(page));
99988@@ -3631,7 +3696,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99989 int i;
99990 struct kmem_cache *c;
99991
99992- s->refcount++;
99993+ atomic_inc(&s->refcount);
99994
99995 /*
99996 * Adjust the object sizes so that we clear
99997@@ -3650,7 +3715,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99998 }
99999
100000 if (sysfs_slab_alias(s, name)) {
100001- s->refcount--;
100002+ atomic_dec(&s->refcount);
100003 s = NULL;
100004 }
100005 }
100006@@ -3767,7 +3832,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
100007 }
100008 #endif
100009
100010-#ifdef CONFIG_SYSFS
100011+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100012 static int count_inuse(struct page *page)
100013 {
100014 return page->inuse;
100015@@ -4048,7 +4113,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
100016 len += sprintf(buf + len, "%7ld ", l->count);
100017
100018 if (l->addr)
100019+#ifdef CONFIG_GRKERNSEC_HIDESYM
100020+ len += sprintf(buf + len, "%pS", NULL);
100021+#else
100022 len += sprintf(buf + len, "%pS", (void *)l->addr);
100023+#endif
100024 else
100025 len += sprintf(buf + len, "<not-available>");
100026
100027@@ -4150,12 +4219,12 @@ static void __init resiliency_test(void)
100028 validate_slab_cache(kmalloc_caches[9]);
100029 }
100030 #else
100031-#ifdef CONFIG_SYSFS
100032+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100033 static void resiliency_test(void) {};
100034 #endif
100035 #endif
100036
100037-#ifdef CONFIG_SYSFS
100038+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100039 enum slab_stat_type {
100040 SL_ALL, /* All slabs */
100041 SL_PARTIAL, /* Only partially allocated slabs */
100042@@ -4392,13 +4461,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
100043 {
100044 if (!s->ctor)
100045 return 0;
100046+#ifdef CONFIG_GRKERNSEC_HIDESYM
100047+ return sprintf(buf, "%pS\n", NULL);
100048+#else
100049 return sprintf(buf, "%pS\n", s->ctor);
100050+#endif
100051 }
100052 SLAB_ATTR_RO(ctor);
100053
100054 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
100055 {
100056- return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
100057+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) < 0 ? 0 : atomic_read(&s->refcount) - 1);
100058 }
100059 SLAB_ATTR_RO(aliases);
100060
100061@@ -4486,6 +4559,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
100062 SLAB_ATTR_RO(cache_dma);
100063 #endif
100064
100065+#ifdef CONFIG_PAX_USERCOPY_SLABS
100066+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
100067+{
100068+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
100069+}
100070+SLAB_ATTR_RO(usercopy);
100071+#endif
100072+
100073+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100074+static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
100075+{
100076+ return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
100077+}
100078+SLAB_ATTR_RO(sanitize);
100079+#endif
100080+
100081 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
100082 {
100083 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
100084@@ -4541,7 +4630,7 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
100085 * as well as cause other issues like converting a mergeable
100086 * cache into an umergeable one.
100087 */
100088- if (s->refcount > 1)
100089+ if (atomic_read(&s->refcount) > 1)
100090 return -EINVAL;
100091
100092 s->flags &= ~SLAB_TRACE;
100093@@ -4661,7 +4750,7 @@ static ssize_t failslab_show(struct kmem_cache *s, char *buf)
100094 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
100095 size_t length)
100096 {
100097- if (s->refcount > 1)
100098+ if (atomic_read(&s->refcount) > 1)
100099 return -EINVAL;
100100
100101 s->flags &= ~SLAB_FAILSLAB;
100102@@ -4831,6 +4920,12 @@ static struct attribute *slab_attrs[] = {
100103 #ifdef CONFIG_ZONE_DMA
100104 &cache_dma_attr.attr,
100105 #endif
100106+#ifdef CONFIG_PAX_USERCOPY_SLABS
100107+ &usercopy_attr.attr,
100108+#endif
100109+#ifdef CONFIG_PAX_MEMORY_SANITIZE
100110+ &sanitize_attr.attr,
100111+#endif
100112 #ifdef CONFIG_NUMA
100113 &remote_node_defrag_ratio_attr.attr,
100114 #endif
100115@@ -5075,6 +5170,7 @@ static char *create_unique_id(struct kmem_cache *s)
100116 return name;
100117 }
100118
100119+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100120 static int sysfs_slab_add(struct kmem_cache *s)
100121 {
100122 int err;
100123@@ -5148,6 +5244,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
100124 kobject_del(&s->kobj);
100125 kobject_put(&s->kobj);
100126 }
100127+#endif
100128
100129 /*
100130 * Need to buffer aliases during bootup until sysfs becomes
100131@@ -5161,6 +5258,7 @@ struct saved_alias {
100132
100133 static struct saved_alias *alias_list;
100134
100135+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
100136 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
100137 {
100138 struct saved_alias *al;
100139@@ -5183,6 +5281,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
100140 alias_list = al;
100141 return 0;
100142 }
100143+#endif
100144
100145 static int __init slab_sysfs_init(void)
100146 {
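
SLUB's scrub sits in slab_free(), with one wrinkle visible in the hunk: after poisoning the object, any cache constructor is re-run so ctor-managed caches keep handing out objects in their constructed state. That is also why the calculate_sizes() hunk relocates the free pointer for sanitized caches, since the poison pass would otherwise destroy inline freelist metadata. A standalone sketch with every type and name invented for illustration:

#include <stdio.h>
#include <string.h>

#define SANITIZE_VALUE 0xfe   /* the x86_64 poison byte, as above */

struct obj {
        int refs;
        char name[16];
};

static void obj_ctor(void *p)   /* invented constructor */
{
        struct obj *o = p;

        memset(o, 0, sizeof(*o));
        o->refs = 1;
}

/* Sketch of the slab_free() hunk: poison the whole object, then
 * re-run the ctor so the cache still hands out constructed objects. */
static void sanitize_and_reconstruct(void *x, size_t object_size,
                                     void (*ctor)(void *))
{
        memset(x, SANITIZE_VALUE, object_size);
        if (ctor)
                ctor(x);
}

int main(void)
{
        struct obj o = { 5, "stale-secret" };

        sanitize_and_reconstruct(&o, sizeof(o), obj_ctor);
        printf("refs=%d name='%s'\n", o.refs, o.name);  /* refs=1 name='' */
        return 0;
}
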
100147diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
100148index 4cba9c2..b4f9fcc 100644
100149--- a/mm/sparse-vmemmap.c
100150+++ b/mm/sparse-vmemmap.c
100151@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
100152 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
100153 if (!p)
100154 return NULL;
100155- pud_populate(&init_mm, pud, p);
100156+ pud_populate_kernel(&init_mm, pud, p);
100157 }
100158 return pud;
100159 }
100160@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
100161 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
100162 if (!p)
100163 return NULL;
100164- pgd_populate(&init_mm, pgd, p);
100165+ pgd_populate_kernel(&init_mm, pgd, p);
100166 }
100167 return pgd;
100168 }
100169diff --git a/mm/sparse.c b/mm/sparse.c
100170index d1b48b6..6e8590e 100644
100171--- a/mm/sparse.c
100172+++ b/mm/sparse.c
100173@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
100174
100175 for (i = 0; i < PAGES_PER_SECTION; i++) {
100176 if (PageHWPoison(&memmap[i])) {
100177- atomic_long_sub(1, &num_poisoned_pages);
100178+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
100179 ClearPageHWPoison(&memmap[i]);
100180 }
100181 }
100182diff --git a/mm/swap.c b/mm/swap.c
100183index 8a12b33..7068e78 100644
100184--- a/mm/swap.c
100185+++ b/mm/swap.c
100186@@ -31,6 +31,7 @@
100187 #include <linux/memcontrol.h>
100188 #include <linux/gfp.h>
100189 #include <linux/uio.h>
100190+#include <linux/hugetlb.h>
100191
100192 #include "internal.h"
100193
100194@@ -77,6 +78,8 @@ static void __put_compound_page(struct page *page)
100195
100196 __page_cache_release(page);
100197 dtor = get_compound_page_dtor(page);
100198+ if (!PageHuge(page))
100199+ BUG_ON(dtor != free_compound_page);
100200 (*dtor)(page);
100201 }
100202
100203diff --git a/mm/swapfile.c b/mm/swapfile.c
100204index 63f55cc..31874e6 100644
100205--- a/mm/swapfile.c
100206+++ b/mm/swapfile.c
100207@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
100208
100209 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
100210 /* Activity counter to indicate that a swapon or swapoff has occurred */
100211-static atomic_t proc_poll_event = ATOMIC_INIT(0);
100212+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
100213
100214 static inline unsigned char swap_count(unsigned char ent)
100215 {
100216@@ -1944,7 +1944,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
100217 spin_unlock(&swap_lock);
100218
100219 err = 0;
100220- atomic_inc(&proc_poll_event);
100221+ atomic_inc_unchecked(&proc_poll_event);
100222 wake_up_interruptible(&proc_poll_wait);
100223
100224 out_dput:
100225@@ -1961,8 +1961,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
100226
100227 poll_wait(file, &proc_poll_wait, wait);
100228
100229- if (seq->poll_event != atomic_read(&proc_poll_event)) {
100230- seq->poll_event = atomic_read(&proc_poll_event);
100231+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
100232+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
100233 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
100234 }
100235
100236@@ -2060,7 +2060,7 @@ static int swaps_open(struct inode *inode, struct file *file)
100237 return ret;
100238
100239 seq = file->private_data;
100240- seq->poll_event = atomic_read(&proc_poll_event);
100241+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
100242 return 0;
100243 }
100244
100245@@ -2520,7 +2520,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
100246 (frontswap_map) ? "FS" : "");
100247
100248 mutex_unlock(&swapon_mutex);
100249- atomic_inc(&proc_poll_event);
100250+ atomic_inc_unchecked(&proc_poll_event);
100251 wake_up_interruptible(&proc_poll_wait);
100252
100253 if (S_ISREG(inode->i_mode))
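
proc_poll_event only signals that a swapon or swapoff happened, so the patch downgrades it to atomic_unchecked_t: under PaX's REFCOUNT hardening, plain atomic_t arithmetic traps on overflow, and the _unchecked variants exempt counters whose wraparound is harmless. A userspace model of the policy only, not of the PaX implementation (which does the detection in arch inline asm):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Checked flavor: modeled on REFCOUNT-protected atomic_t, which must
 * never wrap because a wrapped refcount enables use-after-free. */
static void atomic_inc_checked(atomic_int *v)
{
        int old = atomic_fetch_add(v, 1);

        if (old == INT_MAX) {
                fprintf(stderr, "refcount overflow detected\n");
                abort();
        }
}

/* Unchecked flavor: a pure event counter like proc_poll_event,
 * where silent two's-complement wraparound is acceptable. */
static void atomic_inc_unchecked_model(atomic_int *v)
{
        atomic_fetch_add(v, 1);
}

int main(void)
{
        atomic_int refs = 1;
        atomic_int events = INT_MAX;

        atomic_inc_checked(&refs);            /* fine: 1 -> 2 */
        atomic_inc_unchecked_model(&events);  /* wraps, by design */
        printf("refs=%d events=%d\n", atomic_load(&refs),
               atomic_load(&events));
        return 0;
}
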
100254diff --git a/mm/util.c b/mm/util.c
100255index fec39d4..3e60325 100644
100256--- a/mm/util.c
100257+++ b/mm/util.c
100258@@ -195,6 +195,12 @@ struct task_struct *task_of_stack(struct task_struct *task,
100259 void arch_pick_mmap_layout(struct mm_struct *mm)
100260 {
100261 mm->mmap_base = TASK_UNMAPPED_BASE;
100262+
100263+#ifdef CONFIG_PAX_RANDMMAP
100264+ if (mm->pax_flags & MF_PAX_RANDMMAP)
100265+ mm->mmap_base += mm->delta_mmap;
100266+#endif
100267+
100268 mm->get_unmapped_area = arch_get_unmapped_area;
100269 }
100270 #endif
100271@@ -371,6 +377,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
100272 if (!mm->arg_end)
100273 goto out_mm; /* Shh! No looking before we're done */
100274
100275+ if (gr_acl_handle_procpidmem(task))
100276+ goto out_mm;
100277+
100278 len = mm->arg_end - mm->arg_start;
100279
100280 if (len > buflen)
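
The arch_pick_mmap_layout() hunk shifts the legacy mmap base by delta_mmap, a per-process random offset PaX chooses at exec time, so each address space starts its mappings somewhere different. Roughly like the following, where the base constant and the RNG are placeholders; the kernel derives delta_mmap from its own entropy and the architecture's address-space limits:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define TASK_UNMAPPED_BASE 0x40000000UL   /* placeholder constant */

/* Model of the hunk: base starts at TASK_UNMAPPED_BASE and, when
 * randomization is on, gains a page-aligned per-process delta. */
static unsigned long pick_mmap_base(int randmmap)
{
        unsigned long base = TASK_UNMAPPED_BASE;

        if (randmmap) {
                unsigned long delta_mmap =
                        ((unsigned long)rand() & 0xffffUL) << 12;
                base += delta_mmap;
        }
        return base;
}

int main(void)
{
        srand((unsigned)time(NULL));
        printf("mmap base: %#lx\n", pick_mmap_base(1));
        return 0;
}
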
100281diff --git a/mm/vmalloc.c b/mm/vmalloc.c
100282index 39c3388..7d976d4 100644
100283--- a/mm/vmalloc.c
100284+++ b/mm/vmalloc.c
100285@@ -39,20 +39,65 @@ struct vfree_deferred {
100286 struct work_struct wq;
100287 };
100288 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
100289+static DEFINE_PER_CPU(struct vfree_deferred, vunmap_deferred);
100290+
100291+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100292+struct stack_deferred_llist {
100293+ struct llist_head list;
100294+ void *stack;
100295+ void *lowmem_stack;
100296+};
100297+
100298+struct stack_deferred {
100299+ struct stack_deferred_llist list;
100300+ struct work_struct wq;
100301+};
100302+
100303+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
100304+#endif
100305
100306 static void __vunmap(const void *, int);
100307
100308-static void free_work(struct work_struct *w)
100309+static void vfree_work(struct work_struct *w)
100310+{
100311+ struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
100312+ struct llist_node *llnode = llist_del_all(&p->list);
100313+ while (llnode) {
100314+ void *x = llnode;
100315+ llnode = llist_next(llnode);
100316+ __vunmap(x, 1);
100317+ }
100318+}
100319+
100320+static void vunmap_work(struct work_struct *w)
100321 {
100322 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
100323 struct llist_node *llnode = llist_del_all(&p->list);
100324 while (llnode) {
100325 void *p = llnode;
100326 llnode = llist_next(llnode);
100327- __vunmap(p, 1);
100328+ __vunmap(p, 0);
100329 }
100330 }
100331
100332+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100333+static void unmap_work(struct work_struct *w)
100334+{
100335+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
100336+ struct llist_node *llnode = llist_del_all(&p->list.list);
100337+ while (llnode) {
100338+ struct stack_deferred_llist *x =
100339+ llist_entry((struct llist_head *)llnode,
100340+ struct stack_deferred_llist, list);
100341+ void *stack = ACCESS_ONCE(x->stack);
100342+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
100343+ llnode = llist_next(llnode);
100344+ __vunmap(stack, 0);
100345+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
100346+ }
100347+}
100348+#endif
100349+
100350 /*** Page table manipulation functions ***/
100351
100352 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100353@@ -61,8 +106,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100354
100355 pte = pte_offset_kernel(pmd, addr);
100356 do {
100357- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100358- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100359+
100360+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100361+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
100362+ BUG_ON(!pte_exec(*pte));
100363+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
100364+ continue;
100365+ }
100366+#endif
100367+
100368+ {
100369+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100370+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100371+ }
100372 } while (pte++, addr += PAGE_SIZE, addr != end);
100373 }
100374
100375@@ -122,16 +178,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
100376 pte = pte_alloc_kernel(pmd, addr);
100377 if (!pte)
100378 return -ENOMEM;
100379+
100380+ pax_open_kernel();
100381 do {
100382 struct page *page = pages[*nr];
100383
100384- if (WARN_ON(!pte_none(*pte)))
100385+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100386+ if (pgprot_val(prot) & _PAGE_NX)
100387+#endif
100388+
100389+ if (!pte_none(*pte)) {
100390+ pax_close_kernel();
100391+ WARN_ON(1);
100392 return -EBUSY;
100393- if (WARN_ON(!page))
100394+ }
100395+ if (!page) {
100396+ pax_close_kernel();
100397+ WARN_ON(1);
100398 return -ENOMEM;
100399+ }
100400 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
100401 (*nr)++;
100402 } while (pte++, addr += PAGE_SIZE, addr != end);
100403+ pax_close_kernel();
100404 return 0;
100405 }
100406
100407@@ -141,7 +210,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
100408 pmd_t *pmd;
100409 unsigned long next;
100410
100411- pmd = pmd_alloc(&init_mm, pud, addr);
100412+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
100413 if (!pmd)
100414 return -ENOMEM;
100415 do {
100416@@ -158,7 +227,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
100417 pud_t *pud;
100418 unsigned long next;
100419
100420- pud = pud_alloc(&init_mm, pgd, addr);
100421+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
100422 if (!pud)
100423 return -ENOMEM;
100424 do {
100425@@ -218,6 +287,12 @@ int is_vmalloc_or_module_addr(const void *x)
100426 if (addr >= MODULES_VADDR && addr < MODULES_END)
100427 return 1;
100428 #endif
100429+
100430+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100431+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
100432+ return 1;
100433+#endif
100434+
100435 return is_vmalloc_addr(x);
100436 }
100437
100438@@ -238,8 +313,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
100439
100440 if (!pgd_none(*pgd)) {
100441 pud_t *pud = pud_offset(pgd, addr);
100442+#ifdef CONFIG_X86
100443+ if (!pud_large(*pud))
100444+#endif
100445 if (!pud_none(*pud)) {
100446 pmd_t *pmd = pmd_offset(pud, addr);
100447+#ifdef CONFIG_X86
100448+ if (!pmd_large(*pmd))
100449+#endif
100450 if (!pmd_none(*pmd)) {
100451 pte_t *ptep, pte;
100452
100453@@ -341,7 +422,7 @@ static void purge_vmap_area_lazy(void);
100454 * Allocate a region of KVA of the specified size and alignment, within the
100455 * vstart and vend.
100456 */
100457-static struct vmap_area *alloc_vmap_area(unsigned long size,
100458+static struct vmap_area * __size_overflow(1) alloc_vmap_area(unsigned long size,
100459 unsigned long align,
100460 unsigned long vstart, unsigned long vend,
100461 int node, gfp_t gfp_mask)
100462@@ -1182,13 +1263,27 @@ void __init vmalloc_init(void)
100463 for_each_possible_cpu(i) {
100464 struct vmap_block_queue *vbq;
100465 struct vfree_deferred *p;
100466+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100467+ struct stack_deferred *p2;
100468+#endif
100469
100470 vbq = &per_cpu(vmap_block_queue, i);
100471 spin_lock_init(&vbq->lock);
100472 INIT_LIST_HEAD(&vbq->free);
100473+
100474 p = &per_cpu(vfree_deferred, i);
100475 init_llist_head(&p->list);
100476- INIT_WORK(&p->wq, free_work);
100477+ INIT_WORK(&p->wq, vfree_work);
100478+
100479+ p = &per_cpu(vunmap_deferred, i);
100480+ init_llist_head(&p->list);
100481+ INIT_WORK(&p->wq, vunmap_work);
100482+
100483+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100484+ p2 = &per_cpu(stack_deferred, i);
100485+ init_llist_head(&p2->list.list);
100486+ INIT_WORK(&p2->wq, unmap_work);
100487+#endif
100488 }
100489
100490 /* Import existing vmlist entries. */
100491@@ -1313,6 +1408,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
100492 struct vm_struct *area;
100493
100494 BUG_ON(in_interrupt());
100495+
100496+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100497+ if (flags & VM_KERNEXEC) {
100498+ if (start != VMALLOC_START || end != VMALLOC_END)
100499+ return NULL;
100500+ start = (unsigned long)MODULES_EXEC_VADDR;
100501+ end = (unsigned long)MODULES_EXEC_END;
100502+ }
100503+#endif
100504+
100505 if (flags & VM_IOREMAP)
100506 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
100507
100508@@ -1511,13 +1616,37 @@ EXPORT_SYMBOL(vfree);
100509 */
100510 void vunmap(const void *addr)
100511 {
100512- BUG_ON(in_interrupt());
100513- might_sleep();
100514- if (addr)
100515+ if (!addr)
100516+ return;
100517+
100518+ if (unlikely(in_interrupt())) {
100519+ struct vfree_deferred *p = this_cpu_ptr(&vunmap_deferred);
100520+ if (llist_add((struct llist_node *)addr, &p->list))
100521+ schedule_work(&p->wq);
100522+ } else {
100523+ might_sleep();
100524 __vunmap(addr, 0);
100525+ }
100526 }
100527 EXPORT_SYMBOL(vunmap);
100528
100529+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100530+void unmap_process_stacks(struct task_struct *task)
100531+{
100532+ if (unlikely(in_interrupt())) {
100533+ struct stack_deferred *p = this_cpu_ptr(&stack_deferred);
100534+ struct stack_deferred_llist *list = task->stack;
100535+ list->stack = task->stack;
100536+ list->lowmem_stack = task->lowmem_stack;
100537+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
100538+ schedule_work(&p->wq);
100539+ } else {
100540+ __vunmap(task->stack, 0);
100541+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
100542+ }
100543+}
100544+#endif
100545+
100546 /**
100547 * vmap - map an array of pages into virtually contiguous space
100548 * @pages: array of page pointers
100549@@ -1538,6 +1667,11 @@ void *vmap(struct page **pages, unsigned int count,
100550 if (count > totalram_pages)
100551 return NULL;
100552
100553+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100554+ if (!(pgprot_val(prot) & _PAGE_NX))
100555+ flags |= VM_KERNEXEC;
100556+#endif
100557+
100558 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
100559 __builtin_return_address(0));
100560 if (!area)
100561@@ -1640,6 +1774,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
100562 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
100563 goto fail;
100564
100565+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100566+ if (!(pgprot_val(prot) & _PAGE_NX))
100567+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
100568+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
100569+ else
100570+#endif
100571+
100572 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
100573 start, end, node, gfp_mask, caller);
100574 if (!area)
100575@@ -1816,10 +1957,9 @@ EXPORT_SYMBOL(vzalloc_node);
100576 * For tight control over page level allocator and protection flags
100577 * use __vmalloc() instead.
100578 */
100579-
100580 void *vmalloc_exec(unsigned long size)
100581 {
100582- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
100583+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
100584 NUMA_NO_NODE, __builtin_return_address(0));
100585 }
100586
100587@@ -2126,6 +2266,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
100588 {
100589 struct vm_struct *area;
100590
100591+ BUG_ON(vma->vm_mirror);
100592+
100593 size = PAGE_ALIGN(size);
100594
100595 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
100596@@ -2608,7 +2750,11 @@ static int s_show(struct seq_file *m, void *p)
100597 v->addr, v->addr + v->size, v->size);
100598
100599 if (v->caller)
100600+#ifdef CONFIG_GRKERNSEC_HIDESYM
100601+ seq_printf(m, " %pK", v->caller);
100602+#else
100603 seq_printf(m, " %pS", v->caller);
100604+#endif
100605
100606 if (v->nr_pages)
100607 seq_printf(m, " pages=%d", v->nr_pages);
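
The vmalloc.c rework generalizes the deferred-vfree machinery: vunmap(), and under GRKERNSEC_KSTACKOVERFLOW the process-stack teardown, may now be reached from interrupt context, where sleeping is forbidden, so the address is pushed onto a per-CPU lock-free llist and a workqueue item performs the real unmap later. Note the kernel trick of reusing the freed region's own first bytes as the llist node. A userspace model of the push-and-drain pattern; llist_add_model() and drain_work() are illustrative stand-ins for llist_add() and the *_work() handlers:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct llist_node {
        struct llist_node *next;
};

static _Atomic(struct llist_node *) deferred_head;

/* Lock-free push; returns nonzero if the list was empty, mirroring
 * llist_add(), so the caller knows when to schedule the worker. */
static int llist_add_model(struct llist_node *n)
{
        struct llist_node *first = atomic_load(&deferred_head);

        do {
                n->next = first;
        } while (!atomic_compare_exchange_weak(&deferred_head, &first, n));
        return first == NULL;
}

/* Stands in for vfree_work()/vunmap_work(): take the whole list in one
 * atomic exchange, then release each entry outside the hot path. */
static void drain_work(void)
{
        struct llist_node *node = atomic_exchange(&deferred_head, NULL);

        while (node) {
                struct llist_node *next = node->next;

                free(node);     /* the real (sleepable) release */
                node = next;
        }
}

int main(void)
{
        for (int i = 0; i < 4; i++) {
                struct llist_node *n = malloc(sizeof(*n));

                if (!n)
                        return 1;
                if (llist_add_model(n))
                        printf("list was empty: schedule the worker\n");
        }
        drain_work();
        return 0;
}

As in the kernel code, the push reports whether the list was previously empty, so only the first caller needs to schedule the worker.
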
100608diff --git a/mm/vmstat.c b/mm/vmstat.c
100609index cdac773..7dd324e 100644
100610--- a/mm/vmstat.c
100611+++ b/mm/vmstat.c
100612@@ -24,6 +24,7 @@
100613 #include <linux/mm_inline.h>
100614 #include <linux/page_ext.h>
100615 #include <linux/page_owner.h>
100616+#include <linux/grsecurity.h>
100617
100618 #include "internal.h"
100619
100620@@ -83,7 +84,7 @@ void vm_events_fold_cpu(int cpu)
100621 *
100622 * vm_stat contains the global counters
100623 */
100624-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
100625+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
100626 EXPORT_SYMBOL(vm_stat);
100627
100628 #ifdef CONFIG_SMP
100629@@ -435,7 +436,7 @@ static int fold_diff(int *diff)
100630
100631 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
100632 if (diff[i]) {
100633- atomic_long_add(diff[i], &vm_stat[i]);
100634+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
100635 changes++;
100636 }
100637 return changes;
100638@@ -473,7 +474,7 @@ static int refresh_cpu_vm_stats(void)
100639 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
100640 if (v) {
100641
100642- atomic_long_add(v, &zone->vm_stat[i]);
100643+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100644 global_diff[i] += v;
100645 #ifdef CONFIG_NUMA
100646 /* 3 seconds idle till flush */
100647@@ -537,7 +538,7 @@ void cpu_vm_stats_fold(int cpu)
100648
100649 v = p->vm_stat_diff[i];
100650 p->vm_stat_diff[i] = 0;
100651- atomic_long_add(v, &zone->vm_stat[i]);
100652+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100653 global_diff[i] += v;
100654 }
100655 }
100656@@ -557,8 +558,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
100657 if (pset->vm_stat_diff[i]) {
100658 int v = pset->vm_stat_diff[i];
100659 pset->vm_stat_diff[i] = 0;
100660- atomic_long_add(v, &zone->vm_stat[i]);
100661- atomic_long_add(v, &vm_stat[i]);
100662+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100663+ atomic_long_add_unchecked(v, &vm_stat[i]);
100664 }
100665 }
100666 #endif
100667@@ -1291,10 +1292,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
100668 stat_items_size += sizeof(struct vm_event_state);
100669 #endif
100670
100671- v = kmalloc(stat_items_size, GFP_KERNEL);
100672+ v = kzalloc(stat_items_size, GFP_KERNEL);
100673 m->private = v;
100674 if (!v)
100675 return ERR_PTR(-ENOMEM);
100676+
100677+#ifdef CONFIG_GRKERNSEC_PROC_ADD
100678+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
100679+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
100680+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
100681+ && !in_group_p(grsec_proc_gid)
100682+#endif
100683+ )
100684+ return (unsigned long *)m->private + *pos;
100685+#endif
100686+#endif
100687+
100688 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
100689 v[i] = global_page_state(i);
100690 v += NR_VM_ZONE_STAT_ITEMS;
100691@@ -1526,10 +1539,16 @@ static int __init setup_vmstat(void)
100692 cpu_notifier_register_done();
100693 #endif
100694 #ifdef CONFIG_PROC_FS
100695- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
100696- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
100697- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
100698- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
100699+ {
100700+ mode_t gr_mode = S_IRUGO;
100701+#ifdef CONFIG_GRKERNSEC_PROC_ADD
100702+ gr_mode = S_IRUSR;
100703+#endif
100704+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
100705+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
100706+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
100707+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
100708+ }
100709 #endif
100710 return 0;
100711 }
100712diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
100713index 64c6bed..b79a5de 100644
100714--- a/net/8021q/vlan.c
100715+++ b/net/8021q/vlan.c
100716@@ -481,7 +481,7 @@ out:
100717 return NOTIFY_DONE;
100718 }
100719
100720-static struct notifier_block vlan_notifier_block __read_mostly = {
100721+static struct notifier_block vlan_notifier_block = {
100722 .notifier_call = vlan_device_event,
100723 };
100724
100725@@ -556,8 +556,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
100726 err = -EPERM;
100727 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
100728 break;
100729- if ((args.u.name_type >= 0) &&
100730- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
100731+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
100732 struct vlan_net *vn;
100733
100734 vn = net_generic(net, vlan_net_id);
100735diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
100736index 8ac8a5c..991defc 100644
100737--- a/net/8021q/vlan_netlink.c
100738+++ b/net/8021q/vlan_netlink.c
100739@@ -238,7 +238,7 @@ nla_put_failure:
100740 return -EMSGSIZE;
100741 }
100742
100743-struct rtnl_link_ops vlan_link_ops __read_mostly = {
100744+struct rtnl_link_ops vlan_link_ops = {
100745 .kind = "vlan",
100746 .maxtype = IFLA_VLAN_MAX,
100747 .policy = vlan_policy,
100748diff --git a/net/9p/client.c b/net/9p/client.c
100749index e86a9bea..e91f70e 100644
100750--- a/net/9p/client.c
100751+++ b/net/9p/client.c
100752@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
100753 len - inline_len);
100754 } else {
100755 err = copy_from_user(ename + inline_len,
100756- uidata, len - inline_len);
100757+ (char __force_user *)uidata, len - inline_len);
100758 if (err) {
100759 err = -EFAULT;
100760 goto out_err;
100761@@ -1570,7 +1570,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
100762 kernel_buf = 1;
100763 indata = data;
100764 } else
100765- indata = (__force char *)udata;
100766+ indata = (__force_kernel char *)udata;
100767 /*
100768 * response header len is 11
100769 * PDU Header(7) + IO Size (4)
100770@@ -1645,7 +1645,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
100771 kernel_buf = 1;
100772 odata = data;
100773 } else
100774- odata = (char *)udata;
100775+ odata = (char __force_kernel *)udata;
100776 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
100777 P9_ZC_HDR_SZ, kernel_buf, "dqd",
100778 fid->fid, offset, rsize);
100779diff --git a/net/9p/mod.c b/net/9p/mod.c
100780index 6ab36ae..6f1841b 100644
100781--- a/net/9p/mod.c
100782+++ b/net/9p/mod.c
100783@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
100784 void v9fs_register_trans(struct p9_trans_module *m)
100785 {
100786 spin_lock(&v9fs_trans_lock);
100787- list_add_tail(&m->list, &v9fs_trans_list);
100788+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
100789 spin_unlock(&v9fs_trans_lock);
100790 }
100791 EXPORT_SYMBOL(v9fs_register_trans);
100792@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
100793 void v9fs_unregister_trans(struct p9_trans_module *m)
100794 {
100795 spin_lock(&v9fs_trans_lock);
100796- list_del_init(&m->list);
100797+ pax_list_del_init((struct list_head *)&m->list);
100798 spin_unlock(&v9fs_trans_lock);
100799 }
100800 EXPORT_SYMBOL(v9fs_unregister_trans);
100801diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
100802index 80d08f6..de63fd1 100644
100803--- a/net/9p/trans_fd.c
100804+++ b/net/9p/trans_fd.c
100805@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
100806 oldfs = get_fs();
100807 set_fs(get_ds());
100808 /* The cast to a user pointer is valid due to the set_fs() */
100809- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
100810+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
100811 set_fs(oldfs);
100812
100813 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
100814diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
100815index af46bc4..f9adfcd 100644
100816--- a/net/appletalk/atalk_proc.c
100817+++ b/net/appletalk/atalk_proc.c
100818@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
100819 struct proc_dir_entry *p;
100820 int rc = -ENOMEM;
100821
100822- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
100823+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
100824 if (!atalk_proc_dir)
100825 goto out;
100826
100827diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
100828index 876fbe8..8bbea9f 100644
100829--- a/net/atm/atm_misc.c
100830+++ b/net/atm/atm_misc.c
100831@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
100832 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
100833 return 1;
100834 atm_return(vcc, truesize);
100835- atomic_inc(&vcc->stats->rx_drop);
100836+ atomic_inc_unchecked(&vcc->stats->rx_drop);
100837 return 0;
100838 }
100839 EXPORT_SYMBOL(atm_charge);
100840@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
100841 }
100842 }
100843 atm_return(vcc, guess);
100844- atomic_inc(&vcc->stats->rx_drop);
100845+ atomic_inc_unchecked(&vcc->stats->rx_drop);
100846 return NULL;
100847 }
100848 EXPORT_SYMBOL(atm_alloc_charge);
100849@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
100850
100851 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
100852 {
100853-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
100854+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
100855 __SONET_ITEMS
100856 #undef __HANDLE_ITEM
100857 }
100858@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
100859
100860 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
100861 {
100862-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100863+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
100864 __SONET_ITEMS
100865 #undef __HANDLE_ITEM
100866 }
100867diff --git a/net/atm/lec.c b/net/atm/lec.c
100868index 4b98f89..5a2f6cb 100644
100869--- a/net/atm/lec.c
100870+++ b/net/atm/lec.c
100871@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
100872 }
100873
100874 static struct lane2_ops lane2_ops = {
100875- lane2_resolve, /* resolve, spec 3.1.3 */
100876- lane2_associate_req, /* associate_req, spec 3.1.4 */
100877- NULL /* associate indicator, spec 3.1.5 */
100878+ .resolve = lane2_resolve,
100879+ .associate_req = lane2_associate_req,
100880+ .associate_indicator = NULL
100881 };
100882
100883 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
100884diff --git a/net/atm/lec.h b/net/atm/lec.h
100885index 4149db1..f2ab682 100644
100886--- a/net/atm/lec.h
100887+++ b/net/atm/lec.h
100888@@ -48,7 +48,7 @@ struct lane2_ops {
100889 const u8 *tlvs, u32 sizeoftlvs);
100890 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
100891 const u8 *tlvs, u32 sizeoftlvs);
100892-};
100893+} __no_const;
100894
100895 /*
100896 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
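Two related changes run through lec.c and lec.h: the positional initializer of lane2_ops becomes a designated one, which the constification GCC plugin needs in order to treat function-pointer structs as read-only data, and struct lane2_ops itself is tagged __no_const, a patch-provided attribute that exempts a type from forced constification when instances are legitimately written at runtime. The conversion pattern, on a made-up ops struct:

    struct demo_ops {
            int  (*resolve)(void);
            void (*associate_req)(void);
    } __no_const;                   /* patch-provided; empty without the plugin */

    static int  demo_resolve(void)       { return 0; }
    static void demo_associate_req(void) { }

    static struct demo_ops demo = {
            .resolve       = demo_resolve,   /* binds by name, not position */
            .associate_req = demo_associate_req,
    };

The mpoa_caches.c hunk below applies the same positional-to-designated rewrite to its two cache ops tables.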
100897diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
100898index d1b2d9a..d549f7f 100644
100899--- a/net/atm/mpoa_caches.c
100900+++ b/net/atm/mpoa_caches.c
100901@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
100902
100903
100904 static struct in_cache_ops ingress_ops = {
100905- in_cache_add_entry, /* add_entry */
100906- in_cache_get, /* get */
100907- in_cache_get_with_mask, /* get_with_mask */
100908- in_cache_get_by_vcc, /* get_by_vcc */
100909- in_cache_put, /* put */
100910- in_cache_remove_entry, /* remove_entry */
100911- cache_hit, /* cache_hit */
100912- clear_count_and_expired, /* clear_count */
100913- check_resolving_entries, /* check_resolving */
100914- refresh_entries, /* refresh */
100915- in_destroy_cache /* destroy_cache */
100916+ .add_entry = in_cache_add_entry,
100917+ .get = in_cache_get,
100918+ .get_with_mask = in_cache_get_with_mask,
100919+ .get_by_vcc = in_cache_get_by_vcc,
100920+ .put = in_cache_put,
100921+ .remove_entry = in_cache_remove_entry,
100922+ .cache_hit = cache_hit,
100923+ .clear_count = clear_count_and_expired,
100924+ .check_resolving = check_resolving_entries,
100925+ .refresh = refresh_entries,
100926+ .destroy_cache = in_destroy_cache
100927 };
100928
100929 static struct eg_cache_ops egress_ops = {
100930- eg_cache_add_entry, /* add_entry */
100931- eg_cache_get_by_cache_id, /* get_by_cache_id */
100932- eg_cache_get_by_tag, /* get_by_tag */
100933- eg_cache_get_by_vcc, /* get_by_vcc */
100934- eg_cache_get_by_src_ip, /* get_by_src_ip */
100935- eg_cache_put, /* put */
100936- eg_cache_remove_entry, /* remove_entry */
100937- update_eg_cache_entry, /* update */
100938- clear_expired, /* clear_expired */
100939- eg_destroy_cache /* destroy_cache */
100940+ .add_entry = eg_cache_add_entry,
100941+ .get_by_cache_id = eg_cache_get_by_cache_id,
100942+ .get_by_tag = eg_cache_get_by_tag,
100943+ .get_by_vcc = eg_cache_get_by_vcc,
100944+ .get_by_src_ip = eg_cache_get_by_src_ip,
100945+ .put = eg_cache_put,
100946+ .remove_entry = eg_cache_remove_entry,
100947+ .update = update_eg_cache_entry,
100948+ .clear_expired = clear_expired,
100949+ .destroy_cache = eg_destroy_cache
100950 };
100951
100952
100953diff --git a/net/atm/proc.c b/net/atm/proc.c
100954index bbb6461..cf04016 100644
100955--- a/net/atm/proc.c
100956+++ b/net/atm/proc.c
100957@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
100958 const struct k_atm_aal_stats *stats)
100959 {
100960 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
100961- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
100962- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
100963- atomic_read(&stats->rx_drop));
100964+ atomic_read_unchecked(&stats->tx), atomic_read_unchecked(&stats->tx_err),
100965+ atomic_read_unchecked(&stats->rx), atomic_read_unchecked(&stats->rx_err),
100966+ atomic_read_unchecked(&stats->rx_drop));
100967 }
100968
100969 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
100970diff --git a/net/atm/resources.c b/net/atm/resources.c
100971index 0447d5d..3cf4728 100644
100972--- a/net/atm/resources.c
100973+++ b/net/atm/resources.c
100974@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
100975 static void copy_aal_stats(struct k_atm_aal_stats *from,
100976 struct atm_aal_stats *to)
100977 {
100978-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
100979+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
100980 __AAL_STAT_ITEMS
100981 #undef __HANDLE_ITEM
100982 }
100983@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
100984 static void subtract_aal_stats(struct k_atm_aal_stats *from,
100985 struct atm_aal_stats *to)
100986 {
100987-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100988+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
100989 __AAL_STAT_ITEMS
100990 #undef __HANDLE_ITEM
100991 }
100992diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
100993index 919a5ce..cc6b444 100644
100994--- a/net/ax25/sysctl_net_ax25.c
100995+++ b/net/ax25/sysctl_net_ax25.c
100996@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
100997 {
100998 char path[sizeof("net/ax25/") + IFNAMSIZ];
100999 int k;
101000- struct ctl_table *table;
101001+ ctl_table_no_const *table;
101002
101003 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
101004 if (!table)
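ctl_table_no_const is another alias introduced by this patch: with struct ctl_table constified tree-wide, code that kmemdup()s a template sysctl table and then patches fields at runtime needs an explicitly writable view of the copy. Roughly:

    typedef struct ctl_table __no_const ctl_table_no_const;

    /* The static template stays read-only; the duplicate is mutable. */
    ctl_table_no_const *table = kmemdup(ax25_param_table,
                                        sizeof(ax25_param_table), GFP_KERNEL);
    if (table)
            table[0].data = &ax25_dev->values[0];   /* per-device fixup of the copy */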
101005diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
101006index 1e80539..676c37a 100644
101007--- a/net/batman-adv/bat_iv_ogm.c
101008+++ b/net/batman-adv/bat_iv_ogm.c
101009@@ -313,7 +313,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
101010
101011 /* randomize initial seqno to avoid collision */
101012 get_random_bytes(&random_seqno, sizeof(random_seqno));
101013- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
101014+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
101015
101016 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
101017 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
101018@@ -918,9 +918,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
101019 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
101020
101021 /* change sequence number to network order */
101022- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
101023+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
101024 batadv_ogm_packet->seqno = htonl(seqno);
101025- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
101026+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
101027
101028 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
101029
101030@@ -1597,7 +1597,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
101031 return;
101032
101033 /* could be changed by schedule_own_packet() */
101034- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
101035+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
101036
101037 if (ogm_packet->flags & BATADV_DIRECTLINK)
101038 has_directlink_flag = true;
101039diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
101040index 00f9e14..e1c7203 100644
101041--- a/net/batman-adv/fragmentation.c
101042+++ b/net/batman-adv/fragmentation.c
101043@@ -450,7 +450,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
101044 frag_header.packet_type = BATADV_UNICAST_FRAG;
101045 frag_header.version = BATADV_COMPAT_VERSION;
101046 frag_header.ttl = BATADV_TTL;
101047- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
101048+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
101049 frag_header.reserved = 0;
101050 frag_header.no = 0;
101051 frag_header.total_size = htons(skb->len);
101052diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
101053index 5467955..75ad4e3 100644
101054--- a/net/batman-adv/soft-interface.c
101055+++ b/net/batman-adv/soft-interface.c
101056@@ -296,7 +296,7 @@ send:
101057 primary_if->net_dev->dev_addr);
101058
101059 /* set broadcast sequence number */
101060- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
101061+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
101062 bcast_packet->seqno = htonl(seqno);
101063
101064 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
101065@@ -761,7 +761,7 @@ static int batadv_softif_init_late(struct net_device *dev)
101066 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
101067
101068 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
101069- atomic_set(&bat_priv->bcast_seqno, 1);
101070+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
101071 atomic_set(&bat_priv->tt.vn, 0);
101072 atomic_set(&bat_priv->tt.local_changes, 0);
101073 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
101074@@ -775,7 +775,7 @@ static int batadv_softif_init_late(struct net_device *dev)
101075
101076 /* randomize initial seqno to avoid collision */
101077 get_random_bytes(&random_seqno, sizeof(random_seqno));
101078- atomic_set(&bat_priv->frag_seqno, random_seqno);
101079+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
101080
101081 bat_priv->primary_if = NULL;
101082 bat_priv->num_ifaces = 0;
101083@@ -983,7 +983,7 @@ int batadv_softif_is_valid(const struct net_device *net_dev)
101084 return 0;
101085 }
101086
101087-struct rtnl_link_ops batadv_link_ops __read_mostly = {
101088+struct rtnl_link_ops batadv_link_ops = {
101089 .kind = "batadv",
101090 .priv_size = sizeof(struct batadv_priv),
101091 .setup = batadv_softif_init_early,
101092diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
101093index 8854c05..ee5d5497 100644
101094--- a/net/batman-adv/types.h
101095+++ b/net/batman-adv/types.h
101096@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
101097 struct batadv_hard_iface_bat_iv {
101098 unsigned char *ogm_buff;
101099 int ogm_buff_len;
101100- atomic_t ogm_seqno;
101101+ atomic_unchecked_t ogm_seqno;
101102 };
101103
101104 /**
101105@@ -768,7 +768,7 @@ struct batadv_priv {
101106 atomic_t bonding;
101107 atomic_t fragmentation;
101108 atomic_t packet_size_max;
101109- atomic_t frag_seqno;
101110+ atomic_unchecked_t frag_seqno;
101111 #ifdef CONFIG_BATMAN_ADV_BLA
101112 atomic_t bridge_loop_avoidance;
101113 #endif
101114@@ -787,7 +787,7 @@ struct batadv_priv {
101115 #endif
101116 uint32_t isolation_mark;
101117 uint32_t isolation_mark_mask;
101118- atomic_t bcast_seqno;
101119+ atomic_unchecked_t bcast_seqno;
101120 atomic_t bcast_queue_left;
101121 atomic_t batman_queue_left;
101122 char num_ifaces;
101123diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
101124index 2c245fd..dccf543 100644
101125--- a/net/bluetooth/hci_sock.c
101126+++ b/net/bluetooth/hci_sock.c
101127@@ -1067,7 +1067,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
101128 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
101129 }
101130
101131- len = min_t(unsigned int, len, sizeof(uf));
101132+ len = min((size_t)len, sizeof(uf));
101133 if (copy_from_user(&uf, optval, len)) {
101134 err = -EFAULT;
101135 break;
101136diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
101137index d04dc00..d25d576 100644
101138--- a/net/bluetooth/l2cap_core.c
101139+++ b/net/bluetooth/l2cap_core.c
101140@@ -3524,8 +3524,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
101141 break;
101142
101143 case L2CAP_CONF_RFC:
101144- if (olen == sizeof(rfc))
101145- memcpy(&rfc, (void *)val, olen);
101146+ if (olen != sizeof(rfc))
101147+ break;
101148+
101149+ memcpy(&rfc, (void *)val, olen);
101150
101151 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
101152 rfc.mode != chan->mode)
101153diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
101154index f65caf4..c07110c 100644
101155--- a/net/bluetooth/l2cap_sock.c
101156+++ b/net/bluetooth/l2cap_sock.c
101157@@ -634,7 +634,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
101158 struct sock *sk = sock->sk;
101159 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
101160 struct l2cap_options opts;
101161- int len, err = 0;
101162+ int err = 0;
101163+ size_t len = optlen;
101164 u32 opt;
101165
101166 BT_DBG("sk %p", sk);
101167@@ -661,7 +662,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
101168 opts.max_tx = chan->max_tx;
101169 opts.txwin_size = chan->tx_win;
101170
101171- len = min_t(unsigned int, sizeof(opts), optlen);
101172+ len = min(sizeof(opts), len);
101173 if (copy_from_user((char *) &opts, optval, len)) {
101174 err = -EFAULT;
101175 break;
101176@@ -748,7 +749,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101177 struct bt_security sec;
101178 struct bt_power pwr;
101179 struct l2cap_conn *conn;
101180- int len, err = 0;
101181+ int err = 0;
101182+ size_t len = optlen;
101183 u32 opt;
101184
101185 BT_DBG("sk %p", sk);
101186@@ -772,7 +774,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101187
101188 sec.level = BT_SECURITY_LOW;
101189
101190- len = min_t(unsigned int, sizeof(sec), optlen);
101191+ len = min(sizeof(sec), len);
101192 if (copy_from_user((char *) &sec, optval, len)) {
101193 err = -EFAULT;
101194 break;
101195@@ -868,7 +870,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101196
101197 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
101198
101199- len = min_t(unsigned int, sizeof(pwr), optlen);
101200+ len = min(sizeof(pwr), len);
101201 if (copy_from_user((char *) &pwr, optval, len)) {
101202 err = -EFAULT;
101203 break;
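The recurring change in these Bluetooth setsockopt handlers keeps the copy length in a size_t seeded from optlen and replaces min_t() with the type-checked min(). min_t() force-casts both operands to the named type, which can launder a questionable conversion; with both sides already size_t, min() needs no cast and any future type mismatch produces a compiler warning. In isolation, with the names from the hunk above:

    size_t len = optlen;            /* unsigned end to end */

    len = min(sizeof(opts), len);   /* both size_t: no forced cast */
    if (copy_from_user((char *)&opts, optval, len))
            return -EFAULT;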
101204diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
101205index 2348176..b9b6cf2 100644
101206--- a/net/bluetooth/rfcomm/sock.c
101207+++ b/net/bluetooth/rfcomm/sock.c
101208@@ -687,7 +687,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
101209 struct sock *sk = sock->sk;
101210 struct bt_security sec;
101211 int err = 0;
101212- size_t len;
101213+ size_t len = optlen;
101214 u32 opt;
101215
101216 BT_DBG("sk %p", sk);
101217@@ -709,7 +709,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
101218
101219 sec.level = BT_SECURITY_LOW;
101220
101221- len = min_t(unsigned int, sizeof(sec), optlen);
101222+ len = min(sizeof(sec), len);
101223 if (copy_from_user((char *) &sec, optval, len)) {
101224 err = -EFAULT;
101225 break;
101226diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
101227index 8e385a0..a5bdd8e 100644
101228--- a/net/bluetooth/rfcomm/tty.c
101229+++ b/net/bluetooth/rfcomm/tty.c
101230@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
101231 BT_DBG("tty %p id %d", tty, tty->index);
101232
101233 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
101234- dev->channel, dev->port.count);
101235+ dev->channel, atomic_read(&dev->port.count));
101236
101237 err = tty_port_open(&dev->port, tty, filp);
101238 if (err)
101239@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
101240 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
101241
101242 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
101243- dev->port.count);
101244+ atomic_read(&dev->port.count));
101245
101246 tty_port_close(&dev->port, tty, filp);
101247 }
101248diff --git a/net/bridge/br.c b/net/bridge/br.c
101249index 44425af..4ee730e 100644
101250--- a/net/bridge/br.c
101251+++ b/net/bridge/br.c
101252@@ -147,6 +147,8 @@ static int __init br_init(void)
101253 {
101254 int err;
101255
101256+ BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
101257+
101258 err = stp_proto_register(&br_stp_proto);
101259 if (err < 0) {
101260 pr_err("bridge: can't register sap for STP\n");
101261diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
101262index 9f5eb55..45ab9c5 100644
101263--- a/net/bridge/br_netlink.c
101264+++ b/net/bridge/br_netlink.c
101265@@ -566,7 +566,7 @@ static struct rtnl_af_ops br_af_ops = {
101266 .get_link_af_size = br_get_link_af_size,
101267 };
101268
101269-struct rtnl_link_ops br_link_ops __read_mostly = {
101270+struct rtnl_link_ops br_link_ops = {
101271 .kind = "bridge",
101272 .priv_size = sizeof(struct net_bridge),
101273 .setup = br_dev_setup,
101274diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
101275index d9a8c05..8dadc6c6 100644
101276--- a/net/bridge/netfilter/ebtables.c
101277+++ b/net/bridge/netfilter/ebtables.c
101278@@ -1533,7 +1533,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
101279 tmp.valid_hooks = t->table->valid_hooks;
101280 }
101281 mutex_unlock(&ebt_mutex);
101282- if (copy_to_user(user, &tmp, *len) != 0) {
101283+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101284 BUGPRINT("c2u Didn't work\n");
101285 ret = -EFAULT;
101286 break;
101287@@ -2339,7 +2339,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101288 goto out;
101289 tmp.valid_hooks = t->valid_hooks;
101290
101291- if (copy_to_user(user, &tmp, *len) != 0) {
101292+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101293 ret = -EFAULT;
101294 break;
101295 }
101296@@ -2350,7 +2350,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101297 tmp.entries_size = t->table->entries_size;
101298 tmp.valid_hooks = t->table->valid_hooks;
101299
101300- if (copy_to_user(user, &tmp, *len) != 0) {
101301+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101302 ret = -EFAULT;
101303 break;
101304 }
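All three ebtables hunks add the same guard: *len is user-controlled, and copying *len bytes out of the fixed-size on-stack tmp would disclose adjacent kernel stack once *len exceeds sizeof(tmp). The rule generalizes to any get-handler; as a hypothetical helper:

    static int copy_bounded_to_user(void __user *dst, const void *src,
                                    size_t user_len, size_t obj_size)
    {
            if (user_len > obj_size)        /* would leak adjacent memory */
                    return -EFAULT;
            return copy_to_user(dst, src, user_len) ? -EFAULT : 0;
    }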
101305diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
101306index f5afda1..dcf770a 100644
101307--- a/net/caif/cfctrl.c
101308+++ b/net/caif/cfctrl.c
101309@@ -10,6 +10,7 @@
101310 #include <linux/spinlock.h>
101311 #include <linux/slab.h>
101312 #include <linux/pkt_sched.h>
101313+#include <linux/sched.h>
101314 #include <net/caif/caif_layer.h>
101315 #include <net/caif/cfpkt.h>
101316 #include <net/caif/cfctrl.h>
101317@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
101318 memset(&dev_info, 0, sizeof(dev_info));
101319 dev_info.id = 0xff;
101320 cfsrvl_init(&this->serv, 0, &dev_info, false);
101321- atomic_set(&this->req_seq_no, 1);
101322- atomic_set(&this->rsp_seq_no, 1);
101323+ atomic_set_unchecked(&this->req_seq_no, 1);
101324+ atomic_set_unchecked(&this->rsp_seq_no, 1);
101325 this->serv.layer.receive = cfctrl_recv;
101326 sprintf(this->serv.layer.name, "ctrl");
101327 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
101328@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
101329 struct cfctrl_request_info *req)
101330 {
101331 spin_lock_bh(&ctrl->info_list_lock);
101332- atomic_inc(&ctrl->req_seq_no);
101333- req->sequence_no = atomic_read(&ctrl->req_seq_no);
101334+ atomic_inc_unchecked(&ctrl->req_seq_no);
101335+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
101336 list_add_tail(&req->list, &ctrl->list);
101337 spin_unlock_bh(&ctrl->info_list_lock);
101338 }
101339@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
101340 if (p != first)
101341 pr_warn("Requests are not received in order\n");
101342
101343- atomic_set(&ctrl->rsp_seq_no,
101344+ atomic_set_unchecked(&ctrl->rsp_seq_no,
101345 p->sequence_no);
101346 list_del(&p->list);
101347 goto out;
101348diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
101349index 67a4a36..8d28068 100644
101350--- a/net/caif/chnl_net.c
101351+++ b/net/caif/chnl_net.c
101352@@ -515,7 +515,7 @@ static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
101353 };
101354
101355
101356-static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
101357+static struct rtnl_link_ops ipcaif_link_ops = {
101358 .kind = "caif",
101359 .priv_size = sizeof(struct chnl_net),
101360 .setup = ipcaif_net_setup,
101361diff --git a/net/can/af_can.c b/net/can/af_can.c
101362index 32d710e..93bcf05 100644
101363--- a/net/can/af_can.c
101364+++ b/net/can/af_can.c
101365@@ -884,7 +884,7 @@ static const struct net_proto_family can_family_ops = {
101366 };
101367
101368 /* notifier block for netdevice event */
101369-static struct notifier_block can_netdev_notifier __read_mostly = {
101370+static struct notifier_block can_netdev_notifier = {
101371 .notifier_call = can_notifier,
101372 };
101373
101374diff --git a/net/can/bcm.c b/net/can/bcm.c
101375index ee9ffd9..dfdf3d4 100644
101376--- a/net/can/bcm.c
101377+++ b/net/can/bcm.c
101378@@ -1619,7 +1619,7 @@ static int __init bcm_module_init(void)
101379 }
101380
101381 /* create /proc/net/can-bcm directory */
101382- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
101383+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
101384 return 0;
101385 }
101386
101387diff --git a/net/can/gw.c b/net/can/gw.c
101388index 295f62e..0c3b09e 100644
101389--- a/net/can/gw.c
101390+++ b/net/can/gw.c
101391@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
101392 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
101393
101394 static HLIST_HEAD(cgw_list);
101395-static struct notifier_block notifier;
101396
101397 static struct kmem_cache *cgw_cache __read_mostly;
101398
101399@@ -947,6 +946,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
101400 return err;
101401 }
101402
101403+static struct notifier_block notifier = {
101404+ .notifier_call = cgw_notifier
101405+};
101406+
101407 static __init int cgw_module_init(void)
101408 {
101409 /* sanitize given module parameter */
101410@@ -962,7 +965,6 @@ static __init int cgw_module_init(void)
101411 return -ENOMEM;
101412
101413 /* set notifier */
101414- notifier.notifier_call = cgw_notifier;
101415 register_netdevice_notifier(&notifier);
101416
101417 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
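The can/gw.c change is a constification enabler rather than a functional one: assigning notifier.notifier_call during cgw_module_init() forces the notifier_block to live in writable data, whereas a static designated initializer lets it be placed read-only. The resulting shape:

    /* was: static struct notifier_block notifier; plus a runtime
     *      notifier.notifier_call = cgw_notifier; in the init function */
    static struct notifier_block notifier = {
            .notifier_call = cgw_notifier   /* fixed at compile time */
    };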
101418diff --git a/net/can/proc.c b/net/can/proc.c
101419index 1a19b98..df2b4ec 100644
101420--- a/net/can/proc.c
101421+++ b/net/can/proc.c
101422@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
101423 void can_init_proc(void)
101424 {
101425 /* create /proc/net/can directory */
101426- can_dir = proc_mkdir("can", init_net.proc_net);
101427+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
101428
101429 if (!can_dir) {
101430 printk(KERN_INFO "can: failed to create /proc/net/can . "
101431diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
101432index 74d30ec..25df678 100644
101433--- a/net/ceph/messenger.c
101434+++ b/net/ceph/messenger.c
101435@@ -188,7 +188,7 @@ static void con_fault(struct ceph_connection *con);
101436 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
101437
101438 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
101439-static atomic_t addr_str_seq = ATOMIC_INIT(0);
101440+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
101441
101442 static struct page *zero_page; /* used in certain error cases */
101443
101444@@ -199,7 +199,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
101445 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
101446 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
101447
101448- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
101449+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
101450 s = addr_str[i];
101451
101452 switch (ss->ss_family) {
101453diff --git a/net/compat.c b/net/compat.c
101454index f7bd286..76ea56a 100644
101455--- a/net/compat.c
101456+++ b/net/compat.c
101457@@ -100,20 +100,20 @@ ssize_t get_compat_msghdr(struct msghdr *kmsg,
101458
101459 #define CMSG_COMPAT_FIRSTHDR(msg) \
101460 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
101461- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
101462+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
101463 (struct compat_cmsghdr __user *)NULL)
101464
101465 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
101466 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
101467 (ucmlen) <= (unsigned long) \
101468 ((mhdr)->msg_controllen - \
101469- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
101470+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
101471
101472 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
101473 struct compat_cmsghdr __user *cmsg, int cmsg_len)
101474 {
101475 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
101476- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
101477+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
101478 msg->msg_controllen)
101479 return NULL;
101480 return (struct compat_cmsghdr __user *)ptr;
101481@@ -203,7 +203,7 @@ Efault:
101482
101483 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
101484 {
101485- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
101486+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
101487 struct compat_cmsghdr cmhdr;
101488 struct compat_timeval ctv;
101489 struct compat_timespec cts[3];
101490@@ -259,7 +259,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
101491
101492 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
101493 {
101494- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
101495+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
101496 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
101497 int fdnum = scm->fp->count;
101498 struct file **fp = scm->fp->fp;
101499@@ -347,7 +347,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
101500 return -EFAULT;
101501 old_fs = get_fs();
101502 set_fs(KERNEL_DS);
101503- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
101504+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
101505 set_fs(old_fs);
101506
101507 return err;
101508@@ -408,7 +408,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
101509 len = sizeof(ktime);
101510 old_fs = get_fs();
101511 set_fs(KERNEL_DS);
101512- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
101513+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
101514 set_fs(old_fs);
101515
101516 if (!err) {
101517@@ -551,7 +551,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101518 case MCAST_JOIN_GROUP:
101519 case MCAST_LEAVE_GROUP:
101520 {
101521- struct compat_group_req __user *gr32 = (void *)optval;
101522+ struct compat_group_req __user *gr32 = (void __user *)optval;
101523 struct group_req __user *kgr =
101524 compat_alloc_user_space(sizeof(struct group_req));
101525 u32 interface;
101526@@ -572,7 +572,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101527 case MCAST_BLOCK_SOURCE:
101528 case MCAST_UNBLOCK_SOURCE:
101529 {
101530- struct compat_group_source_req __user *gsr32 = (void *)optval;
101531+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
101532 struct group_source_req __user *kgsr = compat_alloc_user_space(
101533 sizeof(struct group_source_req));
101534 u32 interface;
101535@@ -593,7 +593,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101536 }
101537 case MCAST_MSFILTER:
101538 {
101539- struct compat_group_filter __user *gf32 = (void *)optval;
101540+ struct compat_group_filter __user *gf32 = (void __user *)optval;
101541 struct group_filter __user *kgf;
101542 u32 interface, fmode, numsrc;
101543
101544@@ -631,7 +631,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
101545 char __user *optval, int __user *optlen,
101546 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
101547 {
101548- struct compat_group_filter __user *gf32 = (void *)optval;
101549+ struct compat_group_filter __user *gf32 = (void __user *)optval;
101550 struct group_filter __user *kgf;
101551 int __user *koptlen;
101552 u32 interface, fmode, numsrc;
101553@@ -775,7 +775,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
101554
101555 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
101556 return -EINVAL;
101557- if (copy_from_user(a, args, nas[call]))
101558+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
101559 return -EFAULT;
101560 a0 = a[0];
101561 a1 = a[1];
101562diff --git a/net/core/datagram.c b/net/core/datagram.c
101563index df493d6..1145766 100644
101564--- a/net/core/datagram.c
101565+++ b/net/core/datagram.c
101566@@ -302,7 +302,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
101567 }
101568
101569 kfree_skb(skb);
101570- atomic_inc(&sk->sk_drops);
101571+ atomic_inc_unchecked(&sk->sk_drops);
101572 sk_mem_reclaim_partial(sk);
101573
101574 return err;
101575diff --git a/net/core/dev.c b/net/core/dev.c
101576index 4ff46f8..e877e78 100644
101577--- a/net/core/dev.c
101578+++ b/net/core/dev.c
101579@@ -1680,14 +1680,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
101580 {
101581 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
101582 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
101583- atomic_long_inc(&dev->rx_dropped);
101584+ atomic_long_inc_unchecked(&dev->rx_dropped);
101585 kfree_skb(skb);
101586 return NET_RX_DROP;
101587 }
101588 }
101589
101590 if (unlikely(!is_skb_forwardable(dev, skb))) {
101591- atomic_long_inc(&dev->rx_dropped);
101592+ atomic_long_inc_unchecked(&dev->rx_dropped);
101593 kfree_skb(skb);
101594 return NET_RX_DROP;
101595 }
101596@@ -2958,7 +2958,7 @@ recursion_alert:
101597 drop:
101598 rcu_read_unlock_bh();
101599
101600- atomic_long_inc(&dev->tx_dropped);
101601+ atomic_long_inc_unchecked(&dev->tx_dropped);
101602 kfree_skb_list(skb);
101603 return rc;
101604 out:
101605@@ -3301,7 +3301,7 @@ enqueue:
101606
101607 local_irq_restore(flags);
101608
101609- atomic_long_inc(&skb->dev->rx_dropped);
101610+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
101611 kfree_skb(skb);
101612 return NET_RX_DROP;
101613 }
101614@@ -3378,7 +3378,7 @@ int netif_rx_ni(struct sk_buff *skb)
101615 }
101616 EXPORT_SYMBOL(netif_rx_ni);
101617
101618-static void net_tx_action(struct softirq_action *h)
101619+static __latent_entropy void net_tx_action(void)
101620 {
101621 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
101622
101623@@ -3711,7 +3711,7 @@ ncls:
101624 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
101625 } else {
101626 drop:
101627- atomic_long_inc(&skb->dev->rx_dropped);
101628+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
101629 kfree_skb(skb);
101630 /* Jamal, now you will not be able to escape explaining
101631 * me how you were going to use this. :-)
101632@@ -4599,7 +4599,7 @@ out_unlock:
101633 return work;
101634 }
101635
101636-static void net_rx_action(struct softirq_action *h)
101637+static __latent_entropy void net_rx_action(void)
101638 {
101639 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
101640 unsigned long time_limit = jiffies + 2;
101641@@ -6610,8 +6610,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
101642 } else {
101643 netdev_stats_to_stats64(storage, &dev->stats);
101644 }
101645- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
101646- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
101647+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
101648+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
101649 return storage;
101650 }
101651 EXPORT_SYMBOL(dev_get_stats);
101652diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
101653index b94b1d2..da3ed7c 100644
101654--- a/net/core/dev_ioctl.c
101655+++ b/net/core/dev_ioctl.c
101656@@ -368,8 +368,13 @@ void dev_load(struct net *net, const char *name)
101657 no_module = !dev;
101658 if (no_module && capable(CAP_NET_ADMIN))
101659 no_module = request_module("netdev-%s", name);
101660- if (no_module && capable(CAP_SYS_MODULE))
101661+ if (no_module && capable(CAP_SYS_MODULE)) {
101662+#ifdef CONFIG_GRKERNSEC_MODHARDEN
101663+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
101664+#else
101665 request_module("%s", name);
101666+#endif
101667+ }
101668 }
101669 EXPORT_SYMBOL(dev_load);
101670
101671diff --git a/net/core/filter.c b/net/core/filter.c
101672index ec9baea..dd6195d 100644
101673--- a/net/core/filter.c
101674+++ b/net/core/filter.c
101675@@ -533,7 +533,11 @@ do_pass:
101676
101677 /* Unknown instruction. */
101678 default:
101679- goto err;
101680+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u jf:%u k:%u\n",
101681+ fp->code, fp->jt, fp->jf, fp->k);
101682+ kfree(addrs);
101683+ BUG();
101684+ return -EINVAL;
101685 }
101686
101687 insn++;
101688@@ -577,7 +581,7 @@ static int check_load_and_stores(const struct sock_filter *filter, int flen)
101689 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
101690 int pc, ret = 0;
101691
101692- BUILD_BUG_ON(BPF_MEMWORDS > 16);
101693+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
101694
101695 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
101696 if (!masks)
101697@@ -992,7 +996,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
101698 if (!fp)
101699 return -ENOMEM;
101700
101701- memcpy(fp->insns, fprog->filter, fsize);
101702+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
101703
101704 fp->len = fprog->len;
101705 /* Since unattached filters are not copied back to user
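BUILD_BUG_ON(cond) breaks the build when cond is true. check_load_and_stores() tracks BPF scratch-memory liveness in u16 masks, one bit per memory word, so the assertion is tightened from "more than 16 would overflow" to "must be exactly 16": any future change to BPF_MEMWORDS now forces a review of the mask code instead of compiling quietly. The invariant being pinned:

    u16 memvalid = 0;                       /* one liveness bit per cell */

    BUILD_BUG_ON(BPF_MEMWORDS != 16);       /* mask width must match exactly */
    memvalid |= 1u << (BPF_MEMWORDS - 1);   /* highest cell still fits in u16 */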
101706diff --git a/net/core/flow.c b/net/core/flow.c
101707index 1033725..340f65d 100644
101708--- a/net/core/flow.c
101709+++ b/net/core/flow.c
101710@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
101711 static int flow_entry_valid(struct flow_cache_entry *fle,
101712 struct netns_xfrm *xfrm)
101713 {
101714- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
101715+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
101716 return 0;
101717 if (fle->object && !fle->object->ops->check(fle->object))
101718 return 0;
101719@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
101720 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
101721 fcp->hash_count++;
101722 }
101723- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
101724+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
101725 flo = fle->object;
101726 if (!flo)
101727 goto ret_object;
101728@@ -263,7 +263,7 @@ nocache:
101729 }
101730 flo = resolver(net, key, family, dir, flo, ctx);
101731 if (fle) {
101732- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
101733+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
101734 if (!IS_ERR(flo))
101735 fle->object = flo;
101736 else
101737diff --git a/net/core/neighbour.c b/net/core/neighbour.c
101738index 8d614c9..55752ea 100644
101739--- a/net/core/neighbour.c
101740+++ b/net/core/neighbour.c
101741@@ -2802,7 +2802,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
101742 void __user *buffer, size_t *lenp, loff_t *ppos)
101743 {
101744 int size, ret;
101745- struct ctl_table tmp = *ctl;
101746+ ctl_table_no_const tmp = *ctl;
101747
101748 tmp.extra1 = &zero;
101749 tmp.extra2 = &unres_qlen_max;
101750@@ -2864,7 +2864,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
101751 void __user *buffer,
101752 size_t *lenp, loff_t *ppos)
101753 {
101754- struct ctl_table tmp = *ctl;
101755+ ctl_table_no_const tmp = *ctl;
101756 int ret;
101757
101758 tmp.extra1 = &zero;
101759diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
101760index 2bf8329..2eb1423 100644
101761--- a/net/core/net-procfs.c
101762+++ b/net/core/net-procfs.c
101763@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
101764 struct rtnl_link_stats64 temp;
101765 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
101766
101767- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101768+ if (gr_proc_is_restricted())
101769+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101770+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
101771+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
101772+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
101773+ else
101774+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101775 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
101776 dev->name, stats->rx_bytes, stats->rx_packets,
101777 stats->rx_errors,
101778@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
101779 return 0;
101780 }
101781
101782-static const struct seq_operations dev_seq_ops = {
101783+const struct seq_operations dev_seq_ops = {
101784 .start = dev_seq_start,
101785 .next = dev_seq_next,
101786 .stop = dev_seq_stop,
101787@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
101788
101789 static int softnet_seq_open(struct inode *inode, struct file *file)
101790 {
101791- return seq_open(file, &softnet_seq_ops);
101792+ return seq_open_restrict(file, &softnet_seq_ops);
101793 }
101794
101795 static const struct file_operations softnet_seq_fops = {
101796@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
101797 else
101798 seq_printf(seq, "%04x", ntohs(pt->type));
101799
101800+#ifdef CONFIG_GRKERNSEC_HIDESYM
101801+ seq_printf(seq, " %-8s %pf\n",
101802+ pt->dev ? pt->dev->name : "", NULL);
101803+#else
101804 seq_printf(seq, " %-8s %pf\n",
101805 pt->dev ? pt->dev->name : "", pt->func);
101806+#endif
101807 }
101808
101809 return 0;
101810diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
101811index 9993412..2a4672b 100644
101812--- a/net/core/net-sysfs.c
101813+++ b/net/core/net-sysfs.c
101814@@ -279,7 +279,7 @@ static ssize_t carrier_changes_show(struct device *dev,
101815 {
101816 struct net_device *netdev = to_net_dev(dev);
101817 return sprintf(buf, fmt_dec,
101818- atomic_read(&netdev->carrier_changes));
101819+ atomic_read_unchecked(&netdev->carrier_changes));
101820 }
101821 static DEVICE_ATTR_RO(carrier_changes);
101822
101823diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
101824index ce780c7..6d296b3 100644
101825--- a/net/core/net_namespace.c
101826+++ b/net/core/net_namespace.c
101827@@ -448,7 +448,7 @@ static int __register_pernet_operations(struct list_head *list,
101828 int error;
101829 LIST_HEAD(net_exit_list);
101830
101831- list_add_tail(&ops->list, list);
101832+ pax_list_add_tail((struct list_head *)&ops->list, list);
101833 if (ops->init || (ops->id && ops->size)) {
101834 for_each_net(net) {
101835 error = ops_init(ops, net);
101836@@ -461,7 +461,7 @@ static int __register_pernet_operations(struct list_head *list,
101837
101838 out_undo:
101839 /* If I have an error cleanup all namespaces I initialized */
101840- list_del(&ops->list);
101841+ pax_list_del((struct list_head *)&ops->list);
101842 ops_exit_list(ops, &net_exit_list);
101843 ops_free_list(ops, &net_exit_list);
101844 return error;
101845@@ -472,7 +472,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
101846 struct net *net;
101847 LIST_HEAD(net_exit_list);
101848
101849- list_del(&ops->list);
101850+ pax_list_del((struct list_head *)&ops->list);
101851 for_each_net(net)
101852 list_add_tail(&net->exit_list, &net_exit_list);
101853 ops_exit_list(ops, &net_exit_list);
101854@@ -606,7 +606,7 @@ int register_pernet_device(struct pernet_operations *ops)
101855 mutex_lock(&net_mutex);
101856 error = register_pernet_operations(&pernet_list, ops);
101857 if (!error && (first_device == &pernet_list))
101858- first_device = &ops->list;
101859+ first_device = (struct list_head *)&ops->list;
101860 mutex_unlock(&net_mutex);
101861 return error;
101862 }
101863diff --git a/net/core/netpoll.c b/net/core/netpoll.c
101864index e0ad5d1..04fa7f7 100644
101865--- a/net/core/netpoll.c
101866+++ b/net/core/netpoll.c
101867@@ -377,7 +377,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
101868 struct udphdr *udph;
101869 struct iphdr *iph;
101870 struct ethhdr *eth;
101871- static atomic_t ip_ident;
101872+ static atomic_unchecked_t ip_ident;
101873 struct ipv6hdr *ip6h;
101874
101875 udp_len = len + sizeof(*udph);
101876@@ -448,7 +448,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
101877 put_unaligned(0x45, (unsigned char *)iph);
101878 iph->tos = 0;
101879 put_unaligned(htons(ip_len), &(iph->tot_len));
101880- iph->id = htons(atomic_inc_return(&ip_ident));
101881+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
101882 iph->frag_off = 0;
101883 iph->ttl = 64;
101884 iph->protocol = IPPROTO_UDP;
101885diff --git a/net/core/pktgen.c b/net/core/pktgen.c
101886index 352d183..1bddfaf 100644
101887--- a/net/core/pktgen.c
101888+++ b/net/core/pktgen.c
101889@@ -3755,7 +3755,7 @@ static int __net_init pg_net_init(struct net *net)
101890 pn->net = net;
101891 INIT_LIST_HEAD(&pn->pktgen_threads);
101892 pn->pktgen_exiting = false;
101893- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
101894+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
101895 if (!pn->proc_dir) {
101896 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
101897 return -ENODEV;
101898diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
101899index 76ec6c5..9cfb81c 100644
101900--- a/net/core/rtnetlink.c
101901+++ b/net/core/rtnetlink.c
101902@@ -60,7 +60,7 @@ struct rtnl_link {
101903 rtnl_doit_func doit;
101904 rtnl_dumpit_func dumpit;
101905 rtnl_calcit_func calcit;
101906-};
101907+} __no_const;
101908
101909 static DEFINE_MUTEX(rtnl_mutex);
101910
101911@@ -306,10 +306,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
101912 * to use the ops for creating device. So do not
101913 * fill up dellink as well. That disables rtnl_dellink.
101914 */
101915- if (ops->setup && !ops->dellink)
101916- ops->dellink = unregister_netdevice_queue;
101917+ if (ops->setup && !ops->dellink) {
101918+ pax_open_kernel();
101919+ *(void **)&ops->dellink = unregister_netdevice_queue;
101920+ pax_close_kernel();
101921+ }
101922
101923- list_add_tail(&ops->list, &link_ops);
101924+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
101925 return 0;
101926 }
101927 EXPORT_SYMBOL_GPL(__rtnl_link_register);
101928@@ -356,7 +359,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
101929 for_each_net(net) {
101930 __rtnl_kill_links(net, ops);
101931 }
101932- list_del(&ops->list);
101933+ pax_list_del((struct list_head *)&ops->list);
101934 }
101935 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
101936
101937@@ -1035,7 +1038,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
101938 (dev->ifalias &&
101939 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
101940 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
101941- atomic_read(&dev->carrier_changes)))
101942+ atomic_read_unchecked(&dev->carrier_changes)))
101943 goto nla_put_failure;
101944
101945 if (1) {
101946@@ -2094,6 +2097,10 @@ replay:
101947 if (IS_ERR(dest_net))
101948 return PTR_ERR(dest_net);
101949
101950+ err = -EPERM;
101951+ if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
101952+ goto out;
101953+
101954 dev = rtnl_create_link(dest_net, ifname, name_assign_type, ops, tb);
101955 if (IS_ERR(dev)) {
101956 err = PTR_ERR(dev);
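pax_open_kernel() and pax_close_kernel() are patch-provided primitives that open a short window in which otherwise read-only kernel data may be written (on x86 roughly by toggling CR0.WP with preemption disabled). They let structures the constify plugin has made read-only still receive the rare legitimate runtime fixup, as in the __rtnl_link_register() hunk:

    pax_open_kernel();
    *(void **)&ops->dellink = unregister_netdevice_queue;   /* r/o field */
    pax_close_kernel();

The same open/write/close pattern appears in the sock_diag hunks further down.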
101957diff --git a/net/core/scm.c b/net/core/scm.c
101958index 3b6899b..cf36238 100644
101959--- a/net/core/scm.c
101960+++ b/net/core/scm.c
101961@@ -209,7 +209,7 @@ EXPORT_SYMBOL(__scm_send);
101962 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
101963 {
101964 struct cmsghdr __user *cm
101965- = (__force struct cmsghdr __user *)msg->msg_control;
101966+ = (struct cmsghdr __force_user *)msg->msg_control;
101967 struct cmsghdr cmhdr;
101968 int cmlen = CMSG_LEN(len);
101969 int err;
101970@@ -232,7 +232,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
101971 err = -EFAULT;
101972 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
101973 goto out;
101974- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
101975+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
101976 goto out;
101977 cmlen = CMSG_SPACE(len);
101978 if (msg->msg_controllen < cmlen)
101979@@ -248,7 +248,7 @@ EXPORT_SYMBOL(put_cmsg);
101980 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
101981 {
101982 struct cmsghdr __user *cm
101983- = (__force struct cmsghdr __user*)msg->msg_control;
101984+ = (struct cmsghdr __force_user *)msg->msg_control;
101985
101986 int fdmax = 0;
101987 int fdnum = scm->fp->count;
101988@@ -268,7 +268,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
101989 if (fdnum < fdmax)
101990 fdmax = fdnum;
101991
101992- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
101993+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
101994 i++, cmfptr++)
101995 {
101996 struct socket *sock;
101997diff --git a/net/core/skbuff.c b/net/core/skbuff.c
101998index 62c67be..01893a0a 100644
101999--- a/net/core/skbuff.c
102000+++ b/net/core/skbuff.c
102001@@ -2123,7 +2123,7 @@ EXPORT_SYMBOL(__skb_checksum);
102002 __wsum skb_checksum(const struct sk_buff *skb, int offset,
102003 int len, __wsum csum)
102004 {
102005- const struct skb_checksum_ops ops = {
102006+ static const struct skb_checksum_ops ops = {
102007 .update = csum_partial_ext,
102008 .combine = csum_block_add_ext,
102009 };
102010@@ -3363,12 +3363,14 @@ void __init skb_init(void)
102011 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
102012 sizeof(struct sk_buff),
102013 0,
102014- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
102015+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
102016+ SLAB_NO_SANITIZE,
102017 NULL);
102018 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
102019 sizeof(struct sk_buff_fclones),
102020 0,
102021- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
102022+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
102023+ SLAB_NO_SANITIZE,
102024 NULL);
102025 }
102026
102027diff --git a/net/core/sock.c b/net/core/sock.c
102028index 1c7a33d..a3817e2 100644
102029--- a/net/core/sock.c
102030+++ b/net/core/sock.c
102031@@ -441,7 +441,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
102032 struct sk_buff_head *list = &sk->sk_receive_queue;
102033
102034 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
102035- atomic_inc(&sk->sk_drops);
102036+ atomic_inc_unchecked(&sk->sk_drops);
102037 trace_sock_rcvqueue_full(sk, skb);
102038 return -ENOMEM;
102039 }
102040@@ -451,7 +451,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
102041 return err;
102042
102043 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
102044- atomic_inc(&sk->sk_drops);
102045+ atomic_inc_unchecked(&sk->sk_drops);
102046 return -ENOBUFS;
102047 }
102048
102049@@ -464,7 +464,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
102050 skb_dst_force(skb);
102051
102052 spin_lock_irqsave(&list->lock, flags);
102053- skb->dropcount = atomic_read(&sk->sk_drops);
102054+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
102055 __skb_queue_tail(list, skb);
102056 spin_unlock_irqrestore(&list->lock, flags);
102057
102058@@ -484,7 +484,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
102059 skb->dev = NULL;
102060
102061 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
102062- atomic_inc(&sk->sk_drops);
102063+ atomic_inc_unchecked(&sk->sk_drops);
102064 goto discard_and_relse;
102065 }
102066 if (nested)
102067@@ -502,7 +502,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
102068 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
102069 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
102070 bh_unlock_sock(sk);
102071- atomic_inc(&sk->sk_drops);
102072+ atomic_inc_unchecked(&sk->sk_drops);
102073 goto discard_and_relse;
102074 }
102075
102076@@ -888,6 +888,7 @@ set_rcvbuf:
102077 }
102078 break;
102079
102080+#ifndef CONFIG_GRKERNSEC_BPF_HARDEN
102081 case SO_ATTACH_BPF:
102082 ret = -EINVAL;
102083 if (optlen == sizeof(u32)) {
102084@@ -900,7 +901,7 @@ set_rcvbuf:
102085 ret = sk_attach_bpf(ufd, sk);
102086 }
102087 break;
102088-
102089+#endif
102090 case SO_DETACH_FILTER:
102091 ret = sk_detach_filter(sk);
102092 break;
102093@@ -1004,12 +1005,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102094 struct timeval tm;
102095 } v;
102096
102097- int lv = sizeof(int);
102098- int len;
102099+ unsigned int lv = sizeof(int);
102100+ unsigned int len;
102101
102102 if (get_user(len, optlen))
102103 return -EFAULT;
102104- if (len < 0)
102105+ if (len > INT_MAX)
102106 return -EINVAL;
102107
102108 memset(&v, 0, sizeof(v));
102109@@ -1147,11 +1148,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102110
102111 case SO_PEERNAME:
102112 {
102113- char address[128];
102114+ char address[_K_SS_MAXSIZE];
102115
102116 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
102117 return -ENOTCONN;
102118- if (lv < len)
102119+ if (lv < len || sizeof address < len)
102120 return -EINVAL;
102121 if (copy_to_user(optval, address, len))
102122 return -EFAULT;
102123@@ -1236,7 +1237,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102124
102125 if (len > lv)
102126 len = lv;
102127- if (copy_to_user(optval, &v, len))
102128+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
102129 return -EFAULT;
102130 lenout:
102131 if (put_user(len, optlen))
102132@@ -2349,7 +2350,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
102133 */
102134 smp_wmb();
102135 atomic_set(&sk->sk_refcnt, 1);
102136- atomic_set(&sk->sk_drops, 0);
102137+ atomic_set_unchecked(&sk->sk_drops, 0);
102138 }
102139 EXPORT_SYMBOL(sock_init_data);
102140
102141@@ -2477,6 +2478,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
102142 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
102143 int level, int type)
102144 {
102145+ struct sock_extended_err ee;
102146 struct sock_exterr_skb *serr;
102147 struct sk_buff *skb;
102148 int copied, err;
102149@@ -2498,7 +2500,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
102150 sock_recv_timestamp(msg, sk, skb);
102151
102152 serr = SKB_EXT_ERR(skb);
102153- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
102154+ ee = serr->ee;
102155+ put_cmsg(msg, level, type, sizeof ee, &ee);
102156
102157 msg->msg_flags |= MSG_ERRQUEUE;
102158 err = copied;
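The sock_recv_errqueue() change bounces serr->ee through a stack local before put_cmsg(). The likely motivation is PAX_USERCOPY: serr lives inside the skb's cb[] area, i.e. in the middle of a slab object from a cache that is not whitelisted for user copies (compare the SLAB_USERCOPY flag added to dn_proto below), so copying to userspace directly from it would be refused, while a bounce through the stack is always permitted:

    struct sock_extended_err ee = serr->ee;   /* stack bounce buffer */

    put_cmsg(msg, level, type, sizeof(ee), &ee);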
102159diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
102160index ad704c7..ca48aff 100644
102161--- a/net/core/sock_diag.c
102162+++ b/net/core/sock_diag.c
102163@@ -9,26 +9,33 @@
102164 #include <linux/inet_diag.h>
102165 #include <linux/sock_diag.h>
102166
102167-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
102168+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
102169 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
102170 static DEFINE_MUTEX(sock_diag_table_mutex);
102171
102172 int sock_diag_check_cookie(void *sk, __u32 *cookie)
102173 {
102174+#ifndef CONFIG_GRKERNSEC_HIDESYM
102175 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
102176 cookie[1] != INET_DIAG_NOCOOKIE) &&
102177 ((u32)(unsigned long)sk != cookie[0] ||
102178 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
102179 return -ESTALE;
102180 else
102181+#endif
102182 return 0;
102183 }
102184 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
102185
102186 void sock_diag_save_cookie(void *sk, __u32 *cookie)
102187 {
102188+#ifdef CONFIG_GRKERNSEC_HIDESYM
102189+ cookie[0] = 0;
102190+ cookie[1] = 0;
102191+#else
102192 cookie[0] = (u32)(unsigned long)sk;
102193 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
102194+#endif
102195 }
102196 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
102197
102198@@ -110,8 +117,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
102199 mutex_lock(&sock_diag_table_mutex);
102200 if (sock_diag_handlers[hndl->family])
102201 err = -EBUSY;
102202- else
102203+ else {
102204+ pax_open_kernel();
102205 sock_diag_handlers[hndl->family] = hndl;
102206+ pax_close_kernel();
102207+ }
102208 mutex_unlock(&sock_diag_table_mutex);
102209
102210 return err;
102211@@ -127,7 +137,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
102212
102213 mutex_lock(&sock_diag_table_mutex);
102214 BUG_ON(sock_diag_handlers[family] != hnld);
102215+ pax_open_kernel();
102216 sock_diag_handlers[family] = NULL;
102217+ pax_close_kernel();
102218 mutex_unlock(&sock_diag_table_mutex);
102219 }
102220 EXPORT_SYMBOL_GPL(sock_diag_unregister);
102221diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
102222index bbb1d5a..754e2e5 100644
102223--- a/net/core/sysctl_net_core.c
102224+++ b/net/core/sysctl_net_core.c
102225@@ -36,7 +36,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
102226 {
102227 unsigned int orig_size, size;
102228 int ret, i;
102229- struct ctl_table tmp = {
102230+ ctl_table_no_const tmp = {
102231 .data = &size,
102232 .maxlen = sizeof(size),
102233 .mode = table->mode
102234@@ -204,7 +204,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
102235 void __user *buffer, size_t *lenp, loff_t *ppos)
102236 {
102237 char id[IFNAMSIZ];
102238- struct ctl_table tbl = {
102239+ ctl_table_no_const tbl = {
102240 .data = id,
102241 .maxlen = IFNAMSIZ,
102242 };
102243@@ -222,7 +222,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
102244 static int proc_do_rss_key(struct ctl_table *table, int write,
102245 void __user *buffer, size_t *lenp, loff_t *ppos)
102246 {
102247- struct ctl_table fake_table;
102248+ ctl_table_no_const fake_table;
102249 char buf[NETDEV_RSS_KEY_LEN * 3];
102250
102251 snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
102252@@ -286,7 +286,7 @@ static struct ctl_table net_core_table[] = {
102253 .mode = 0444,
102254 .proc_handler = proc_do_rss_key,
102255 },
102256-#ifdef CONFIG_BPF_JIT
102257+#if defined(CONFIG_BPF_JIT) && !defined(CONFIG_GRKERNSEC_BPF_HARDEN)
102258 {
102259 .procname = "bpf_jit_enable",
102260 .data = &bpf_jit_enable,
102261@@ -402,13 +402,12 @@ static struct ctl_table netns_core_table[] = {
102262
102263 static __net_init int sysctl_core_net_init(struct net *net)
102264 {
102265- struct ctl_table *tbl;
102266+ ctl_table_no_const *tbl = NULL;
102267
102268 net->core.sysctl_somaxconn = SOMAXCONN;
102269
102270- tbl = netns_core_table;
102271 if (!net_eq(net, &init_net)) {
102272- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
102273+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
102274 if (tbl == NULL)
102275 goto err_dup;
102276
102277@@ -418,17 +417,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
102278 if (net->user_ns != &init_user_ns) {
102279 tbl[0].procname = NULL;
102280 }
102281- }
102282-
102283- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102284+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102285+ } else
102286+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
102287 if (net->core.sysctl_hdr == NULL)
102288 goto err_reg;
102289
102290 return 0;
102291
102292 err_reg:
102293- if (tbl != netns_core_table)
102294- kfree(tbl);
102295+ kfree(tbl);
102296 err_dup:
102297 return -ENOMEM;
102298 }
102299@@ -443,7 +441,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
102300 kfree(tbl);
102301 }
102302
102303-static __net_initdata struct pernet_operations sysctl_core_ops = {
102304+static __net_initconst struct pernet_operations sysctl_core_ops = {
102305 .init = sysctl_core_net_init,
102306 .exit = sysctl_core_net_exit,
102307 };
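The sysctl_core_net_init() rewrite is the template for many sysctl hunks in this patch (the ieee802154 one below is identical in shape): tbl starts as NULL and is only ever set by kmemdup(), init_net registers the static table directly, and the error path no longer needs to compare against the template because kfree(NULL) is a harmless no-op:

    err_reg:
            kfree(tbl);     /* NULL unless kmemdup() succeeded */
    err_dup:
            return -ENOMEM;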
102308diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
102309index 8102286..a0c2755 100644
102310--- a/net/decnet/af_decnet.c
102311+++ b/net/decnet/af_decnet.c
102312@@ -466,6 +466,7 @@ static struct proto dn_proto = {
102313 .sysctl_rmem = sysctl_decnet_rmem,
102314 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
102315 .obj_size = sizeof(struct dn_sock),
102316+ .slab_flags = SLAB_USERCOPY,
102317 };
102318
102319 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
102320diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
102321index 4400da7..3429972 100644
102322--- a/net/decnet/dn_dev.c
102323+++ b/net/decnet/dn_dev.c
102324@@ -201,7 +201,7 @@ static struct dn_dev_sysctl_table {
102325 .extra1 = &min_t3,
102326 .extra2 = &max_t3
102327 },
102328- {0}
102329+ { }
102330 },
102331 };
102332
102333diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
102334index 5325b54..a0d4d69 100644
102335--- a/net/decnet/sysctl_net_decnet.c
102336+++ b/net/decnet/sysctl_net_decnet.c
102337@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
102338
102339 if (len > *lenp) len = *lenp;
102340
102341- if (copy_to_user(buffer, addr, len))
102342+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
102343 return -EFAULT;
102344
102345 *lenp = len;
102346@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
102347
102348 if (len > *lenp) len = *lenp;
102349
102350- if (copy_to_user(buffer, devname, len))
102351+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
102352 return -EFAULT;
102353
102354 *lenp = len;
102355diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
102356index a2c7e4c..3dc9f67 100644
102357--- a/net/hsr/hsr_netlink.c
102358+++ b/net/hsr/hsr_netlink.c
102359@@ -102,7 +102,7 @@ nla_put_failure:
102360 return -EMSGSIZE;
102361 }
102362
102363-static struct rtnl_link_ops hsr_link_ops __read_mostly = {
102364+static struct rtnl_link_ops hsr_link_ops = {
102365 .kind = "hsr",
102366 .maxtype = IFLA_HSR_MAX,
102367 .policy = hsr_policy,
102368diff --git a/net/ieee802154/6lowpan_rtnl.c b/net/ieee802154/6lowpan_rtnl.c
102369index 27eaa65..7083217 100644
102370--- a/net/ieee802154/6lowpan_rtnl.c
102371+++ b/net/ieee802154/6lowpan_rtnl.c
102372@@ -642,7 +642,7 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
102373 dev_put(real_dev);
102374 }
102375
102376-static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
102377+static struct rtnl_link_ops lowpan_link_ops = {
102378 .kind = "lowpan",
102379 .priv_size = sizeof(struct lowpan_dev_info),
102380 .setup = lowpan_setup,
102381diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
102382index 9d980ed..7d01e12 100644
102383--- a/net/ieee802154/reassembly.c
102384+++ b/net/ieee802154/reassembly.c
102385@@ -435,14 +435,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
102386
102387 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102388 {
102389- struct ctl_table *table;
102390+ ctl_table_no_const *table = NULL;
102391 struct ctl_table_header *hdr;
102392 struct netns_ieee802154_lowpan *ieee802154_lowpan =
102393 net_ieee802154_lowpan(net);
102394
102395- table = lowpan_frags_ns_ctl_table;
102396 if (!net_eq(net, &init_net)) {
102397- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
102398+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
102399 GFP_KERNEL);
102400 if (table == NULL)
102401 goto err_alloc;
102402@@ -457,9 +456,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102403 /* Don't export sysctls to unprivileged users */
102404 if (net->user_ns != &init_user_ns)
102405 table[0].procname = NULL;
102406- }
102407-
102408- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102409+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102410+ } else
102411+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
102412 if (hdr == NULL)
102413 goto err_reg;
102414
102415@@ -467,8 +466,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102416 return 0;
102417
102418 err_reg:
102419- if (!net_eq(net, &init_net))
102420- kfree(table);
102421+ kfree(table);
102422 err_alloc:
102423 return -ENOMEM;
102424 }
102425diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
102426index a44773c..a6ae415 100644
102427--- a/net/ipv4/af_inet.c
102428+++ b/net/ipv4/af_inet.c
102429@@ -1392,7 +1392,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
102430 return ip_recv_error(sk, msg, len, addr_len);
102431 #if IS_ENABLED(CONFIG_IPV6)
102432 if (sk->sk_family == AF_INET6)
102433- return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
102434+ return pingv6_ops->ipv6_recv_error(sk, msg, len, addr_len);
102435 #endif
102436 return -EINVAL;
102437 }
102438diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
102439index 214882e..ec032f6 100644
102440--- a/net/ipv4/devinet.c
102441+++ b/net/ipv4/devinet.c
102442@@ -69,7 +69,8 @@
102443
102444 static struct ipv4_devconf ipv4_devconf = {
102445 .data = {
102446- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
102447+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
102448+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
102449 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
102450 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
102451 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
102452@@ -80,7 +81,8 @@ static struct ipv4_devconf ipv4_devconf = {
102453
102454 static struct ipv4_devconf ipv4_devconf_dflt = {
102455 .data = {
102456- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
102457+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
102458+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
102459 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
102460 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
102461 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
102462@@ -1548,7 +1550,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
102463 idx = 0;
102464 head = &net->dev_index_head[h];
102465 rcu_read_lock();
102466- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
102467+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
102468 net->dev_base_seq;
102469 hlist_for_each_entry_rcu(dev, head, index_hlist) {
102470 if (idx < s_idx)
102471@@ -1866,7 +1868,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
102472 idx = 0;
102473 head = &net->dev_index_head[h];
102474 rcu_read_lock();
102475- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
102476+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
102477 net->dev_base_seq;
102478 hlist_for_each_entry_rcu(dev, head, index_hlist) {
102479 if (idx < s_idx)
102480@@ -2101,7 +2103,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
102481 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
102482 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
102483
102484-static struct devinet_sysctl_table {
102485+static const struct devinet_sysctl_table {
102486 struct ctl_table_header *sysctl_header;
102487 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
102488 } devinet_sysctl = {
102489@@ -2233,7 +2235,7 @@ static __net_init int devinet_init_net(struct net *net)
102490 int err;
102491 struct ipv4_devconf *all, *dflt;
102492 #ifdef CONFIG_SYSCTL
102493- struct ctl_table *tbl = ctl_forward_entry;
102494+ ctl_table_no_const *tbl = NULL;
102495 struct ctl_table_header *forw_hdr;
102496 #endif
102497
102498@@ -2251,7 +2253,7 @@ static __net_init int devinet_init_net(struct net *net)
102499 goto err_alloc_dflt;
102500
102501 #ifdef CONFIG_SYSCTL
102502- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
102503+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
102504 if (tbl == NULL)
102505 goto err_alloc_ctl;
102506
102507@@ -2271,7 +2273,10 @@ static __net_init int devinet_init_net(struct net *net)
102508 goto err_reg_dflt;
102509
102510 err = -ENOMEM;
102511- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
102512+ if (!net_eq(net, &init_net))
102513+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
102514+ else
102515+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
102516 if (forw_hdr == NULL)
102517 goto err_reg_ctl;
102518 net->ipv4.forw_hdr = forw_hdr;
102519@@ -2287,8 +2292,7 @@ err_reg_ctl:
102520 err_reg_dflt:
102521 __devinet_sysctl_unregister(all);
102522 err_reg_all:
102523- if (tbl != ctl_forward_entry)
102524- kfree(tbl);
102525+ kfree(tbl);
102526 err_alloc_ctl:
102527 #endif
102528 if (dflt != &ipv4_devconf_dflt)
102529diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
102530index 23104a3..9f5570b 100644
102531--- a/net/ipv4/fib_frontend.c
102532+++ b/net/ipv4/fib_frontend.c
102533@@ -1017,12 +1017,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
102534 #ifdef CONFIG_IP_ROUTE_MULTIPATH
102535 fib_sync_up(dev);
102536 #endif
102537- atomic_inc(&net->ipv4.dev_addr_genid);
102538+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102539 rt_cache_flush(dev_net(dev));
102540 break;
102541 case NETDEV_DOWN:
102542 fib_del_ifaddr(ifa, NULL);
102543- atomic_inc(&net->ipv4.dev_addr_genid);
102544+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102545 if (ifa->ifa_dev->ifa_list == NULL) {
102546 /* Last address was deleted from this interface.
102547 * Disable IP.
102548@@ -1060,7 +1060,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
102549 #ifdef CONFIG_IP_ROUTE_MULTIPATH
102550 fib_sync_up(dev);
102551 #endif
102552- atomic_inc(&net->ipv4.dev_addr_genid);
102553+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102554 rt_cache_flush(net);
102555 break;
102556 case NETDEV_DOWN:
102557diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
102558index f99f41b..1879da9 100644
102559--- a/net/ipv4/fib_semantics.c
102560+++ b/net/ipv4/fib_semantics.c
102561@@ -770,7 +770,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
102562 nh->nh_saddr = inet_select_addr(nh->nh_dev,
102563 nh->nh_gw,
102564 nh->nh_parent->fib_scope);
102565- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
102566+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
102567
102568 return nh->nh_saddr;
102569 }
102570diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
102571index b986298..7e726da 100644
102572--- a/net/ipv4/fou.c
102573+++ b/net/ipv4/fou.c
102574@@ -765,12 +765,12 @@ EXPORT_SYMBOL(gue_build_header);
102575
102576 #ifdef CONFIG_NET_FOU_IP_TUNNELS
102577
102578-static const struct ip_tunnel_encap_ops __read_mostly fou_iptun_ops = {
102579+static const struct ip_tunnel_encap_ops fou_iptun_ops = {
102580 .encap_hlen = fou_encap_hlen,
102581 .build_header = fou_build_header,
102582 };
102583
102584-static const struct ip_tunnel_encap_ops __read_mostly gue_iptun_ops = {
102585+static const struct ip_tunnel_encap_ops gue_iptun_ops = {
102586 .encap_hlen = gue_encap_hlen,
102587 .build_header = gue_build_header,
102588 };
102589diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
102590index 9111a4e..3576905 100644
102591--- a/net/ipv4/inet_hashtables.c
102592+++ b/net/ipv4/inet_hashtables.c
102593@@ -18,6 +18,7 @@
102594 #include <linux/sched.h>
102595 #include <linux/slab.h>
102596 #include <linux/wait.h>
102597+#include <linux/security.h>
102598
102599 #include <net/inet_connection_sock.h>
102600 #include <net/inet_hashtables.h>
102601@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
102602 return inet_ehashfn(net, laddr, lport, faddr, fport);
102603 }
102604
102605+extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
102606+
102607 /*
102608 * Allocate and initialize a new local port bind bucket.
102609 * The bindhash mutex for snum's hash chain must be held here.
102610@@ -554,6 +557,8 @@ ok:
102611 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
102612 spin_unlock(&head->lock);
102613
102614+ gr_update_task_in_ip_table(inet_sk(sk));
102615+
102616 if (tw) {
102617 inet_twsk_deschedule(tw, death_row);
102618 while (twrefcnt) {
102619diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
102620index 241afd7..31b95d5 100644
102621--- a/net/ipv4/inetpeer.c
102622+++ b/net/ipv4/inetpeer.c
102623@@ -461,7 +461,7 @@ relookup:
102624 if (p) {
102625 p->daddr = *daddr;
102626 atomic_set(&p->refcnt, 1);
102627- atomic_set(&p->rid, 0);
102628+ atomic_set_unchecked(&p->rid, 0);
102629 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
102630 p->rate_tokens = 0;
102631 /* 60*HZ is arbitrary, but chosen enough high so that the first
102632diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
102633index 145a50c..5dd8cc5 100644
102634--- a/net/ipv4/ip_fragment.c
102635+++ b/net/ipv4/ip_fragment.c
102636@@ -268,7 +268,7 @@ static int ip_frag_too_far(struct ipq *qp)
102637 return 0;
102638
102639 start = qp->rid;
102640- end = atomic_inc_return(&peer->rid);
102641+ end = atomic_inc_return_unchecked(&peer->rid);
102642 qp->rid = end;
102643
102644 rc = qp->q.fragments && (end - start) > max;
102645@@ -748,12 +748,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
102646
102647 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102648 {
102649- struct ctl_table *table;
102650+ ctl_table_no_const *table = NULL;
102651 struct ctl_table_header *hdr;
102652
102653- table = ip4_frags_ns_ctl_table;
102654 if (!net_eq(net, &init_net)) {
102655- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
102656+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
102657 if (table == NULL)
102658 goto err_alloc;
102659
102660@@ -767,9 +766,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102661 /* Don't export sysctls to unprivileged users */
102662 if (net->user_ns != &init_user_ns)
102663 table[0].procname = NULL;
102664- }
102665+ hdr = register_net_sysctl(net, "net/ipv4", table);
102666+ } else
102667+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
102668
102669- hdr = register_net_sysctl(net, "net/ipv4", table);
102670 if (hdr == NULL)
102671 goto err_reg;
102672
102673@@ -777,8 +777,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102674 return 0;
102675
102676 err_reg:
102677- if (!net_eq(net, &init_net))
102678- kfree(table);
102679+ kfree(table);
102680 err_alloc:
102681 return -ENOMEM;
102682 }
102683diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
102684index 4f4bf5b..2c936fe 100644
102685--- a/net/ipv4/ip_gre.c
102686+++ b/net/ipv4/ip_gre.c
102687@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
102688 module_param(log_ecn_error, bool, 0644);
102689 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
102690
102691-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
102692+static struct rtnl_link_ops ipgre_link_ops;
102693 static int ipgre_tunnel_init(struct net_device *dev);
102694
102695 static int ipgre_net_id __read_mostly;
102696@@ -816,7 +816,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
102697 [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
102698 };
102699
102700-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
102701+static struct rtnl_link_ops ipgre_link_ops = {
102702 .kind = "gre",
102703 .maxtype = IFLA_GRE_MAX,
102704 .policy = ipgre_policy,
102705@@ -830,7 +830,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
102706 .fill_info = ipgre_fill_info,
102707 };
102708
102709-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
102710+static struct rtnl_link_ops ipgre_tap_ops = {
102711 .kind = "gretap",
102712 .maxtype = IFLA_GRE_MAX,
102713 .policy = ipgre_policy,
102714diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
102715index 3d4da2c..40f9c29 100644
102716--- a/net/ipv4/ip_input.c
102717+++ b/net/ipv4/ip_input.c
102718@@ -147,6 +147,10 @@
102719 #include <linux/mroute.h>
102720 #include <linux/netlink.h>
102721
102722+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
102723+extern int grsec_enable_blackhole;
102724+#endif
102725+
102726 /*
102727 * Process Router Attention IP option (RFC 2113)
102728 */
102729@@ -223,6 +227,9 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
102730 if (!raw) {
102731 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
102732 IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
102733+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
102734+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
102735+#endif
102736 icmp_send(skb, ICMP_DEST_UNREACH,
102737 ICMP_PROT_UNREACH, 0);
102738 }
102739diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
102740index 6b85adb..cd7e5d3 100644
102741--- a/net/ipv4/ip_sockglue.c
102742+++ b/net/ipv4/ip_sockglue.c
102743@@ -1193,7 +1193,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
102744 len = min_t(unsigned int, len, opt->optlen);
102745 if (put_user(len, optlen))
102746 return -EFAULT;
102747- if (copy_to_user(optval, opt->__data, len))
102748+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
102749+ copy_to_user(optval, opt->__data, len))
102750 return -EFAULT;
102751 return 0;
102752 }
102753@@ -1324,7 +1325,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
102754 if (sk->sk_type != SOCK_STREAM)
102755 return -ENOPROTOOPT;
102756
102757- msg.msg_control = (__force void *) optval;
102758+ msg.msg_control = (__force_kernel void *) optval;
102759 msg.msg_controllen = len;
102760 msg.msg_flags = flags;
102761
102762diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
102763index 1a7e979..fd05aa4 100644
102764--- a/net/ipv4/ip_vti.c
102765+++ b/net/ipv4/ip_vti.c
102766@@ -45,7 +45,7 @@
102767 #include <net/net_namespace.h>
102768 #include <net/netns/generic.h>
102769
102770-static struct rtnl_link_ops vti_link_ops __read_mostly;
102771+static struct rtnl_link_ops vti_link_ops;
102772
102773 static int vti_net_id __read_mostly;
102774 static int vti_tunnel_init(struct net_device *dev);
102775@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
102776 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
102777 };
102778
102779-static struct rtnl_link_ops vti_link_ops __read_mostly = {
102780+static struct rtnl_link_ops vti_link_ops = {
102781 .kind = "vti",
102782 .maxtype = IFLA_VTI_MAX,
102783 .policy = vti_policy,
102784diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
102785index 7fa18bc..bea16af 100644
102786--- a/net/ipv4/ipconfig.c
102787+++ b/net/ipv4/ipconfig.c
102788@@ -333,7 +333,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
102789
102790 mm_segment_t oldfs = get_fs();
102791 set_fs(get_ds());
102792- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
102793+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
102794 set_fs(oldfs);
102795 return res;
102796 }
102797@@ -344,7 +344,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
102798
102799 mm_segment_t oldfs = get_fs();
102800 set_fs(get_ds());
102801- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
102802+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
102803 set_fs(oldfs);
102804 return res;
102805 }
102806@@ -355,7 +355,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
102807
102808 mm_segment_t oldfs = get_fs();
102809 set_fs(get_ds());
102810- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
102811+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
102812 set_fs(oldfs);
102813 return res;
102814 }
102815diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
102816index 40403114..c35c647 100644
102817--- a/net/ipv4/ipip.c
102818+++ b/net/ipv4/ipip.c
102819@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
102820 static int ipip_net_id __read_mostly;
102821
102822 static int ipip_tunnel_init(struct net_device *dev);
102823-static struct rtnl_link_ops ipip_link_ops __read_mostly;
102824+static struct rtnl_link_ops ipip_link_ops;
102825
102826 static int ipip_err(struct sk_buff *skb, u32 info)
102827 {
102828@@ -487,7 +487,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
102829 [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
102830 };
102831
102832-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
102833+static struct rtnl_link_ops ipip_link_ops = {
102834 .kind = "ipip",
102835 .maxtype = IFLA_IPTUN_MAX,
102836 .policy = ipip_policy,
102837diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
102838index f95b6f9..2ee2097 100644
102839--- a/net/ipv4/netfilter/arp_tables.c
102840+++ b/net/ipv4/netfilter/arp_tables.c
102841@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
102842 #endif
102843
102844 static int get_info(struct net *net, void __user *user,
102845- const int *len, int compat)
102846+ int len, int compat)
102847 {
102848 char name[XT_TABLE_MAXNAMELEN];
102849 struct xt_table *t;
102850 int ret;
102851
102852- if (*len != sizeof(struct arpt_getinfo)) {
102853- duprintf("length %u != %Zu\n", *len,
102854+ if (len != sizeof(struct arpt_getinfo)) {
102855+ duprintf("length %u != %Zu\n", len,
102856 sizeof(struct arpt_getinfo));
102857 return -EINVAL;
102858 }
102859@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
102860 info.size = private->size;
102861 strcpy(info.name, name);
102862
102863- if (copy_to_user(user, &info, *len) != 0)
102864+ if (copy_to_user(user, &info, len) != 0)
102865 ret = -EFAULT;
102866 else
102867 ret = 0;
102868@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
102869
102870 switch (cmd) {
102871 case ARPT_SO_GET_INFO:
102872- ret = get_info(sock_net(sk), user, len, 1);
102873+ ret = get_info(sock_net(sk), user, *len, 1);
102874 break;
102875 case ARPT_SO_GET_ENTRIES:
102876 ret = compat_get_entries(sock_net(sk), user, len);
102877@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
102878
102879 switch (cmd) {
102880 case ARPT_SO_GET_INFO:
102881- ret = get_info(sock_net(sk), user, len, 0);
102882+ ret = get_info(sock_net(sk), user, *len, 0);
102883 break;
102884
102885 case ARPT_SO_GET_ENTRIES:
102886diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
102887index 99e810f..3711b81 100644
102888--- a/net/ipv4/netfilter/ip_tables.c
102889+++ b/net/ipv4/netfilter/ip_tables.c
102890@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
102891 #endif
102892
102893 static int get_info(struct net *net, void __user *user,
102894- const int *len, int compat)
102895+ int len, int compat)
102896 {
102897 char name[XT_TABLE_MAXNAMELEN];
102898 struct xt_table *t;
102899 int ret;
102900
102901- if (*len != sizeof(struct ipt_getinfo)) {
102902- duprintf("length %u != %zu\n", *len,
102903+ if (len != sizeof(struct ipt_getinfo)) {
102904+ duprintf("length %u != %zu\n", len,
102905 sizeof(struct ipt_getinfo));
102906 return -EINVAL;
102907 }
102908@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
102909 info.size = private->size;
102910 strcpy(info.name, name);
102911
102912- if (copy_to_user(user, &info, *len) != 0)
102913+ if (copy_to_user(user, &info, len) != 0)
102914 ret = -EFAULT;
102915 else
102916 ret = 0;
102917@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102918
102919 switch (cmd) {
102920 case IPT_SO_GET_INFO:
102921- ret = get_info(sock_net(sk), user, len, 1);
102922+ ret = get_info(sock_net(sk), user, *len, 1);
102923 break;
102924 case IPT_SO_GET_ENTRIES:
102925 ret = compat_get_entries(sock_net(sk), user, len);
102926@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102927
102928 switch (cmd) {
102929 case IPT_SO_GET_INFO:
102930- ret = get_info(sock_net(sk), user, len, 0);
102931+ ret = get_info(sock_net(sk), user, *len, 0);
102932 break;
102933
102934 case IPT_SO_GET_ENTRIES:
102935diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
102936index e90f83a..3e6acca 100644
102937--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
102938+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
102939@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
102940 spin_lock_init(&cn->lock);
102941
102942 #ifdef CONFIG_PROC_FS
102943- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
102944+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
102945 if (!cn->procdir) {
102946 pr_err("Unable to proc dir entry\n");
102947 return -ENOMEM;
102948diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
102949index 0ae28f5..d32b565 100644
102950--- a/net/ipv4/ping.c
102951+++ b/net/ipv4/ping.c
102952@@ -59,7 +59,7 @@ struct ping_table {
102953 };
102954
102955 static struct ping_table ping_table;
102956-struct pingv6_ops pingv6_ops;
102957+struct pingv6_ops *pingv6_ops;
102958 EXPORT_SYMBOL_GPL(pingv6_ops);
102959
102960 static u16 ping_port_rover;
102961@@ -358,7 +358,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
102962 return -ENODEV;
102963 }
102964 }
102965- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
102966+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
102967 scoped);
102968 rcu_read_unlock();
102969
102970@@ -566,7 +566,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
102971 }
102972 #if IS_ENABLED(CONFIG_IPV6)
102973 } else if (skb->protocol == htons(ETH_P_IPV6)) {
102974- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
102975+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
102976 #endif
102977 }
102978
102979@@ -584,7 +584,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
102980 info, (u8 *)icmph);
102981 #if IS_ENABLED(CONFIG_IPV6)
102982 } else if (family == AF_INET6) {
102983- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
102984+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
102985 info, (u8 *)icmph);
102986 #endif
102987 }
102988@@ -919,10 +919,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
102989 }
102990
102991 if (inet6_sk(sk)->rxopt.all)
102992- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
102993+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
102994 if (skb->protocol == htons(ETH_P_IPV6) &&
102995 inet6_sk(sk)->rxopt.all)
102996- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
102997+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
102998 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
102999 ip_cmsg_recv(msg, skb);
103000 #endif
103001@@ -1117,7 +1117,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
103002 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
103003 0, sock_i_ino(sp),
103004 atomic_read(&sp->sk_refcnt), sp,
103005- atomic_read(&sp->sk_drops));
103006+ atomic_read_unchecked(&sp->sk_drops));
103007 }
103008
103009 static int ping_v4_seq_show(struct seq_file *seq, void *v)
103010diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
103011index 0bb68df..59405fc 100644
103012--- a/net/ipv4/raw.c
103013+++ b/net/ipv4/raw.c
103014@@ -324,7 +324,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
103015 int raw_rcv(struct sock *sk, struct sk_buff *skb)
103016 {
103017 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
103018- atomic_inc(&sk->sk_drops);
103019+ atomic_inc_unchecked(&sk->sk_drops);
103020 kfree_skb(skb);
103021 return NET_RX_DROP;
103022 }
103023@@ -774,16 +774,20 @@ static int raw_init(struct sock *sk)
103024
103025 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
103026 {
103027+ struct icmp_filter filter;
103028+
103029 if (optlen > sizeof(struct icmp_filter))
103030 optlen = sizeof(struct icmp_filter);
103031- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
103032+ if (copy_from_user(&filter, optval, optlen))
103033 return -EFAULT;
103034+ raw_sk(sk)->filter = filter;
103035 return 0;
103036 }
103037
103038 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
103039 {
103040 int len, ret = -EFAULT;
103041+ struct icmp_filter filter;
103042
103043 if (get_user(len, optlen))
103044 goto out;
103045@@ -793,8 +797,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
103046 if (len > sizeof(struct icmp_filter))
103047 len = sizeof(struct icmp_filter);
103048 ret = -EFAULT;
103049- if (put_user(len, optlen) ||
103050- copy_to_user(optval, &raw_sk(sk)->filter, len))
103051+ filter = raw_sk(sk)->filter;
103052+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
103053 goto out;
103054 ret = 0;
103055 out: return ret;
103056@@ -1023,7 +1027,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
103057 0, 0L, 0,
103058 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
103059 0, sock_i_ino(sp),
103060- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
103061+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
103062 }
103063
103064 static int raw_seq_show(struct seq_file *seq, void *v)
103065diff --git a/net/ipv4/route.c b/net/ipv4/route.c
103066index 52e1f2b..e736cb4 100644
103067--- a/net/ipv4/route.c
103068+++ b/net/ipv4/route.c
103069@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
103070
103071 static int rt_cache_seq_open(struct inode *inode, struct file *file)
103072 {
103073- return seq_open(file, &rt_cache_seq_ops);
103074+ return seq_open_restrict(file, &rt_cache_seq_ops);
103075 }
103076
103077 static const struct file_operations rt_cache_seq_fops = {
103078@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
103079
103080 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
103081 {
103082- return seq_open(file, &rt_cpu_seq_ops);
103083+ return seq_open_restrict(file, &rt_cpu_seq_ops);
103084 }
103085
103086 static const struct file_operations rt_cpu_seq_fops = {
103087@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
103088
103089 static int rt_acct_proc_open(struct inode *inode, struct file *file)
103090 {
103091- return single_open(file, rt_acct_proc_show, NULL);
103092+ return single_open_restrict(file, rt_acct_proc_show, NULL);
103093 }
103094
103095 static const struct file_operations rt_acct_proc_fops = {
103096@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
103097
103098 #define IP_IDENTS_SZ 2048u
103099 struct ip_ident_bucket {
103100- atomic_t id;
103101+ atomic_unchecked_t id;
103102 u32 stamp32;
103103 };
103104
103105-static struct ip_ident_bucket *ip_idents __read_mostly;
103106+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
103107
103108 /* In order to protect privacy, we add a perturbation to identifiers
103109 * if one generator is seldom used. This makes hard for an attacker
103110@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
103111 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
103112 delta = prandom_u32_max(now - old);
103113
103114- return atomic_add_return(segs + delta, &bucket->id) - segs;
103115+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
103116 }
103117 EXPORT_SYMBOL(ip_idents_reserve);
103118
103119@@ -2628,34 +2628,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
103120 .maxlen = sizeof(int),
103121 .mode = 0200,
103122 .proc_handler = ipv4_sysctl_rtcache_flush,
103123+ .extra1 = &init_net,
103124 },
103125 { },
103126 };
103127
103128 static __net_init int sysctl_route_net_init(struct net *net)
103129 {
103130- struct ctl_table *tbl;
103131+ ctl_table_no_const *tbl = NULL;
103132
103133- tbl = ipv4_route_flush_table;
103134 if (!net_eq(net, &init_net)) {
103135- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
103136+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
103137 if (tbl == NULL)
103138 goto err_dup;
103139
103140 /* Don't export sysctls to unprivileged users */
103141 if (net->user_ns != &init_user_ns)
103142 tbl[0].procname = NULL;
103143- }
103144- tbl[0].extra1 = net;
103145+ tbl[0].extra1 = net;
103146+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
103147+ } else
103148+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
103149
103150- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
103151 if (net->ipv4.route_hdr == NULL)
103152 goto err_reg;
103153 return 0;
103154
103155 err_reg:
103156- if (tbl != ipv4_route_flush_table)
103157- kfree(tbl);
103158+ kfree(tbl);
103159 err_dup:
103160 return -ENOMEM;
103161 }
103162@@ -2678,8 +2678,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
103163
103164 static __net_init int rt_genid_init(struct net *net)
103165 {
103166- atomic_set(&net->ipv4.rt_genid, 0);
103167- atomic_set(&net->fnhe_genid, 0);
103168+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
103169+ atomic_set_unchecked(&net->fnhe_genid, 0);
103170 get_random_bytes(&net->ipv4.dev_addr_genid,
103171 sizeof(net->ipv4.dev_addr_genid));
103172 return 0;
103173@@ -2722,11 +2722,7 @@ int __init ip_rt_init(void)
103174 {
103175 int rc = 0;
103176
103177- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
103178- if (!ip_idents)
103179- panic("IP: failed to allocate ip_idents\n");
103180-
103181- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
103182+ prandom_bytes(ip_idents, sizeof(ip_idents));
103183
103184 #ifdef CONFIG_IP_ROUTE_CLASSID
103185 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
103186diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
103187index e0ee384..e2688d9 100644
103188--- a/net/ipv4/sysctl_net_ipv4.c
103189+++ b/net/ipv4/sysctl_net_ipv4.c
103190@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
103191 container_of(table->data, struct net, ipv4.ip_local_ports.range);
103192 int ret;
103193 int range[2];
103194- struct ctl_table tmp = {
103195+ ctl_table_no_const tmp = {
103196 .data = &range,
103197 .maxlen = sizeof(range),
103198 .mode = table->mode,
103199@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
103200 int ret;
103201 gid_t urange[2];
103202 kgid_t low, high;
103203- struct ctl_table tmp = {
103204+ ctl_table_no_const tmp = {
103205 .data = &urange,
103206 .maxlen = sizeof(urange),
103207 .mode = table->mode,
103208@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
103209 void __user *buffer, size_t *lenp, loff_t *ppos)
103210 {
103211 char val[TCP_CA_NAME_MAX];
103212- struct ctl_table tbl = {
103213+ ctl_table_no_const tbl = {
103214 .data = val,
103215 .maxlen = TCP_CA_NAME_MAX,
103216 };
103217@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
103218 void __user *buffer, size_t *lenp,
103219 loff_t *ppos)
103220 {
103221- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
103222+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
103223 int ret;
103224
103225 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103226@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
103227 void __user *buffer, size_t *lenp,
103228 loff_t *ppos)
103229 {
103230- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
103231+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
103232 int ret;
103233
103234 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103235@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
103236 void __user *buffer, size_t *lenp,
103237 loff_t *ppos)
103238 {
103239- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103240+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103241 struct tcp_fastopen_context *ctxt;
103242 int ret;
103243 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
103244@@ -881,13 +881,12 @@ static struct ctl_table ipv4_net_table[] = {
103245
103246 static __net_init int ipv4_sysctl_init_net(struct net *net)
103247 {
103248- struct ctl_table *table;
103249+ ctl_table_no_const *table = NULL;
103250
103251- table = ipv4_net_table;
103252 if (!net_eq(net, &init_net)) {
103253 int i;
103254
103255- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
103256+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
103257 if (table == NULL)
103258 goto err_alloc;
103259
103260@@ -896,7 +895,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
103261 table[i].data += (void *)net - (void *)&init_net;
103262 }
103263
103264- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103265+ if (!net_eq(net, &init_net))
103266+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103267+ else
103268+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
103269 if (net->ipv4.ipv4_hdr == NULL)
103270 goto err_reg;
103271
103272diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
103273index 075ab4d..623bb9d 100644
103274--- a/net/ipv4/tcp_input.c
103275+++ b/net/ipv4/tcp_input.c
103276@@ -766,7 +766,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
103277 * without any lock. We want to make sure compiler wont store
103278 * intermediate values in this location.
103279 */
103280- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
103281+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
103282 sk->sk_max_pacing_rate);
103283 }
103284
103285@@ -4528,7 +4528,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
103286 * simplifies code)
103287 */
103288 static void
103289-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103290+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103291 struct sk_buff *head, struct sk_buff *tail,
103292 u32 start, u32 end)
103293 {
103294@@ -5506,6 +5506,7 @@ discard:
103295 tcp_paws_reject(&tp->rx_opt, 0))
103296 goto discard_and_undo;
103297
103298+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
103299 if (th->syn) {
103300 /* We see SYN without ACK. It is attempt of
103301 * simultaneous connect with crossed SYNs.
103302@@ -5556,6 +5557,7 @@ discard:
103303 goto discard;
103304 #endif
103305 }
103306+#endif
103307 /* "fifth, if neither of the SYN or RST bits is set then
103308 * drop the segment and return."
103309 */
103310@@ -5602,7 +5604,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
103311 goto discard;
103312
103313 if (th->syn) {
103314- if (th->fin)
103315+ if (th->fin || th->urg || th->psh)
103316 goto discard;
103317 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
103318 return 1;
103319diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
103320index d22f544..62f6787 100644
103321--- a/net/ipv4/tcp_ipv4.c
103322+++ b/net/ipv4/tcp_ipv4.c
103323@@ -89,6 +89,10 @@ int sysctl_tcp_tw_reuse __read_mostly;
103324 int sysctl_tcp_low_latency __read_mostly;
103325 EXPORT_SYMBOL(sysctl_tcp_low_latency);
103326
103327+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103328+extern int grsec_enable_blackhole;
103329+#endif
103330+
103331 #ifdef CONFIG_TCP_MD5SIG
103332 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
103333 __be32 daddr, __be32 saddr, const struct tcphdr *th);
103334@@ -1473,6 +1477,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
103335 return 0;
103336
103337 reset:
103338+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103339+ if (!grsec_enable_blackhole)
103340+#endif
103341 tcp_v4_send_reset(rsk, skb);
103342 discard:
103343 kfree_skb(skb);
103344@@ -1637,12 +1644,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
103345 TCP_SKB_CB(skb)->sacked = 0;
103346
103347 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
103348- if (!sk)
103349+ if (!sk) {
103350+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103351+ ret = 1;
103352+#endif
103353 goto no_tcp_socket;
103354-
103355+ }
103356 process:
103357- if (sk->sk_state == TCP_TIME_WAIT)
103358+ if (sk->sk_state == TCP_TIME_WAIT) {
103359+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103360+ ret = 2;
103361+#endif
103362 goto do_time_wait;
103363+ }
103364
103365 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
103366 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
103367@@ -1698,6 +1712,10 @@ csum_error:
103368 bad_packet:
103369 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
103370 } else {
103371+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103372+ if (!grsec_enable_blackhole || (ret == 1 &&
103373+ (skb->dev->flags & IFF_LOOPBACK)))
103374+#endif
103375 tcp_v4_send_reset(NULL, skb);
103376 }
103377
103378diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
103379index 63d2680..2db9d6b 100644
103380--- a/net/ipv4/tcp_minisocks.c
103381+++ b/net/ipv4/tcp_minisocks.c
103382@@ -27,6 +27,10 @@
103383 #include <net/inet_common.h>
103384 #include <net/xfrm.h>
103385
103386+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103387+extern int grsec_enable_blackhole;
103388+#endif
103389+
103390 int sysctl_tcp_syncookies __read_mostly = 1;
103391 EXPORT_SYMBOL(sysctl_tcp_syncookies);
103392
103393@@ -739,7 +743,10 @@ embryonic_reset:
103394 * avoid becoming vulnerable to outside attack aiming at
103395 * resetting legit local connections.
103396 */
103397- req->rsk_ops->send_reset(sk, skb);
103398+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103399+ if (!grsec_enable_blackhole)
103400+#endif
103401+ req->rsk_ops->send_reset(sk, skb);
103402 } else if (fastopen) { /* received a valid RST pkt */
103403 reqsk_fastopen_remove(sk, req, true);
103404 tcp_reset(sk);
103405diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
103406index 9790f39..9f29453 100644
103407--- a/net/ipv4/tcp_output.c
103408+++ b/net/ipv4/tcp_output.c
103409@@ -2931,6 +2931,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
103410 }
103411 #endif
103412
103413+ /* Do not fool tcpdump (if any), clean our debris */
103414+ skb->tstamp.tv64 = 0;
103415 return skb;
103416 }
103417 EXPORT_SYMBOL(tcp_make_synack);
103418diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
103419index ebf5ff5..4d1ff32 100644
103420--- a/net/ipv4/tcp_probe.c
103421+++ b/net/ipv4/tcp_probe.c
103422@@ -236,7 +236,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
103423 if (cnt + width >= len)
103424 break;
103425
103426- if (copy_to_user(buf + cnt, tbuf, width))
103427+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
103428 return -EFAULT;
103429 cnt += width;
103430 }
103431diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
103432index 1829c7f..c0b3d52 100644
103433--- a/net/ipv4/tcp_timer.c
103434+++ b/net/ipv4/tcp_timer.c
103435@@ -22,6 +22,10 @@
103436 #include <linux/gfp.h>
103437 #include <net/tcp.h>
103438
103439+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103440+extern int grsec_lastack_retries;
103441+#endif
103442+
103443 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
103444 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
103445 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
103446@@ -191,6 +195,13 @@ static int tcp_write_timeout(struct sock *sk)
103447 }
103448 }
103449
103450+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103451+ if ((sk->sk_state == TCP_LAST_ACK) &&
103452+ (grsec_lastack_retries > 0) &&
103453+ (grsec_lastack_retries < retry_until))
103454+ retry_until = grsec_lastack_retries;
103455+#endif
103456+
103457 if (retransmits_timed_out(sk, retry_until,
103458 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
103459 /* Has it gone just too far? */
103460diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
103461index 13b4dcf..b866a2a 100644
103462--- a/net/ipv4/udp.c
103463+++ b/net/ipv4/udp.c
103464@@ -87,6 +87,7 @@
103465 #include <linux/types.h>
103466 #include <linux/fcntl.h>
103467 #include <linux/module.h>
103468+#include <linux/security.h>
103469 #include <linux/socket.h>
103470 #include <linux/sockios.h>
103471 #include <linux/igmp.h>
103472@@ -114,6 +115,10 @@
103473 #include <net/busy_poll.h>
103474 #include "udp_impl.h"
103475
103476+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103477+extern int grsec_enable_blackhole;
103478+#endif
103479+
103480 struct udp_table udp_table __read_mostly;
103481 EXPORT_SYMBOL(udp_table);
103482
103483@@ -608,6 +613,9 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
103484 return true;
103485 }
103486
103487+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
103488+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
103489+
103490 /*
103491 * This routine is called by the ICMP module when it gets some
103492 * sort of error condition. If err < 0 then the socket should
103493@@ -945,9 +953,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
103494 dport = usin->sin_port;
103495 if (dport == 0)
103496 return -EINVAL;
103497+
103498+ err = gr_search_udp_sendmsg(sk, usin);
103499+ if (err)
103500+ return err;
103501 } else {
103502 if (sk->sk_state != TCP_ESTABLISHED)
103503 return -EDESTADDRREQ;
103504+
103505+ err = gr_search_udp_sendmsg(sk, NULL);
103506+ if (err)
103507+ return err;
103508+
103509 daddr = inet->inet_daddr;
103510 dport = inet->inet_dport;
103511 /* Open fast path for connected socket.
103512@@ -1195,7 +1212,7 @@ static unsigned int first_packet_length(struct sock *sk)
103513 IS_UDPLITE(sk));
103514 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
103515 IS_UDPLITE(sk));
103516- atomic_inc(&sk->sk_drops);
103517+ atomic_inc_unchecked(&sk->sk_drops);
103518 __skb_unlink(skb, rcvq);
103519 __skb_queue_tail(&list_kill, skb);
103520 }
103521@@ -1275,6 +1292,10 @@ try_again:
103522 if (!skb)
103523 goto out;
103524
103525+ err = gr_search_udp_recvmsg(sk, skb);
103526+ if (err)
103527+ goto out_free;
103528+
103529 ulen = skb->len - sizeof(struct udphdr);
103530 copied = len;
103531 if (copied > ulen)
103532@@ -1307,7 +1328,7 @@ try_again:
103533 if (unlikely(err)) {
103534 trace_kfree_skb(skb, udp_recvmsg);
103535 if (!peeked) {
103536- atomic_inc(&sk->sk_drops);
103537+ atomic_inc_unchecked(&sk->sk_drops);
103538 UDP_INC_STATS_USER(sock_net(sk),
103539 UDP_MIB_INERRORS, is_udplite);
103540 }
103541@@ -1605,7 +1626,7 @@ csum_error:
103542 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
103543 drop:
103544 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
103545- atomic_inc(&sk->sk_drops);
103546+ atomic_inc_unchecked(&sk->sk_drops);
103547 kfree_skb(skb);
103548 return -1;
103549 }
103550@@ -1624,7 +1645,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
103551 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
103552
103553 if (!skb1) {
103554- atomic_inc(&sk->sk_drops);
103555+ atomic_inc_unchecked(&sk->sk_drops);
103556 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
103557 IS_UDPLITE(sk));
103558 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
103559@@ -1830,6 +1851,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
103560 goto csum_error;
103561
103562 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
103563+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103564+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
103565+#endif
103566 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
103567
103568 /*
103569@@ -2416,7 +2440,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
103570 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
103571 0, sock_i_ino(sp),
103572 atomic_read(&sp->sk_refcnt), sp,
103573- atomic_read(&sp->sk_drops));
103574+ atomic_read_unchecked(&sp->sk_drops));
103575 }
103576
103577 int udp4_seq_show(struct seq_file *seq, void *v)
103578diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
103579index 6156f68..d6ab46d 100644
103580--- a/net/ipv4/xfrm4_policy.c
103581+++ b/net/ipv4/xfrm4_policy.c
103582@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
103583 fl4->flowi4_tos = iph->tos;
103584 }
103585
103586-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
103587+static int xfrm4_garbage_collect(struct dst_ops *ops)
103588 {
103589 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
103590
103591- xfrm4_policy_afinfo.garbage_collect(net);
103592+ xfrm_garbage_collect_deferred(net);
103593 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
103594 }
103595
103596@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
103597
103598 static int __net_init xfrm4_net_init(struct net *net)
103599 {
103600- struct ctl_table *table;
103601+ ctl_table_no_const *table = NULL;
103602 struct ctl_table_header *hdr;
103603
103604- table = xfrm4_policy_table;
103605 if (!net_eq(net, &init_net)) {
103606- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
103607+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
103608 if (!table)
103609 goto err_alloc;
103610
103611 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
103612- }
103613-
103614- hdr = register_net_sysctl(net, "net/ipv4", table);
103615+ hdr = register_net_sysctl(net, "net/ipv4", table);
103616+ } else
103617+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
103618 if (!hdr)
103619 goto err_reg;
103620
103621@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
103622 return 0;
103623
103624 err_reg:
103625- if (!net_eq(net, &init_net))
103626- kfree(table);
103627+ kfree(table);
103628 err_alloc:
103629 return -ENOMEM;
103630 }
103631diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
103632index dac9419..534fa31 100644
103633--- a/net/ipv6/addrconf.c
103634+++ b/net/ipv6/addrconf.c
103635@@ -171,7 +171,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
103636 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
103637 .mtu6 = IPV6_MIN_MTU,
103638 .accept_ra = 1,
103639- .accept_redirects = 1,
103640+ .accept_redirects = 0,
103641 .autoconf = 1,
103642 .force_mld_version = 0,
103643 .mldv1_unsolicited_report_interval = 10 * HZ,
103644@@ -208,7 +208,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
103645 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
103646 .mtu6 = IPV6_MIN_MTU,
103647 .accept_ra = 1,
103648- .accept_redirects = 1,
103649+ .accept_redirects = 0,
103650 .autoconf = 1,
103651 .force_mld_version = 0,
103652 .mldv1_unsolicited_report_interval = 10 * HZ,
103653@@ -604,7 +604,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
103654 idx = 0;
103655 head = &net->dev_index_head[h];
103656 rcu_read_lock();
103657- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
103658+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
103659 net->dev_base_seq;
103660 hlist_for_each_entry_rcu(dev, head, index_hlist) {
103661 if (idx < s_idx)
103662@@ -2420,7 +2420,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
103663 p.iph.ihl = 5;
103664 p.iph.protocol = IPPROTO_IPV6;
103665 p.iph.ttl = 64;
103666- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
103667+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
103668
103669 if (ops->ndo_do_ioctl) {
103670 mm_segment_t oldfs = get_fs();
103671@@ -3569,16 +3569,23 @@ static const struct file_operations if6_fops = {
103672 .release = seq_release_net,
103673 };
103674
103675+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
103676+extern void unregister_ipv6_seq_ops_addr(void);
103677+
103678 static int __net_init if6_proc_net_init(struct net *net)
103679 {
103680- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
103681+ register_ipv6_seq_ops_addr(&if6_seq_ops);
103682+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
103683+ unregister_ipv6_seq_ops_addr();
103684 return -ENOMEM;
103685+ }
103686 return 0;
103687 }
103688
103689 static void __net_exit if6_proc_net_exit(struct net *net)
103690 {
103691 remove_proc_entry("if_inet6", net->proc_net);
103692+ unregister_ipv6_seq_ops_addr();
103693 }
103694
103695 static struct pernet_operations if6_proc_net_ops = {
103696@@ -4194,7 +4201,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
103697 s_ip_idx = ip_idx = cb->args[2];
103698
103699 rcu_read_lock();
103700- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
103701+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
103702 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
103703 idx = 0;
103704 head = &net->dev_index_head[h];
103705@@ -4840,7 +4847,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
103706 rt_genid_bump_ipv6(net);
103707 break;
103708 }
103709- atomic_inc(&net->ipv6.dev_addr_genid);
103710+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
103711 }
103712
103713 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
103714@@ -4860,7 +4867,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
103715 int *valp = ctl->data;
103716 int val = *valp;
103717 loff_t pos = *ppos;
103718- struct ctl_table lctl;
103719+ ctl_table_no_const lctl;
103720 int ret;
103721
103722 /*
103723@@ -4945,7 +4952,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
103724 int *valp = ctl->data;
103725 int val = *valp;
103726 loff_t pos = *ppos;
103727- struct ctl_table lctl;
103728+ ctl_table_no_const lctl;
103729 int ret;
103730
103731 /*
103732diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
103733index e8c4400..a4cd5da 100644
103734--- a/net/ipv6/af_inet6.c
103735+++ b/net/ipv6/af_inet6.c
103736@@ -766,7 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
103737 net->ipv6.sysctl.icmpv6_time = 1*HZ;
103738 net->ipv6.sysctl.flowlabel_consistency = 1;
103739 net->ipv6.sysctl.auto_flowlabels = 0;
103740- atomic_set(&net->ipv6.fib6_sernum, 1);
103741+ atomic_set_unchecked(&net->ipv6.fib6_sernum, 1);
103742
103743 err = ipv6_init_mibs(net);
103744 if (err)
103745diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
103746index 49f5e73..ae02d54 100644
103747--- a/net/ipv6/datagram.c
103748+++ b/net/ipv6/datagram.c
103749@@ -941,5 +941,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
103750 0,
103751 sock_i_ino(sp),
103752 atomic_read(&sp->sk_refcnt), sp,
103753- atomic_read(&sp->sk_drops));
103754+ atomic_read_unchecked(&sp->sk_drops));
103755 }
103756diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
103757index d674152..fb5a01d 100644
103758--- a/net/ipv6/icmp.c
103759+++ b/net/ipv6/icmp.c
103760@@ -1005,7 +1005,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
103761
103762 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
103763 {
103764- struct ctl_table *table;
103765+ ctl_table_no_const *table;
103766
103767 table = kmemdup(ipv6_icmp_table_template,
103768 sizeof(ipv6_icmp_table_template),
103769diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
103770index f1c6d5e..faabef6 100644
103771--- a/net/ipv6/ip6_fib.c
103772+++ b/net/ipv6/ip6_fib.c
103773@@ -99,9 +99,9 @@ static int fib6_new_sernum(struct net *net)
103774 int new, old;
103775
103776 do {
103777- old = atomic_read(&net->ipv6.fib6_sernum);
103778+ old = atomic_read_unchecked(&net->ipv6.fib6_sernum);
103779 new = old < INT_MAX ? old + 1 : 1;
103780- } while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
103781+ } while (atomic_cmpxchg_unchecked(&net->ipv6.fib6_sernum,
103782 old, new) != old);
103783 return new;
103784 }
103785diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
103786index 01ccc28..66861c7 100644
103787--- a/net/ipv6/ip6_gre.c
103788+++ b/net/ipv6/ip6_gre.c
103789@@ -71,8 +71,8 @@ struct ip6gre_net {
103790 struct net_device *fb_tunnel_dev;
103791 };
103792
103793-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
103794-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
103795+static struct rtnl_link_ops ip6gre_link_ops;
103796+static struct rtnl_link_ops ip6gre_tap_ops;
103797 static int ip6gre_tunnel_init(struct net_device *dev);
103798 static void ip6gre_tunnel_setup(struct net_device *dev);
103799 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
103800@@ -1289,7 +1289,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
103801 }
103802
103803
103804-static struct inet6_protocol ip6gre_protocol __read_mostly = {
103805+static struct inet6_protocol ip6gre_protocol = {
103806 .handler = ip6gre_rcv,
103807 .err_handler = ip6gre_err,
103808 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
103809@@ -1650,7 +1650,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
103810 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
103811 };
103812
103813-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
103814+static struct rtnl_link_ops ip6gre_link_ops = {
103815 .kind = "ip6gre",
103816 .maxtype = IFLA_GRE_MAX,
103817 .policy = ip6gre_policy,
103818@@ -1664,7 +1664,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
103819 .fill_info = ip6gre_fill_info,
103820 };
103821
103822-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
103823+static struct rtnl_link_ops ip6gre_tap_ops = {
103824 .kind = "ip6gretap",
103825 .maxtype = IFLA_GRE_MAX,
103826 .policy = ip6gre_policy,
103827diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
103828index 92b3da5..77837b8 100644
103829--- a/net/ipv6/ip6_tunnel.c
103830+++ b/net/ipv6/ip6_tunnel.c
103831@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
103832
103833 static int ip6_tnl_dev_init(struct net_device *dev);
103834 static void ip6_tnl_dev_setup(struct net_device *dev);
103835-static struct rtnl_link_ops ip6_link_ops __read_mostly;
103836+static struct rtnl_link_ops ip6_link_ops;
103837
103838 static int ip6_tnl_net_id __read_mostly;
103839 struct ip6_tnl_net {
103840@@ -1771,7 +1771,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
103841 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
103842 };
103843
103844-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
103845+static struct rtnl_link_ops ip6_link_ops = {
103846 .kind = "ip6tnl",
103847 .maxtype = IFLA_IPTUN_MAX,
103848 .policy = ip6_tnl_policy,
103849diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
103850index ace10d0..97a8b49 100644
103851--- a/net/ipv6/ip6_vti.c
103852+++ b/net/ipv6/ip6_vti.c
103853@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
103854
103855 static int vti6_dev_init(struct net_device *dev);
103856 static void vti6_dev_setup(struct net_device *dev);
103857-static struct rtnl_link_ops vti6_link_ops __read_mostly;
103858+static struct rtnl_link_ops vti6_link_ops;
103859
103860 static int vti6_net_id __read_mostly;
103861 struct vti6_net {
103862@@ -1004,7 +1004,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
103863 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
103864 };
103865
103866-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
103867+static struct rtnl_link_ops vti6_link_ops = {
103868 .kind = "vti6",
103869 .maxtype = IFLA_VTI_MAX,
103870 .policy = vti6_policy,
103871diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
103872index 66980d8d..8aef0d1 100644
103873--- a/net/ipv6/ipv6_sockglue.c
103874+++ b/net/ipv6/ipv6_sockglue.c
103875@@ -989,7 +989,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
103876 if (sk->sk_type != SOCK_STREAM)
103877 return -ENOPROTOOPT;
103878
103879- msg.msg_control = optval;
103880+ msg.msg_control = (void __force_kernel *)optval;
103881 msg.msg_controllen = len;
103882 msg.msg_flags = flags;
103883
103884diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
103885index e080fbb..412b3cf 100644
103886--- a/net/ipv6/netfilter/ip6_tables.c
103887+++ b/net/ipv6/netfilter/ip6_tables.c
103888@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
103889 #endif
103890
103891 static int get_info(struct net *net, void __user *user,
103892- const int *len, int compat)
103893+ int len, int compat)
103894 {
103895 char name[XT_TABLE_MAXNAMELEN];
103896 struct xt_table *t;
103897 int ret;
103898
103899- if (*len != sizeof(struct ip6t_getinfo)) {
103900- duprintf("length %u != %zu\n", *len,
103901+ if (len != sizeof(struct ip6t_getinfo)) {
103902+ duprintf("length %u != %zu\n", len,
103903 sizeof(struct ip6t_getinfo));
103904 return -EINVAL;
103905 }
103906@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
103907 info.size = private->size;
103908 strcpy(info.name, name);
103909
103910- if (copy_to_user(user, &info, *len) != 0)
103911+ if (copy_to_user(user, &info, len) != 0)
103912 ret = -EFAULT;
103913 else
103914 ret = 0;
103915@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103916
103917 switch (cmd) {
103918 case IP6T_SO_GET_INFO:
103919- ret = get_info(sock_net(sk), user, len, 1);
103920+ ret = get_info(sock_net(sk), user, *len, 1);
103921 break;
103922 case IP6T_SO_GET_ENTRIES:
103923 ret = compat_get_entries(sock_net(sk), user, len);
103924@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103925
103926 switch (cmd) {
103927 case IP6T_SO_GET_INFO:
103928- ret = get_info(sock_net(sk), user, len, 0);
103929+ ret = get_info(sock_net(sk), user, *len, 0);
103930 break;
103931
103932 case IP6T_SO_GET_ENTRIES:
103933diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
103934index 6f187c8..34b367f 100644
103935--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
103936+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
103937@@ -96,12 +96,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
103938
103939 static int nf_ct_frag6_sysctl_register(struct net *net)
103940 {
103941- struct ctl_table *table;
103942+ ctl_table_no_const *table = NULL;
103943 struct ctl_table_header *hdr;
103944
103945- table = nf_ct_frag6_sysctl_table;
103946 if (!net_eq(net, &init_net)) {
103947- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
103948+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
103949 GFP_KERNEL);
103950 if (table == NULL)
103951 goto err_alloc;
103952@@ -112,9 +111,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
103953 table[2].data = &net->nf_frag.frags.high_thresh;
103954 table[2].extra1 = &net->nf_frag.frags.low_thresh;
103955 table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
103956- }
103957-
103958- hdr = register_net_sysctl(net, "net/netfilter", table);
103959+ hdr = register_net_sysctl(net, "net/netfilter", table);
103960+ } else
103961+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
103962 if (hdr == NULL)
103963 goto err_reg;
103964
103965@@ -122,8 +121,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
103966 return 0;
103967
103968 err_reg:
103969- if (!net_eq(net, &init_net))
103970- kfree(table);
103971+ kfree(table);
103972 err_alloc:
103973 return -ENOMEM;
103974 }
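The same reshuffle recurs in reassembly.c and xfrm6_policy.c below: table starts out NULL, only non-initial namespaces allocate a writable copy, init_net registers the static template directly, and the error path may then call kfree(table) unconditionally because kfree(NULL) is a no-op. In outline, with names marked "example" as placeholders:

	static int example_sysctl_register(struct net *net)
	{
		ctl_table_no_const *table = NULL;
		struct ctl_table_header *hdr;

		if (!net_eq(net, &init_net)) {
			table = kmemdup(example_template,
					sizeof(example_template), GFP_KERNEL);
			if (table == NULL)
				goto err_alloc;
			/* retarget table[i].data at per-netns state here */
			hdr = register_net_sysctl(net, "net/example", table);
		} else
			hdr = register_net_sysctl(net, "net/example",
						  example_template);
		if (hdr == NULL)
			goto err_reg;
		/* the real code stashes hdr for later unregistration */
		return 0;

	err_reg:
		kfree(table);	/* NULL for init_net: kfree(NULL) is a no-op */
	err_alloc:
		return -ENOMEM;
	}
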
103975diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
103976index fe7e3e4..47aba96 100644
103977--- a/net/ipv6/ping.c
103978+++ b/net/ipv6/ping.c
103979@@ -242,6 +242,24 @@ static struct pernet_operations ping_v6_net_ops = {
103980 };
103981 #endif
103982
103983+static struct pingv6_ops real_pingv6_ops = {
103984+ .ipv6_recv_error = ipv6_recv_error,
103985+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
103986+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
103987+ .icmpv6_err_convert = icmpv6_err_convert,
103988+ .ipv6_icmp_error = ipv6_icmp_error,
103989+ .ipv6_chk_addr = ipv6_chk_addr,
103990+};
103991+
103992+static struct pingv6_ops dummy_pingv6_ops = {
103993+ .ipv6_recv_error = dummy_ipv6_recv_error,
103994+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
103995+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
103996+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
103997+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
103998+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
103999+};
104000+
104001 int __init pingv6_init(void)
104002 {
104003 #ifdef CONFIG_PROC_FS
104004@@ -249,13 +267,7 @@ int __init pingv6_init(void)
104005 if (ret)
104006 return ret;
104007 #endif
104008- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
104009- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
104010- pingv6_ops.ip6_datagram_recv_specific_ctl =
104011- ip6_datagram_recv_specific_ctl;
104012- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
104013- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
104014- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
104015+ pingv6_ops = &real_pingv6_ops;
104016 return inet6_register_protosw(&pingv6_protosw);
104017 }
104018
104019@@ -264,14 +276,9 @@ int __init pingv6_init(void)
104020 */
104021 void pingv6_exit(void)
104022 {
104023- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
104024- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
104025- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
104026- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
104027- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
104028- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
104029 #ifdef CONFIG_PROC_FS
104030 unregister_pernet_subsys(&ping_v6_net_ops);
104031 #endif
104032+ pingv6_ops = &dummy_pingv6_ops;
104033 inet6_unregister_protosw(&pingv6_protosw);
104034 }
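Instead of rewriting six function pointers of a writable global at init and exit, ping now keeps two fully populated static tables and flips the single pingv6_ops pointer (a companion hunk elsewhere in the patch turns pingv6_ops into a pointer); the tables themselves can then live in read-only memory. The shape of the pattern, with placeholder names:

	struct example_ops {
		int (*handler)(void);
	};

	static int real_handler(void)  { return 0; }
	static int dummy_handler(void) { return -EAFNOSUPPORT; }

	static const struct example_ops real_ops  = { .handler = real_handler };
	static const struct example_ops dummy_ops = { .handler = dummy_handler };

	static const struct example_ops *cur_ops = &dummy_ops;

	static void example_init(void) { cur_ops = &real_ops; }
	static void example_exit(void) { cur_ops = &dummy_ops; }
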
104035diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
104036index 679253d0..70b653c 100644
104037--- a/net/ipv6/proc.c
104038+++ b/net/ipv6/proc.c
104039@@ -310,7 +310,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
104040 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
104041 goto proc_snmp6_fail;
104042
104043- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
104044+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
104045 if (!net->mib.proc_net_devsnmp6)
104046 goto proc_dev_snmp6_fail;
104047 return 0;
104048diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
104049index ee25631..3c3ac5d 100644
104050--- a/net/ipv6/raw.c
104051+++ b/net/ipv6/raw.c
104052@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
104053 {
104054 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
104055 skb_checksum_complete(skb)) {
104056- atomic_inc(&sk->sk_drops);
104057+ atomic_inc_unchecked(&sk->sk_drops);
104058 kfree_skb(skb);
104059 return NET_RX_DROP;
104060 }
104061@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
104062 struct raw6_sock *rp = raw6_sk(sk);
104063
104064 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
104065- atomic_inc(&sk->sk_drops);
104066+ atomic_inc_unchecked(&sk->sk_drops);
104067 kfree_skb(skb);
104068 return NET_RX_DROP;
104069 }
104070@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
104071
104072 if (inet->hdrincl) {
104073 if (skb_checksum_complete(skb)) {
104074- atomic_inc(&sk->sk_drops);
104075+ atomic_inc_unchecked(&sk->sk_drops);
104076 kfree_skb(skb);
104077 return NET_RX_DROP;
104078 }
104079@@ -609,7 +609,7 @@ out:
104080 return err;
104081 }
104082
104083-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
104084+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
104085 struct flowi6 *fl6, struct dst_entry **dstp,
104086 unsigned int flags)
104087 {
104088@@ -916,12 +916,15 @@ do_confirm:
104089 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
104090 char __user *optval, int optlen)
104091 {
104092+ struct icmp6_filter filter;
104093+
104094 switch (optname) {
104095 case ICMPV6_FILTER:
104096 if (optlen > sizeof(struct icmp6_filter))
104097 optlen = sizeof(struct icmp6_filter);
104098- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
104099+ if (copy_from_user(&filter, optval, optlen))
104100 return -EFAULT;
104101+ raw6_sk(sk)->filter = filter;
104102 return 0;
104103 default:
104104 return -ENOPROTOOPT;
104105@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
104106 char __user *optval, int __user *optlen)
104107 {
104108 int len;
104109+ struct icmp6_filter filter;
104110
104111 switch (optname) {
104112 case ICMPV6_FILTER:
104113@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
104114 len = sizeof(struct icmp6_filter);
104115 if (put_user(len, optlen))
104116 return -EFAULT;
104117- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
104118+ filter = raw6_sk(sk)->filter;
104119+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
104120 return -EFAULT;
104121 return 0;
104122 default:
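Both filter accessors now bounce the data through an on-stack struct icmp6_filter instead of copying straight between userspace and the socket, and the getter additionally rejects len > sizeof(filter). The same pattern in isolation; the helper name is illustrative:

	/* Bounce-buffer pattern for user copies: stage through a fixed-size
	 * stack object so the length is checked against a known object. */
	static int example_set_filter(struct raw6_sock *rp,
				      const char __user *optval, int optlen)
	{
		struct icmp6_filter filter;

		if (optlen > sizeof(filter))
			optlen = sizeof(filter);
		if (copy_from_user(&filter, optval, optlen))
			return -EFAULT;
		rp->filter = filter;
		return 0;
	}
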
104123diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
104124index d7d70e6..bd5e9fc 100644
104125--- a/net/ipv6/reassembly.c
104126+++ b/net/ipv6/reassembly.c
104127@@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
104128
104129 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104130 {
104131- struct ctl_table *table;
104132+ ctl_table_no_const *table = NULL;
104133 struct ctl_table_header *hdr;
104134
104135- table = ip6_frags_ns_ctl_table;
104136 if (!net_eq(net, &init_net)) {
104137- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
104138+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
104139 if (table == NULL)
104140 goto err_alloc;
104141
104142@@ -645,9 +644,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104143 /* Don't export sysctls to unprivileged users */
104144 if (net->user_ns != &init_user_ns)
104145 table[0].procname = NULL;
104146- }
104147+ hdr = register_net_sysctl(net, "net/ipv6", table);
104148+ } else
104149+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
104150
104151- hdr = register_net_sysctl(net, "net/ipv6", table);
104152 if (hdr == NULL)
104153 goto err_reg;
104154
104155@@ -655,8 +655,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104156 return 0;
104157
104158 err_reg:
104159- if (!net_eq(net, &init_net))
104160- kfree(table);
104161+ kfree(table);
104162 err_alloc:
104163 return -ENOMEM;
104164 }
104165diff --git a/net/ipv6/route.c b/net/ipv6/route.c
104166index 1528d84..f393960 100644
104167--- a/net/ipv6/route.c
104168+++ b/net/ipv6/route.c
104169@@ -2978,7 +2978,7 @@ struct ctl_table ipv6_route_table_template[] = {
104170
104171 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
104172 {
104173- struct ctl_table *table;
104174+ ctl_table_no_const *table;
104175
104176 table = kmemdup(ipv6_route_table_template,
104177 sizeof(ipv6_route_table_template),
104178diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
104179index cdbfe5a..e13eb31 100644
104180--- a/net/ipv6/sit.c
104181+++ b/net/ipv6/sit.c
104182@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
104183 static void ipip6_dev_free(struct net_device *dev);
104184 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
104185 __be32 *v4dst);
104186-static struct rtnl_link_ops sit_link_ops __read_mostly;
104187+static struct rtnl_link_ops sit_link_ops;
104188
104189 static int sit_net_id __read_mostly;
104190 struct sit_net {
104191@@ -1751,7 +1751,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
104192 unregister_netdevice_queue(dev, head);
104193 }
104194
104195-static struct rtnl_link_ops sit_link_ops __read_mostly = {
104196+static struct rtnl_link_ops sit_link_ops = {
104197 .kind = "sit",
104198 .maxtype = IFLA_IPTUN_MAX,
104199 .policy = ipip6_policy,
104200diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
104201index c5c10fa..2577d51 100644
104202--- a/net/ipv6/sysctl_net_ipv6.c
104203+++ b/net/ipv6/sysctl_net_ipv6.c
104204@@ -78,7 +78,7 @@ static struct ctl_table ipv6_rotable[] = {
104205
104206 static int __net_init ipv6_sysctl_net_init(struct net *net)
104207 {
104208- struct ctl_table *ipv6_table;
104209+ ctl_table_no_const *ipv6_table;
104210 struct ctl_table *ipv6_route_table;
104211 struct ctl_table *ipv6_icmp_table;
104212 int err;
104213diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
104214index 9c0b54e..5e7bd8f 100644
104215--- a/net/ipv6/tcp_ipv6.c
104216+++ b/net/ipv6/tcp_ipv6.c
104217@@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
104218 }
104219 }
104220
104221+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104222+extern int grsec_enable_blackhole;
104223+#endif
104224+
104225 static void tcp_v6_hash(struct sock *sk)
104226 {
104227 if (sk->sk_state != TCP_CLOSE) {
104228@@ -1343,6 +1347,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
104229 return 0;
104230
104231 reset:
104232+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104233+ if (!grsec_enable_blackhole)
104234+#endif
104235 tcp_v6_send_reset(sk, skb);
104236 discard:
104237 if (opt_skb)
104238@@ -1443,12 +1450,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
104239
104240 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
104241 inet6_iif(skb));
104242- if (!sk)
104243+ if (!sk) {
104244+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104245+ ret = 1;
104246+#endif
104247 goto no_tcp_socket;
104248+ }
104249
104250 process:
104251- if (sk->sk_state == TCP_TIME_WAIT)
104252+ if (sk->sk_state == TCP_TIME_WAIT) {
104253+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104254+ ret = 2;
104255+#endif
104256 goto do_time_wait;
104257+ }
104258
104259 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
104260 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
104261@@ -1499,6 +1514,10 @@ csum_error:
104262 bad_packet:
104263 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
104264 } else {
104265+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104266+ if (!grsec_enable_blackhole || (ret == 1 &&
104267+ (skb->dev->flags & IFF_LOOPBACK)))
104268+#endif
104269 tcp_v6_send_reset(NULL, skb);
104270 }
104271
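The ret markers threaded through tcp_v6_rcv record how the packet reached the error path (1: no listening socket, 2: TIME_WAIT), letting the blackhole check still answer loopback traffic that merely had no socket. Condensed from the hunk above:

	#ifdef CONFIG_GRKERNSEC_BLACKHOLE
		if (!grsec_enable_blackhole ||
		    (ret == 1 && (skb->dev->flags & IFF_LOOPBACK)))
	#endif
			tcp_v6_send_reset(NULL, skb);
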
104272diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
104273index 189dc4a..458bec0 100644
104274--- a/net/ipv6/udp.c
104275+++ b/net/ipv6/udp.c
104276@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
104277 udp_ipv6_hash_secret + net_hash_mix(net));
104278 }
104279
104280+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104281+extern int grsec_enable_blackhole;
104282+#endif
104283+
104284 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
104285 {
104286 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
104287@@ -448,7 +452,7 @@ try_again:
104288 if (unlikely(err)) {
104289 trace_kfree_skb(skb, udpv6_recvmsg);
104290 if (!peeked) {
104291- atomic_inc(&sk->sk_drops);
104292+ atomic_inc_unchecked(&sk->sk_drops);
104293 if (is_udp4)
104294 UDP_INC_STATS_USER(sock_net(sk),
104295 UDP_MIB_INERRORS,
104296@@ -714,7 +718,7 @@ csum_error:
104297 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
104298 drop:
104299 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
104300- atomic_inc(&sk->sk_drops);
104301+ atomic_inc_unchecked(&sk->sk_drops);
104302 kfree_skb(skb);
104303 return -1;
104304 }
104305@@ -753,7 +757,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
104306 if (likely(skb1 == NULL))
104307 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
104308 if (!skb1) {
104309- atomic_inc(&sk->sk_drops);
104310+ atomic_inc_unchecked(&sk->sk_drops);
104311 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
104312 IS_UDPLITE(sk));
104313 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
104314@@ -937,6 +941,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
104315 goto csum_error;
104316
104317 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
104318+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104319+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
104320+#endif
104321 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
104322
104323 kfree_skb(skb);
104324diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
104325index 48bf5a0..691985a 100644
104326--- a/net/ipv6/xfrm6_policy.c
104327+++ b/net/ipv6/xfrm6_policy.c
104328@@ -223,11 +223,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
104329 }
104330 }
104331
104332-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
104333+static int xfrm6_garbage_collect(struct dst_ops *ops)
104334 {
104335 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
104336
104337- xfrm6_policy_afinfo.garbage_collect(net);
104338+ xfrm_garbage_collect_deferred(net);
104339 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
104340 }
104341
104342@@ -340,19 +340,19 @@ static struct ctl_table xfrm6_policy_table[] = {
104343
104344 static int __net_init xfrm6_net_init(struct net *net)
104345 {
104346- struct ctl_table *table;
104347+ ctl_table_no_const *table = NULL;
104348 struct ctl_table_header *hdr;
104349
104350- table = xfrm6_policy_table;
104351 if (!net_eq(net, &init_net)) {
104352- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104353+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104354 if (!table)
104355 goto err_alloc;
104356
104357 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
104358- }
104359+ hdr = register_net_sysctl(net, "net/ipv6", table);
104360+ } else
104361+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
104362
104363- hdr = register_net_sysctl(net, "net/ipv6", table);
104364 if (!hdr)
104365 goto err_reg;
104366
104367@@ -360,8 +360,7 @@ static int __net_init xfrm6_net_init(struct net *net)
104368 return 0;
104369
104370 err_reg:
104371- if (!net_eq(net, &init_net))
104372- kfree(table);
104373+ kfree(table);
104374 err_alloc:
104375 return -ENOMEM;
104376 }
104377diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
104378index c1d247e..9e5949d 100644
104379--- a/net/ipx/ipx_proc.c
104380+++ b/net/ipx/ipx_proc.c
104381@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
104382 struct proc_dir_entry *p;
104383 int rc = -ENOMEM;
104384
104385- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
104386+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
104387
104388 if (!ipx_proc_dir)
104389 goto out;
104390diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
104391index 4efe486..dee966e 100644
104392--- a/net/irda/ircomm/ircomm_tty.c
104393+++ b/net/irda/ircomm/ircomm_tty.c
104394@@ -310,10 +310,10 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104395 add_wait_queue(&port->open_wait, &wait);
104396
104397 pr_debug("%s(%d):block_til_ready before block on %s open_count=%d\n",
104398- __FILE__, __LINE__, tty->driver->name, port->count);
104399+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104400
104401 spin_lock_irqsave(&port->lock, flags);
104402- port->count--;
104403+ atomic_dec(&port->count);
104404 port->blocked_open++;
104405 spin_unlock_irqrestore(&port->lock, flags);
104406
104407@@ -348,7 +348,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104408 }
104409
104410 pr_debug("%s(%d):block_til_ready blocking on %s open_count=%d\n",
104411- __FILE__, __LINE__, tty->driver->name, port->count);
104412+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104413
104414 schedule();
104415 }
104416@@ -358,12 +358,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104417
104418 spin_lock_irqsave(&port->lock, flags);
104419 if (!tty_hung_up_p(filp))
104420- port->count++;
104421+ atomic_inc(&port->count);
104422 port->blocked_open--;
104423 spin_unlock_irqrestore(&port->lock, flags);
104424
104425 pr_debug("%s(%d):block_til_ready after blocking on %s open_count=%d\n",
104426- __FILE__, __LINE__, tty->driver->name, port->count);
104427+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104428
104429 if (!retval)
104430 port->flags |= ASYNC_NORMAL_ACTIVE;
104431@@ -433,12 +433,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
104432
104433 /* ++ is not atomic, so this should be protected - Jean II */
104434 spin_lock_irqsave(&self->port.lock, flags);
104435- self->port.count++;
104436+ atomic_inc(&self->port.count);
104437 spin_unlock_irqrestore(&self->port.lock, flags);
104438 tty_port_tty_set(&self->port, tty);
104439
104440 pr_debug("%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
104441- self->line, self->port.count);
104442+ self->line, atomic_read(&self->port.count));
104443
104444 /* Not really used by us, but lets do it anyway */
104445 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
104446@@ -961,7 +961,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
104447 tty_kref_put(port->tty);
104448 }
104449 port->tty = NULL;
104450- port->count = 0;
104451+ atomic_set(&port->count, 0);
104452 spin_unlock_irqrestore(&port->lock, flags);
104453
104454 wake_up_interruptible(&port->open_wait);
104455@@ -1308,7 +1308,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
104456 seq_putc(m, '\n');
104457
104458 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
104459- seq_printf(m, "Open count: %d\n", self->port.count);
104460+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
104461 seq_printf(m, "Max data size: %d\n", self->max_data_size);
104462 seq_printf(m, "Max header size: %d\n", self->max_header_size);
104463
104464diff --git a/net/irda/irproc.c b/net/irda/irproc.c
104465index b9ac598..f88cc56 100644
104466--- a/net/irda/irproc.c
104467+++ b/net/irda/irproc.c
104468@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
104469 {
104470 int i;
104471
104472- proc_irda = proc_mkdir("irda", init_net.proc_net);
104473+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
104474 if (proc_irda == NULL)
104475 return;
104476
104477diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
104478index 2e9953b..ed06350 100644
104479--- a/net/iucv/af_iucv.c
104480+++ b/net/iucv/af_iucv.c
104481@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
104482 {
104483 char name[12];
104484
104485- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
104486+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
104487 while (__iucv_get_sock_by_name(name)) {
104488 sprintf(name, "%08x",
104489- atomic_inc_return(&iucv_sk_list.autobind_name));
104490+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
104491 }
104492 memcpy(iucv->src_name, name, 8);
104493 }
104494diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
104495index 2a6a1fd..6c112b0 100644
104496--- a/net/iucv/iucv.c
104497+++ b/net/iucv/iucv.c
104498@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
104499 return NOTIFY_OK;
104500 }
104501
104502-static struct notifier_block __refdata iucv_cpu_notifier = {
104503+static struct notifier_block iucv_cpu_notifier = {
104504 .notifier_call = iucv_cpu_notify,
104505 };
104506
104507diff --git a/net/key/af_key.c b/net/key/af_key.c
104508index f8ac939..1e189bf 100644
104509--- a/net/key/af_key.c
104510+++ b/net/key/af_key.c
104511@@ -3049,10 +3049,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
104512 static u32 get_acqseq(void)
104513 {
104514 u32 res;
104515- static atomic_t acqseq;
104516+ static atomic_unchecked_t acqseq;
104517
104518 do {
104519- res = atomic_inc_return(&acqseq);
104520+ res = atomic_inc_return_unchecked(&acqseq);
104521 } while (!res);
104522 return res;
104523 }
104524diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
104525index 781b3a2..73a7434 100644
104526--- a/net/l2tp/l2tp_eth.c
104527+++ b/net/l2tp/l2tp_eth.c
104528@@ -42,12 +42,12 @@ struct l2tp_eth {
104529 struct sock *tunnel_sock;
104530 struct l2tp_session *session;
104531 struct list_head list;
104532- atomic_long_t tx_bytes;
104533- atomic_long_t tx_packets;
104534- atomic_long_t tx_dropped;
104535- atomic_long_t rx_bytes;
104536- atomic_long_t rx_packets;
104537- atomic_long_t rx_errors;
104538+ atomic_long_unchecked_t tx_bytes;
104539+ atomic_long_unchecked_t tx_packets;
104540+ atomic_long_unchecked_t tx_dropped;
104541+ atomic_long_unchecked_t rx_bytes;
104542+ atomic_long_unchecked_t rx_packets;
104543+ atomic_long_unchecked_t rx_errors;
104544 };
104545
104546 /* via l2tp_session_priv() */
104547@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
104548 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
104549
104550 if (likely(ret == NET_XMIT_SUCCESS)) {
104551- atomic_long_add(len, &priv->tx_bytes);
104552- atomic_long_inc(&priv->tx_packets);
104553+ atomic_long_add_unchecked(len, &priv->tx_bytes);
104554+ atomic_long_inc_unchecked(&priv->tx_packets);
104555 } else {
104556- atomic_long_inc(&priv->tx_dropped);
104557+ atomic_long_inc_unchecked(&priv->tx_dropped);
104558 }
104559 return NETDEV_TX_OK;
104560 }
104561@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
104562 {
104563 struct l2tp_eth *priv = netdev_priv(dev);
104564
104565- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
104566- stats->tx_packets = atomic_long_read(&priv->tx_packets);
104567- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
104568- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
104569- stats->rx_packets = atomic_long_read(&priv->rx_packets);
104570- stats->rx_errors = atomic_long_read(&priv->rx_errors);
104571+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
104572+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
104573+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
104574+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
104575+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
104576+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
104577 return stats;
104578 }
104579
104580@@ -167,15 +167,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
104581 nf_reset(skb);
104582
104583 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
104584- atomic_long_inc(&priv->rx_packets);
104585- atomic_long_add(data_len, &priv->rx_bytes);
104586+ atomic_long_inc_unchecked(&priv->rx_packets);
104587+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
104588 } else {
104589- atomic_long_inc(&priv->rx_errors);
104590+ atomic_long_inc_unchecked(&priv->rx_errors);
104591 }
104592 return;
104593
104594 error:
104595- atomic_long_inc(&priv->rx_errors);
104596+ atomic_long_inc_unchecked(&priv->rx_errors);
104597 kfree_skb(skb);
104598 }
104599
104600diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
104601index 1a3c7e0..80f8b0c 100644
104602--- a/net/llc/llc_proc.c
104603+++ b/net/llc/llc_proc.c
104604@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
104605 int rc = -ENOMEM;
104606 struct proc_dir_entry *p;
104607
104608- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
104609+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
104610 if (!llc_proc_dir)
104611 goto out;
104612
104613diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
104614index e75d5c5..429fc95 100644
104615--- a/net/mac80211/cfg.c
104616+++ b/net/mac80211/cfg.c
104617@@ -543,7 +543,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
104618 ret = ieee80211_vif_use_channel(sdata, chandef,
104619 IEEE80211_CHANCTX_EXCLUSIVE);
104620 }
104621- } else if (local->open_count == local->monitors) {
104622+ } else if (local_read(&local->open_count) == local->monitors) {
104623 local->_oper_chandef = *chandef;
104624 ieee80211_hw_config(local, 0);
104625 }
104626@@ -3416,7 +3416,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
104627 else
104628 local->probe_req_reg--;
104629
104630- if (!local->open_count)
104631+ if (!local_read(&local->open_count))
104632 break;
104633
104634 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
104635@@ -3551,8 +3551,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
104636 if (chanctx_conf) {
104637 *chandef = sdata->vif.bss_conf.chandef;
104638 ret = 0;
104639- } else if (local->open_count > 0 &&
104640- local->open_count == local->monitors &&
104641+ } else if (local_read(&local->open_count) > 0 &&
104642+ local_read(&local->open_count) == local->monitors &&
104643 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
104644 if (local->use_chanctx)
104645 *chandef = local->monitor_chandef;
104646diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
104647index fa7568c..74c815c 100644
104648--- a/net/mac80211/ieee80211_i.h
104649+++ b/net/mac80211/ieee80211_i.h
104650@@ -29,6 +29,7 @@
104651 #include <net/ieee80211_radiotap.h>
104652 #include <net/cfg80211.h>
104653 #include <net/mac80211.h>
104654+#include <asm/local.h>
104655 #include "key.h"
104656 #include "sta_info.h"
104657 #include "debug.h"
104658@@ -1125,7 +1126,7 @@ struct ieee80211_local {
104659 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
104660 spinlock_t queue_stop_reason_lock;
104661
104662- int open_count;
104663+ local_t open_count;
104664 int monitors, cooked_mntrs;
104665 /* number of interfaces with corresponding FIF_ flags */
104666 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
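open_count switches from a plain int to a local_t, so every access in the mac80211 hunks below goes through the <asm/local.h> accessors as an explicit read-modify-write primitive rather than bare integer arithmetic. The API in miniature:

	#include <asm/local.h>

	static local_t open_count = LOCAL_INIT(0);

	static void iface_up(void)    { local_inc(&open_count); }
	static void iface_down(void)  { local_dec(&open_count); }
	static long ifaces_open(void) { return local_read(&open_count); }
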
104667diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
104668index 4173553..e3b5a3f 100644
104669--- a/net/mac80211/iface.c
104670+++ b/net/mac80211/iface.c
104671@@ -543,7 +543,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104672 break;
104673 }
104674
104675- if (local->open_count == 0) {
104676+ if (local_read(&local->open_count) == 0) {
104677 res = drv_start(local);
104678 if (res)
104679 goto err_del_bss;
104680@@ -590,7 +590,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104681 res = drv_add_interface(local, sdata);
104682 if (res)
104683 goto err_stop;
104684- } else if (local->monitors == 0 && local->open_count == 0) {
104685+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
104686 res = ieee80211_add_virtual_monitor(local);
104687 if (res)
104688 goto err_stop;
104689@@ -700,7 +700,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104690 atomic_inc(&local->iff_promiscs);
104691
104692 if (coming_up)
104693- local->open_count++;
104694+ local_inc(&local->open_count);
104695
104696 if (hw_reconf_flags)
104697 ieee80211_hw_config(local, hw_reconf_flags);
104698@@ -738,7 +738,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104699 err_del_interface:
104700 drv_remove_interface(local, sdata);
104701 err_stop:
104702- if (!local->open_count)
104703+ if (!local_read(&local->open_count))
104704 drv_stop(local);
104705 err_del_bss:
104706 sdata->bss = NULL;
104707@@ -906,7 +906,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104708 }
104709
104710 if (going_down)
104711- local->open_count--;
104712+ local_dec(&local->open_count);
104713
104714 switch (sdata->vif.type) {
104715 case NL80211_IFTYPE_AP_VLAN:
104716@@ -968,7 +968,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104717 }
104718 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
104719
104720- if (local->open_count == 0)
104721+ if (local_read(&local->open_count) == 0)
104722 ieee80211_clear_tx_pending(local);
104723
104724 /*
104725@@ -1011,7 +1011,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104726 if (cancel_scan)
104727 flush_delayed_work(&local->scan_work);
104728
104729- if (local->open_count == 0) {
104730+ if (local_read(&local->open_count) == 0) {
104731 ieee80211_stop_device(local);
104732
104733 /* no reconfiguring after stop! */
104734@@ -1022,7 +1022,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104735 ieee80211_configure_filter(local);
104736 ieee80211_hw_config(local, hw_reconf_flags);
104737
104738- if (local->monitors == local->open_count)
104739+ if (local->monitors == local_read(&local->open_count))
104740 ieee80211_add_virtual_monitor(local);
104741 }
104742
104743diff --git a/net/mac80211/main.c b/net/mac80211/main.c
104744index 6ab99da..f9502d4 100644
104745--- a/net/mac80211/main.c
104746+++ b/net/mac80211/main.c
104747@@ -175,7 +175,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
104748 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
104749 IEEE80211_CONF_CHANGE_POWER);
104750
104751- if (changed && local->open_count) {
104752+ if (changed && local_read(&local->open_count)) {
104753 ret = drv_config(local, changed);
104754 /*
104755 * Goal:
104756diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
104757index 4a95fe3..0bfd713 100644
104758--- a/net/mac80211/pm.c
104759+++ b/net/mac80211/pm.c
104760@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104761 struct ieee80211_sub_if_data *sdata;
104762 struct sta_info *sta;
104763
104764- if (!local->open_count)
104765+ if (!local_read(&local->open_count))
104766 goto suspend;
104767
104768 ieee80211_scan_cancel(local);
104769@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104770 cancel_work_sync(&local->dynamic_ps_enable_work);
104771 del_timer_sync(&local->dynamic_ps_timer);
104772
104773- local->wowlan = wowlan && local->open_count;
104774+ local->wowlan = wowlan && local_read(&local->open_count);
104775 if (local->wowlan) {
104776 int err = drv_suspend(local, wowlan);
104777 if (err < 0) {
104778@@ -126,7 +126,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104779 WARN_ON(!list_empty(&local->chanctx_list));
104780
104781 /* stop hardware - this must stop RX */
104782- if (local->open_count)
104783+ if (local_read(&local->open_count))
104784 ieee80211_stop_device(local);
104785
104786 suspend:
104787diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
104788index d53355b..21f583a 100644
104789--- a/net/mac80211/rate.c
104790+++ b/net/mac80211/rate.c
104791@@ -724,7 +724,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
104792
104793 ASSERT_RTNL();
104794
104795- if (local->open_count)
104796+ if (local_read(&local->open_count))
104797 return -EBUSY;
104798
104799 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
104800diff --git a/net/mac80211/util.c b/net/mac80211/util.c
104801index 1ce38e7..77267ad 100644
104802--- a/net/mac80211/util.c
104803+++ b/net/mac80211/util.c
104804@@ -1757,7 +1757,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
104805 }
104806 #endif
104807 /* everything else happens only if HW was up & running */
104808- if (!local->open_count)
104809+ if (!local_read(&local->open_count))
104810 goto wake_up;
104811
104812 /*
104813@@ -1987,7 +1987,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
104814 local->in_reconfig = false;
104815 barrier();
104816
104817- if (local->monitors == local->open_count && local->monitors > 0)
104818+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
104819 ieee80211_add_virtual_monitor(local);
104820
104821 /*
104822diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
104823index b02660f..c0f791c 100644
104824--- a/net/netfilter/Kconfig
104825+++ b/net/netfilter/Kconfig
104826@@ -1122,6 +1122,16 @@ config NETFILTER_XT_MATCH_ESP
104827
104828 To compile it as a module, choose M here. If unsure, say N.
104829
104830+config NETFILTER_XT_MATCH_GRADM
104831+ tristate '"gradm" match support'
104832+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
104833+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
104834+ ---help---
104835+	  The gradm match allows matching on whether grsecurity RBAC is
104836+	  enabled.  It is useful when iptables rules are applied early
104837+	  during boot to prevent connections to the machine (except from
104838+	  a trusted host) while the RBAC system is still disabled.
104839+
104840 config NETFILTER_XT_MATCH_HASHLIMIT
104841 tristate '"hashlimit" match support'
104842 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
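An early-boot rule of the kind the help text describes might look as follows; the trusted address is a placeholder, and the --enabled option name is assumed from grsecurity's documentation for the companion iptables userspace extension rather than taken from this patch:

	# Drop everything except traffic from one trusted host until the
	# RBAC system has been enabled (option name assumed).
	iptables -A INPUT ! -s 192.168.0.10 -m gradm ! --enabled -j DROP
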
104843diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
104844index 89f73a9..e4e5bd9 100644
104845--- a/net/netfilter/Makefile
104846+++ b/net/netfilter/Makefile
104847@@ -139,6 +139,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
104848 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
104849 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
104850 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
104851+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
104852 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
104853 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
104854 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
104855diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
104856index d259da3..6a32b2c 100644
104857--- a/net/netfilter/ipset/ip_set_core.c
104858+++ b/net/netfilter/ipset/ip_set_core.c
104859@@ -1952,7 +1952,7 @@ done:
104860 return ret;
104861 }
104862
104863-static struct nf_sockopt_ops so_set __read_mostly = {
104864+static struct nf_sockopt_ops so_set = {
104865 .pf = PF_INET,
104866 .get_optmin = SO_IP_SET,
104867 .get_optmax = SO_IP_SET + 1,
104868diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
104869index b0f7b62..0541842 100644
104870--- a/net/netfilter/ipvs/ip_vs_conn.c
104871+++ b/net/netfilter/ipvs/ip_vs_conn.c
104872@@ -572,7 +572,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
104873 /* Increase the refcnt counter of the dest */
104874 ip_vs_dest_hold(dest);
104875
104876- conn_flags = atomic_read(&dest->conn_flags);
104877+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
104878 if (cp->protocol != IPPROTO_UDP)
104879 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
104880 flags = cp->flags;
104881@@ -922,7 +922,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
104882
104883 cp->control = NULL;
104884 atomic_set(&cp->n_control, 0);
104885- atomic_set(&cp->in_pkts, 0);
104886+ atomic_set_unchecked(&cp->in_pkts, 0);
104887
104888 cp->packet_xmit = NULL;
104889 cp->app = NULL;
104890@@ -1229,7 +1229,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
104891
104892 /* Don't drop the entry if its number of incoming packets is not
104893 located in [0, 8] */
104894- i = atomic_read(&cp->in_pkts);
104895+ i = atomic_read_unchecked(&cp->in_pkts);
104896 if (i > 8 || i < 0) return 0;
104897
104898 if (!todrop_rate[i]) return 0;
104899diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
104900index b87ca32..76c7799 100644
104901--- a/net/netfilter/ipvs/ip_vs_core.c
104902+++ b/net/netfilter/ipvs/ip_vs_core.c
104903@@ -568,7 +568,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
104904 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
104905 /* do not touch skb anymore */
104906
104907- atomic_inc(&cp->in_pkts);
104908+ atomic_inc_unchecked(&cp->in_pkts);
104909 ip_vs_conn_put(cp);
104910 return ret;
104911 }
104912@@ -1723,7 +1723,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
104913 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
104914 pkts = sysctl_sync_threshold(ipvs);
104915 else
104916- pkts = atomic_add_return(1, &cp->in_pkts);
104917+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104918
104919 if (ipvs->sync_state & IP_VS_STATE_MASTER)
104920 ip_vs_sync_conn(net, cp, pkts);
104921diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
104922index fdcda8b..dbc1979 100644
104923--- a/net/netfilter/ipvs/ip_vs_ctl.c
104924+++ b/net/netfilter/ipvs/ip_vs_ctl.c
104925@@ -799,7 +799,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
104926 */
104927 ip_vs_rs_hash(ipvs, dest);
104928 }
104929- atomic_set(&dest->conn_flags, conn_flags);
104930+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
104931
104932 /* bind the service */
104933 old_svc = rcu_dereference_protected(dest->svc, 1);
104934@@ -1664,7 +1664,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
104935 * align with netns init in ip_vs_control_net_init()
104936 */
104937
104938-static struct ctl_table vs_vars[] = {
104939+static ctl_table_no_const vs_vars[] __read_only = {
104940 {
104941 .procname = "amemthresh",
104942 .maxlen = sizeof(int),
104943@@ -1999,7 +1999,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
104944 " %-7s %-6d %-10d %-10d\n",
104945 &dest->addr.in6,
104946 ntohs(dest->port),
104947- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
104948+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
104949 atomic_read(&dest->weight),
104950 atomic_read(&dest->activeconns),
104951 atomic_read(&dest->inactconns));
104952@@ -2010,7 +2010,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
104953 "%-7s %-6d %-10d %-10d\n",
104954 ntohl(dest->addr.ip),
104955 ntohs(dest->port),
104956- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
104957+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
104958 atomic_read(&dest->weight),
104959 atomic_read(&dest->activeconns),
104960 atomic_read(&dest->inactconns));
104961@@ -2499,7 +2499,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
104962
104963 entry.addr = dest->addr.ip;
104964 entry.port = dest->port;
104965- entry.conn_flags = atomic_read(&dest->conn_flags);
104966+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
104967 entry.weight = atomic_read(&dest->weight);
104968 entry.u_threshold = dest->u_threshold;
104969 entry.l_threshold = dest->l_threshold;
104970@@ -3039,7 +3039,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
104971 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
104972 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
104973 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
104974- (atomic_read(&dest->conn_flags) &
104975+ (atomic_read_unchecked(&dest->conn_flags) &
104976 IP_VS_CONN_F_FWD_MASK)) ||
104977 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
104978 atomic_read(&dest->weight)) ||
104979@@ -3672,7 +3672,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
104980 {
104981 int idx;
104982 struct netns_ipvs *ipvs = net_ipvs(net);
104983- struct ctl_table *tbl;
104984+ ctl_table_no_const *tbl;
104985
104986 atomic_set(&ipvs->dropentry, 0);
104987 spin_lock_init(&ipvs->dropentry_lock);
104988diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
104989index 127f140..553d652 100644
104990--- a/net/netfilter/ipvs/ip_vs_lblc.c
104991+++ b/net/netfilter/ipvs/ip_vs_lblc.c
104992@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
104993 * IPVS LBLC sysctl table
104994 */
104995 #ifdef CONFIG_SYSCTL
104996-static struct ctl_table vs_vars_table[] = {
104997+static ctl_table_no_const vs_vars_table[] __read_only = {
104998 {
104999 .procname = "lblc_expiration",
105000 .data = NULL,
105001diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
105002index 2229d2d..b32b785 100644
105003--- a/net/netfilter/ipvs/ip_vs_lblcr.c
105004+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
105005@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
105006 * IPVS LBLCR sysctl table
105007 */
105008
105009-static struct ctl_table vs_vars_table[] = {
105010+static ctl_table_no_const vs_vars_table[] __read_only = {
105011 {
105012 .procname = "lblcr_expiration",
105013 .data = NULL,
105014diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
105015index d93ceeb..4556144 100644
105016--- a/net/netfilter/ipvs/ip_vs_sync.c
105017+++ b/net/netfilter/ipvs/ip_vs_sync.c
105018@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
105019 cp = cp->control;
105020 if (cp) {
105021 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
105022- pkts = atomic_add_return(1, &cp->in_pkts);
105023+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
105024 else
105025 pkts = sysctl_sync_threshold(ipvs);
105026 ip_vs_sync_conn(net, cp->control, pkts);
105027@@ -771,7 +771,7 @@ control:
105028 if (!cp)
105029 return;
105030 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
105031- pkts = atomic_add_return(1, &cp->in_pkts);
105032+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
105033 else
105034 pkts = sysctl_sync_threshold(ipvs);
105035 goto sloop;
105036@@ -902,7 +902,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
105037
105038 if (opt)
105039 memcpy(&cp->in_seq, opt, sizeof(*opt));
105040- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
105041+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
105042 cp->state = state;
105043 cp->old_state = cp->state;
105044 /*
105045diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
105046index 3aedbda..6a63567 100644
105047--- a/net/netfilter/ipvs/ip_vs_xmit.c
105048+++ b/net/netfilter/ipvs/ip_vs_xmit.c
105049@@ -1214,7 +1214,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
105050 else
105051 rc = NF_ACCEPT;
105052 /* do not touch skb anymore */
105053- atomic_inc(&cp->in_pkts);
105054+ atomic_inc_unchecked(&cp->in_pkts);
105055 goto out;
105056 }
105057
105058@@ -1307,7 +1307,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
105059 else
105060 rc = NF_ACCEPT;
105061 /* do not touch skb anymore */
105062- atomic_inc(&cp->in_pkts);
105063+ atomic_inc_unchecked(&cp->in_pkts);
105064 goto out;
105065 }
105066
105067diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
105068index a4b5e2a..13b1de3 100644
105069--- a/net/netfilter/nf_conntrack_acct.c
105070+++ b/net/netfilter/nf_conntrack_acct.c
105071@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
105072 #ifdef CONFIG_SYSCTL
105073 static int nf_conntrack_acct_init_sysctl(struct net *net)
105074 {
105075- struct ctl_table *table;
105076+ ctl_table_no_const *table;
105077
105078 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
105079 GFP_KERNEL);
105080diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
105081index 46d1b26..b7f3b76 100644
105082--- a/net/netfilter/nf_conntrack_core.c
105083+++ b/net/netfilter/nf_conntrack_core.c
105084@@ -1734,6 +1734,10 @@ void nf_conntrack_init_end(void)
105085 #define DYING_NULLS_VAL ((1<<30)+1)
105086 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
105087
105088+#ifdef CONFIG_GRKERNSEC_HIDESYM
105089+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
105090+#endif
105091+
105092 int nf_conntrack_init_net(struct net *net)
105093 {
105094 int ret = -ENOMEM;
105095@@ -1759,7 +1763,11 @@ int nf_conntrack_init_net(struct net *net)
105096 if (!net->ct.stat)
105097 goto err_pcpu_lists;
105098
105099+#ifdef CONFIG_GRKERNSEC_HIDESYM
105100+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
105101+#else
105102 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
105103+#endif
105104 if (!net->ct.slabname)
105105 goto err_slabname;
105106
105107diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
105108index 4e78c57..ec8fb74 100644
105109--- a/net/netfilter/nf_conntrack_ecache.c
105110+++ b/net/netfilter/nf_conntrack_ecache.c
105111@@ -264,7 +264,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
105112 #ifdef CONFIG_SYSCTL
105113 static int nf_conntrack_event_init_sysctl(struct net *net)
105114 {
105115- struct ctl_table *table;
105116+ ctl_table_no_const *table;
105117
105118 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
105119 GFP_KERNEL);
105120diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
105121index bd9d315..989947e 100644
105122--- a/net/netfilter/nf_conntrack_helper.c
105123+++ b/net/netfilter/nf_conntrack_helper.c
105124@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
105125
105126 static int nf_conntrack_helper_init_sysctl(struct net *net)
105127 {
105128- struct ctl_table *table;
105129+ ctl_table_no_const *table;
105130
105131 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
105132 GFP_KERNEL);
105133diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
105134index b65d586..beec902 100644
105135--- a/net/netfilter/nf_conntrack_proto.c
105136+++ b/net/netfilter/nf_conntrack_proto.c
105137@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
105138
105139 static void
105140 nf_ct_unregister_sysctl(struct ctl_table_header **header,
105141- struct ctl_table **table,
105142+ ctl_table_no_const **table,
105143 unsigned int users)
105144 {
105145 if (users > 0)
105146diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
105147index fc823fa..8311af3 100644
105148--- a/net/netfilter/nf_conntrack_standalone.c
105149+++ b/net/netfilter/nf_conntrack_standalone.c
105150@@ -468,7 +468,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
105151
105152 static int nf_conntrack_standalone_init_sysctl(struct net *net)
105153 {
105154- struct ctl_table *table;
105155+ ctl_table_no_const *table;
105156
105157 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
105158 GFP_KERNEL);
105159diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
105160index 7a394df..bd91a8a 100644
105161--- a/net/netfilter/nf_conntrack_timestamp.c
105162+++ b/net/netfilter/nf_conntrack_timestamp.c
105163@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
105164 #ifdef CONFIG_SYSCTL
105165 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
105166 {
105167- struct ctl_table *table;
105168+ ctl_table_no_const *table;
105169
105170 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
105171 GFP_KERNEL);
105172diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
105173index 43c926c..a5731d8 100644
105174--- a/net/netfilter/nf_log.c
105175+++ b/net/netfilter/nf_log.c
105176@@ -362,7 +362,7 @@ static const struct file_operations nflog_file_ops = {
105177
105178 #ifdef CONFIG_SYSCTL
105179 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
105180-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
105181+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
105182
105183 static int nf_log_proc_dostring(struct ctl_table *table, int write,
105184 void __user *buffer, size_t *lenp, loff_t *ppos)
105185@@ -393,13 +393,15 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
105186 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
105187 mutex_unlock(&nf_log_mutex);
105188 } else {
105189+ ctl_table_no_const nf_log_table = *table;
105190+
105191 mutex_lock(&nf_log_mutex);
105192 logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
105193 if (!logger)
105194- table->data = "NONE";
105195+ nf_log_table.data = "NONE";
105196 else
105197- table->data = logger->name;
105198- r = proc_dostring(table, write, buffer, lenp, ppos);
105199+ nf_log_table.data = logger->name;
105200+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
105201 mutex_unlock(&nf_log_mutex);
105202 }
105203
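Because the registered nf_log table is now read-only, the read path copies the entry to the stack and points only the copy's .data at the logger name before calling proc_dostring(). The shadow-copy idiom on its own, with handler and helper names illustrative:

	static int example_proc_dostring(struct ctl_table *table, int write,
					 void __user *buffer, size_t *lenp,
					 loff_t *ppos)
	{
		ctl_table_no_const tmp = *table;	/* stack shadow copy */

		tmp.data = example_current_name();	/* assumed helper */
		return proc_dostring(&tmp, write, buffer, lenp, ppos);
	}
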
105204diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
105205index c68c1e5..8b5d670 100644
105206--- a/net/netfilter/nf_sockopt.c
105207+++ b/net/netfilter/nf_sockopt.c
105208@@ -43,7 +43,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
105209 }
105210 }
105211
105212- list_add(&reg->list, &nf_sockopts);
105213+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
105214 out:
105215 mutex_unlock(&nf_sockopt_mutex);
105216 return ret;
105217@@ -53,7 +53,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
105218 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
105219 {
105220 mutex_lock(&nf_sockopt_mutex);
105221- list_del(&reg->list);
105222+ pax_list_del((struct list_head *)&reg->list);
105223 mutex_unlock(&nf_sockopt_mutex);
105224 }
105225 EXPORT_SYMBOL(nf_unregister_sockopt);
105226diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
105227index 11d85b3..7fcc420 100644
105228--- a/net/netfilter/nfnetlink_log.c
105229+++ b/net/netfilter/nfnetlink_log.c
105230@@ -83,7 +83,7 @@ static int nfnl_log_net_id __read_mostly;
105231 struct nfnl_log_net {
105232 spinlock_t instances_lock;
105233 struct hlist_head instance_table[INSTANCE_BUCKETS];
105234- atomic_t global_seq;
105235+ atomic_unchecked_t global_seq;
105236 };
105237
105238 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
105239@@ -563,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
105240 /* global sequence number */
105241 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
105242 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
105243- htonl(atomic_inc_return(&log->global_seq))))
105244+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
105245 goto nla_put_failure;
105246
105247 if (data_len) {
105248diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
105249new file mode 100644
105250index 0000000..c566332
105251--- /dev/null
105252+++ b/net/netfilter/xt_gradm.c
105253@@ -0,0 +1,51 @@
105254+/*
105255+ * gradm match for netfilter
105256